forked from GautamSridhar/BASS
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtemp.py
More file actions
93 lines (88 loc) · 5.87 KB
/
temp.py
File metadata and controls
93 lines (88 loc) · 5.87 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
import numpy as np
import pickle
import matplotlib.pyplot as plt
import scipy.io
# synth_covars = np.load(".\\GMM\\synth_covars.npy")
# synth_means = np.load(".\\GMM\\synth_means.npy")
# synth_weights = np.load(".\\GMM\\synth_weights.npy")
# synth_dataset_condition0 = np.load(".\\Data\\synth_dataset_condition0.npy")
# synth_lengths_condition0 = np.load(".\\Data\\synth_lengths_condition0.npy")
# synth_condition_0_seg_bouttypes = np.load(".\\Results\\synth\\synth_condition_0_seg_bouttypes.npy")
# synth_condition_0_seg_lengths = np.load(".\\Results\\synth\\synth_condition_0_seg_lengths.npy")
# synth_condition_0_seg_words = np.load(".\\Results\\synth\\synth_condition_0_seg_words.npy")
#
# toy_condition_0_seg_bouttypes = np.load(".\\Results\\toy\\toy_condition_0_seg_bouttypes.npy")
# toy_condition_0_seg_lengths = np.load(".\\Results\\toy\\toy_condition_0_seg_lengths.npy")
# toy_condition_0_seg_words = np.load(".\\Results\\toy\\toy_condition_0_seg_words.npy")
#
# fw = open(".\\Results\\synth\\synth_condition_0\\BASSresults",'rb')
# data = pickle.load(fw)
#
# synth_condition_0_seg_bouttypes_recon = []
# word_start = 0
# for word_start_next in synth_condition_0_seg_lengths:
# words_end = word_start_next-1
# word_ID_present = synth_condition_0_seg_words[word_start]
# for idx_char in range(word_start,word_start_next):
# if synth_condition_0_seg_words[idx_char] != word_ID_present:
# print("Inconsistent word! position:",idx_char)
# print("\n")
# word_present = data[2][word_ID_present]
# if len(word_present) != word_start_next-word_start:
# print("Inconsistent word length! position:", word_start,"length difference:",len(word_present)-(word_start_next-word_start))
# print("\n")
# synth_condition_0_seg_bouttypes_recon = np.concatenate((synth_condition_0_seg_bouttypes_recon, word_present))
# word_start = word_start_next
# print("\n")
# if np.array_equal(synth_condition_0_seg_bouttypes_recon, synth_condition_0_seg_bouttypes):
# print("seg_bouttypes reconstruction right!")
# print("\n")
# else:
# print("seg_bouttypes reconstruction wrong!")
# print("\n")
#
# unique_elements_in_synth_condition_0_seg_words = np.unique(synth_condition_0_seg_words)
# plt.hist(synth_condition_0_seg_words, bins=31, color='skyblue', edgecolor='black')
# plt.show()
# # Prepare the data. Transform mat files to npy files.
# mat = scipy.io.loadmat('D:\\Nutstore\\我的坚果云\\临时\\2023_11_28-16_56_8\\behavior\\bouts_softmax_output_withHeadingVelocity_extent4.mat')
# data = np.transpose(mat['softmaxOutput'])
# np.save('.\\Data\\20231128_dataset_condition0.npy', data.astype(np.float64))
# np.save('.\\Data\\20231128_lengths_condition0.npy', np.array([data.shape[0]], dtype=np.int64))
# ldg_covars = mat['covs']
# ldg_means = mat['means']
# ldg_weights = mat['weights']
# np.save('.\\GMM\\20231128_covars.npy', ldg_covars.astype(np.float64))
# np.save('.\\GMM\\20231128_means.npy', ldg_means.astype(np.float64))
# np.save('.\\GMM\\20231128_weights.npy', ldg_weights.astype(np.float64))
# ldg_dataset_condition0 = np.load(".\\Data\\20231128_dataset_condition0.npy")
# ldg_lengths_condition0 = np.load(".\\Data\\20231128_lengths_condition0.npy")
# ldg_covars = np.load(".\\GMM\\20231128_covars.npy")
# ldg_means = np.load(".\\GMM\\20231128_means.npy")
# ldg_weights = np.load(".\\GMM\\20231128_weights.npy")
# mat = scipy.io.loadmat('D:\\文件\\学习工作\\WenLab\\behavior_analysis\\working\\images\\bouts_merged_softmax_output_withHeadingVelocity_extent4.mat')
# data = np.transpose(mat['softmaxOutput_merged'])
# lengths = mat['lengths']
# np.save('D:\\github_repositories\\BASS\\Data\\lidaguang_merged_dataset_condition0.npy', data.astype(np.float64))
# np.save('D:\\github_repositories\\BASS\\Data\\lidaguang_merged_lengths_condition0.npy', np.reshape(lengths.astype(np.int64),lengths.shape[0]))
# ldg_covars = mat['covs']
# ldg_means = mat['means']
# ldg_weights = mat['weights']
# np.save('D:\\github_repositories\\BASS\\GMM\\lidaguang_merged_covars.npy', ldg_covars.astype(np.float64))
# np.save('D:\\github_repositories\\BASS\\GMM\\lidaguang_merged_means.npy', ldg_means.astype(np.float64))
# np.save('D:\\github_repositories\\BASS\\GMM\\lidaguang_merged_weights.npy', ldg_weights.astype(np.float64))
# ldg_dataset_condition0 = np.load("D:\\github_repositories\\BASS\\Data\\lidaguang_merged_dataset_condition0.npy")
# ldg_lengths_condition0 = np.load("D:\\github_repositories\\BASS\\Data\\lidaguang_merged_lengths_condition0.npy")
# ldg_covars = np.load("D:\\github_repositories\\BASS\\GMM\\lidaguang_merged_covars.npy")
# ldg_means = np.load("D:\\github_repositories\\BASS\\GMM\\lidaguang_merged_means.npy")
# ldg_weights = np.load("D:\\github_repositories\\BASS\\GMM\\lidaguang_merged_weights.npy")
# # Transform npy result files to mat files.
# Transform BASS segmentation result .npy arrays and the learned word
# dictionary into .mat files for downstream MATLAB analysis.
# Hoist the repeated results directory into one constant (f-string
# concatenation keeps the resulting path strings byte-identical).
RESULTS_DIR = "D:\\github_repositories\\BASS\\Results\\lidaguang_merged_all_plus_kexin"

# Per-bout segmentation outputs produced by a previous BASS run.
ldg_condition_0_seg_bouttypes = np.load(f"{RESULTS_DIR}\\lidaguang_merged_condition_0_seg_bouttypes.npy")
ldg_condition_0_seg_lengths = np.load(f"{RESULTS_DIR}\\lidaguang_merged_condition_0_seg_lengths.npy")
ldg_condition_0_seg_words = np.load(f"{RESULTS_DIR}\\lidaguang_merged_condition_0_seg_words.npy")

# BUG FIX: the file handle was opened but never closed; a context manager
# releases it even if pickle.load raises.
# NOTE(review): pickle.load can execute arbitrary code from an untrusted
# file — this assumes BASSresults was produced locally by BASS.
with open(f"{RESULTS_DIR}\\lidaguang_merged_condition_0\\BASSresults", 'rb') as fw:
    data = pickle.load(fw)

# Bundle the three segmentation arrays into a single .mat file.
scipy.io.savemat(
    f"{RESULTS_DIR}\\lidaguang_merged_results.mat",
    {
        'seg_bouttypes': ldg_condition_0_seg_bouttypes,
        'seg_lengths': ldg_condition_0_seg_lengths,
        'seg_words': ldg_condition_0_seg_words,
    },
)

# data[2] holds the motif ("word") dictionary learned by BASS — presumably a
# sequence of per-word arrays (TODO confirm against the BASS pickle format).
# Zero-pad the index so MATLAB field names sort in numeric order.
dictionary = {f'word_{i:03d}': arr for i, arr in enumerate(data[2])}
scipy.io.savemat(f"{RESULTS_DIR}\\lidaguang_merged_dictionary.mat", dictionary)

# NOTE(review): loaded but never used below — presumably kept for interactive
# inspection in a REPL/debugger; delete if not needed.
temp_var4 = np.load(".\\Data\\synth_dataset_condition0.npy")