From a25dd33e13d37dd51bccac9fd8d7b8a6d9e9121e Mon Sep 17 00:00:00 2001
From: smonfas
Date: Mon, 3 Mar 2025 10:38:59 +0100
Subject: [PATCH 1/4] Modularize as functions

---
 asdf.py            |  10 +++
 calculate_psths.py | 165 ++++++++++++++++++++++++++++-----------------
 environment.yaml   | Bin 480 -> 424 bytes
 3 files changed, 112 insertions(+), 63 deletions(-)
 create mode 100644 asdf.py

diff --git a/asdf.py b/asdf.py
new file mode 100644
index 0000000..a8ef0ee
--- /dev/null
+++ b/asdf.py
@@ -0,0 +1,10 @@
+
+
+# %%
+
+a = 3
+b = 6
+c=7
+
+
+#%%
\ No newline at end of file
diff --git a/calculate_psths.py b/calculate_psths.py
index 5205bc6..c02efce 100644
--- a/calculate_psths.py
+++ b/calculate_psths.py
@@ -4,6 +4,10 @@
 url = 'https://uni-bonn.sciebo.de/s/oTfGigwXQ4g0raW'
 filename = 'data.nc'
 
+
+import xarray as xr
+
+#from netCDF4 import Dataset
 
 # %% Download Data
 # Exercise (Example): Make a download_data(url, filename) function:
@@ -26,24 +30,39 @@ def download_data(url, filename):
 
 # Exercise: Make a `load_data(filename)` function, returning the `dset` variable.
 
+def load_data(filename):
+    dset = xr.open_dataset(filename)
+    return dset
+
+
+dset = load_data(filename)
+print(type(dset))
+
+
 # %% Extract Experiment-Level Data
 # Exercise: Make an `extract_trials(filename)` function, returning the `trials` variable.
 
 import xarray as xr
 
-dset = xr.load_dataset(filename)
-trials = dset[['contrast_left', 'contrast_right', 'stim_onset']].to_dataframe()
-trials
+def extract_trials(dset):
+    trials = dset[['contrast_left', 'contrast_right', 'stim_onset']].to_dataframe()
+    return trials
+
+trials = extract_trials(dset)
 
 # %% Extract Spike-Time Data
 # Exercise: Make an `extract_spikes(filename)` function, returning the `spikes` variable.
 
 import xarray as xr
 
-dset = xr.load_dataset(filename)
-spikes = dset[['spike_trial', 'spike_cell', 'spike_time']].to_dataframe()
-spikes
+def extract_spikes(dset):
+    spikes = dset[['spike_trial', 'spike_cell', 'spike_time']].to_dataframe()
+    return spikes
+
+spikes = extract_spikes(dset)
 
 # %% Extract Cell-Level Data
@@ -51,51 +70,61 @@ def download_data(url, filename):
 # Exercise: Make an `extract_cells(filename)` function, returning the `cells` variable.
 
 import xarray as xr
 
-dset = xr.load_dataset(filename)
-cells = dset['brain_groups'].to_dataframe()
-cells
+def extract_cells(dset):
+    cells = dset['brain_groups'].to_dataframe()
+    return cells
+
+cells = extract_cells(dset)
 
 # %% Merge and Compress Extracted Data
 # Exercise: Make a `merge_data(trials, cells, spikes)` function, returning the `merged` variable.
 
 import pandas as pd
 
-merged = pd.merge(left=cells, left_index=True, right=spikes, right_on='spike_cell')
-merged = pd.merge(left=trials, right=merged, left_index=True, right_on='spike_trial').reset_index(drop=True)
-merged.columns
-merged = (merged
-    .rename(columns=dict(
-        brain_groups="brain_area",
-        spike_trial="trial_id",
-        spike_cell="cell_id",
-        spike_time="time"
-    ))
-    [[
-        'trial_id',
-        'contrast_left',
-        'contrast_right',
-        'stim_onset',
-        'cell_id',
-        'brain_area',
-        'time'
-    ]]
-    .astype(dict(
-        brain_area = 'category',
-    ))
-    #
-)
-merged.info()
+def merge_data(trials, cells, spikes):
+    merged = pd.merge(left=cells, left_index=True, right=spikes, right_on='spike_cell')
+    merged = pd.merge(left=trials, right=merged, left_index=True, right_on='spike_trial').reset_index(drop=True)
+    merged = (merged
+        .rename(columns=dict(
+            brain_groups="brain_area",
+            spike_trial="trial_id",
+            spike_cell="cell_id",
+            spike_time="time"
+        ))
+        [[
+            'trial_id',
+            'contrast_left',
+            'contrast_right',
+            'stim_onset',
+            'cell_id',
+            'brain_area',
+            'time'
+        ]]
+        .astype(dict(
+            brain_area='category',
+        ))
+    )
+    return merged
+
+merged = merge_data(trials, cells, spikes)
 
 # %% Calculate Time Bins for PSTH
 # Exercise: Make a `compute_time_bins(time, bin_interval)` function, returning the `time_bins` variable.
 
 import numpy as np
 
-time = merged['time']
-time = np.round(time, decimals=6)  # Round time to the nearest microsecond, to reduce floating point errors.
-bin_interval = 0.05
-time_bins = np.floor(time / bin_interval) * bin_interval  # Round down to the nearest time bin start
-time_bins
+def compute_time_bins(time, bin_interval):
+    time = np.round(time, decimals=6)  # Round time to the nearest microsecond, to reduce floating point errors.
+    time_bins = np.floor(time / bin_interval) * bin_interval  # Round down to the nearest time bin start
+    return time_bins
+
+time_bins = compute_time_bins(merged['time'], bin_interval=0.05)
 
 # %% filter out stimuli with contrast on the right.
 # No function needed here for this exercise.
@@ -106,33 +135,43 @@ def download_data(url, filename):
 
 # %% Make PSTHs
 # Exercise: Make a `compute_psths(data, time_bins)` function here, returning the `psth` variable.
-
-psth = (
-    filtered
-    .groupby([time_bins, 'trial_id', 'contrast_left', 'cell_id', 'brain_area'], observed=True, )
-    .size()
-    .rename('spike_count')
-    .reset_index()
-)
-psth
-psth = (
-    psth
-    .groupby(['time', 'contrast_left', 'brain_area'], observed=True)
-    .spike_count
-    .mean()
-    .rename('avg_spike_count')
-    .reset_index()
-)
-psth
-psth['avg_spike_rate'] = psth['avg_spike_count'] * bin_interval
-psth
-
+def compute_psths(data, time_bins):
+    psth = (
+        data
+        .groupby([time_bins, 'trial_id', 'contrast_left', 'cell_id', 'brain_area'], observed=True)
+        .size()
+        .rename('spike_count')
+        .reset_index()
+    )
+    psth = (
+        psth
+        .groupby(['time', 'contrast_left', 'brain_area'], observed=True)
+        .spike_count
+        .mean()
+        .rename('avg_spike_count')
+        .reset_index()
+    )
+    psth['avg_spike_rate'] = psth['avg_spike_count'] / bin_interval  # counts per bin -> spikes per second
+    return psth
+
+psth = compute_psths(filtered, time_bins)
 
 # %% Plot PSTHs
 # Make a `plot_psths(psth)` function here, returning the `g` variable.
 
 import seaborn as sns
 
-g = sns.FacetGrid(data=psth, col='brain_area', col_wrap=2)
-g.map_dataframe(sns.lineplot, x='time', y='avg_spike_count', hue='contrast_left')
-g.add_legend()
-g.savefig('PSTHs.png')
+def plot_psths(psth):
+    g = sns.FacetGrid(data=psth, col='brain_area', col_wrap=2)
+    g.map_dataframe(sns.lineplot, x='time', y='avg_spike_count', hue='contrast_left')
+    g.add_legend()
+    g.savefig('PSTHs.png')
+    return g
+
+plot_psths(psth)
+
+# %%
diff --git a/environment.yaml b/environment.yaml
index fdae823a26faad838d0c15069cb25086282a93d0..5020a6d7e7083fb7268da6d8f873efb1c218c4af 100644
GIT binary patch
[binary delta omitted: environment.yaml, 480 -> 424 bytes]
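
Two notes on the patch above. First, the cell that filters out stimuli with
contrast on the right is left untouched, so its lines never appear in the
diff; a minimal sketch of how the `filtered` variable passed to
`compute_psths` may be built (assuming the filter simply keeps trials with
zero right-side contrast) would be:

    # Hypothetical reconstruction; the actual, unchanged line is not shown in the diff.
    filtered = merged[merged['contrast_right'] == 0]

Second, converting binned counts to rates divides by the bin width: an
average of n spikes per 0.05 s bin corresponds to n / 0.05 spikes per second,
which is why `avg_spike_rate` is computed with a division.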

From 992a6913c9f028f4b007fc734e436eb18b3c555f Mon Sep 17 00:00:00 2001
From: smonfas
Date: Sun, 9 Mar 2025 16:30:57 +0100
Subject: [PATCH 2/4] Move pipeline functions into utils.py

---
 __pycache__/utils.cpython-312.pyc | Bin 0 -> 3910 bytes
 calculate_psths.py                | 110 +++++-------------------------
 utils.py                          | 100 +++++++++++++++++++++++++++
 3 files changed, 117 insertions(+), 93 deletions(-)
 create mode 100644 __pycache__/utils.cpython-312.pyc
 create mode 100644 utils.py

diff --git a/__pycache__/utils.cpython-312.pyc b/__pycache__/utils.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..53c3bd962b5719102d5a1d14da61af5e7561dd0b
GIT binary patch
[binary literal omitted: compiled bytecode, 3910 bytes]

diff --git a/calculate_psths.py b/calculate_psths.py
index c02efce..01bf5b1 100644
--- a/calculate_psths.py
+++ b/calculate_psths.py
@@ -1,41 +1,31 @@
 # %% Script Parameters
 
+import utils
+
 url = 'https://uni-bonn.sciebo.de/s/oTfGigwXQ4g0raW'
 filename = 'data.nc'
 
 import xarray as xr
 
 #from netCDF4 import Dataset
 
 # %% Download Data
 # Exercise (Example): Make a download_data(url, filename) function:
-def download_data(url, filename):
-    from pathlib import Path
-    import owncloud
-
-    client = owncloud.Client.from_public_link(url)
-    client.get_file('/', filename)
-
-    if Path(filename).exists():
-        print('Download Succeeded.')
-
-    return None
-
-download_data(url=url, filename=filename)
+utils.download_data(url=url, filename=filename)
 
 # %% Load Data
 # Exercise: Make a `load_data(filename)` function, returning the `dset` variable.
 
-def load_data(filename):
-    dset = xr.open_dataset(filename)
-    return dset
-
-
-dset = load_data(filename)
+dset = utils.load_data(filename)
 print(type(dset))
 
@@ -47,22 +37,17 @@ def load_data(filename):
 # %% Extract Experiment-Level Data
 # Exercise: Make an `extract_trials(filename)` function, returning the `trials` variable.
 
 import xarray as xr
 
-def extract_trials(dset):
-    trials = dset[['contrast_left', 'contrast_right', 'stim_onset']].to_dataframe()
-    return trials
-
-trials = extract_trials(dset)
+trials = utils.extract_trials(dset)
 
 # %% Extract Spike-Time Data
 # Exercise: Make an `extract_spikes(filename)` function, returning the `spikes` variable.
 
 import xarray as xr
 
-def extract_spikes(dset):
-    spikes = dset[['spike_trial', 'spike_cell', 'spike_time']].to_dataframe()
-    return spikes
-
-spikes = extract_spikes(dset)
+spikes = utils.extract_spikes(dset)
 
 # %% Extract Cell-Level Data
@@ -70,11 +55,8 @@ def extract_spikes(dset):
 # Exercise: Make an `extract_cells(filename)` function, returning the `cells` variable.
 
 import xarray as xr
 
-def extract_cells(dset):
-    cells = dset['brain_groups'].to_dataframe()
-    return cells
-
-cells = extract_cells(dset)
+cells = utils.extract_cells(dset)
 
 # %% Merge and Compress Extracted Data
 # Exercise: Make a `merge_data(trials, cells, spikes)` function, returning the `merged` variable.
@@ -83,48 +65,17 @@ def extract_cells(dset):
 
 import pandas as pd
 
-def merge_data(trials, cells, spikes):
-    merged = pd.merge(left=cells, left_index=True, right=spikes, right_on='spike_cell')
-    merged = pd.merge(left=trials, right=merged, left_index=True, right_on='spike_trial').reset_index(drop=True)
-    merged = (merged
-        .rename(columns=dict(
-            brain_groups="brain_area",
-            spike_trial="trial_id",
-            spike_cell="cell_id",
-            spike_time="time"
-        ))
-        [[
-            'trial_id',
-            'contrast_left',
-            'contrast_right',
-            'stim_onset',
-            'cell_id',
-            'brain_area',
-            'time'
-        ]]
-        .astype(dict(
-            brain_area='category',
-        ))
-    )
-    return merged
-
-merged = merge_data(trials, cells, spikes)
+merged = utils.merge_data(trials, cells, spikes)
 
 # %% Calculate Time Bins for PSTH
 # Exercise: Make a `compute_time_bins(time, bin_interval)` function, returning the `time_bins` variable.
 
 import numpy as np
 
-def compute_time_bins(time, bin_interval):
-    time = np.round(time, decimals=6)  # Round time to the nearest microsecond, to reduce floating point errors.
-    time_bins = np.floor(time / bin_interval) * bin_interval  # Round down to the nearest time bin start
-    return time_bins
-
-time_bins = compute_time_bins(merged['time'], bin_interval=0.05)
+bin_interval = 0.05
+time_bins = utils.compute_time_bins(merged['time'], bin_interval=0.05)
 
 # %% filter out stimuli with contrast on the right.
 # No function needed here for this exercise.
@@ -135,28 +86,8 @@ def compute_time_bins(time, bin_interval):
 
 # %% Make PSTHs
 # Exercise: Make a `compute_psths(data, time_bins)` function here, returning the `psth` variable.
-def compute_psths(data, time_bins):
-    psth = (
-        data
-        .groupby([time_bins, 'trial_id', 'contrast_left', 'cell_id', 'brain_area'], observed=True)
-        .size()
-        .rename('spike_count')
-        .reset_index()
-    )
-    psth = (
-        psth
-        .groupby(['time', 'contrast_left', 'brain_area'], observed=True)
-        .spike_count
-        .mean()
-        .rename('avg_spike_count')
-        .reset_index()
-    )
-    psth['avg_spike_rate'] = psth['avg_spike_count'] / bin_interval  # counts per bin -> spikes per second
-    return psth
-
-psth = compute_psths(filtered, time_bins)
+#data=filtered
+psth = utils.compute_psths(filtered, time_bins)
 
 # %% Plot PSTHs
 # Make a `plot_psths(psth)` function here, returning the `g` variable.
 
 import seaborn as sns
 
-def plot_psths(psth):
-    g = sns.FacetGrid(data=psth, col='brain_area', col_wrap=2)
-    g.map_dataframe(sns.lineplot, x='time', y='avg_spike_count', hue='contrast_left')
-    g.add_legend()
-    g.savefig('PSTHs.png')
-    return g
-
-plot_psths(psth)
+utils.plot_psths(psth)
 
 # %%
diff --git a/utils.py b/utils.py
new file mode 100644
index 0000000..5dd6c5b
--- /dev/null
+++ b/utils.py
@@ -0,0 +1,100 @@
+import xarray as xr
+import numpy as np
+import pandas as pd
+
+def download_data(url, filename):
+    from pathlib import Path
+    import owncloud
+
+    client = owncloud.Client.from_public_link(url)
+    client.get_file('/', filename)
+
+    if Path(filename).exists():
+        print('Download Succeeded.')
+
+    return None
+
+def load_data(filename):
+    dset = xr.open_dataset(filename)
+    return dset
+
+
+def extract_trials(dset):
+    trials = dset[['contrast_left', 'contrast_right', 'stim_onset']].to_dataframe()
+    return trials
+
+def extract_spikes(dset):
+    spikes = dset[['spike_trial', 'spike_cell', 'spike_time']].to_dataframe()
+    return spikes
+
+def extract_cells(dset):
+    cells = dset['brain_groups'].to_dataframe()
+    return cells
+
+
+def merge_data(trials, cells, spikes):
+    merged = pd.merge(left=cells, left_index=True, right=spikes, right_on='spike_cell')
+    merged = pd.merge(left=trials, right=merged, left_index=True, right_on='spike_trial').reset_index(drop=True)
+    merged = (merged
+        .rename(columns=dict(
+            brain_groups="brain_area",
+            spike_trial="trial_id",
+            spike_cell="cell_id",
+            spike_time="time"
+        ))
+        [[
+            'trial_id',
+            'contrast_left',
+            'contrast_right',
+            'stim_onset',
+            'cell_id',
+            'brain_area',
+            'time'
+        ]]
+        .astype(dict(
+            brain_area='category',
+        ))
+    )
+    return merged
+
+
+def compute_time_bins(time, bin_interval):
+    time = np.round(time, decimals=6)  # Round time to the nearest microsecond, to reduce floating point errors.
+    time_bins = np.floor(time / bin_interval) * bin_interval  # Round down to the nearest time bin start
+    return time_bins
+
+
+def compute_psths(data, time_bins):
+    psth = (
+        data
+        .groupby([time_bins, 'trial_id', 'contrast_left', 'cell_id', 'brain_area'], observed=True)
+        .size()
+        .rename('spike_count')
+        .reset_index()
+    )
+    psth = (
+        psth
+        .groupby(['time', 'contrast_left', 'brain_area'], observed=True)
+        .spike_count
+        .mean()
+        .rename('avg_spike_count')
+        .reset_index()
+    )
+    psth['avg_spike_rate'] = psth['avg_spike_count'] / bin_interval  # counts per bin -> spikes per second
+    return psth
+
+
+def plot_psths(psth):
+    g = sns.FacetGrid(data=psth, col='brain_area', col_wrap=2)
+    g.map_dataframe(sns.lineplot, x='time', y='avg_spike_count', hue='contrast_left')
+    g.add_legend()
+    g.savefig('PSTHs.png')
+    return g
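
Moving the functions into `utils.py` leaves two loose ends that the next
patch resolves: `plot_psths` uses seaborn (`sns`) without importing it inside
`utils.py`, and `compute_psths` still reads `bin_interval` as a bare name,
which now resolves in the utils module's own namespace rather than in the
calling script's, so the `bin_interval = 0.05` global added to
`calculate_psths.py` cannot reach it. A minimal sketch of how the extracted
module is meant to be driven, assuming `utils.py` sits next to the script:

    # Hypothetical driver; mirrors what calculate_psths.py now does cell by cell.
    import utils

    utils.download_data(url='https://uni-bonn.sciebo.de/s/oTfGigwXQ4g0raW', filename='data.nc')
    dset = utils.load_data('data.nc')
    merged = utils.merge_data(utils.extract_trials(dset),
                              utils.extract_cells(dset),
                              utils.extract_spikes(dset))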

From b1f775febfca7f52d7993a6523531ba5b590caab Mon Sep 17 00:00:00 2001
From: smonfas
Date: Sun, 9 Mar 2025 18:53:04 +0100
Subject: [PATCH 3/4] Consolidate imports, pass bin_interval explicitly, and
 delete asdf.py

---
 __pycache__/utils.cpython-310.pyc | Bin 0 -> 2575 bytes
 __pycache__/utils.cpython-312.pyc | Bin 3910 -> 3939 bytes
 asdf.py                           |  10 ----------
 calculate_psths.py                |  26 +++++++++++++++-----------
 utils.py                          |   5 +++--
 5 files changed, 18 insertions(+), 23 deletions(-)
 create mode 100644 __pycache__/utils.cpython-310.pyc
 delete mode 100644 asdf.py

diff --git a/__pycache__/utils.cpython-310.pyc b/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ca480019b4ab2748a94af1f5f5a8beacd9ea2af9
GIT binary patch
[binary literal omitted: compiled bytecode, 2575 bytes]

diff --git a/__pycache__/utils.cpython-312.pyc b/__pycache__/utils.cpython-312.pyc
index 53c3bd962b5719102d5a1d14da61af5e7561dd0b..92f7f8fc35ef8c38ef99a87deefc0d849899fb5d 100644
GIT binary patch
[binary delta omitted: compiled bytecode, 3910 -> 3939 bytes]

diff --git a/asdf.py b/asdf.py
deleted file mode 100644
index a8ef0ee..0000000
--- a/asdf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-
-
-# %%
-
-a = 3
-b = 6
-c=7
-
-
-#%%
\ No newline at end of file
diff --git a/calculate_psths.py b/calculate_psths.py
index 01bf5b1..899fcbd 100644
--- a/calculate_psths.py
+++ b/calculate_psths.py
@@ -1,14 +1,17 @@
 # %% Script Parameters
 
 import utils
+import xarray as xr
+import pandas as pd
+import numpy as np
+import seaborn as sns
 
 url = 'https://uni-bonn.sciebo.de/s/oTfGigwXQ4g0raW'
 filename = 'data.nc'
 
+from utils import compute_psths
 
-import xarray as xr
 
 #from netCDF4 import Dataset
 
 # %% Download Data
@@ -35,7 +38,7 @@
 # %% Extract Experiment-Level Data
 # Exercise: Make an `extract_trials(filename)` function, returning the `trials` variable.
 
-import xarray as xr
+
 
 trials = utils.extract_trials(dset)
@@ -44,7 +47,7 @@
 # %% Extract Spike-Time Data
 # Exercise: Make an `extract_spikes(filename)` function, returning the `spikes` variable.
 
-import xarray as xr
+
 
 spikes = utils.extract_spikes(dset)
@@ -53,7 +56,6 @@
 # %% Extract Cell-Level Data
 # Exercise: Make an `extract_cells(filename)` function, returning the `cells` variable.
 
-import xarray as xr
 
 cells = utils.extract_cells(dset)
@@ -61,7 +63,7 @@
 # %% Merge and Compress Extracted Data
 # Exercise: Make a `merge_data(trials, cells, spikes)` function, returning the `merged` variable.
 
-import pandas as pd
+
 
 merged = utils.merge_data(trials, cells, spikes)
@@ -73,8 +75,8 @@
 # %% Calculate Time Bins for PSTH
 # Exercise: Make a `compute_time_bins(time, bin_interval)` function, returning the `time_bins` variable.
 
-import numpy as np
-bin_interval = 0.05
+
+
 time_bins = utils.compute_time_bins(merged['time'], bin_interval=0.05)
 
 # %% filter out stimuli with contrast on the right.
@@ -87,13 +89,15 @@
 # %% Make PSTHs
 # Exercise: Make a `compute_psths(data, time_bins)` function here, returning the `psth` variable.
 #data=filtered
-psth = utils.compute_psths(filtered, time_bins)
+import inspect
+print(inspect.signature(compute_psths))
+psth = utils.compute_psths(filtered, time_bins, 0.05)
 
 # %% Plot PSTHs
 # Make a `plot_psths(psth)` function here, returning the `g` variable.
 
-import seaborn as sns
+
 
 utils.plot_psths(psth)
diff --git a/utils.py b/utils.py
index 5dd6c5b..e6e36d7 100644
--- a/utils.py
+++ b/utils.py
@@ -1,6 +1,7 @@
 import xarray as xr
 import numpy as np
 import pandas as pd
+import seaborn as sns
 
 def download_data(url, filename):
     from pathlib import Path
@@ -60,7 +61,7 @@ def merge_data(trials, cells, spikes):
     return merged
 
-def compute_time_bins(time, bin_interval):
+def compute_time_bins(time, bin_interval=0.05):
     time = np.round(time, decimals=6)  # Round time to the nearest microsecond, to reduce floating point errors.
@@ -68,7 +69,7 @@ def compute_time_bins(time, bin_interval):
     time_bins = np.floor(time / bin_interval) * bin_interval  # Round down to the nearest time bin start
     return time_bins
 
 
-def compute_psths(data, time_bins):
+def compute_psths(data, time_bins, bin_interval):
     psth = (
         data
         .groupby([time_bins, 'trial_id', 'contrast_left', 'cell_id', 'brain_area'], observed=True)
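
With `bin_interval` now an explicit parameter of both functions, the binning
step and the count-to-rate conversion must agree on the same value; a small
sketch (hypothetical usage, not part of the patch) keeps them in sync by
defining the constant once:

    BIN_INTERVAL = 0.05  # seconds; shared by binning and rate conversion
    time_bins = utils.compute_time_bins(merged['time'], bin_interval=BIN_INTERVAL)
    psth = utils.compute_psths(filtered, time_bins, BIN_INTERVAL)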

From 4aeb1dec6ad3f3f71db751aa935b367e056eb778 Mon Sep 17 00:00:00 2001
From: smonfas
Date: Sun, 9 Mar 2025 19:50:33 +0100
Subject: [PATCH 4/4] Restructure as an installable package: pyproject.toml,
 src layout, and scripts folder

---
 exercise1.md => docs/exercise1.md                |   0
 exercise2.md => docs/exercise2.md                |   0
 environment.yaml => environment.yml              | Bin
 pyproject.toml                                   |   7 +++++++
 calculate_psths.py => scripts/calculate_psths.py |   5 ++---
 src/psth/__init__.py                             |   1 +
 src/psth/__pycache__/__init__.cpython-312.pyc    | Bin 0 -> 204 bytes
 src/psth/__pycache__/utils.cpython-312.pyc       | Bin 0 -> 3948 bytes
 utils.py => src/psth/utils.py                    |   0
 9 files changed, 10 insertions(+), 3 deletions(-)
 rename exercise1.md => docs/exercise1.md (100%)
 rename exercise2.md => docs/exercise2.md (100%)
 rename environment.yaml => environment.yml (100%)
 create mode 100644 pyproject.toml
 rename calculate_psths.py => scripts/calculate_psths.py (95%)
 create mode 100644 src/psth/__init__.py
 create mode 100644 src/psth/__pycache__/__init__.cpython-312.pyc
 create mode 100644 src/psth/__pycache__/utils.cpython-312.pyc
 rename utils.py => src/psth/utils.py (100%)

diff --git a/exercise1.md b/docs/exercise1.md
similarity index 100%
rename from exercise1.md
rename to docs/exercise1.md
diff --git a/exercise2.md b/docs/exercise2.md
similarity index 100%
rename from exercise2.md
rename to docs/exercise2.md
diff --git a/environment.yaml b/environment.yml
similarity index 100%
rename from environment.yaml
rename to environment.yml
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..540db27
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,7 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "psth"
+version = "0.0.1"
\ No newline at end of file
diff --git a/calculate_psths.py b/scripts/calculate_psths.py
similarity index 95%
rename from calculate_psths.py
rename to scripts/calculate_psths.py
index 899fcbd..3e49b1b 100644
--- a/calculate_psths.py
+++ b/scripts/calculate_psths.py
@@ -1,14 +1,13 @@
 # %% Script Parameters
 
-import utils
 import xarray as xr
 import pandas as pd
 import numpy as np
 import seaborn as sns
 
 url = 'https://uni-bonn.sciebo.de/s/oTfGigwXQ4g0raW'
 filename = 'data.nc'
 
-from utils import compute_psths
+from psth import utils
 
 
@@ -90,7 +89,7 @@
 # Exercise: Make a `compute_psths(data, time_bins)` function here, returning the `psth` variable.
 #data=filtered
 import inspect
-print(inspect.signature(compute_psths))
+
 psth = utils.compute_psths(filtered, time_bins, 0.05)
 
 # %% Plot PSTHs
 # Make a `plot_psths(psth)` function here, returning the `g` variable.
diff --git a/src/psth/__init__.py b/src/psth/__init__.py
new file mode 100644
index 0000000..9f9161b
--- /dev/null
+++ b/src/psth/__init__.py
@@ -0,0 +1 @@
+from . import utils
\ No newline at end of file
diff --git a/src/psth/__pycache__/__init__.cpython-312.pyc b/src/psth/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1fce065d73cab4a4200e2dbd327136de5290b9c8
GIT binary patch
[binary literal omitted: compiled bytecode, 204 bytes]

diff --git a/src/psth/__pycache__/utils.cpython-312.pyc b/src/psth/__pycache__/utils.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2c7e7adb31665fe4604341176eb28b0b15bbac19
GIT binary patch
[binary literal omitted: compiled bytecode, 3948 bytes]

diff --git a/utils.py b/src/psth/utils.py
similarity index 100%
rename from utils.py
rename to src/psth/utils.py
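
With the src layout and `pyproject.toml` in place, the package can be
installed in editable mode so that `from psth import utils` resolves from
anywhere rather than only from the repository root; a typical invocation,
assuming a reasonably recent pip and the hatchling backend declared above:

    pip install -e .

Note that `pyproject.toml` declares no dependencies yet: xarray, pandas,
numpy, seaborn, and the ownCloud client (the pyocclient package) are
currently captured only in `environment.yml` and would otherwise belong in a
`dependencies` list under the `[project]` table. The committed `__pycache__`
directories could likewise be kept out of the history by adding
`__pycache__/` to a `.gitignore`.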