diff --git a/autoqbaseline.sh b/autoqbaseline.sh new file mode 100755 index 0000000..d6f2246 --- /dev/null +++ b/autoqbaseline.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +##mdir="baseline_full/" +##for k in {0..3} +##do +## for j in 0000 #{0000..0020} +## do +## output=$mdir"sim0$k/output$j/" +## mkdir -p $output +## cp config.yml $output +## cp config.yml $mdir +## +## datadir="/mnt/zfsusers/susanna/BBHybrid/data/sim0$k/" +## coadd=$datadir"cls_coadd_baseline_$j.fits" +## noise=$datadir"cls_noise_baseline_$j.fits" +## fid=$datadir"cls_fid_baseline_$j.fits" +## chain=$output"param_chains.npz" +## configcopy=$output"config_copy.yml" +## config=$mdir"config.yml" +## +## addqueue -q berg -c "2 hours" -m 1 -s -n 1x8 /usr/bin/python3 -m bbpower BBCompSep --cells_coadded=$coadd --cells_noise=$noise --cells_fiducial=$fid --param_chains=$chain --config_copy=$configcopy --config=$config +## done +##done +ellmin=2 #30 #2 #10 +#simt=d1s1_maskpysm_Bonly +#simt=d1s1_maskpysm_Bonly_r0.01 +#simt=d1s1_maskpysm_Bonly_r0.01_whitenoiONLY +simt=d1s1_maskpysm_Bonly_whitenoiONLY +fsky=0.8 #0.6 #0.8 +#mdir="/mnt/extraspace/susanna/BBHybrid/baseline_masked_outs/mask_pysm/realistic/d1s1/fsky${fsky}_ellmin${ellmin}/" +#mdir="/mnt/extraspace/susanna/BBHybrid/baseline_masked_outs/mask_pysm/realistic/d1s1/fsky${fsky}_ellmin${ellmin}_r0.01/" +#mdir="/mnt/extraspace/susanna/BBHybrid/baseline_masked_outs/mask_pysm/realistic/d1s1/fsky${fsky}_ellmin${ellmin}_r0.01_whitenoiONLY/" +mdir="/mnt/extraspace/susanna/BBHybrid/baseline_masked_outs/mask_pysm/realistic/d1s1/fsky${fsky}_ellmin${ellmin}_whitenoiONLY_HandLlik/" +cf=$mdir"config.yml" +for j in {0000..0020} #0000 +do + output=$mdir"output$j/" + mkdir -p $output + + #cp test_BBSims_Hybrid/config_baseline_d1s1.yaml ${mdir}config.yml + cp test_BBSims_Hybrid/config_baseline_fsky.yaml ${mdir}config.yml + + cp $cf $output + config=$output"config.yml" + sed -i "s/KK/${ellmin}/" $config + + echo $config + + 
datadir="/mnt/zfsusers/susanna/BBHybrid/data/sim${simt}/fsky${fsky}/" + coadd=$datadir"cls_coadd_baseline_masked_$j.fits" + noise=$datadir"cls_noise_baseline_masked_$j.fits" + fid=$datadir"cls_fid_baseline_masked_$j.fits" + chain=$output"param_chains.npz" + configcopy=$output"config_copy.yml" + + addqueue -s -q berg -c 'baseline_fsky0.8_ellm2 2.5hr' -m 4 -s -n 1x12 /usr/bin/python3 -m bbpower BBCompSep --cells_coadded=$coadd --cells_coadded_cov=$coadd --cells_noise=$noise --cells_fiducial=$fid --param_chains=$chain --config_copy=$configcopy --config=$config + +done + + diff --git a/autoqresiduals.sh b/autoqresiduals.sh new file mode 100755 index 0000000..a2dda2a --- /dev/null +++ b/autoqresiduals.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +ellmin=2 #2 #10 #30 +#simt=d1s1_maskpysm_Bonly +#simt=d1s1_maskpysm_Bonly_r0.01 +simt=d1s1_maskpysm_Bonly_r0.01_whitenoiONLY +#simt=d1s1_maskpysm_Bonly_whitenoiONLY + +fsky=0.8 #0.3 0.6 #0.8 +zz=masked_ #full_ + +#mdir="/mnt/extraspace/susanna/BBHybrid/hybrid_masked_outs/mask_planck/realistic/d1s1/fsky${fsky}/" +#mdir="/mnt/extraspace/susanna/BBHybrid/hybrid_masked_outs/mask_pysm/realistic/d1s1/fsky${fsky}_epsilonfixed/" +#mdir="/mnt/extraspace/susanna/BBHybrid/hybrid_masked_outs/mask_pysm/realistic/d1s1/fsky${fsky}_ellmin${ellmin}/" +#mdir="/mnt/extraspace/susanna/BBHybrid/hybrid_masked_outs/mask_pysm/realistic/d1s1/fsky${fsky}_ellmin${ellmin}_r0.01/" +#mdir="/mnt/extraspace/susanna/BBHybrid/hybrid_masked_outs/mask_pysm/realistic/d1s1/fsky${fsky}_ellmin${ellmin}_r0.01_whitenoiONLY/" +#mdir="/mnt/extraspace/susanna/BBHybrid/hybrid_masked_outs/mask_pysm/realistic/d1s1/fsky${fsky}_ellmin${ellmin}_whitenoiONLY/" +#mdir="/mnt/extraspace/susanna/BBHybrid/hybrid_masked_outs/mask_pysm/realistic/d1s1/fsky${fsky}_ellmin${ellmin}_whitenoiONLY_HandLlik/" +mdir="/mnt/extraspace/susanna/BBHybrid/hybrid_masked_outs/mask_pysm/realistic/d1s1/fsky${fsky}_ellmin${ellmin}_r0.01_whitenoiONLY_HandLlik/" +cf=$mdir"config.yml" + +for j in {0000..0020} #0000 
+do + output=$mdir"output$j/" + echo $output + mkdir -p $output + + #cp test_BBSims_Hybrid/config_hybrid.yaml ${mdir}config.yml + #cp test_BBSims_Hybrid/config_hybrid_fixedepsilon.yaml ${mdir}config.yml + cp test_BBSims_Hybrid/config_hybrid_ellmin_fsky.yaml ${mdir}config.yml + + cp $cf $output + config=$output"config.yml" + sed -i "s/simJ/sim${simt}/" $config + sed -i "s/fskyX/fsky${fsky}/" $config + sed -i "s/paramsY/params_$j/" $config + sed -i "s/Zhybrid/${zz}hybrid/" $config + + sed -i "s/KK/${ellmin}/" $config + + datadir="/mnt/zfsusers/susanna/BBHybrid/data/sim${simt}/fsky${fsky}/" + #datadir="/mnt/zfsusers/susanna/BBHybrid/data/simd1s1_maskpysm_Bonly/fsky${fsky}/" + coadd=$datadir"cls_coadd_residual_masked_$j.fits" + noise=$datadir"cls_noise_residual_masked_$j.fits" + fid=$datadir"cls_fid_residual_masked_$j.fits" + chain=$output"param_chains.npz" + configcopy=$output"config_copy.yml" + + addqueue -s -q cmb -c 'res_fsky0.8_ellm10_h&l 2.5 hour' -m 4 -s -n 1x12 /usr/bin/python3 -m bbpower BBCompSep --cells_coadded=$coadd --cells_coadded_cov=$coadd --cells_noise=$noise --cells_fiducial=$fid --param_chains=$chain --config_copy=$configcopy --config=$config +done + diff --git a/bbpower/__init__.py b/bbpower/__init__.py index 2635b16..4ae7ebc 100644 --- a/bbpower/__init__.py +++ b/bbpower/__init__.py @@ -1,8 +1,5 @@ from bbpipe import PipelineStage -from .mask_preproc import BBMaskPreproc -from .maps_preproc import BBMapsPreproc from .power_specter import BBPowerSpecter from .power_summarizer import BBPowerSummarizer -from .covfefe import BBCovFeFe from .compsep import BBCompSep from .plotter import BBPlotter diff --git a/bbpower/compsep.py b/bbpower/compsep.py index b4c54ce..4db286b 100644 --- a/bbpower/compsep.py +++ b/bbpower/compsep.py @@ -17,25 +17,74 @@ class BBCompSep(PipelineStage): The foreground model parameters are defined in the config.yml file. 
""" name = "BBCompSep" - inputs = [('cells_coadded', FitsFile),('cells_noise', FitsFile),('cells_fiducial', FitsFile)] - outputs = [('param_chains', NpzFile), ('config_copy', YamlFile)] + inputs = [('cells_coadded', FitsFile), + ('cells_noise', FitsFile), + ('cells_fiducial', FitsFile), + ('cells_coadded_cov', FitsFile)] # Read covariance from separate file + outputs = [('param_chains', NpzFile), + ('config_copy', YamlFile)] config_options={'likelihood_type':'h&l', 'n_iters':32, 'nwalkers':16, 'r_init':1.e-3, - 'sampler':'emcee'} + 'sampler':'emcee', 'bands': 'all'} def setup_compsep(self): """ Pre-load the data, CMB BB power spectrum, and foreground models. """ self.parse_sacc_file() + if self.config['fg_model'].get('use_moments'): + print('Running moments') + self.precompute_w3j() self.load_cmb() self.fg_model = FGModel(self.config) self.params = ParameterManager(self.config) + if self.config.get("diff"): + print('Running hybrid') + self.hybridparams = np.load(self.config['resid_seds']) + hyb_params = self.hybridparams['params_cent'] + hyb_sigma = self.hybridparams['params_sigma'] + pnames = np.array(self.params.p_free_names) + ind_bs = int(np.where(pnames == 'beta_s')[0]) + ind_bd = int(np.where(pnames == 'beta_d')[0]) + # Beta_s: [centre, sigma] + self.params.p_free_priors[ind_bs][2] = [hyb_params[0], hyb_sigma[0]] + # Beta_d: [centre, sigma] + self.params.p_free_priors[ind_bd][2] = [hyb_params[1], hyb_sigma[1]] if self.use_handl: self.prepare_h_and_l() return + def get_moments_lmax(self): + return self.config['fg_model'].get('moments_lmax', 384) + + def precompute_w3j(self): + from pyshtools.utils import Wigner3j + + lmax = self.get_moments_lmax() + ells_w3j = np.arange(0, lmax) + w3j = np.zeros_like(ells_w3j, dtype=float) + self.big_w3j = np.zeros((lmax, lmax, lmax)) + for ell1 in ells_w3j[1:]: + for ell2 in ells_w3j[1:]: + w3j_array, ellmin, ellmax = Wigner3j(ell1, ell2, 0, 0, 0) + w3j_array = w3j_array[:ellmax - ellmin + 1] + # make the w3j_array the same 
shape as the w3j + if len(w3j_array) < len(ells_w3j): + reference = np.zeros(len(w3j)) + reference[:w3j_array.shape[0]] = w3j_array + w3j_array = reference + + w3j_array = np.concatenate([w3j_array[-ellmin:], + w3j_array[:-ellmin]]) + w3j_array = w3j_array[:len(ells_w3j)] + w3j_array[:ellmin] = 0 + + self.big_w3j[:, ell1, ell2] = w3j_array + + self.big_w3j = self.big_w3j**2 + + def matrix_to_vector(self, mat): - return mat[..., self.index_ut[0], self.index_ut[1]] + return mat[..., self.index_ut_modes[0], self.index_ut_modes[1]] def vector_to_matrix(self, vec): if vec.ndim == 1: @@ -68,13 +117,15 @@ def _freq_pol_iterator(self): def parse_sacc_file(self): """ - Reads the data in the sacc file included the power spectra, bandpasses, and window functions. + Reads the data in the sacc file included the power spectra, + bandpasses, and window functions. """ - #Decide if you're using H&L + # Decide if you're using H&L self.use_handl = self.config['likelihood_type'] == 'h&l' - #Read data + # Read data self.s = sacc.Sacc.load_fits(self.get_input('cells_coadded')) + self.s_cov = sacc.Sacc.load_fits(self.get_input('cells_coadded_cov')) if self.use_handl: s_fid = sacc.Sacc.load_fits(self.get_input('cells_fiducial')) s_noi = sacc.Sacc.load_fits(self.get_input('cells_noise')) @@ -90,20 +141,26 @@ def parse_sacc_file(self): for c in corr_all: if c not in corr_keep: self.s.remove_selection(c) + self.s_cov.remove_selection(c) if self.use_handl: s_fid.remove_selection(c) s_noi.remove_selection(c) - # Scale cuts + # Scale cuts self.s.remove_selection(ell__gt=self.config['l_max']) self.s.remove_selection(ell__lt=self.config['l_min']) + self.s_cov.remove_selection(ell__gt=self.config['l_max']) + self.s_cov.remove_selection(ell__lt=self.config['l_min']) if self.use_handl: s_fid.remove_selection(ell__gt=self.config['l_max']) s_fid.remove_selection(ell__lt=self.config['l_min']) s_noi.remove_selection(ell__gt=self.config['l_max']) s_noi.remove_selection(ell__lt=self.config['l_min']) - 
tr_names = sorted(list(self.s.tracers.keys())) + if self.config['bands'] == 'all': + tr_names = sorted(list(self.s.tracers.keys())) + else: + tr_names = self.config['bands'] self.nfreqs = len(tr_names) self.npol = len(self.pols) self.nmaps = self.nfreqs * self.npol @@ -111,7 +168,7 @@ def parse_sacc_file(self): self.ncross = (self.nmaps * (self.nmaps + 1)) // 2 self.pol_order=dict(zip(self.pols,range(self.npol))) - #Collect bandpasses + # Collect bandpasses self.bpss = [] for i_t, tn in enumerate(tr_names): t = self.s.tracers[tn] @@ -124,10 +181,10 @@ def parse_sacc_file(self): self.bpss.append(Bandpass(nu, dnu, bnu, i_t+1, self.config)) # Get ell sampling - # Example power spectrum + # Example power spectrum self.ell_b, _ = self.s.get_ell_cl('cl_' + 2 * self.pols[0].lower(), tr_names[0], tr_names[0]) - #Avoid l<2 + # Avoid l<2 win0 = self.s.data[0]['window'] mask_w = win0.values > 1 self.bpw_l = win0.values[mask_w] @@ -137,11 +194,10 @@ def parse_sacc_file(self): self.dl2cl = 2 * np.pi / (self.bpw_l * (self.bpw_l + 1)) self.windows = np.zeros([self.ncross, self.n_bpws, self.n_ell]) - #Get power spectra and covariances - if not (self.s.covariance.covmat.shape[-1] == len(self.s.mean) == self.n_bpws * self.ncross): - raise ValueError("C_ell vector's size is wrong") - ###cv = self.s.precision.getCovarianceMatrix() - + # Get power spectra and covariances + if self.config['bands'] == 'all': + if not (self.s_cov.covariance.covmat.shape[-1] == len(self.s.mean) == self.n_bpws * self.ncross): + raise ValueError("C_ell vector's size is wrong") v2d = np.zeros([self.n_bpws, self.ncross]) if self.use_handl: v2d_noi = np.zeros([self.n_bpws, self.ncross]) @@ -151,7 +207,7 @@ def parse_sacc_file(self): self.vector_indices = self.vector_to_matrix(np.arange(self.ncross, dtype=int)).astype(int) self.indx = [] - #Parse into the right ordering + # Parse into the right ordering for b1, b2, p1, p2, m1, m2, ind_vec in self._freq_pol_iterator(): t1 = tr_names[b1] t2 = tr_names[b2] @@ 
-174,17 +230,63 @@ def parse_sacc_file(self): pol2b = self.pols[p2b].lower() cl_typb = f'cl_{pol1b}{pol2b}' ind_b = self.s.indices(cl_typb, (t1b, t2b)) - cv2d[:, ind_vec, :, ind_vecb] = self.s.covariance.covmat[ind_a][:, ind_b] + cv2d[:, ind_vec, :, ind_vecb] = self.s_cov.covariance.covmat[ind_a][:, ind_b] #Store data - self.bbdata = self.vector_to_matrix(v2d) + self.bbdata = self.vector_to_matrix(v2d) #(nbpw, nf, nf) + if self.config.get("diff"): + from scipy.linalg import eig, inv, pinv + self.hybridparams = np.load(self.config['resid_seds']) + Qmat = self.hybridparams['Q'] + eval, evec = eig(Qmat) + mask = eval > 0.1 #take away rows equivalent to eigenvalue=0 + self.R = np.linalg.inv(evec)[mask, :] + self.bbdata = np.einsum("aj,bk,ijk->iab", self.R, self.R, self.bbdata) + cov_all = np.zeros([self.n_bpws, self.nmaps, self.nmaps, self.n_bpws, self.nmaps, self.nmaps]) + for ix in range(self.ncross): + i, j = np.array(self.index_ut)[:,ix] + for jx in range(self.ncross): + m, n = np.array(self.index_ut)[:,jx] + cov_all[:, i, j, :, m, n] = cv2d[:, ix, :, jx] + cov_all[:, j, i, :, m, n] = cv2d[:, ix, :, jx] + cov_all[:, i, j, :, n, m] = cv2d[:, ix, :, jx] + cov_all[:, j, i, :, n, m] = cv2d[:, ix, :, jx] + ccov = np.einsum("aj,bk,gm,dn,ijklmn->iablgd", self.R, self.R, self.R, self.R, cov_all) + self.nmodes = 4 + self.nmaps_modes = self.nmodes * self.npol + self.index_ut_modes = np.triu_indices(self.nmodes) + self.ncross_modes = (self.nmaps_modes * (self.nmaps_modes + 1)) // 2 + cv2d = np.zeros([self.n_bpws, self.ncross_modes, self.n_bpws, self.ncross_modes]) + for ix in range(self.ncross_modes): + i, j = np.array(self.index_ut_modes)[:,ix] + for jx in range(self.ncross_modes): + m, n = np.array(self.index_ut_modes)[:,jx] + #import traceback + #traceback.print_stack() + cv2d[:, ix, :, jx] = ccov[:, i, j, :, m, n] + else: + self.nmodes = self.nfreqs + self.nmaps_modes = self.nmaps + self.index_ut_modes = self.index_ut + self.ncross_modes = self.ncross if self.use_handl: 
self.bbnoise = self.vector_to_matrix(v2d_noi) self.bbfiducial = self.vector_to_matrix(v2d_fid) - self.bbcovar = cv2d.reshape([self.n_bpws * self.ncross, self.n_bpws * self.ncross]) - self.invcov = np.linalg.solve(self.bbcovar, np.identity(len(self.bbcovar))) + if self.config.get("diff"): + from scipy.linalg import eig, inv, pinv + self.hybridparams = np.load(self.config['resid_seds']) + Qmat = self.hybridparams['Q'] + eval, evec = eig(Qmat) + mask = eval > 0.1 #take away rows equivalent to eigenvalue=0 + self.R = np.linalg.inv(evec)[mask, :] + self.bbnoise = np.einsum("aj,bk,ijk->iab", self.R, self.R, self.bbnoise) + self.bbfiducial = np.einsum("aj,bk,ijk->iab", self.R, self.R, self.bbfiducial) + self.bbcovar = cv2d.reshape([self.n_bpws * self.ncross_modes, + self.n_bpws * self.ncross_modes]) + self.invcov = np.linalg.inv(self.bbcovar) + return - + def load_cmb(self): """ Loads the CMB BB spectrum as defined in the config file. @@ -211,7 +313,7 @@ def load_cmb(self): self.cmb_scal[ind, ind] = cmb_lensingfile[:, 2][mask] return - def integrate_seds(self, params): + def integrate_seds(self, params): #NB different from NERSC fg_scaling = np.zeros([self.fg_model.n_components, self.nfreqs]) rot_matrices = [] @@ -221,20 +323,25 @@ def integrate_seds(self, params): sed_params = [params[comp['names_sed_dict'][k]] for k in comp['sed'].params] rot_matrices.append([]) - def sed(nu): - return comp['sed'].eval(nu, *sed_params) - + + if self.config.get("diff"): + def sed(nu): + return comp['sed'].diff(nu, *sed_params) + else: + def sed(nu): + return comp['sed'].eval(nu, *sed_params) + for tn in range(self.nfreqs): sed_b, rot = self.bpss[tn].convolve_sed(sed, params) fg_scaling[i_c, tn] = sed_b * units rot_matrices[i_c].append(rot) - return fg_scaling.T,rot_matrices + return fg_scaling.T, rot_matrices - def evaluate_power_spectra(self, params): + def evaluate_power_spectra(self, params): #NB different from NERSC fg_pspectra = np.zeros([self.fg_model.n_components, 
self.fg_model.n_components, - self.npol, self.npol, self.n_ell]) + self.npol, self.npol, self.n_ell]) #NB different from NERSC # Fill diagonal for i_c, c_name in enumerate(self.fg_model.component_names): @@ -247,7 +354,7 @@ def evaluate_power_spectra(self, params): for k in clfunc.params] fg_pspectra[i_c, i_c, ip1, ip2, :] = clfunc.eval(self.bpw_l, *pspec_params) * self.dl2cl - # Off diagonals + # Off diagonals #NB different from NERSC for i_c1, c_name1 in enumerate(self.fg_model.component_names): for c_name2, epsname in self.fg_model.components[c_name1]['names_x_dict'].items(): i_c2 = self.fg_model.component_order[c_name2] @@ -258,39 +365,119 @@ def evaluate_power_spectra(self, params): return fg_pspectra - def model(self, params): + def model(self, params): #NB different from NERSC (dimensions) """ Defines the total model and integrates over the bandpasses and windows. """ + # [npol,npol,nell] cmb_cell = (params['r_tensor'] * self.cmb_tens + \ params['A_lens'] * self.cmb_lens + \ - self.cmb_scal) * self.dl2cl # [npol,npol,nell] - fg_scaling, rot_m = self.integrate_seds(params) # [nfreq, ncomp], [ncomp,nfreq,[matrix]] - fg_cell = self.evaluate_power_spectra(params) # [ncomp,ncomp,npol,npol,nell] + self.cmb_scal) * self.dl2cl + # [nell,npol,npol] + cmb_cell = np.transpose(cmb_cell, axes = [2,0,1]) + if self.config['cmb_model'].get('use_birefringence'): + bi_angle = np.radians(params['birefringence']) + c = np.cos(2*bi_angle) + s = np.sin(2*bi_angle) + bmat = np.array([[c, s], + [-s, c]]) + cmb_cell = rotate_cells_mat(bmat, bmat, cmb_cell) + + # [nfreq, ncomp], [ncomp,nfreq,[matrix]] + fg_scaling, rot_m = self.integrate_seds(params) #NB different from NERSC (dimensions) + # [ncomp,ncomp,npol,npol,nell] + fg_cell = self.evaluate_power_spectra(params) #NB different from NERSC (dimensions) # Add all components scaled in frequency (and HWP-rotated if needed) - cls_array_fg = np.zeros([self.nfreqs,self.nfreqs,self.n_ell,self.npol,self.npol]) - fg_cell = 
np.transpose(fg_cell, axes = [0,1,4,2,3]) # [ncomp,ncomp,nell,npol,npol] - cmb_cell = np.transpose(cmb_cell, axes = [2,0,1]) # [nell,npol,npol] + # [nfreq, nfreq, nell, npol, npol] + cls_array_fg = np.zeros([self.nfreqs, self.nfreqs, + self.n_ell, self.npol, self.npol]) + # [ncomp,ncomp,nell,npol,npol] + fg_cell = np.transpose(fg_cell, axes = [0,1,4,2,3]) #NB different from NERSC + + #NB different from NERSC no CMB scaling + + # Loop over frequencies for f1 in range(self.nfreqs): - for f2 in range(f1,self.nfreqs): # Note that we only need to fill in half of the frequencies - cls=cmb_cell.copy() + # Note that we only need to fill in half of the frequencies + for f2 in range(f1,self.nfreqs): + cls=cmb_cell.copy() #NB different from NERSC # Loop over component pairs for c1 in range(self.fg_model.n_components): - mat1=rot_m[c1][f1] + mat1=rot_m[c1][f1] #NB different from NERSC a1=fg_scaling[f1,c1] for c2 in range(self.fg_model.n_components): mat2=rot_m[c2][f2] a2=fg_scaling[f2,c2] - # Rotate if needed clrot=rotate_cells_mat(mat2,mat1,fg_cell[c1,c2]) - # Scale in frequency and add cls += clrot*a1*a2 cls_array_fg[f1,f2]=cls + # Add moment terms if needed + # Start by assuming that there is no cross-correlation between components and betas + if self.config['fg_model'].get('use_moments'): + # TODO: moments work with: + # - B-only + # - No polarization angle business + # - Only power-law beta power spectra at l_pivot=80 + + # Evaluate 1st/2nd order SED derivatives. + # [nfreq, ncomp] + fg_scaling_d1 = self.integrate_seds_der(params, order=1) + fg_scaling_d2 = self.integrate_seds_der(params, order=2) + + # Compute 1x1 for each component + # Compute 0x2 for each component (essentially this is sigma_beta) + # Evaluate beta power spectra. 
+ lmax_mom = self.get_moments_lmax() + # [ncomp, nell, npol, npol] + cls_11 = np.zeros([self.fg_model.n_components, self.n_ell, + self.npol, self.npol]) + # [ncomp, nell, npol, npol] + cls_02 = np.zeros([self.fg_model.n_components, self.n_ell, + self.npol, self.npol]) + + # component_1: dust, component_2: sync + for i_c, c_name in enumerate(self.fg_model.component_names): + comp = self.fg_model.components[c_name] + gamma = params[comp['names_moments_dict']['gamma_beta']] + amp = params[comp['names_moments_dict']['amp_beta']] * 1E-6 + cl_betas = self.bcls(lmax=lmax_mom, gamma=gamma, amp=amp) + #cl_cc = fg_cell[i_c, :] #NERSC version + cl_cc = fg_cell[i_c, i_c, :] #NB different from NERSC + cls_1x1 = self.evaluate_1x1(params, lmax=lmax_mom, + cls_cc=cl_cc, + cls_bb=cl_betas) + cls_11[i_c, :lmax_mom, :, :] = cls_1x1 + + cls_0x2 = self.evaluate_0x2(params, lmax=lmax_mom, + cls_cc=cl_cc, + cls_bb=cl_betas) + cls_02[i_c, :lmax_mom, :, :] = cls_0x2 + + # Add components scaled in frequency + for f1 in range(self.nfreqs): + # Note that we only need to fill in half of the frequencies + for f2 in range(f1, self.nfreqs): + cls = np.zeros([self.n_ell, self.npol, self.npol]) + for c1 in range(self.fg_model.n_components): + cls += (fg_scaling_d1[f1, c1] * fg_scaling_d1[f2, c1] * + cls_11[c1]) + #NERSC version + #cls += 0.5 * (fg_scaling_d2[f1, c1] * + # fg_scaling[c1, c1, f2, f2] + + # fg_scaling_d2[f2, c1] * + # fg_scaling[c1, c1, f1, f1]) * cls_02[c1] + cls += 0.5 * (fg_scaling_d2[f1, c1] * + fg_scaling[f2, c1] + + fg_scaling_d2[f2, c1] * + fg_scaling[f1, c1]) * cls_02[c1] + cls_array_fg[f1, f2] += cls + # Window convolution - cls_array_list = np.zeros([self.n_bpws, self.nfreqs, self.npol, self.nfreqs, self.npol]) + cls_array_list = np.zeros([self.n_bpws, self.nfreqs, + self.npol, self.nfreqs, self.npol]) for f1 in range(self.nfreqs): for p1 in range(self.npol): m1 = f1*self.npol+p1 @@ -307,11 +494,75 @@ def model(self, params): # Polarization angle rotation for f1 in 
range(self.nfreqs): for f2 in range(self.nfreqs): - cls_array_list[:,f1,:,f2,:] = rotate_cells(self.bpss[f2], self.bpss[f1], + cls_array_list[:,f1,:,f2,:] = rotate_cells(self.bpss[f2], + self.bpss[f1], cls_array_list[:,f1,:,f2,:], params) + cls_array_list = cls_array_list.reshape([self.n_bpws, self.nmaps, self.nmaps]) + if self.config.get("diff"): + cls_array_list = np.einsum("aj,bk,ijk->iab", self.R, self.R, cls_array_list) + return cls_array_list + + def bcls(self, lmax, gamma, amp): + """ + Model beta spectra as power laws. + """ + ls = np.arange(lmax) + bcls = np.zeros(len(ls)) + bcls[2:] = (ls[2:] / 80.)**gamma + return bcls*amp + + def integrate_seds_der(self, params, order=1): + """ + Define the first order derivative of the SED + """ + fg_scaling_der = np.zeros([self.fg_model.n_components, + self.nfreqs]) + + for i_c, c_name in enumerate(self.fg_model.component_names): + comp = self.fg_model.components[c_name] + units = comp['cmb_n0_norm'] + sed_params = [params[comp['names_sed_dict'][k]] + for k in comp['sed'].params] - return cls_array_list.reshape([self.n_bpws, self.nmaps, self.nmaps]) + # Set SED function with scaling beta + def sed_der(nu): + nu0 = params[comp['names_sed_dict']['nu0']] + x = np.log(nu / nu0) + # This is only valid for spectral indices + return x**order * comp['sed'].eval(nu, *sed_params) + + for tn in range(self.nfreqs): + sed_b = self.bpss[tn].convolve_sed(sed_der, params)[0] + fg_scaling_der[i_c, tn] = sed_b * units + + return fg_scaling_der.T + + def evaluate_1x1(self, params, lmax, cls_cc, cls_bb): + """ + Evaluate the 1x1 moment for auto-spectra + """ + ls = np.arange(lmax) + v_left = (2*ls+1)[:, None, None] * cls_cc[:lmax, :, :] + v_right = (2*ls+1) * cls_bb[:lmax] + + mat = self.big_w3j + v_left = np.transpose(v_left, axes=[1, 0, 2]) + moment1x1 = np.dot(np.dot(mat, v_right), v_left) / (4*np.pi) + return moment1x1 + + def evaluate_0x2(self, params, lmax, cls_cc, cls_bb, + gamma=None): + """ + Evaluate the 0x2 moment for 
auto-spectra + Assume power law for beta + """ + ls = np.arange(lmax) + if gamma is not None: + prefac = amp * (2 * sp.zeta(-gamma-1) + sp.zeta(-gamma) - 3) / (4 * np.pi * 80**gamma) + else: + prefac = np.sum( (2 * ls + 1) * cls_bb) / (4*np.pi) + return cls_cc[:lmax] * prefac def chi_sq_dx(self, params): """ @@ -323,39 +574,50 @@ def chi_sq_dx(self, params): def prepare_h_and_l(self): fiducial_noise = self.bbfiducial + self.bbnoise self.Cfl_sqrt = np.array([sqrtm(f) for f in fiducial_noise]) + print(self.bbdata.shape) + print(self.bbnoise.shape) self.observed_cls = self.bbdata + self.bbnoise return def h_and_l_dx(self, params): - """ - Hamimeche and Lewis likelihood. - Taken from Cobaya written by H, L and Torrado - See: https://github.com/CobayaSampler/cobaya/blob/master/cobaya/likelihoods/_cmblikes_prototype/_cmblikes_prototype.py + """ + Hamimeche and Lewis likelihood. + Taken from Cobaya written by H, L and Torrado + See: https://github.com/CobayaSampler/cobaya/blob/master/cobaya/likelihoods/_cmblikes_prototype/_cmblikes_prototype.py """ model_cls = self.model(params) dx_vec = [] for k in range(model_cls.shape[0]): C = model_cls[k] + self.bbnoise[k] X = self.h_and_l(C, self.observed_cls[k], self.Cfl_sqrt[k]) + if np.any(np.isinf(X)): + return [np.inf] dx = self.matrix_to_vector(X).flatten() dx_vec = np.concatenate([dx_vec, dx]) return dx_vec - + def h_and_l(self, C, Chat, Cfl_sqrt): - diag, U = np.linalg.eigh(C) + try: + diag, U = np.linalg.eigh(C) + except: + return [np.inf] rot = U.T.dot(Chat).dot(U) roots = np.sqrt(diag) for i, root in enumerate(roots): rot[i, :] /= root rot[:, i] /= root U.dot(rot.dot(U.T), rot) - diag, rot = np.linalg.eigh(rot) - diag = np.sign(diag - 1) * np.sqrt(2 * np.maximum(0, diag - np.log(diag) - 1)) + try: + diag, rot = np.linalg.eigh(rot) + except: + return [np.inf] + diag = (np.sign(diag - 1) * + np.sqrt(2 * np.maximum(0, diag - np.log(diag) - 1))) Cfl_sqrt.dot(rot, U) for i, d in enumerate(diag): rot[:, i] = U[:, i] * d return 
rot.dot(U.T) - + def lnprob(self, par): """ Likelihood with priors. @@ -363,24 +625,29 @@ def lnprob(self, par): prior = self.params.lnprior(par) if not np.isfinite(prior): return -np.inf + return prior + self.lnlike(par) + def lnlike(self, par): + """ + Likelihood without priors. + """ params = self.params.build_params(par) if self.use_handl: dx = self.h_and_l_dx(params) + if np.any(np.isinf(dx)): + return -np.inf else: dx = self.chi_sq_dx(params) - like = -0.5 * np.einsum('i, ij, j',dx,self.invcov,dx) - return prior + like + like = -0.5 * np.dot(dx, np.dot(self.invcov, dx)) + return like def emcee_sampler(self): """ Sample the model with MCMC. """ import emcee - from multiprocessing import Pool fname_temp = self.get_output('param_chains')+'.h5' - backend = emcee.backends.HDFBackend(fname_temp) nwalkers = self.config['nwalkers'] @@ -390,21 +657,28 @@ def emcee_sampler(self): if not found_file: backend.reset(nwalkers,ndim) - pos = [self.params.p0 + 1.e-3*np.random.randn(ndim) for i in range(nwalkers)] + #p0 = self.minimizer() + #pos = [p0 + 1.e-3*np.random.randn(ndim) for i in range(nwalkers)] + #nsteps_use = n_iters + pos = [self.params.p0 + 1.e-3*np.random.randn(ndim) + for i in range(nwalkers)] nsteps_use = n_iters else: print("Restarting from previous run") pos = None nsteps_use = max(n_iters-len(backend.get_chain()), 0) - - with Pool() as pool: - sampler = emcee.EnsembleSampler(nwalkers, ndim, self.lnprob,backend=backend) - if nsteps_use > 0: - sampler.run_mcmc(pos, nsteps_use, store=True, progress=True); - return sampler + import time + start = time.time() + sampler = emcee.EnsembleSampler(nwalkers, ndim, self.lnprob, backend=backend) + if nsteps_use > 0: + sampler.run_mcmc(pos, nsteps_use, store=True, progress=True); + end = time.time() + return sampler, end-start - def minimizer(self): + #NB different from NERSC: Add Polychord + + def minimizer(self, save_spectra=False): """ Find maximum likelihood """ @@ -413,19 +687,87 @@ def chi2(par): 
c2=-2*self.lnprob(par) return c2 res=minimize(chi2, self.params.p0, method="Powell") - return res.x + + if len(self.params.p0)<2: + x = np.array([res.x]) + else: + x = res.x + + if save_spectra: + bbdata_cls = self.bbdata # n_bpws, nfreqs, nfreqs + names=self.params.p_free_names + parameters = dict(zip(list(names),res.x)) + #print(parameters) + parameters['nu0_d'] = 220. + parameters['nu0_s'] = 40. + model_cls = self.model(parameters) # n_bpws, nfreqs, nfreqs + bbcov = self.bbcovar + l = self.ell_b + lmax = 300 + msk = l $ndir"settings.yml" +#sed "s/xxx/$sname" $fdir"settings.yml" > $ndir"settings.yml" + diff --git a/moments/settings.yml b/moments/settings.yml new file mode 100644 index 0000000..98d6f98 --- /dev/null +++ b/moments/settings.yml @@ -0,0 +1,17 @@ +modules: bbpower +launcher: local +stages: + - name: BBCompSep + nprocess: 1 + +inputs: + cells_coadded: /mnt/zfsusers/mabitbol/BBHybrid/data/sim02/cls_coadd0.fits + cells_fiducial: /mnt/zfsusers/mabitbol/BBHybrid/data/sim02/cls_fid0.fits + cells_noise: /mnt/zfsusers/mabitbol/BBHybrid/data/sim02/cls_noise0.fits + +resume: False +config: ./moments/xxx/config.yml +output_dir: ./moments/xxx/output +log_dir: ./moments/xxx/output +pipeline_log: ./moments/xxx/output/log.txt + diff --git a/plotting/combined.py b/plotting/combined.py new file mode 100755 index 0000000..978b384 --- /dev/null +++ b/plotting/combined.py @@ -0,0 +1,89 @@ +import os.path +import numpy as np +import glob +import emcee +from labels import alllabels + +import matplotlib as mpl +from getdist import plots, MCSamples +from labels import alllabels, allranges + +hybrid = 1 +baseline = 0 + +if hybrid: + #npf = 'hybrid_masked_alphap' + #allfs = glob.glob(f'/mnt/zfsusers/susanna/BBPower/test_hybrid/{npf}/sim0*/output*/') + npf = 'hybrid_masked_outs' + #allfs = glob.glob(f'/mnt/extraspace/susanna/BBHybrid/{npf}/gaussian_priorsfromBfore/sim0*/output*/') + allfs = glob.glob(f'/mnt/extraspace/susanna/BBHybrid/{npf}/realistic/d1s1/output000_pinv/') 
+elif baseline: + allfs = glob.glob('/mnt/zfsusers/susanna/BBPower/baseline_masked/sim0*/output*/') +allfs.sort() + + +def save_cleaned_chains(fdir): + reader = emcee.backends.HDFBackend(f'{fdir}param_chains.npz.h5') + x = np.load(f'{fdir}param_chains.npz') + labels = ['$%s$' %alllabels[k] for k in x['names']] + + try: + tau = reader.get_autocorr_time(tol=50) + taust = 'good tau' + except: + taust = 'POTENTIALLY BAD TAU' + tau = reader.get_autocorr_time(tol=0) + burnin = int(5. * np.max(tau)) + thin = int(0.5 * np.max(tau)) + + samples = reader.get_chain(discard=burnin, flat=True, thin=thin) + N = samples.shape[0] + + with open(f'{fdir}chain_info.txt', 'w') as of: + print("N: ", N, file=of) + print("burnin: %d, thin: %d" %(burnin, thin), file=of) + print("tau:", file=of) + print(tau, file=of) + print(taust, file=of) + np.savez(f'{fdir}cleaned_chains', samples=samples, names=x['names'], labels=labels) + return + +for af in allfs: + save_cleaned_chains(af) + + +def getdist_clean(fdir): + sampler = np.load(f'{fdir}cleaned_chains.npz') + vals = [allranges[k] for k in sampler['labels']] + ranges = dict(zip(sampler['labels'], vals)) + #gd_samps = MCSamples(samples=sampler['samples'], + # names=sampler['labels'], + # labels=[p.strip('$') for p in sampler['labels']], + # ranges=ranges) + gd_samps = MCSamples(samples=sampler['samples'], + names=sampler['labels'], + labels=[p.strip('$') for p in sampler['labels']]) + return gd_samps + +for af in allfs: + sname = af.split('/')[-4] + af.split('/')[-3] + af.split('/')[-2] + #sname = af.split('/')[-3] + outf = f'{af}results.txt' + + samps = getdist_clean(af) + samps.getTable(limit=1).write(outf) + + z = samps.paramNames.list() + g = plots.get_subplot_plotter(width_inch=16) + g.settings.axes_fontsize=14 + g.settings.axes_labelsize=14 + g.settings.line_styles = 'tab10' + g.triangle_plot([samps], z, shaded=True, title_limit=1) + if hybrid: + #mpl.pyplot.savefig(f'/mnt/zfsusers/susanna/BBPower/test_hybrid/plots/{sname}_triangle') + 
#mpl.pyplot.savefig(f'/mnt/zfsusers/susanna/BBPower/plotting/{sname}_triangle_betasB4') + mpl.pyplot.savefig(f'/mnt/zfsusers/susanna/BBPower/plotting/{sname}_triangle_d1s1pinv') + elif baseline: + mpl.pyplot.savefig(f'/mnt/zfsusers/susanna/BBPower/baseline_masked/plots/{sname}_triangle') + mpl.pyplot.close() + diff --git a/plotting/labels.py b/plotting/labels.py new file mode 100644 index 0000000..65814d5 --- /dev/null +++ b/plotting/labels.py @@ -0,0 +1,42 @@ +alllabels = {'A_lens':'A_{lens}', 'r_tensor':'r', 'beta_d':'\\beta_d', 'epsilon_ds':'\epsilon', 'birefringence':'\\theta', + 'alpha_d_ee':'\\alpha^{EE}_d', 'amp_d_ee':'A^{EE}_d', 'alpha_d_bb':'\\alpha^{BB}_d', + 'amp_d_bb':'A^{BB}_d', 'beta_s':'\\beta_s', 'alpha_s_ee':'\\alpha^{EE}_s', + 'amp_s_ee':'A^{EE}_s', 'alpha_s_bb':'\\alpha^{BB}_s', 'amp_s_bb':'A^{BB}_s', + 'amp_d_eb':'A^{EB}_d', 'alpha_d_eb':'\\alpha^{EB}_d', 'amp_s_eb':'A^{EB}_s', + 'alpha_s_eb':'\\alpha^{EB}_s', 'decorr_amp_d':'\Delta_d', 'decorr_amp_s':'\Delta_s', + 'gain_1':'G_1', 'gain_2':'G_2', 'gain_3':'G_3', 'gain_4':'G_4', 'gain_5':'G_5', 'gain_6':'G_6', + 'gain_7':'G_7', 'gain_8':'G_8', 'gain_9':'G_9', 'gain_10':'{G_10}', 'gain_11':'{G_11}', 'gain_12':'{G_12}', + 'shift_1':'\Delta\\nu_1', 'shift_2':'\Delta\\nu_2', 'shift_3':'\Delta\\nu_3', + 'shift_4':'\Delta\\nu_4', 'shift_5':'\Delta\\nu_5', 'shift_6':'\Delta\\nu_6', + 'shift_7':'\Delta\\nu_7', 'shift_8':'\Delta\\nu_8', 'shift_9':'\Delta\\nu_9', + 'shift_10':'\Delta\\nu_{10}', 'shift_11':'\Delta\\nu_{11}', 'shift_12':'\Delta\\nu_{12}', + 'angle_1':'\phi_1', 'angle_2':'\phi_2', 'angle_3':'\phi_3', 'angle_4':'\phi_4', + 'angle_5':'\phi_5', 'angle_6':'\phi_6', 'dphi1_1':'\Delta\phi_1', 'dphi1_2':'\Delta\phi_2', + 'dphi1_3':'\Delta\phi_3', 'dphi1_4':'\Delta\phi_4', 'dphi1_5':'\Delta\phi_5', 'dphi1_6':'\Delta\phi_6', + 'amp_d_beta': 'B_d', 'gamma_d_beta': '\\gamma_{d}', 'amp_s_beta': 'B_s', 'gamma_s_beta': '\\gamma_{s}'} + + +allranges={'$r$': [-1.0, 1.0], + '$A_{lens}$': [0.0, 2.0], + 
'$\\beta_d$': [0.1, 10.0], + '$A^{EE}_d$': [0.0, None], + '$\\alpha^{EE}_d$': [-3.0, 1.0], + '$A^{BB}_d$': [-10.0, None], + '$\\alpha^{BB}_d$': [-3.0, 3.0], + '$A^{EB}_d$': [-100.0, 100.0], + '$\\alpha^{EB}_d$': [-3.0, 1.0], + '$\\epsilon$': [-1.0, 1.0], + '$\\Delta_d$': [0.9, 1.1], + '$\\beta_s$': [-10.0, 0.0], + '$A^{EE}_s$': [0.0, None], + '$\\alpha^{EE}_s$': [-3.0, 1.0], + '$A^{BB}_s$': [-10.0, None], + '$\\alpha^{BB}_s$': [-3.0, 1.0], + '$A^{EB}_s$': [-100.0, 100.0], + '$\\alpha^{EB}_s$': [-3.0, 1.0], + '$\\theta$': [-30., 30.], + '$\\Delta_s$': [0.9, 1.1], + '$B_d$':[0., 10], + '$\\gamma_{d}$':[-6, -2], + '$B_s$':[0, 10], + '$\\gamma_{s}$':[-6, -2]} diff --git a/plotting/plotter.py b/plotting/plotter.py new file mode 100644 index 0000000..12dfa5a --- /dev/null +++ b/plotting/plotter.py @@ -0,0 +1,37 @@ +import numpy as np +import matplotlib as mpl +import glob +from getdist import plots, MCSamples +from labels import alllabels, allranges + +allfs = glob.glob('/mnt/zfsusers/mabitbol/BBPower/baseline_full/sim0*/output*/') +allfs.sort() + +def getdist_clean(fdir): + sampler = np.load(fdir+'cleaned_chains.npz') + vals = [allranges[k] for k in sampler['labels']] + ranges = dict(zip(sampler['labels'], vals)) + gd_samps = MCSamples(samples=sampler['samples'], + names=sampler['labels'], + labels=[p.strip('$') for p in sampler['labels']], + ranges=ranges) + return gd_samps + +for af in allfs: + sname = af.split('/')[-4] + af.split('/')[-3] + af.split('/')[-2] + #sname = af.split('/')[-3] + outf = af+'results.txt' + + samps = getdist_clean(af) + samps.getTable(limit=1).write(outf) + + z = samps.paramNames.list() + g = plots.get_subplot_plotter(width_inch=16) + g.settings.axes_fontsize=14 + g.settings.axes_labelsize=14 + g.settings.line_styles = 'tab10' + g.triangle_plot([samps], z, shaded=True, title_limit=1) + mpl.pyplot.savefig(f'/mnt/zfsusers/mabitbol/notebooks/data_and_figures/{sname}_triangle') + mpl.pyplot.close() + + diff --git a/plotting/saveclean.py 
b/plotting/saveclean.py new file mode 100755 index 0000000..e1fae51 --- /dev/null +++ b/plotting/saveclean.py @@ -0,0 +1,48 @@ +import os.path +import numpy as np +import glob +import emcee +from labels import alllabels + +allfs = glob.glob('/mnt/zfsusers/mabitbol/BBPower/baseline_full/sim0*/output*/') +allfs.sort() + +dones = [] +for af in allfs: + if os.path.isfile(af+'cleaned_chains.npz'): + dones.append(af) +for df in dones: + allfs.remove(df) + + +def save_cleaned_chains(fdir): + outf = fdir+'chain_info.txt' + reader = emcee.backends.HDFBackend(fdir+'param_chains.npz.h5') + x = np.load(fdir+'param_chains.npz') + labels = ['$%s$' %alllabels[k] for k in x['names']] + + try: + tau = reader.get_autocorr_time(tol=50) + taust = 'good tau' + except: + taust = 'POTENTIALLY BAD TAU' + tau = reader.get_autocorr_time(tol=0) + burnin = int(5. * np.max(tau)) + thin = int(0.5 * np.max(tau)) + + samples = reader.get_chain(discard=burnin, flat=True, thin=thin) + N = samples.shape[0] + + with open(outf, 'w') as of: + print("N: ", N, file=of) + print("burnin: %d, thin: %d" %(burnin, thin), file=of) + print("tau:", file=of) + print(tau, file=of) + print(taust, file=of) + np.savez(fdir+'cleaned_chains', samples=samples, names=x['names'], labels=labels) + return + +for af in allfs: + save_cleaned_chains(af) + + diff --git a/run_pipeline/bbcompsep_parallel.sh b/run_pipeline/bbcompsep_parallel.sh new file mode 100755 index 0000000..0ea5d67 --- /dev/null +++ b/run_pipeline/bbcompsep_parallel.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Reads in seeds to sample +IFS=$'\r\n' GLOBIGNORE='*' command eval 'seeds=($(cat ${dir}/seeds.txt))' + +# Runs pipeline 'nseeds' times in parallel. +for k in $( eval echo {0..$( expr $nseeds / $(( $SLURM_NTASKS + 1 )) )} ) +do + tmp1=$(( $SLURM_PROCID + $k*$SLURM_NTASKS )) + tmp2=$(( $tmp1 < $nseeds ? 
$tmp1 : $nseeds )) # numerical min + export data_seed=${seeds[$tmp2]} + + export simdir=${dir}${data_seed}/ + export output=$simdir + export config=$output"config.yml" + cp $dir"config.yml" $output"config.yml" + + sed -i "s|ODIR|${dir}|" $config + sed -i "s/0X/${data_seed}/" $config + sed -i "s/0Y/${data_seed}/" $config + + # Only run seed if param_chains file is not there + [ -f ${output}/param_chains.npz ] || + echo "data seed ${data_seed}" + + # Runs BBCompSep; cells_coadded_cov is the precomputed file that contains the covariance + python3 -m bbpower BBCompSep \ + --cells_coadded=${output}cells_coadded.sacc \ + --cells_noise=${output}cells_noise.sacc \ + --cells_fiducial=${simdir}cls_fid_residual_masked_${data_seed}.fits \ + --cells_coadded_cov=${dirall}/cells_coadded.fits \ + --param_chains=${output}param_chains.npz \ + --config_copy=${output}config_copy.yml \ + --config=$config + + echo "data seed ${data_seed} done at task ${SLURM_PROCID}" +done diff --git a/run_pipeline/bbpowerspecter_parallel.sh b/run_pipeline/bbpowerspecter_parallel.sh new file mode 100755 index 0000000..b4ed51a --- /dev/null +++ b/run_pipeline/bbpowerspecter_parallel.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +# Reads in seeds to sample +IFS=$'\r\n' GLOBIGNORE='*' command eval 'seeds=($(cat ${mdir}/seeds.txt))' + +# Runs pipeline 'nseeds' times in parallel. +for k in $( eval echo {0..$( expr $nseeds / $(( $SLURM_NTASKS + 1 )) )} ) +do + tmp1=$(( $SLURM_PROCID + $k*$SLURM_NTASKS )) + tmp2=$(( $tmp1 < $nseeds ? 
$tmp1 : $nseeds )) # numerical min + export data_seed=${seeds[$tmp2]} + + echo $data_seed + + export simdir=${mdir}${data_seed}/ + export output=$simdir + + export config=$mdir"config_hybrid.yml" + + # Compile txt file with all Cls in + rm -rf ${output}cells_all_sims.txt + echo ${output}"cells_all_splits_sim0.fits">> ${output}cells_all_sims.txt + for nsim in {1..499} + do + echo ${dirall}"cells_all_splits_sim${nsim}.fits" >> ${output}cells_all_sims.txt + done + + # Creating full simulations list + rm -rf ${output}list_sims.txt + echo $simdir >> ${output}list_sims.txt + for nsim in {001..499} + do + sim=/dir/to/sim/0${nsim}/ + echo $sim >> ${output}list_sims.txt + done + + # Only run seed if param_chains file is not there + [ -f ${output}/param_chains.npz ] || + echo "data seed ${data_seed}" + + # Runs BBPowerSpecter + python3 -m bbpower BBPowerSpecter \ + --splits_list=${simdir}splits_list.txt \ + --masks_apodized=./test_hybrid/masks_SAT_ns512.fits \ + --bandpasses_list=./examples/data/bpass_list.txt \ + --sims_list=${output}list_sims.txt \ + --beams_list=./examples/data/beams_list.txt \ + --cells_all_splits=${output}cells_all_splits.sacc \ + --cells_all_sims=${output}cells_all_sims.txt \ + --mcm=${output}mcm.dum \ + --config=$config + + echo "data seed ${data_seed} done at task ${SLURM_PROCID}" +done diff --git a/run_pipeline/bbpowersummarizer_parallel.sh b/run_pipeline/bbpowersummarizer_parallel.sh new file mode 100755 index 0000000..f4f405b --- /dev/null +++ b/run_pipeline/bbpowersummarizer_parallel.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Reads in seeds to sample +IFS=$'\r\n' GLOBIGNORE='*' command eval 'seeds=($(cat ${dir}/seeds.txt))' + +# Runs pipeline 'nseeds' times in parallel. +for k in $( eval echo {0..$( expr $nseeds / $(( $SLURM_NTASKS + 1 )) )} ) +do + tmp1=$(( $SLURM_PROCID + $k*$SLURM_NTASKS )) + tmp2=$(( $tmp1 < $nseeds ? 
$tmp1 : $nseeds )) # numerical min + export data_seed=${seeds[$tmp2]} + + echo $data_seed + + export simdir=${dir}${data_seed}/ + export output=$simdir + export covdir=$dirall + cp $dir"config.yml" $output"config.yml" + export config=$output"config_hybrid.yml" + + #sed -i -r "s/ZZ/block_diagonal/" $config #Calculate covariance + sed -i -r "s|ZZ|$covdir|" $config #Use pre-computed covariance + + # Runs BBPowerSummarizer + python3 -m bbpower BBPowerSummarizer \ + --splits_list=${simdir}splits_list.txt \ + --bandpasses_list=./examples/data/bpass_list.txt \ + --cells_all_splits=${output}cells_all_splits.sacc \ + --cells_all_sims=${output}cells_all_sims.txt \ + --cells_coadded_total=${output}cells_coadded_total.sacc \ + --cells_coadded=${output}cells_coadded.sacc \ + --cells_noise=${output}cells_noise.sacc \ + --cells_null=${output}cells_null.sacc \ + --config=${config} + + echo "data seed ${data_seed} done at task ${SLURM_PROCID}" +done diff --git a/run_pipeline/config.yml b/run_pipeline/config.yml new file mode 100644 index 0000000..eb9a259 --- /dev/null +++ b/run_pipeline/config.yml @@ -0,0 +1,137 @@ +# These parameters are accessible to all stages +global: + nside: 512 + compute_dell: True + +BBPowerSpecter: + # Bandpower definition + bpw_edges: "/global/homes/s/susannaz/BBPower/examples/data/bpw_edges.txt" + purify_B: True + n_iter : 3 + # Activates parallelization with mpi4py + parallel: False + +BBPowerSummarizer: + # Covariance types + nulls_covar_type: "diagonal" + nulls_covar_diag_order: 0 + data_covar_type: "block_diagonal" + data_covar_diag_order: 0 + +BBCompSep: + # Sampler type (choose 'emcee', 'polychord', 'maximum_likelihood', + # 'single_point' or 'timing') + sampler: 'emcee' + # If you chose polychord: + nlive: 50 + nrepeat: 50 + # If you chose emcee: + nwalkers: 128 + n_iters: 2000 + # Likelihood type (choose 'chi2' or 'h&l') + likelihood_type: 'chi2' + # What is the starting point? 
+ r_init: 1.e-3 + # Which polarization channels do you want to include? + pol_channels: ['B'] + # Scale cuts (will apply to all frequencies) + l_min: 30 + l_max: 300 + diff: False + + # CMB model + cmb_model: + # Template power spectrum. Should contained the lensed power spectra + # with r=0 and r=1 respectively. + cmb_templates: ['/global/homes/s/susannaz/BBPower/examples/data/camb_lens_nobb_nico.dat', + '/global/homes/s/susannaz/BBPower/examples/data/camb_lens_r1_nico.dat'] + # Free parameters + params: + # tensor-to-scalar ratio + # See below for the meaning of the different elements in the list. + r_tensor: ['r_tensor', 'tophat', [-0.1, 0.00, 0.1]] + # Lensing amplitude + A_lens: ['A_lens', 'tophat', [0.00, 1.0, 2.00]] + + # Foreground model + fg_model: + use_moments: False + # Add one section per component. They should be called `component_X`, + # starting with X=1 + component_1: + # Name for this component + name: Dust + # Type of SED. Should be one of the classes stored in fgbuster.components + # https://github.com/fgbuster/fgbuster/blob/master/fgbuster/component_model.py + sed: Dust + # Type of power spectra for all possible polarization channel combinations. + # Any combinations not added here will be assumed to be zero. + # The names should be one of the classes in bbpower/fgcls.py + cl: + EE: ClPowerLaw + BB: ClPowerLaw + # Parameters of the SED + sed_parameters: + # The key can be anything you want, but each parameter in the model + # must have a different name. + # The first item in the list is the name of the parameter used by fgbuster + # The second item is the type of prior. The last item are the numbers + # necessary to define the prior. They should be: + # - Gaussian: [mean,sigma] + # - tophat: [lower edge, start value, upper edge] + # - fixed: [parameter value] + # nu0-type parameters can only be fixed. 
+ beta_d: ['beta_d', 'Gaussian', [1.54, 0.2]] + temp_d: ['temp', 'fixed', [20.]] + nu0_d: ['nu0', 'fixed', [353.]] + #nu0_d: ['nu0', 'fixed', [220.]] + cl_parameters: + # Same for power spectrum parameters + # (broken down by polarization channel combinations) + EE: + #amp_d_ee: ['amp', 'tophat', [0., 10., "inf"]] + amp_d_ee: ['amp', 'tophat', [40., 56., 80.]] + alpha_d_ee: ['alpha', 'tophat', [-1., -0.32, 0.]] + l0_d_ee: ['ell0', 'fixed', [80.]] + BB: + #amp_d_bb: ['amp', 'tophat', [0., 0.5, "inf"]] + amp_d_bb: ['amp', 'tophat', [0., 10., "inf"]] + #alpha_d_bb: ['alpha', 'tophat', [-1., -0.16, 0.5]] + alpha_d_bb: ['alpha', 'tophat', [-1, -0.42, 0.]] + #alpha_d_bb: ['alpha', 'tophat', [-1, -0.2, 0.]] + l0_d_bb: ['ell0', 'fixed', [80.]] + # If this component should be correlated with any other, list them here + cross: + # In this case the list should contain: + # [component name, prior type, prior parameters] + #epsilon_ds: ['component_2', 'fixed', [0.]] + epsilon_ds: ['component_2', 'tophat', [-0.001, 0., 0.001]] + + component_2: + name: Synchrotron + sed: Synchrotron + cl: + EE: ClPowerLaw + BB: ClPowerLaw + sed_parameters: + beta_s: ['beta_pl', 'Gaussian', [-3.0, 0.3]] + nu0_s: ['nu0', 'fixed', [23.]] + #nu0_s: ['nu0', 'fixed', [40.]] + cl_parameters: + EE: + #amp_s_ee: ['amp', 'tophat', [0., 4., "inf"]] + amp_s_ee: ['amp', 'tophat', [5., 9., 15.]] + alpha_s_ee: ['alpha', 'tophat', [-2., -0.7, 0.]] + l0_s_ee: ['ell0', 'fixed', [80.]] + BB: + amp_s_bb: ['amp', 'tophat', [0., 0.05, "inf"]] + alpha_s_bb: ['alpha', 'tophat', [-2., -0.93, 0.]] + #alpha_s_bb: ['alpha', 'tophat', [-2., -1.5, 0.]] + l0_s_bb: ['ell0', 'fixed', [80.]] + +BBPlotter: + lmax_plot: 300 + plot_coadded_total: False + plot_noise: False + plot_nulls: False + plot_likelihood: True diff --git a/run_pipeline/config_hybrid.yml b/run_pipeline/config_hybrid.yml new file mode 100644 index 0000000..8c5c00d --- /dev/null +++ b/run_pipeline/config_hybrid.yml @@ -0,0 +1,131 @@ +# These parameters are 
accessible to all stages +global: + nside: 512 + compute_dell: True + +BBPowerSpecter: + # Bandpower definition + bpw_edges: "/global/homes/s/susannaz/BBPower/examples/data/bpw_edges.txt" + purify_B: True + n_iter : 3 + # Activates parallelization with mpi4py + parallel: False + +BBPowerSummarizer: + # Covariance types + nulls_covar_type: "diagonal" + nulls_covar_diag_order: 0 + data_covar_type: "ZZ" + data_covar_diag_order: 0 + +BBCompSep: + # Sampler type (choose 'emcee', 'polychord', 'maximum_likelihood', + # 'single_point' or 'timing') + sampler: 'emcee' + # If you chose polychord: + nlive: 50 + nrepeat: 50 + # If you chose emcee: + nwalkers: 128 + n_iters: 2000 + # Likelihood type (choose 'chi2' or 'h&l') + likelihood_type: 'chi2' + # What is the starting point? + r_init: 1.e-3 + # Which polarization channels do you want to include? + pol_channels: ['B'] + # Scale cuts (will apply to all frequencies) + l_min: 30 + l_max: 300 + diff: True + + # CMB model + cmb_model: + # Template power spectrum. Should contained the lensed power spectra + # with r=0 and r=1 respectively. + cmb_templates: ['/global/homes/s/susannaz/BBPower/examples/data/camb_lens_nobb_nico.dat', + '/global/homes/s/susannaz/BBPower/examples/data/camb_lens_r1_nico.dat'] + # Free parameters + params: + # tensor-to-scalar ratio + # See below for the meaning of the different elements in the list. + r_tensor: ['r_tensor', 'tophat', [-0.1, 0.00, 0.1]] + # Lensing amplitude + A_lens: ['A_lens', 'tophat', [0.00, 1.0, 2.00]] + + # If diff: True + resid_seds: ODIR/0X/masked_hybrid_params_0Y.npz + + # Foreground model + fg_model: + use_moments: False + # Add one section per component. They should be called `component_X`, + # starting with X=1 + component_1: + # Name for this component + name: Dust + # Type of SED. 
Should be one of the classes stored in fgbuster.components + # https://github.com/fgbuster/fgbuster/blob/master/fgbuster/component_model.py + sed: Dust + # Type of power spectra for all possible polarization channel combinations. + # Any combinations not added here will be assumed to be zero. + # The names should be one of the classes in bbpower/fgcls.py + cl: + EE: ClPowerLaw + BB: ClPowerLaw + # Parameters of the SED + sed_parameters: + # The key can be anything you want, but each parameter in the model + # must have a different name. + # The first item in the list is the name of the parameter used by fgbuster + # The second item is the type of prior. The last item are the numbers + # necessary to define the prior. They should be: + # - Gaussian: [mean,sigma] + # - tophat: [lower edge, start value, upper edge] + # - fixed: [parameter value] + # nu0-type parameters can only be fixed. + beta_d: ['beta_d', 'Gaussian', [1.54, 0.2]] + temp_d: ['temp', 'fixed', [20.]] + nu0_d: ['nu0', 'fixed', [353.]] + cl_parameters: + # Same for power spectrum parameters + # (broken down by polarization channel combinations) + EE: + amp_d_ee: ['amp', 'tophat', [40., 56., 80.]] + alpha_d_ee: ['alpha', 'tophat', [-1., -0.32, 0.]] + l0_d_ee: ['ell0', 'fixed', [80.]] + BB: + amp_d_bb: ['amp', 'tophat', [0., 10., "inf"]] + alpha_d_bb: ['alpha', 'tophat', [-1, -0.42, 0.]] + l0_d_bb: ['ell0', 'fixed', [80.]] + # If this component should be correlated with any other, list them here + cross: + # In this case the list should contain: + # [component name, prior type, prior parameters] + epsilon_ds: ['component_2', 'tophat', [-0.001, 0., 0.001]] + + component_2: + name: Synchrotron + sed: Synchrotron + cl: + EE: ClPowerLaw + BB: ClPowerLaw + sed_parameters: + beta_s: ['beta_pl', 'Gaussian', [-3.0, 0.3]] + nu0_s: ['nu0', 'fixed', [23.]] + cl_parameters: + EE: + amp_s_ee: ['amp', 'tophat', [5., 9., 15.]] + alpha_s_ee: ['alpha', 'tophat', [-2., -0.7, 0.]] + l0_s_ee: ['ell0', 'fixed', [80.]] + BB: + 
amp_s_bb: ['amp', 'tophat', [0., 0.05, "inf"]] + alpha_s_bb: ['alpha', 'tophat', [-2., -0.93, 0.]] + l0_s_bb: ['ell0', 'fixed', [80.]] + +BBPlotter: + lmax_plot: 300 + plot_coadded_total: False + plot_noise: False + plot_nulls: False + plot_likelihood: True diff --git a/run_pipeline/prepare_directory_parallel.sh b/run_pipeline/prepare_directory_parallel.sh new file mode 100755 index 0000000..dfa04a0 --- /dev/null +++ b/run_pipeline/prepare_directory_parallel.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +# Reads in seeds to sample +IFS=$'\r\n' GLOBIGNORE='*' command eval 'seeds=($(cat ${dir}/seeds.txt))' + +# Runs pipeline 'nseeds' times in parallel. +for k in $( eval echo {0..$( expr $nseeds / $(( $SLURM_NTASKS + 1 )) )} ) +do + tmp1=$(( $SLURM_PROCID + $k*$SLURM_NTASKS )) + tmp2=$(( $tmp1 < $nseeds ? $tmp1 : $nseeds )) # numerical min + export data_seed=${seeds[$tmp2]} + + export simdir=${dir}${data_seed}/ + + # Creating simulated residual directory if not existent + mkdir -p $simdir + cp ${simdir}masked_residualmaps_${data_seed}_split0.fits ${simdir}SO_SAT_obs_map_split_1of4.fits + cp ${simdir}masked_residualmaps_${data_seed}_split1.fits ${simdir}SO_SAT_obs_map_split_2of4.fits + cp ${simdir}masked_residualmaps_${data_seed}_split2.fits ${simdir}SO_SAT_obs_map_split_3of4.fits + cp ${simdir}masked_residualmaps_${data_seed}_split3.fits ${simdir}SO_SAT_obs_map_split_4of4.fits + + # Creating splits list + rm -rf ${simdir}splits_list.txt + echo ${simdir}SO_SAT_obs_map_split_1of4.fits >> ${simdir}splits_list.txt + echo ${simdir}SO_SAT_obs_map_split_2of4.fits >> ${simdir}splits_list.txt + echo ${simdir}SO_SAT_obs_map_split_3of4.fits >> ${simdir}splits_list.txt + echo ${simdir}SO_SAT_obs_map_split_4of4.fits >> ${simdir}splits_list.txt + + echo "prepared directory ${data_seed} at task ${SLURM_PROCID}" +done diff --git a/run_pipeline/run_stage_parallel.sh b/run_pipeline/run_stage_parallel.sh new file mode 100755 index 0000000..a4c484a --- /dev/null +++ 
b/run_pipeline/run_stage_parallel.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +export HDF5_USE_FILE_LOCKING=FALSE + +## Utils +export idir="/input/dir/" +export dir="/output/dir/" +export dirall="/dir/containing/covariance/" +mkdir -p ${dir} + +nside=512 + +export OMP_NUM_THREADS=1 + +## Config +export cf=/dir/config_file.yml + +# Set data seeds +seeds=( $(seq -f "%04g" 0000 0005 ) ) #run for 5 simulations +printf "%s\n" "${seeds[@]}" > "${dir}/seeds.txt" +export nseeds=${#seeds[@]} + +# Set number of CPUs +ncpus=$(( $SLURM_CPUS_ON_NODE * $SLURM_JOB_NUM_NODES )) +ntasks=$(( $nseeds < $ncpus ? $nseeds : $ncpus )) +echo "Create ${ntasks} tasks" + +# Run each stage +source activate testenv # This loads custom programming environment +# +cp $cf $dir +# Uncomment for stage to be run in queue +srun "--ntasks=${ntasks}" "/global/homes/s/susannaz/BBPower/test_hybrid/prepare_directory_parallel.sh" +#srun "--ntasks=${ntasks}" "/global/homes/s/susannaz/BBPower/test_hybrid/bbpowerspecter_parallel.sh" +#srun "--ntasks=${ntasks}" "/global/homes/s/susannaz/BBPower/test_hybrid/bbpowersummarizer_parallel.sh" +#srun "--ntasks=${ntasks}" "/global/homes/s/susannaz/BBPower/test_hybrid/bbcompsep_parallel.sh"