Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions bbpower/bandpasses.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,8 +70,8 @@ def convolve_sed(self, sed, params):

if self.do_dphi1:
dphi1 = params[self.name_dphi1]
normed_dphi1 = dphi1 * np.pi / 180. * (self.nu - self.nu_mean) / self.nu_mean
dphi1_phase = np.cos(2.*normed_dphi1) + 1j * np.sin(2.*normed_dphi1)
normed_dphi1 = dphi1 * np.pi / 180. * (self.nu - self.nu_mean) / self.nu_mean # noqa
dphi1_phase = np.cos(2.*normed_dphi1) + 1j * np.sin(2.*normed_dphi1) # noqa

nu_prime = self.nu + dnu
# CMB sed
Expand Down
71 changes: 37 additions & 34 deletions bbpower/compsep.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
from scipy.linalg import sqrtm

from bbpipe import PipelineStage
from .types import NpzFile, FitsFile, YamlFile, DirFile
from .types import FitsFile, YamlFile, DirFile
from .fg_model import FGModel
from .param_manager import ParameterManager
from .bandpasses import (Bandpass, rotate_cells, rotate_cells_mat,
Expand Down Expand Up @@ -46,7 +46,7 @@ def get_moments_lmax(self):
return self.config['fg_model'].get('moments_lmax', 384)

def precompute_w3j(self):
from pyshtools.utils import Wigner3j
from pyshtools.utils import Wigner3j # noqa

lmax = self.get_moments_lmax()
ells_w3j = np.arange(0, lmax)
Expand Down Expand Up @@ -189,7 +189,7 @@ def parse_sacc_file(self):

# Get power spectra and covariances
if self.config['bands'] == 'all':
if not (self.s_cov.covariance.covmat.shape[-1] == len(self.s.mean) == self.n_bpws * self.ncross):
if not (self.s_cov.covariance.covmat.shape[-1] == len(self.s.mean) == self.n_bpws * self.ncross): # noqa: E501
raise ValueError("C_ell vector's size is wrong")

v2d = np.zeros([self.n_bpws, self.ncross])
Expand All @@ -198,7 +198,7 @@ def parse_sacc_file(self):
v2d_fid = np.zeros([self.n_bpws, self.ncross])
cv2d = np.zeros([self.n_bpws, self.ncross, self.n_bpws, self.ncross])

self.vector_indices = self.vector_to_matrix(np.arange(self.ncross, dtype=int)).astype(int)
self.vector_indices = self.vector_to_matrix(np.arange(self.ncross, dtype=int)).astype(int) # noqa: E501
self.indx = []

# Parse into the right ordering
Expand Down Expand Up @@ -227,7 +227,7 @@ def parse_sacc_file(self):
pol2b = self.pols[p2b].lower()
cl_typb = f'cl_{pol1b}{pol2b}'
ind_b = self.s.indices(cl_typb, (t1b, t2b))
cv2d[:, ind_vec, :, ind_vecb] = self.s_cov.covariance.covmat[ind_a][:, ind_b]
cv2d[:, ind_vec, :, ind_vecb] = self.s_cov.covariance.covmat[ind_a][:, ind_b] # noqa: E501

# Store data
self.bbdata = self.vector_to_matrix(v2d)
Expand All @@ -244,7 +244,8 @@ def load_cmb(self):
"""
Loads the CMB BB spectrum as defined in the config file.
"""
cmb_lensingfile = np.loadtxt(self.config['cmb_model']['cmb_templates'][0])
cmb_lensingfile = np.loadtxt(
self.config['cmb_model']['cmb_templates'][0])
cmb_bbfile = np.loadtxt(self.config['cmb_model']['cmb_templates'][1])

self.cmb_ells = cmb_bbfile[:, 0]
Expand Down Expand Up @@ -310,7 +311,7 @@ def sed(nu):

for i_c1, c_name1 in enumerate(self.fg_model.component_names):
fg_scaling[i_c1, i_c1] = comp_scaling[i_c1]
for c_name2, epsname in self.fg_model.components[c_name1]['names_x_dict'].items():
for c_name2, epsname in self.fg_model.components[c_name1]['names_x_dict'].items(): # noqa: E501
i_c2 = self.fg_model.component_order[c_name2]
eps = params[epsname]
fg_scaling[i_c1, i_c2] = eps * np.outer(single_sed[i_c1],
Expand Down Expand Up @@ -396,8 +397,9 @@ def model(self, params):
cl_cross = np.zeros((self.n_ell,
self.npol, self.npol))
for i in range(self.npol):
cl_cross[:, i, i] = np.sqrt(fg_cell[c1, :, i, i] *
fg_cell[c2, :, i, i])
cl_cross[:, i, i] = np.sqrt(
fg_cell[c1, :, i, i] * fg_cell[c2, :, i, i]
)
clrot = rotate_cells_mat(mat2, mat1, cl_cross)
cls += clrot * fg_scaling[c1, c2, f1, f2]
cls_array_fg[f1, f2] = cls
Expand Down Expand Up @@ -452,7 +454,7 @@ def model(self, params):
cls += 0.5 * (fg_scaling_d2[f1, c1] *
(fg_scaling[c1, c1, f2, f2])**0.5 +
fg_scaling_d2[f2, c1] *
(fg_scaling[c1, c1, f1, f1])**0.5) * cls_02[c1]
(fg_scaling[c1, c1, f1, f1])**0.5) * cls_02[c1] # noqa: E501
cls_array_fg[f1, f2] += cls

# Window convolution
Expand All @@ -478,7 +480,7 @@ def model(self, params):
for f2 in range(self.nfreqs):
cls_array_list[:, f1, :, f2, :] = rotate_cells(self.bpss[f2],
self.bpss[f1],
cls_array_list[:, f1, :, f2, :],
cls_array_list[:, f1, :, f2, :], # noqa: E501
params)

return cls_array_list.reshape([self.n_bpws, self.nmaps, self.nmaps])
Expand Down Expand Up @@ -555,7 +557,7 @@ def h_and_l_dx(self, params):
"""
Hamimeche and Lewis likelihood.
Taken from Cobaya written by H, L and Torrado
See: https://github.com/CobayaSampler/cobaya/blob/master/cobaya/likelihoods/_cmblikes_prototype/_cmblikes_prototype.py
See: https://github.com/CobayaSampler/cobaya/blob/master/cobaya/likelihoods/_cmblikes_prototype/_cmblikes_prototype.py # noqa: E501
"""
model_cls = self.model(params)
dx_vec = []
Expand All @@ -571,7 +573,7 @@ def h_and_l_dx(self, params):
def h_and_l(self, C, Chat, Cfl_sqrt):
try:
diag, U = np.linalg.eigh(C)
except:
except: # noqa
return [np.inf]
rot = U.T.dot(Chat).dot(U)
roots = np.sqrt(diag)
Expand All @@ -581,7 +583,7 @@ def h_and_l(self, C, Chat, Cfl_sqrt):
U.dot(rot.dot(U.T), rot)
try:
diag, rot = np.linalg.eigh(rot)
except:
except: # noqa
return [np.inf]
diag = (np.sign(diag - 1) *
np.sqrt(2 * np.maximum(0, diag - np.log(diag) - 1)))
Expand Down Expand Up @@ -645,15 +647,15 @@ def emcee_sampler(self):
pos = None
nsteps_use = max(n_iters-nchain, 0)

with Pool() as pool:
with Pool() as pool: # noqa
import time
start = time.time()
sampler = emcee.EnsembleSampler(nwalkers, ndim,
self.lnprob,
backend=backend)
if nsteps_use > 0:
sampler.run_mcmc(pos, nsteps_use, store=True, progress=False)
end = time.time()
end = time.time()

return sampler, end-start

Expand All @@ -673,27 +675,27 @@ def prior(hypercube):
prior = []
for h, pr in zip(hypercube, self.params.p_free_priors):
if pr[1] == 'Gaussian':
prior.append(GaussianPrior(float(pr[2][0]), float(pr[2][1]))(h))
prior.append(GaussianPrior(float(pr[2][0]), float(pr[2][1]))(h)) # noqa: E501
else:
prior.append(UniformPrior(float(pr[2][0]), float(pr[2][2]))(h))
prior.append(UniformPrior(float(pr[2][0]), float(pr[2][2]))(h)) # noqa: E501
return prior

# Optional dumper function giving run-time read access to
# the live points, dead points, weights and evidences
def dumper(live, dead, logweights, logZ, logZerr):
def dumper(live, dead, logweights, logZ, logZerr): # noqa
print("Last dead point:", dead[-1])

settings = PolyChordSettings(ndim, nder)
settings.base_dir = self.get_output('output_dir')+'/polychord'
settings.file_root = 'pch'
settings.nlive = self.config['nlive']
settings.num_repeats = self.config['nrepeat']
settings.do_clustering = False # Assume unimodal posterior
settings.boost_posterior = 10 # Increase number of posterior samples
settings.nprior = 200 # Draw nprior initial prior samples
settings.maximise = True # Maximize posterior at the end
settings.read_resume = False # Read from resume file of earlier run
settings.feedback = 2 # Verbosity {0,1,2,3}
settings.do_clustering = False # Assume unimodal posterior
settings.boost_posterior = 10 # Increase number of posterior samples
settings.nprior = 200 # Draw nprior initial prior samples
settings.maximise = True # Maximize posterior at the end
settings.read_resume = False # Read from resume file of earlier run
settings.feedback = 2 # Verbosity {0,1,2,3}

output = pypolychord.run_polychord(likelihood, ndim, nder, settings,
prior, dumper)
Expand Down Expand Up @@ -729,9 +731,9 @@ def chi2(par):
method="Powell")

def lnprobd(p):
l = self.lnprob(p)
l = self.lnprob(p) # noqa
if l == -np.inf:
l = -1E100
l = -1E100 # noqa
return l

fisher = - nd.Hessian(lnprobd)(res.x)
Expand All @@ -758,8 +760,8 @@ def timing(self, n_eval=300):

def predicted_spectra(self, at_min=True, save_npz=True):
"""
Evaluates model at a the maximum likelihood and
writes predicted spectra into a numpy array
Evaluates model at the maximum likelihood and
writes predicted spectra into a numpy array
with shape (nbpws, nmaps, nmaps).
"""
if at_min:
Expand All @@ -768,7 +770,6 @@ def predicted_spectra(self, at_min=True, save_npz=True):
else:
p = self.params.p0
pars = self.params.build_params(p)
print(pars)
model_cls = self.model(pars)
if self.config['bands'] == 'all':
tr_names = sorted(list(self.s.tracers.keys()))
Expand Down Expand Up @@ -799,7 +800,6 @@ def predicted_spectra(self, at_min=True, save_npz=True):
s.add_covariance(self.bbcovar)
s.save_fits(self.get_output('output_dir')+'/cells_model.fits',
overwrite=True)

return

def run(self):
Expand All @@ -808,12 +808,15 @@ def run(self):
self.setup_compsep()
if self.config.get('sampler') == 'emcee':
sampler, timing = self.emcee_sampler()
chi2 = -2*self.lnprob(self.minimizer())
np.savez(self.get_output('output_dir')+'/emcee.npz',
chain=sampler.chain,
names=self.params.p_free_names,
time=timing)
time=timing,
chi2=chi2,
ndof=len(self.bbcovar))
print("Finished sampling", timing)
elif self.config.get('sampler')=='polychord':
elif self.config.get('sampler') == 'polychord':
sampler = self.polychord_sampler()
print("Finished sampling")
elif self.config.get('sampler') == 'fisher':
Expand Down Expand Up @@ -848,7 +851,7 @@ def run(self):
names=self.params.p_free_names)
print("Total time:", sampler[0])
print("Time per eval:", sampler[1])
elif self.config.get('sampler')=='predicted_spectra':
elif self.config.get('sampler') == 'predicted_spectra':
at_min = self.config.get('predict_at_minimum', True)
save_npz = not self.config.get('predict_to_sacc', False)
sampler = self.predicted_spectra(at_min=at_min, save_npz=save_npz)
Expand Down
Loading