Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 2 additions & 3 deletions src/pyclm/controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,9 +53,7 @@ def __init__(self, config="MMConfig_demo.cfg", dry=False):
core=self.core, aq=self.all_queues, stop_event=self.stop_event
)
self.manager = Manager(aq=self.all_queues, stop_event=self.stop_event)
self.outbox = MicroscopeOutbox(
aq=self.all_queues, save_type="hdf5", stop_event=self.stop_event
)
self.outbox = MicroscopeOutbox(aq=self.all_queues, stop_event=self.stop_event)
self.slm_buffer = SLMBuffer(aq=self.all_queues, stop_event=self.stop_event)
self.segmentation = SegmentationProcess(
aq=self.all_queues, stop_event=self.stop_event
Expand Down Expand Up @@ -131,6 +129,7 @@ def initialize(
)
self.microscope.declare_slm()
self.outbox.base_path = out_path
self.outbox.initialize(schedule)

def run(self):
with ThreadPoolExecutor() as executor:
Expand Down
220 changes: 96 additions & 124 deletions src/pyclm/convert_hdf5s.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import json
import re
from argparse import ArgumentParser
from pathlib import Path
Expand Down Expand Up @@ -30,127 +31,121 @@ def get_mapping(projector_api):
return at


def get_binning_from_metadata(f: "File", chan_key: str) -> int:
    """
    Read the camera binning factor for one channel from the file-level
    experiment metadata, falling back to 1 when it cannot be determined.

    Parameters
    ----------
    f : h5py.File
        Open HDF5 file; only its ``attrs`` mapping is accessed, so any
        object exposing a dict-like ``attrs`` works.
    chan_key : str
        HDF5 channel group key, typically "channel_<NAME>"; the metadata's
        "channels" mapping is keyed by the bare "<NAME>".

    Returns
    -------
    int
        The channel's binning factor, or 1 as the default (missing metadata,
        unknown channel, or unparseable JSON).
    """
    if "experiment_metadata" in f.attrs:
        try:
            meta = json.loads(f.attrs["experiment_metadata"])
            # chan_key is typically "channel_NAME"; metadata keys are "NAME".
            if chan_key.startswith("channel_"):
                short_name = chan_key.replace("channel_", "", 1)
                if short_name in meta.get("channels", {}):
                    return meta["channels"][short_name].get("binning", 1)
        except (ValueError, KeyError, TypeError) as e:
            # Malformed JSON (JSONDecodeError is a ValueError) or an
            # unexpected schema: warn and fall through to the default.
            print(f"Error reading binning from metadata: {e}")
    # Explicit default so callers never receive None.
    return 1


def make_tif(fp, at, chan="channel_638"):
    """
    Convert one pyclm HDF5 acquisition file into an ImageJ-compatible TIF
    stack of image + pattern overlays for a single channel.

    Opens the file in SWMR mode so conversion can run while an acquisition is
    still being written, reads the per-channel binning from the file's
    experiment metadata, warps each stored DMD stimulation pattern back into
    camera space with the inverse affine transform, and writes a
    "<fp minus .hdf5>_<chan>_patterns.tif" TCYX stack next to the input file.

    Parameters
    ----------
    fp : str
        Path to the .hdf5 file; output names are derived by stripping the
        ".hdf5" suffix (the code assumes that extension).
    at : np.ndarray
        2x3 affine transform; inverted here to map DMD patterns back onto
        the camera frame.
    chan : str
        HDF5 channel group key, e.g. "channel_638".
    """
    ati = cv2.invertAffineTransform(at)
    patterned = []
    collected_frames = []
    channel_key = f"{chan}"

    # Open with SWMR support so we can read a file mid-acquisition.
    try:
        f = File(fp, mode="r", libver="latest", swmr=True)
    except OSError:
        # File missing, locked, or not HDF5.
        print(f"Could not open {fp}")
        return

    # Per-channel binning from experiment metadata; guard with a default of 1
    # in case the helper returns None when no metadata is present.
    binning = get_binning_from_metadata(f, channel_key) or 1

    try:
        # Collect the timepoint groups that contain this channel.
        indices = []
        for t_val in list(f.keys()):
            if t_val not in f:
                # Key vanished between listing and access (SWMR race).
                continue
            if channel_key not in f[t_val]:
                continue
            indices.append(t_val)

        seg_seen = False

        for t_val in natsorted(indices):
            # Check the dataset exists and is readable (frame may still be
            # mid-write under SWMR); skip rather than abort.
            try:
                if channel_key not in f[t_val] or "data" not in f[t_val][channel_key]:
                    continue
                data_dset = f[t_val][channel_key]["data"]
                # SWMR safety: pick up any data flushed since the file opened.
                data_dset.refresh()
                data = np.array(data_dset)
            except Exception:
                continue

            # NOTE(review): collected_frames gates the early return below but
            # is never written to disk -- confirm whether the raw-channel TIF
            # export was intentionally dropped.
            collected_frames.append(data)

            patterned_stack = [data]

            if "seg" in f[t_val][channel_key].keys():
                patterned_stack.append(f[t_val][channel_key]["seg"])
                seg_seen = True
            elif seg_seen:
                # Once segmentations start appearing, skip frames without one
                # so the stacked output keeps a consistent channel count.
                continue

            if "stim_aq" in f[t_val].keys() and "dmd" in f[t_val]["stim_aq"]:
                pattern = np.array(f[t_val]["stim_aq"]["dmd"])
                target_size = data.shape
                b = int(binning)  # ensure a valid integer scaling factor

                # Warp the DMD pattern back into (unbinned) camera space, then
                # downscale to match the binned image resolution.
                tf = cv2.warpAffine(
                    np.round(pattern).astype(np.uint8),
                    ati,
                    (target_size[1] * b, target_size[0] * b),
                ).astype(np.uint16)
                ds = downscale_local_mean(tf, (b, b)).astype(np.uint16)
                patterned_stack.append(ds)
            else:
                # No stimulation at this timepoint: pad with a blank plane so
                # the per-frame stack keeps the same channel count.
                patterned_stack.append(np.zeros(data.shape))

            patterned.append(np.stack(patterned_stack).astype(np.uint16))
    finally:
        f.close()

    if not collected_frames:
        return

    # Save the patterned (image + overlays) output as a TCYX ImageJ stack.
    if patterned:
        outpath_pattern = str(fp)[:-5] + f"_{chan}_patterns.tif"
        tifffile.imwrite(
            outpath_pattern,
            np.array(patterned).astype(np.uint16),
            imagej=True,
            metadata={"axes": "tcyx"},
        )
        print(f"Saved {outpath_pattern}")


def process_args():
Expand All @@ -160,15 +155,7 @@ def process_args():
parser.add_argument(
"--config", type=str, help="path to pyclm_config.toml file", default=None
)
parser.add_argument("--binning", type=int, help="binning", default=2)
parser.add_argument(
"--overlay_pattern",
action="store_true",
help="whether to overlay the pattern on the tif",
)
parser.add_argument(
"--just_patterns", action="store_true", help="just add the stimulation"
)
# Removed binning, overlay_pattern, just_patterns args

return parser.parse_args()

Expand All @@ -186,11 +173,10 @@ def find_affine_transform(input_dir, config_path):

config_path = Path(config_path)

assert input_dir.exists(), f"experiment directory {input_dir} does not exist"
assert config_path.exists(), (
f"config file {config_path} does not exist: pyclm_config.toml must be specified or be "
f"present in the experiment directory"
)
if not config_path.exists():
raise FileNotFoundError(
f"Config file not found at {config_path}. Affine transform is required."
)

config = load(config_path)
return np.array(config["affine_transform"], dtype=np.float32)
Expand All @@ -201,27 +187,13 @@ def main():
input_dir = args.directory
config_path = args.config
channels = args.channels
overlay_pattern = args.overlay_pattern

if args.just_patterns:
at = find_affine_transform(Path(input_dir), config_path)
for val in tqdm(Path(input_dir).glob("*.hdf5")):
make_stim_tif(val, at, args.binning)

return 0
# We require overlay pattern approach, so we need affine transform
at = find_affine_transform(Path(input_dir), config_path)

if overlay_pattern:
at = find_affine_transform(Path(input_dir), config_path)
else:
at = None

for val in tqdm(Path(input_dir).glob("*.hdf5")):
for val in tqdm(list(Path(input_dir).glob("*.hdf5"))):
for c in channels:
if overlay_pattern:
make_tif(str(val), at, f"channel_{c}", binning=args.binning)

else:
extract_channels_tifs(str(val), [f"channel_{c}"])
make_tif(str(val), at, f"channel_{c}")


if __name__ == "__main__":
Expand Down
29 changes: 6 additions & 23 deletions src/pyclm/core/events.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,6 @@ def __init__(
scheduled_time_since_start=0,
exposure_time_ms=10,
needs_slm=False,
super_axes=None,
sub_axes=None,
t_index=0,
config_groups: list[ConfigGroup] | None = None,
Expand Down Expand Up @@ -110,9 +109,6 @@ def __init__(
self.binning = binning

# axis-name, axis-value pairs
# super-axes (determines folder structure containing experiment)
self.super_axes = super_axes

# sub-axes (determines folder within hdf5_file)
self.sub_axes = sub_axes

Expand All @@ -139,23 +135,16 @@ def __init__(

self.pixel_width_um = None

def get_rel_path(self, leading=3) -> (str, str):
fstring = ""

if self.super_axes is not None:
for _ax, val in enumerate(self.super_axes):
if val is int:
val = str(val).zfill(leading)

fstring += f"{val}/"

fstring += f"{self.experiment_name}"
def get_rel_path(self, leading=3) -> str:
"""
Returns dset path within the hdf5 structure
"""

dset = ""

if self.sub_axes is not None:
for _ax, val in enumerate(self.sub_axes):
if val is int:
if isinstance(val, int):
val = str(val).zfill(leading)

dset += f"{val}/"
Expand All @@ -165,7 +154,7 @@ def get_rel_path(self, leading=3) -> (str, str):
else:
dset = "UNNAMED_DATA"

return fstring, dset
return dset

def write_attrs(self, dset: Dataset):
dset.attrs["id"] = str(self.id)
Expand All @@ -189,9 +178,6 @@ def write_attrs(self, dset: Dataset):
dset.attrs["needs_slm"] = self.needs_slm
dset.attrs["binning"] = self.binning

if self.super_axes is not None:
dset.attrs["super_axes"] = [str(a) for a in self.super_axes]

if self.sub_axes is not None:
dset.attrs["sub_axes"] = [str(a) for a in self.sub_axes]

Expand Down Expand Up @@ -239,9 +225,6 @@ def __repr__(self):
repr_out["needs_slm"] = self.needs_slm
repr_out["binning"] = self.binning

if self.super_axes is not None:
repr_out["super_axes"] = [str(a) for a in self.super_axes]

if self.sub_axes is not None:
repr_out["sub_axes"] = [str(a) for a in self.sub_axes]

Expand Down
Loading