Skip to content
Merged
5 changes: 5 additions & 0 deletions imap_processing/cdf/config/imap_hi_global_cdf_attrs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,11 @@ imap_hi_l1b_hk_attrs:
Logical_source: imap_hi_l1b_{sensor}-hk
Logical_source_description: IMAP-Hi Instrument Level-1B Housekeeping Data.

# Global attributes for the IMAP-Hi L1B "goodtimes" data product.
# The {sensor} placeholder in Logical_source is filled in at write time
# (pattern matches the sibling hk/pset entries in this file).
imap_hi_l1b_goodtimes_attrs:
Data_type: L1B_GOODTIMES>Level-1B Good Times
Logical_source: imap_hi_l1b_{sensor}-goodtimes
Logical_source_description: IMAP-Hi Instrument Level-1B Good Times Data.

imap_hi_l1c_pset_attrs:
Data_type: L1C_PSET>Level-1C Pointing Set
Logical_source: imap_hi_l1c_{sensor}-pset
Expand Down
62 changes: 61 additions & 1 deletion imap_processing/cdf/config/imap_hi_variable_attrs.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -633,4 +633,64 @@ hi_pset_label_vector_HAE:
CATDESC: Label cartesian despun_z
FIELDNAM: Label cartesian despun_z
FORMAT: A5
VAR_TYPE: metadata
VAR_TYPE: metadata

# <=== L1B Goodtimes Attributes ===>
# Mission Elapsed Time, one value per 8-spin histogram packet.
# Inherits float64 variable defaults (fill value, format, ...) from the
# *default_float64 anchor defined elsewhere in this file.
hi_goodtimes_met:
<<: *default_float64
CATDESC: Mission Elapsed Time for each 8-spin histogram packet
FIELDNAM: MET
DEPEND_0: epoch
LABLAXIS: MET
UNITS: s
VAR_TYPE: support_data
VALIDMIN: 0
# VALIDMAX is the IEEE-754 float64 maximum, i.e. effectively unbounded.
VALIDMAX: 1.7976931348623157e+308

# 2-D cull-flag array dimensioned (epoch, spin_bin): 0 marks a good time,
# any non-zero value is a cull-reason code for that spin bin.
# Inherits uint8 variable defaults from the *default_uint8 anchor.
# NOTE(review): this is the only goodtimes entry without an explicit
# VAR_TYPE — presumably inherited from *default_uint8; confirm it resolves
# to 'data', since DISPLAY_TYPE is set for plotting.
hi_goodtimes_cull_flags:
<<: *default_uint8
CATDESC: Cull flags indicating good (0) or bad (non-zero) times per spin bin
FIELDNAM: Cull Flags
DEPEND_0: epoch
DEPEND_1: spin_bin
LABL_PTR_1: spin_bin_label
LABLAXIS: Cull Code
UNITS: " "
DISPLAY_TYPE: spectrogram
VAR_NOTES: >
Cull flags array with dimensions (epoch, spin_bin). Value of 0 indicates good time,
non-zero values indicate bad times with specific cull reason codes.
Cull code 1 (LOOSE) indicates times removed by quality filters.

# ESA (electrostatic analyzer) energy step, one value per 8-spin histogram
# packet. Inherits uint8 variable defaults from the *default_uint8 anchor.
hi_goodtimes_esa_step:
<<: *default_uint8
CATDESC: ESA energy step for each 8-spin histogram packet
FIELDNAM: ESA Step
DEPEND_0: epoch
LABLAXIS: ESA Step
UNITS: " "
VAR_TYPE: support_data
# Valid steps are 1-10 per VALIDMIN/VALIDMAX below.
VALIDMIN: 1
VALIDMAX: 10

# Spin-angle bin coordinate used as DEPEND_1 by the cull-flags variable.
# Inherits uint8 variable defaults from the *default_uint8 anchor.
hi_goodtimes_spin_bin:
<<: *default_uint8
CATDESC: Spin angle bin index
FIELDNAM: Spin Bin
# I2 format covers the 0-89 range declared below.
FORMAT: I2
LABLAXIS: Spin Bin
UNITS: " "
VAR_TYPE: support_data
VALIDMIN: 0
VALIDMAX: 89
VAR_NOTES: >
Spin angle bins numbered 0-89, covering 0-360 degrees of spacecraft spin.
Each bin is 4 degrees wide.

# Label variable referenced by the cull-flags LABL_PTR_1 attribute;
# metadata only (A3 string per spin bin), so no numeric defaults anchor.
hi_goodtimes_spin_bin_label:
CATDESC: Label for spin bin
FIELDNAM: Spin Bin Label
DEPEND_1: spin_bin
FORMAT: A3
VAR_TYPE: metadata

69 changes: 56 additions & 13 deletions imap_processing/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@
from imap_processing.glows.l1a.glows_l1a import glows_l1a
from imap_processing.glows.l1b.glows_l1b import glows_l1b, glows_l1b_de
from imap_processing.glows.l2.glows_l2 import glows_l2
from imap_processing.hi import hi_l1a, hi_l1b, hi_l1c, hi_l2
from imap_processing.hi import hi_goodtimes, hi_l1a, hi_l1b, hi_l1c, hi_l2
from imap_processing.hit.l1a.hit_l1a import hit_l1a
from imap_processing.hit.l1b.hit_l1b import hit_l1b
from imap_processing.hit.l2.hit_l2 import hit_l2
Expand Down Expand Up @@ -770,9 +770,9 @@ def do_processing(
class Hi(ProcessInstrument):
"""Process IMAP-Hi."""

def do_processing(
def do_processing( # noqa: PLR0912
self, dependencies: ProcessingInputCollection
) -> list[xr.Dataset]:
) -> list[xr.Dataset | Path]:
"""
Perform IMAP-Hi specific processing.

Expand All @@ -789,6 +789,10 @@ def do_processing(
print(f"Processing IMAP-Hi {self.data_level}")
datasets: list[xr.Dataset] = []

# Check self.repointing is not None (for mypy type checking)
if self.repointing is None:
raise ValueError("Repointing must be provided for Hi processing.")

if self.data_level == "l1a":
science_files = dependencies.get_file_paths(source="hi")
if len(science_files) != 1:
Expand All @@ -801,6 +805,41 @@ def do_processing(
l0_files = dependencies.get_file_paths(source="hi", descriptor="raw")
if l0_files:
datasets = hi_l1b.housekeeping(l0_files[0])
elif "goodtimes" in self.descriptor:
# Goodtimes processing
l1b_de_paths = dependencies.get_file_paths(
source="hi", data_type="l1b", descriptor="de"
)
if not l1b_de_paths:
raise ValueError("No L1B DE files found for goodtimes processing")

l1b_hk_paths = dependencies.get_file_paths(
source="hi", data_type="l1b", descriptor="hk"
)
if len(l1b_hk_paths) != 1:
raise ValueError(
f"Expected one L1B HK file, got {len(l1b_hk_paths)}"
)

cal_prod_paths = dependencies.get_file_paths(
data_type="ancillary", descriptor="cal-prod"
)
if len(cal_prod_paths) != 1:
raise ValueError(
f"Expected one cal-prod ancillary file, "
f"got {len(cal_prod_paths)}"
)

# Load CDFs before passing to hi_goodtimes
l1b_de_datasets = [load_cdf(path) for path in l1b_de_paths]
l1b_hk = load_cdf(l1b_hk_paths[0])

datasets = hi_goodtimes.hi_goodtimes(
l1b_de_datasets,
self.repointing,
l1b_hk,
cal_prod_paths[0],
)
else:
l1a_de_file = dependencies.get_file_paths(
source="hi", data_type="l1a", descriptor="de"
Expand All @@ -813,17 +852,21 @@ def do_processing(
load_cdf(l1a_de_file), load_cdf(l1b_hk_file), esa_energies_csv
)
elif self.data_level == "l1c":
science_paths = dependencies.get_file_paths(source="hi", data_type="l1b")
if len(science_paths) != 1:
raise ValueError(
f"Expected only one science dependency. Got {science_paths}"
if "pset" in self.descriptor:
# L1C PSET processing
science_paths = dependencies.get_file_paths(
source="hi", data_type="l1b"
)
anc_paths = dependencies.get_file_paths(data_type="ancillary")
if len(anc_paths) != 1:
raise ValueError(
f"Expected only one ancillary dependency. Got {anc_paths}"
)
datasets = hi_l1c.hi_l1c(load_cdf(science_paths[0]), anc_paths[0])
if len(science_paths) != 1:
raise ValueError(
f"Expected only one science dependency. Got {science_paths}"
)
anc_paths = dependencies.get_file_paths(data_type="ancillary")
if len(anc_paths) != 1:
raise ValueError(
f"Expected only one ancillary dependency. Got {anc_paths}"
)
datasets = hi_l1c.hi_l1c(load_cdf(science_paths[0]), anc_paths[0])
elif self.data_level == "l2":
science_paths = dependencies.get_file_paths(source="hi", data_type="l1c")
anc_dependencies = dependencies.get_processing_inputs(data_type="ancillary")
Expand Down
Loading