From 3e7d0fe2b9807e659683f1448187a72882232360 Mon Sep 17 00:00:00 2001 From: Clifford Ondieki Date: Thu, 19 Feb 2026 20:48:59 +0100 Subject: [PATCH 1/7] fix: update config and add functionality for pypsa-de --- config/config.de.yaml | 5 ++++- scripts/pypsa-de/additional_functionality.py | 6 ++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/config/config.de.yaml b/config/config.de.yaml index 42e014928..8ab1e2dfb 100644 --- a/config/config.de.yaml +++ b/config/config.de.yaml @@ -445,7 +445,10 @@ solving: gas pipeline new: 0.3 H2 pipeline: 0.05 H2 pipeline retrofitted: 0.05 - fractional_last_unit_size: true + fractional_last_unit_size: true + solver: + name: highs + options: highs-default constraints: # The default CO2 budget uses the KSG targets, and the non CO2 emissions from the REMIND model in the KN2045_Mix scenario co2_budget_national: diff --git a/scripts/pypsa-de/additional_functionality.py b/scripts/pypsa-de/additional_functionality.py index e49bd55c3..0a457fbf6 100644 --- a/scripts/pypsa-de/additional_functionality.py +++ b/scripts/pypsa-de/additional_functionality.py @@ -228,6 +228,12 @@ def h2_import_limits(n, investment_year, limits_volume_max): & (n.links.bus1.str[:2] != ct) ] + if incoming.empty and outgoing.empty: + logger.warning( + f"No hydrogen import/export links found for {ct}; skipping limit enforcement." + ) + continue + incoming_p = ( n.model["Link-p"].loc[:, incoming] * n.snapshot_weightings.generators ).sum()
From 223634bc5643c67bca0a592df2067a97a421dad6 Mon Sep 17 00:00:00 2001 From: Clifford Ondieki Date: Thu, 19 Feb 2026 21:24:35 +0100 Subject: [PATCH 2/7] fix: update snakefile for pypsa-de --- scripts/pypsa-de/additional_functionality.py | 81 +++++++++++++++----- 1 file changed, 63 insertions(+), 18 deletions(-) diff --git a/scripts/pypsa-de/additional_functionality.py b/scripts/pypsa-de/additional_functionality.py index 0a457fbf6..de784cd47 100644 --- a/scripts/pypsa-de/additional_functionality.py +++ b/scripts/pypsa-de/additional_functionality.py @@ -9,6 +9,33 @@ logger = logging.getLogger(__name__) +def h2_import_limits_enabled(config): + return config.get("pypsa-de", {}).get("h2_import_limits", {}).get("enable", True) + + +def safe_add_constraint(model, expr, rhs, sense, name): + """Wrap solver call to skip constant-constant constraints.""" + try: + if sense == "<=": + model.add_constraints(expr <= rhs, name=name) + elif sense == ">=": + model.add_constraints(expr >= rhs, name=name) + else: + raise ValueError(f"Unsupported sense '{sense}'") + return True + except ValueError as exc: + if "Both sides of the constraint are constant" in str(exc): + logger.debug( + "Skipping constraint %s because both sides are constant (%s %s %s)", + name, + expr, + sense, + rhs, + ) + return False + raise + + def add_capacity_limits(n, investment_year, limits_capacity, sense="maximum"): for c in n.iterate_components(limits_capacity): logger.info(f"Adding {sense} constraints for {c.list_name}") @@ -208,6 +235,10 @@ def add_pos_neg_aux_variables(n, idx, var_name, infix): def h2_import_limits(n, investment_year, limits_volume_max): + if not h2_import_limits_enabled(n.config): + logger.info("Skipping H2 import limit constraints because pypsa-de.h2_import_limits.enable is False.") + return + for ct in limits_volume_max["h2_import"]: limit = 
limits_volume_max["h2_import"][ct][investment_year] * 1e6 @@ -251,7 +282,13 @@ def h2_import_limits(n, investment_year, limits_volume_max): cname = f"H2_import_limit-{ct}" - n.model.add_constraints(lhs <= limit, name=f"GlobalConstraint-{cname}") + added = safe_add_constraint( + n.model, + lhs, + limit, + "<=", + name=f"GlobalConstraint-{cname}", + ) if cname in n.global_constraints.index: logger.warning( @@ -259,20 +296,27 @@ def h2_import_limits(n, investment_year, limits_volume_max): ) n.global_constraints.drop(cname, inplace=True) - n.add( - "GlobalConstraint", - cname, - constant=limit, - sense="<=", - type="", - carrier_attribute="", - ) + if added: + n.add( + "GlobalConstraint", + cname, + constant=limit, + sense="<=", + type="", + carrier_attribute="", + ) logger.info("Adding H2 export ban") cname = f"H2_export_ban-{ct}" - n.model.add_constraints(lhs >= 0, name=f"GlobalConstraint-{cname}") + added_export = safe_add_constraint( + n.model, + lhs, + 0, + ">=", + name=f"GlobalConstraint-{cname}", + ) if cname in n.global_constraints.index: logger.warning( @@ -280,14 +324,15 @@ def h2_import_limits(n, investment_year, limits_volume_max): ) n.global_constraints.drop(cname, inplace=True) - n.add( - "GlobalConstraint", - cname, - constant=0, - sense=">=", - type="", - carrier_attribute="", - ) + if added_export: + n.add( + "GlobalConstraint", + cname, + constant=0, + sense=">=", + type="", + carrier_attribute="", + ) def h2_production_limits(n, investment_year, limits_volume_min, limits_volume_max): From 5f35aa908a1565cab147e415306223972f3243e5 Mon Sep 17 00:00:00 2001 From: Clifford Ondieki Date: Thu, 26 Feb 2026 23:27:28 +0100 Subject: [PATCH 3/7] Add STRANSIENT export feature --- scripts/export_stransient.py | 41 ++++++++++++++ stransient_loader.py | 100 +++++++++++++++++++++++++++++++++++ 2 files changed, 141 insertions(+) create mode 100644 scripts/export_stransient.py create mode 100644 stransient_loader.py diff --git a/scripts/export_stransient.py 
b/scripts/export_stransient.py new file mode 100644 index 000000000..eff70028d --- /dev/null +++ b/scripts/export_stransient.py @@ -0,0 +1,41 @@ +import pandas as pd +from pathlib import Path +base = Path('results/20260114_limit_cross_border_flows/KN2045_Mix/exports') +out = base.parent / 'stransient' +out.mkdir(parents=True, exist_ok=True) +bus_df = ( + pd.read_csv(base / 'buses.csv') + .rename(columns={'name': 'bus_id', 'v_nom': 'vn_kv'}) +) +bus_df['vm_pu'] = bus_df['v_mag_pu_set'] +strans_bus = bus_df[['bus_id', 'vn_kv', 'type', 'vm_pu']].copy() +strans_bus['area'] = 'DE' +strans_bus.to_csv(out / 'stransient_bus.csv', index=False) +lines = pd.read_csv(base / 'lines.csv').rename(columns={'name': 'branch_id'}) +lines['type'] = 'AC_line' +lines[['branch_id','bus0','bus1','r_pu','x_pu','length','i_nom','type']].to_csv(out / 'stransient_branch.csv', index=False) +gens = pd.read_csv(base / 'generators.csv') +q_source = 'q_nom' if 'q_nom' in gens.columns else 'q_set' if 'q_set' in gens.columns else None +rename_map = {'name': 'gen_id', 'p_nom': 'p_max_mw', 'carrier': 'type'} +if q_source is not None: + rename_map[q_source] = 'q_max_mvar' +else: + gens['q_max_mvar'] = 0.0 +gens = gens.rename(columns=rename_map) +required_cols = ['gen_id', 'bus', 'p_max_mw', 'q_max_mvar', 'type'] +gens[required_cols].to_csv(out / 'stransient_gen.csv', index=False) +loads = pd.read_csv(base / 'loads.csv') +p_source = 'p_mw' if 'p_mw' in loads.columns else 'p_set' if 'p_set' in loads.columns else None +q_source = 'q_mvar' if 'q_mvar' in loads.columns else 'q_set' if 'q_set' in loads.columns else None +load_rename = {'name': 'load_id'} +if p_source: + load_rename[p_source] = 'p_mw' +else: + loads['p_mw'] = 0.0 +if q_source: + load_rename[q_source] = 'q_mvar' +else: + loads['q_mvar'] = 0.0 +loads = loads.rename(columns=load_rename) +loads[['load_id', 'bus', 'p_mw', 'q_mvar']].to_csv(out / 'stransient_load.csv', index=False) +print('wired exports done') diff --git a/stransient_loader.py 
b/stransient_loader.py new file mode 100644 index 000000000..6e22059cc --- /dev/null +++ b/stransient_loader.py @@ -0,0 +1,100 @@ +from pathlib import Path +from typing import Optional + +import pandas as pd + +try: + import pandapower as pp +except ImportError as exc: # pragma: no cover - fallback for environments without pandapower + raise ImportError("pandapower is required to build a STRANSIENT grid but is not installed.") from exc + + +def _safe_per_km(value: float, length_km: float) -> float: + return float(value) / max(float(length_km), 1e-6) + + +def build_net_from_stransient( + folder: Path, + slack_bus_id: Optional[str] = None, + vm_pu: float = 1.02, +) -> pp.pandapowerNet: + """ + Translate exported STRANSIENT CSVs into a runnable pandapower network. + """ + folder = Path(folder) + if not folder.exists(): + raise FileNotFoundError(f"STRANSIENT folder not found: {folder}") + + # Required files + files = { + "buses": folder / "stransient_bus.csv", + "branches": folder / "stransient_branch.csv", + "generators": folder / "stransient_gen.csv", + "loads": folder / "stransient_load.csv", + } + + for name, path in files.items(): + if not path.exists(): + raise FileNotFoundError(f"Missing {name} export at {path}") + + bus_df = pd.read_csv(files["buses"]) + line_df = pd.read_csv(files["branches"]) + gen_df = pd.read_csv(files["generators"]) + load_df = pd.read_csv(files["loads"]) + + net = pp.create_empty_network() + bus_map: dict[str, int] = {} + for _, row in bus_df.iterrows(): + bus_id = row["bus_id"] + bus_map[bus_id] = pp.create_bus(net, vn_kv=row["vn_kv"], name=bus_id) + + slack_bus_id = slack_bus_id or (bus_df["bus_id"].iloc[0] if len(bus_df) else None) + if slack_bus_id and slack_bus_id in bus_map: + pp.create_ext_grid(net, bus=bus_map[slack_bus_id], vm_pu=vm_pu, name="STRANSIENT slack") + elif bus_map: + first_bus = next(iter(bus_map.values())) + pp.create_ext_grid(net, bus=first_bus, vm_pu=vm_pu, name="STRANSIENT slack (default)") + + for _, row in 
gen_df.iterrows(): + bus_name = row["bus"] + if bus_name not in bus_map: + continue + pp.create_sgen( + net, + bus=bus_map[bus_name], + p_mw=row.get("p_max_mw", 0.0), + q_mvar=row.get("q_max_mvar", 0.0), + name=row.get("gen_id", f"sgen-{bus_name}"), + ) + + for _, row in load_df.iterrows(): + bus_name = row["bus"] + if bus_name not in bus_map: + continue + pp.create_load( + net, + bus=bus_map[bus_name], + p_mw=row.get("p_mw", 0.0), + q_mvar=row.get("q_mvar", 0.0), + name=row.get("load_id", f"load-{bus_name}"), + ) + + for _, row in line_df.iterrows(): + bus0 = row["bus0"] + bus1 = row["bus1"] + if bus0 not in bus_map or bus1 not in bus_map: + continue + length_km = float(row.get("length", 1.0)) + pp.create_line_from_parameters( + net, + bus_map[bus0], + bus_map[bus1], + length_km=length_km, + r_ohm_per_km=_safe_per_km(row.get("r", 0.0), length_km), + x_ohm_per_km=_safe_per_km(row.get("x", 0.0), length_km), + c_nf_per_km=float(row.get("c_nf_per_km", 0.0)), + max_i_ka=float(row.get("i_nom", 1.0)), + name=row.get("branch_id", f"branch-{bus0}-{bus1}"), + ) + + return net From ba6278089f278eaf97e34cd43d6d0e07a51517dd Mon Sep 17 00:00:00 2001 From: Clifford Ondieki Date: Thu, 26 Feb 2026 23:32:46 +0100 Subject: [PATCH 4/7] Format STRANSIENT files --- scripts/export_stransient.py | 75 +++++++++++++++++++++--------------- stransient_loader.py | 19 ++++++--- 2 files changed, 58 insertions(+), 36 deletions(-) diff --git a/scripts/export_stransient.py b/scripts/export_stransient.py index eff70028d..b7420e416 100644 --- a/scripts/export_stransient.py +++ b/scripts/export_stransient.py @@ -1,41 +1,56 @@ -import pandas as pd from pathlib import Path -base = Path('results/20260114_limit_cross_border_flows/KN2045_Mix/exports') -out = base.parent / 'stransient' + +import pandas as pd + +base = Path("results/20260114_limit_cross_border_flows/KN2045_Mix/exports") +out = base.parent / "stransient" out.mkdir(parents=True, exist_ok=True) -bus_df = ( - pd.read_csv(base / 'buses.csv') - 
.rename(columns={'name': 'bus_id', 'v_nom': 'vn_kv'}) +bus_df = pd.read_csv(base / "buses.csv").rename( + columns={"name": "bus_id", "v_nom": "vn_kv"} +) +bus_df["vm_pu"] = bus_df["v_mag_pu_set"] +strans_bus = bus_df[["bus_id", "vn_kv", "type", "vm_pu"]].copy() +strans_bus["area"] = "DE" +strans_bus.to_csv(out / "stransient_bus.csv", index=False) +lines = pd.read_csv(base / "lines.csv").rename(columns={"name": "branch_id"}) +lines["type"] = "AC_line" +lines[["branch_id", "bus0", "bus1", "r_pu", "x_pu", "length", "i_nom", "type"]].to_csv( + out / "stransient_branch.csv", index=False +) +gens = pd.read_csv(base / "generators.csv") +q_source = ( + "q_nom" if "q_nom" in gens.columns else "q_set" if "q_set" in gens.columns else None ) -bus_df['vm_pu'] = bus_df['v_mag_pu_set'] -strans_bus = bus_df[['bus_id', 'vn_kv', 'type', 'vm_pu']].copy() -strans_bus['area'] = 'DE' -strans_bus.to_csv(out / 'stransient_bus.csv', index=False) -lines = pd.read_csv(base / 'lines.csv').rename(columns={'name': 'branch_id'}) -lines['type'] = 'AC_line' -lines[['branch_id','bus0','bus1','r_pu','x_pu','length','i_nom','type']].to_csv(out / 'stransient_branch.csv', index=False) -gens = pd.read_csv(base / 'generators.csv') -q_source = 'q_nom' if 'q_nom' in gens.columns else 'q_set' if 'q_set' in gens.columns else None -rename_map = {'name': 'gen_id', 'p_nom': 'p_max_mw', 'carrier': 'type'} +rename_map = {"name": "gen_id", "p_nom": "p_max_mw", "carrier": "type"} if q_source is not None: - rename_map[q_source] = 'q_max_mvar' + rename_map[q_source] = "q_max_mvar" else: - gens['q_max_mvar'] = 0.0 + gens["q_max_mvar"] = 0.0 gens = gens.rename(columns=rename_map) -required_cols = ['gen_id', 'bus', 'p_max_mw', 'q_max_mvar', 'type'] -gens[required_cols].to_csv(out / 'stransient_gen.csv', index=False) -loads = pd.read_csv(base / 'loads.csv') -p_source = 'p_mw' if 'p_mw' in loads.columns else 'p_set' if 'p_set' in loads.columns else None -q_source = 'q_mvar' if 'q_mvar' in loads.columns else 'q_set' if 
'q_set' in loads.columns else None -load_rename = {'name': 'load_id'} +required_cols = ["gen_id", "bus", "p_max_mw", "q_max_mvar", "type"] +gens[required_cols].to_csv(out / "stransient_gen.csv", index=False) +loads = pd.read_csv(base / "loads.csv") +p_source = ( + "p_mw" if "p_mw" in loads.columns else "p_set" if "p_set" in loads.columns else None +) +q_source = ( + "q_mvar" + if "q_mvar" in loads.columns + else "q_set" + if "q_set" in loads.columns + else None +) +load_rename = {"name": "load_id"} if p_source: - load_rename[p_source] = 'p_mw' + load_rename[p_source] = "p_mw" else: - loads['p_mw'] = 0.0 + loads["p_mw"] = 0.0 if q_source: - load_rename[q_source] = 'q_mvar' + load_rename[q_source] = "q_mvar" else: - loads['q_mvar'] = 0.0 + loads["q_mvar"] = 0.0 loads = loads.rename(columns=load_rename) -loads[['load_id', 'bus', 'p_mw', 'q_mvar']].to_csv(out / 'stransient_load.csv', index=False) -print('wired exports done') +loads[["load_id", "bus", "p_mw", "q_mvar"]].to_csv( + out / "stransient_load.csv", index=False +) +print("wired exports done") diff --git a/stransient_loader.py b/stransient_loader.py index 6e22059cc..55c91b077 100644 --- a/stransient_loader.py +++ b/stransient_loader.py @@ -1,12 +1,15 @@ from pathlib import Path -from typing import Optional import pandas as pd try: import pandapower as pp -except ImportError as exc: # pragma: no cover - fallback for environments without pandapower - raise ImportError("pandapower is required to build a STRANSIENT grid but is not installed.") from exc +except ( + ImportError +) as exc: # pragma: no cover - fallback for environments without pandapower + raise ImportError( + "pandapower is required to build a STRANSIENT grid but is not installed." 
+ ) from exc def _safe_per_km(value: float, length_km: float) -> float: @@ -15,7 +18,7 @@ def _safe_per_km(value: float, length_km: float) -> float: def build_net_from_stransient( folder: Path, - slack_bus_id: Optional[str] = None, + slack_bus_id: str | None = None, vm_pu: float = 1.02, ) -> pp.pandapowerNet: """ @@ -50,10 +53,14 @@ def build_net_from_stransient( slack_bus_id = slack_bus_id or (bus_df["bus_id"].iloc[0] if len(bus_df) else None) if slack_bus_id and slack_bus_id in bus_map: - pp.create_ext_grid(net, bus=bus_map[slack_bus_id], vm_pu=vm_pu, name="STRANSIENT slack") + pp.create_ext_grid( + net, bus=bus_map[slack_bus_id], vm_pu=vm_pu, name="STRANSIENT slack" + ) elif bus_map: first_bus = next(iter(bus_map.values())) - pp.create_ext_grid(net, bus=first_bus, vm_pu=vm_pu, name="STRANSIENT slack (default)") + pp.create_ext_grid( + net, bus=first_bus, vm_pu=vm_pu, name="STRANSIENT slack (default)" + ) for _, row in gen_df.iterrows(): bus_name = row["bus"] From f679a1450eb920a59a937659f7f81b320c292018 Mon Sep 17 00:00:00 2001 From: Clifford Ondieki Date: Thu, 26 Feb 2026 23:39:47 +0100 Subject: [PATCH 5/7] Refactor export_stransient.py for argparse/snakemake --- scripts/export_stransient.py | 144 ++++++++++++++++++++++------------- 1 file changed, 92 insertions(+), 52 deletions(-) diff --git a/scripts/export_stransient.py b/scripts/export_stransient.py index b7420e416..f28af53fe 100644 --- a/scripts/export_stransient.py +++ b/scripts/export_stransient.py @@ -1,56 +1,96 @@ +import argparse from pathlib import Path import pandas as pd -base = Path("results/20260114_limit_cross_border_flows/KN2045_Mix/exports") -out = base.parent / "stransient" -out.mkdir(parents=True, exist_ok=True) -bus_df = pd.read_csv(base / "buses.csv").rename( - columns={"name": "bus_id", "v_nom": "vn_kv"} -) -bus_df["vm_pu"] = bus_df["v_mag_pu_set"] -strans_bus = bus_df[["bus_id", "vn_kv", "type", "vm_pu"]].copy() -strans_bus["area"] = "DE" -strans_bus.to_csv(out / "stransient_bus.csv", 
index=False) -lines = pd.read_csv(base / "lines.csv").rename(columns={"name": "branch_id"}) -lines["type"] = "AC_line" -lines[["branch_id", "bus0", "bus1", "r_pu", "x_pu", "length", "i_nom", "type"]].to_csv( - out / "stransient_branch.csv", index=False -) -gens = pd.read_csv(base / "generators.csv") -q_source = ( - "q_nom" if "q_nom" in gens.columns else "q_set" if "q_set" in gens.columns else None -) -rename_map = {"name": "gen_id", "p_nom": "p_max_mw", "carrier": "type"} -if q_source is not None: - rename_map[q_source] = "q_max_mvar" -else: - gens["q_max_mvar"] = 0.0 -gens = gens.rename(columns=rename_map) -required_cols = ["gen_id", "bus", "p_max_mw", "q_max_mvar", "type"] -gens[required_cols].to_csv(out / "stransient_gen.csv", index=False) -loads = pd.read_csv(base / "loads.csv") -p_source = ( - "p_mw" if "p_mw" in loads.columns else "p_set" if "p_set" in loads.columns else None -) -q_source = ( - "q_mvar" - if "q_mvar" in loads.columns - else "q_set" - if "q_set" in loads.columns - else None -) -load_rename = {"name": "load_id"} -if p_source: - load_rename[p_source] = "p_mw" -else: - loads["p_mw"] = 0.0 -if q_source: - load_rename[q_source] = "q_mvar" -else: - loads["q_mvar"] = 0.0 -loads = loads.rename(columns=load_rename) -loads[["load_id", "bus", "p_mw", "q_mvar"]].to_csv( - out / "stransient_load.csv", index=False -) -print("wired exports done") + +def export_stransient(base: Path, out: Path): + out.mkdir(parents=True, exist_ok=True) + bus_df = pd.read_csv(base / "buses.csv").rename( + columns={"name": "bus_id", "v_nom": "vn_kv"} + ) + bus_df["vm_pu"] = bus_df["v_mag_pu_set"] + strans_bus = bus_df[["bus_id", "vn_kv", "type", "vm_pu"]].copy() + strans_bus["area"] = "DE" + strans_bus.to_csv(out / "stransient_bus.csv", index=False) + lines = pd.read_csv(base / "lines.csv").rename(columns={"name": "branch_id"}) + lines["type"] = "AC_line" + lines[ + ["branch_id", "bus0", "bus1", "r_pu", "x_pu", "length", "i_nom", "type"] + ].to_csv(out / 
"stransient_branch.csv", index=False) + gens = pd.read_csv(base / "generators.csv") + q_source = ( + "q_nom" + if "q_nom" in gens.columns + else "q_set" + if "q_set" in gens.columns + else None + ) + rename_map = {"name": "gen_id", "p_nom": "p_max_mw", "carrier": "type"} + if q_source is not None: + rename_map[q_source] = "q_max_mvar" + else: + gens["q_max_mvar"] = 0.0 + gens = gens.rename(columns=rename_map) + required_cols = ["gen_id", "bus", "p_max_mw", "q_max_mvar", "type"] + gens[required_cols].to_csv(out / "stransient_gen.csv", index=False) + loads = pd.read_csv(base / "loads.csv") + p_source = ( + "p_mw" + if "p_mw" in loads.columns + else "p_set" + if "p_set" in loads.columns + else None + ) + q_source = ( + "q_mvar" + if "q_mvar" in loads.columns + else "q_set" + if "q_set" in loads.columns + else None + ) + load_rename = {"name": "load_id"} + if p_source: + load_rename[p_source] = "p_mw" + else: + loads["p_mw"] = 0.0 + if q_source: + load_rename[q_source] = "q_mvar" + else: + loads["q_mvar"] = 0.0 + loads = loads.rename(columns=load_rename) + loads[["load_id", "bus", "p_mw", "q_mvar"]].to_csv( + out / "stransient_load.csv", index=False + ) + print("wired exports done") + + +if __name__ == "__main__": + if "snakemake" in globals(): + snakemake = globals()["snakemake"] + base_dir = Path(snakemake.input.exports_dir) + out_dir = Path(snakemake.output.stransient_dir) + export_stransient(base_dir, out_dir) + else: + parser = argparse.ArgumentParser( + description="Export STRANSIENT grids from PyPSA" + ) + parser.add_argument( + "--exports-dir", + type=Path, + default=Path( + "results/20260114_limit_cross_border_flows/KN2045_Mix/exports" + ), + help="Directory containing PyPSA export CSVs", + ) + parser.add_argument( + "--out-dir", + type=Path, + default=None, + help="Output directory. 
Defaults to /../stransient", + ) + args = parser.parse_args() + + base_dir = args.exports_dir + out_dir = args.out_dir if args.out_dir else base_dir.parent / "stransient" + export_stransient(base_dir, out_dir) From 0a10e8fde0fd5f9cc64fd0fa8e2b1a4359e59d5f Mon Sep 17 00:00:00 2001 From: Clifford Ondieki Date: Thu, 26 Feb 2026 23:52:30 +0100 Subject: [PATCH 6/7] Apply pre-commit linters to all files --- scripts/pypsa-de/additional_functionality.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/pypsa-de/additional_functionality.py b/scripts/pypsa-de/additional_functionality.py index de784cd47..8b0e5a95b 100644 --- a/scripts/pypsa-de/additional_functionality.py +++ b/scripts/pypsa-de/additional_functionality.py @@ -236,7 +236,9 @@ def add_pos_neg_aux_variables(n, idx, var_name, infix): def h2_import_limits(n, investment_year, limits_volume_max): if not h2_import_limits_enabled(n.config): - logger.info("Skipping H2 import limit constraints because pypsa-de.h2_import_limits.enable is False.") + logger.info( + "Skipping H2 import limit constraints because pypsa-de.h2_import_limits.enable is False." + ) return for ct in limits_volume_max["h2_import"]: From 4fb6d4e5ef3f3488e3f2224cabe6a915fffe2804 Mon Sep 17 00:00:00 2001 From: Clifford Ondieki Date: Fri, 27 Feb 2026 00:51:15 +0100 Subject: [PATCH 7/7] Document STRANSIENT exports in scripts and README --- README.md | 2 +- scripts/export_stransient.py | 17 +++++++++++++++++ stransient_loader.py | 26 ++++++++++++++++++++++++++ 3 files changed, 44 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index c4832758b..d860c27e4 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,7 @@ PyPSA-DE is a softfork of PyPSA-EUR. 
As such, large parts of the functionality a - Additional constraints that limit maximum capacity of specific technologies - Import constraints on Efuels, hydrogen and electricity - Renewable build out according to the Wind-an-Land, Wind-auf-See and Solarstrategie laws -- A comprehensive reporting module that exports Capacity Expansion, Primary/Secondary/Final Energy, CO2 Emissions per Sector, Trade, Investments, and more. +- A comprehensive reporting module that exports Capacity Expansion, Primary/Secondary/Final Energy, CO2 Emissions per Sector, Trade, Investments, and more. Including a new `STRANSIENT` utility script for power systems stability exports. - Plotting functionality to compare different scenarios - Electricity Network development until 2030 (and for AC beyond) according to the NEP23 - Offshore development until 2030 according to the Offshore NEP23 diff --git a/scripts/export_stransient.py b/scripts/export_stransient.py index f28af53fe..e2e2eefda 100644 --- a/scripts/export_stransient.py +++ b/scripts/export_stransient.py @@ -1,3 +1,10 @@ +""" +Utility script to extract network components from PyPSA-DE exports and format +them into STRANSIENT-compatible CSV files. + +This can be run standalone via the CLI, or automatically as part of a Snakemake workflow. +""" + import argparse from pathlib import Path @@ -5,6 +12,16 @@ def export_stransient(base: Path, out: Path): + """ + Parses PyPSA network CSV exports and translates them into the STRANSIENT format. + + Parameters + ---------- + base : Path + Input directory containing standard PyPSA exports (buses.csv, lines.csv, generators.csv, loads.csv). + out : Path + Output directory to save the formatted STRANSIENT CSV files. 
+ """ out.mkdir(parents=True, exist_ok=True) bus_df = pd.read_csv(base / "buses.csv").rename( columns={"name": "bus_id", "v_nom": "vn_kv"} diff --git a/stransient_loader.py b/stransient_loader.py index 55c91b077..b3fd7814f 100644 --- a/stransient_loader.py +++ b/stransient_loader.py @@ -1,3 +1,8 @@ +""" +Provides utilities to load STRANSIENT export CSV files generated by PyPSA-DE +and translate them back into an executable Pandapower network. +""" + from pathlib import Path import pandas as pd @@ -23,6 +28,27 @@ def build_net_from_stransient( ) -> pp.pandapowerNet: """ Translate exported STRANSIENT CSVs into a runnable pandapower network. + + This function expects four specific CSV exports from the export_stransient script: + - stransient_bus.csv + - stransient_branch.csv + - stransient_gen.csv + - stransient_load.csv + + Parameters + ---------- + folder : Path + Directory where the STRANSIENT CSV files are located. + slack_bus_id : str | None, optional + ID of the bus to assign as the external grid (slack node). If None, the + first bus found in the dataset is used. Default is None. + vm_pu : float, optional + Voltage magnitude setpoint (in p.u.) for the slack node. Default is 1.02. + + Returns + ------- + pp.pandapowerNet + A fully constructed Pandapower network. """ folder = Path(folder) if not folder.exists():