diff --git a/.codex/skills/pyhazards-pr-maintainer/SKILL.md b/.codex/skills/pyhazards-pr-maintainer/SKILL.md new file mode 100644 index 00000000..62c6cc43 --- /dev/null +++ b/.codex/skills/pyhazards-pr-maintainer/SKILL.md @@ -0,0 +1,100 @@ +--- +name: pyhazards-pr-maintainer +description: Review and process pull requests for the PyHazard/PyHazards repository, especially model contribution PRs. Use when asked to inspect a new PR, decide whether the implementation matches the described model or paper, run the catalog smoke checks, update generated model tables/docs, merge a good PR, or draft an actionable blocker comment for a weak PR without rediscovering the repo workflow. +--- + +# PyHazards PR Maintainer + +## Overview + +Use the model-catalog workflow that already exists in this repo instead of re-deriving the review process from scratch. +Start from the established scripts and generated-doc pipeline, then inspect only the PR-specific files that the scripts flag. + +## Fast Start + +Open these files first and treat them as the source of truth for model PR handling: + +- `scripts/review_model_pr.py` +- `scripts/smoke_test_models.py` +- `scripts/render_model_docs.py` +- `scripts/verify_table_entries.py` +- `pyhazards/model_catalog.py` +- `pyhazards/model_cards/*.yaml` +- `.github/workflows/model-pr-validation.yml` +- `.github/workflows/model-pr-bot.yml` +- `.github/workflows/model-docs-sync.yml` +- `.github/IMPLEMENTATION.md` +- `docs/README.md` + +Do not begin with broad repo discovery unless one of those files is missing or broken. + +## Workflow + +1. Identify whether the PR is a model PR. + Use the changed files, the PR description, and `pyhazards/model_cards/*.yaml`. + If it is not a model PR, say so and fall back to normal review. + +2. Run the existing automated review path first. 
+   If GitHub event data and a base SHA are available, run:
+   ```bash
+   python scripts/review_model_pr.py \
+     --event "$GITHUB_EVENT_PATH" \
+     --base-sha "<base-sha>" \
+     --report-json /tmp/model-pr-review.json \
+     --report-md /tmp/model-pr-review.md
+   ```
+   If you are reviewing locally without GitHub event payloads, inspect the current diff and run the targeted checks below instead.
+
+3. For touched models, use the cataloged checks instead of inventing ad hoc smoke tests.
+   Run:
+   ```bash
+   python scripts/smoke_test_models.py --models <model-name> [<model-name> ...]
+   python scripts/render_model_docs.py --check
+   python -c "import pyhazards; print(pyhazards.__version__)"
+   ```
+   When you changed model cards or model code, also run:
+   ```bash
+   python scripts/render_model_docs.py
+   python scripts/verify_table_entries.py
+   python -m pytest tests/test_model_catalog.py
+   ```
+   If the change should be visible on the published docs site, rebuild the committed HTML too:
+   ```bash
+   cd docs
+   sphinx-build -b html source build/html
+   cp -r build/html/* .
+   ```
+   Do not stop after updating `docs/source/`; this repo publishes the committed `docs/` HTML artifacts.
+
+4. Decide whether to fix or block.
+   Patch the PR yourself when the issue is localized and the correct change is clear.
+   Write a blocker comment only when the implementation is materially off-spec and fixing it would take substantial time.
+   Reuse the report structure from `scripts/review_model_pr.py` so the contributor gets concrete action items.
+
+5. If the PR passes, keep docs aligned and merge when the user asked for processing rather than pure review.
+   Generated model tables come from `pyhazards/model_cards/*.yaml`, so a new hazard scenario appears as a new section automatically after `python scripts/render_model_docs.py`.
+   Do not hand-edit `docs/source/pyhazards_models.rst` or the generated module pages.
+ If a model should remain implemented but not appear in the public catalog, set `include_in_public_catalog: false` in its model card instead of editing generated docs. + +6. Report back concisely. + Include: + - whether the PR passed or was blocked + - which model names were touched + - which commands/tests were run + - whether merge happened + - any remaining permission or workflow limitations + +## Review Standard + +- Treat `pyhazards/model_catalog.py` plus the model cards as the source of truth for model-table/docs behavior. +- Require a valid builder contract: `task` support, `**kwargs`, registry wiring, and explicit shape validation. +- Require a smoke test that matches the card. +- Prefer targeted inspection of touched model files over reading unrelated modules. +- Keep blocker comments technical and specific; do not be vague. + +## Operational Notes + +- The GitHub Actions bot comments on blocked model PRs, merges passing ones, and syncs generated model docs on push. +- The published site is driven by committed files under `docs/`, so after catalog changes you must think in two stages: generate `docs/source`, then build/copy the HTML site artifacts. +- Email notification is intentionally not part of the workflow. +- If GitHub posting or merging is unavailable in the current environment, still run the local review path and return the exact comment or merge recommendation to the user. diff --git a/.codex/skills/pyhazards-pr-maintainer/agents/openai.yaml b/.codex/skills/pyhazards-pr-maintainer/agents/openai.yaml new file mode 100644 index 00000000..746b88ee --- /dev/null +++ b/.codex/skills/pyhazards-pr-maintainer/agents/openai.yaml @@ -0,0 +1,4 @@ +interface: + display_name: "PyHazards PR Maintainer" + short_description: "Review and process PyHazards PRs" + default_prompt: "Use $pyhazards-pr-maintainer to process this PyHazards model PR." 
diff --git a/.codex/skills/pyhazards-roadmap-team/SKILL.md b/.codex/skills/pyhazards-roadmap-team/SKILL.md new file mode 100644 index 00000000..90f6edd9 --- /dev/null +++ b/.codex/skills/pyhazards-roadmap-team/SKILL.md @@ -0,0 +1,69 @@ +--- +name: pyhazards-roadmap-team +description: Use when asked to turn `pyhazard_plan.pdf` into a staged multi-agent execution plan for this repository, or to run the PyHazards hazard-expansion roadmap with fixed worker ownership, copy-paste agent prompts, phase gates, and an integrator workflow instead of re-deriving the team split. +--- + +# PyHazards Roadmap Team + +## Overview + +Use the checked-in execution package in `.github/ROADMAP_EXECUTION.md` as the +source of truth for the long-range PyHazards roadmap, and use +`docs/source/appendix_a_coverage.rst` as the audited coverage baseline for +what is still missing. + +Do not re-split ownership or redesign the phase order unless the user +explicitly asks to change the roadmap. + +## Fast Start + +Open these files first: + +- `.github/ROADMAP_EXECUTION.md` +- `pyhazard_plan.pdf` +- `docs/source/appendix_a_coverage.rst` +- `.github/IMPLEMENTATION.md` +- `pyhazards/models/__init__.py` +- `pyhazards/datasets/__init__.py` +- `pyhazards/model_catalog.py` +- `scripts/render_model_docs.py` +- `scripts/smoke_test_models.py` +- `scripts/verify_table_entries.py` + +## Workflow + +1. Identify the wave the user wants to execute. + If unspecified, start from the earliest incomplete wave in + `.github/ROADMAP_EXECUTION.md`, using the Appendix A coverage page to avoid + counting variants or experimental wrappers as finished baseline work. + +2. Keep file ownership fixed. + Worker agents must not edit shared choke-point files. Only the integrator + owns registries, top-level docs, generated docs, and workflows. + +3. Hand out the exact agent prompt from `.github/ROADMAP_EXECUTION.md`. + Do not paraphrase unless the user asks for a smaller or larger team. + +4. 
Require manifests from worker agents. + Every worker should return: + - changed files + - registration changes + - model-card changes + - config names + - tests run + - open issues + +5. Use the integrator workflow for merge and validation. + The integrator must run the full validation set listed in the execution + package before merge or push. + +## Guardrails + +- Keep the current `build_model(name, task, **kwargs)` contract intact. +- Keep public model discovery driven by model cards and generated docs. +- Keep `catalog_status` truthful so same-paper variants and experimental + wrappers do not inflate Appendix A coverage. +- Keep `docs/` published artifacts integrator-owned. +- Delay storm foundation adapters until the shared TC evaluator is stable. +- If a worker needs a shared-file change, escalate to the integrator instead of + patching around ownership. diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md deleted file mode 100644 index 3a22752a..00000000 --- a/.github/CONTRIBUTING.md +++ /dev/null @@ -1,125 +0,0 @@ -# 🧩 Contributing to This Project - -Thank you for your interest in contributing! We welcome all contributions — bug reports, feature requests, pull -requests, and documentation improvements. - -Please follow the guidelines below to ensure a smooth contribution process. - ---- - -## 🐛 Reporting Bugs - -1. Check if the bug has already been reported. -2. Open a new [bug report](https://github.com/your-org/your-repo/issues/new?template=bug_report.md). -3. Include: - - Clear reproduction steps - - Expected vs. actual behavior - - Logs, screenshots, or minimal code snippets if possible - ---- - -## 🚀 Suggesting Features - -1. Check if a similar feature request already exists. -2. Open a new [feature request](https://github.com/your-org/your-repo/issues/new?template=feature_request.md). -3. 
Include: - - Motivation and use case - - Desired functionality - - Alternatives you considered - ---- - -## 🔧 Submitting Pull Requests - -> All code changes should be proposed via Pull Request (PR) from a feature branch — **do not commit directly to `main` -**. - -### Steps: - -1. Fork the repository -2. Create a new branch: - -```shell -git checkout -b feat/your-feature-name -``` - -3. Commit your changes: - -```shell -git push origin feat/your-feature-name -``` - -4. Push to your fork: - -```shell -git push origin feat/your-feature-name -``` - -5. Open a PR against the `main` branch - ---- - -## 🔧 Internal Code Submission Guideline - -For internal team members with write access to the repository: - -1. Always Use Feature/Fix Branches - -- Never commit directly to the main or develop branch. -- Create a new branch for each feature, bug fix. - -```shell -git checkout -b feat/your-feature-name -``` - -```shell -git checkout -b fix/your-fix-name -``` - -2. Keep Commits Clean & Meaningful - -- feat: add data loader for graph dataset -- fix: resolve crash on edge cases - -Use clear commit messages following the format: - -```shell -: -``` - -3. Test Before Pushing - -- Test your implementation in `example.py`, and compare the performance with the results in original paper. - -4. Push to Internal Branch - -- Always run `git pull origin pygip-release` before pushing your changes -- Submit a pull request targeting the `pygip-release` branch -- Write a brief summary describing the features you’ve added, how to run your method, and how to evaluate its - performance - -Push to the remote feature branch. - -```shell -git push origin feat/your-feature-name -``` - ---- - -## 📄 Code Style & Testing - -- Follow existing code conventions -- Use meaningful names and comments -- Add tests for new features or bug fixes -- Run all tests before submitting a PR - ---- - -## 💬 Questions or Help? 
- -- Use Discussions for general questions -- Feel free to open an issue if something is unclear - ---- - -Thank you for contributing! 🙌 \ No newline at end of file diff --git a/.github/IMPLEMENTATION.md b/.github/IMPLEMENTATION.md index 71b6e3e3..c254a38b 100644 --- a/.github/IMPLEMENTATION.md +++ b/.github/IMPLEMENTATION.md @@ -1,238 +1,183 @@ -## Implementation +# Model Implementation Guide for Collaborators -PyGIP is built to be modular and extensible, allowing contributors to implement their own attack and defense strategies. -Below, we detail how to extend the framework by implementing custom attack and defense classes, with a focus on how to -leverage the provided dataset structure. +This guide explains how to port a model from an original paper repository into PyHazards with minimal friction and maximum reproducibility. -### Dataset +## 1. Start from a paper-to-library mapping -The `Dataset` class standardizes the data format across PyGIP. Here’s its structure: +Before coding, build a short mapping table from the original repo: + +- paper module/class name -> new `pyhazards/models/.py` class +- paper training inputs/targets -> PyHazards `DataBundle` split format +- paper config keys -> builder kwargs/defaults in `register_model(...)` +- paper loss/metrics -> PyTorch loss and optional `pyhazards.metrics` usage + +This avoids ad-hoc ports and makes review easier. + +## 2. 
Define the PyHazards model contract first + +In PyHazards, models are built with: ```python -class Dataset(object): - def __init__(self, api_type='dgl', path='./data'): - assert api_type in {'dgl', 'pyg'}, 'API type must be dgl or pyg' - self.api_type = api_type - self.path = path - self.dataset_name = self.get_name() - - # DGLGraph or PyGData - self.graph_dataset = None - self.graph_data = None - - # meta data - self.num_nodes = 0 - self.num_features = 0 - self.num_classes = 0 +from pyhazards.models import build_model +model = build_model(name="", task="", **kwargs) ``` -- **Importance**: We are currently using the default api_type='pyg' to load the data. It is important to note that when - api_type='pyg', `self.graph_data` should be an instance of `torch_geometric.data.Data`. In your implementation, make - sure - to use our defined Dataset class to build your code. +Your builder must: -### Device +- accept `task: str` +- accept model hyperparameters (for example, `in_dim`, `hidden_dim`) +- return `nn.Module` +- validate unsupported tasks early with clear errors -To ensure consistency and simplicity when managing CUDA devices across attacks and defenses, we follow the convention -below: +For portability, always include `**kwargs` in the builder signature so extra config keys do not break the call path. -- Both `BaseAttack` and `BaseDefense` define the device attribute `self.device` in their `__init__()` method. -- Subclasses should not manually redefine or modify the device logic. -- If you are implementing a custom attack or defense class, simply inherit from `BaseAttack` or `BaseDefense`. -- You can directly access the device using: `x = x.to(self.device)` +## 3. Implement the model module -### Implementing Attack +Create `pyhazards/models/.py` and include: -To create a custom attack, you need to extend the abstract base class `BaseAttack`. Here’s the structure -of `BaseAttack`: +1. main model class inheriting `nn.Module` +2. optional helper blocks/losses +3. 
builder function `_builder(...)` -```python -class BaseAttack(ABC): - supported_api_types = set() - supported_datasets = set() - - def __init__(self, dataset: Dataset, attack_node_fraction: float = None, model_path: str = None, - device: Optional[Union[str, torch.device]] = None): - self.device = torch.device(device) if device else get_device() - print(f"Using device: {self.device}") - - # graph data - self.dataset = dataset - self.graph_dataset = dataset.graph_dataset - self.graph_data = dataset.graph_data - - # meta data - self.num_nodes = dataset.num_nodes - self.num_features = dataset.num_features - self.num_classes = dataset.num_classes - - # params - self.attack_node_fraction = attack_node_fraction - self.model_path = model_path - - self._check_dataset_compatibility() -``` +Use explicit input-shape checks in `forward()` (existing models do this) so failures are actionable. -To implement your own attack: +Template: -1. **Inherit from `BaseAttack`**: - Create a new class that inherits from `BaseAttack`. You’ll need to provide the following required parameters in the - constructor: +```python +from __future__ import annotations +import torch +import torch.nn as nn + + +class MyModel(nn.Module): + def __init__(self, in_dim: int, out_dim: int, hidden_dim: int = 128): + super().__init__() + self.net = nn.Sequential( + nn.Linear(in_dim, hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, out_dim), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 2: + raise ValueError(f"Expected (B, F), got {tuple(x.shape)}") + return self.net(x) + + +def my_model_builder(task: str, in_dim: int, out_dim: int, hidden_dim: int = 128, **kwargs) -> nn.Module: + _ = kwargs + if task.lower() not in {"classification", "regression"}: + raise ValueError(f"MyModel does not support task='{task}'") + return MyModel(in_dim=in_dim, out_dim=out_dim, hidden_dim=hidden_dim) +``` -- `dataset`: An instance of the `Dataset` class (see below for details). 
-- `attack_node_fraction`: A float between 0 and 1 representing the fraction of nodes to attack. -- `model_path` (optional): A string specifying the path to a pre-trained model (defaults to `None`). +## 4. Register the model in the registry -You need to implement following methods: +Edit `pyhazards/models/__init__.py`: -- `attack()`: Add main attack logic here. If multiple attack types are supported, define the attack type as an optional - argument to this function. - For each specific attack type, implement a corresponding helper function such as `_attack_type1()` - or `_attack_type2()`, - and call the appropriate helper inside `attack()` based on the given method name. -- `_load_model()`: Load victim model. -- `_train_target_model()`: Train victim model. -- `_train_attack_model()`: Train attack model. -- `_helper_func()`(optional): Add your helper functions based on your needs, but keep the methods private. +1. import the class and builder +2. add symbols to `__all__` +3. call `register_model(...)` with stable defaults -2. **Implement the `attack()` Method**: - Override the abstract `attack()` method with your attack logic, and return a dict of results. 
For example: +Example: ```python -class MyCustomAttack(BaseAttack): - supported_api_types = {"pyg"} # "pyg" or "dgl" - supported_datasets = {"Cora"} # you can leave this blank if your method supports all datasets - - def __init__(self, dataset: Dataset, attack_node_fraction: float, model_path: str = None): - super().__init__(dataset, attack_node_fraction, model_path) - # Additional initialization if needed - - def attack(self): - # Example: Access the graph and perform an attack - print(f"Attacking {self.attack_node_fraction * 100}% of nodes") - num_nodes = self.graph.num_nodes() - print(f"Graph has {num_nodes} nodes") - # Add your attack logic here - return { - 'metric1': 'metric1 here', - 'metric2': 'metric2 here' - } - - def _load_model(self): - # add your logic here - pass - - def _train_target_model(self): - # add your logic here - pass - - def _train_attack_model(self): - # add your logic here - pass +from .my_model import MyModel, my_model_builder + +register_model( + "my_model", + my_model_builder, + defaults={"hidden_dim": 128}, +) ``` -### Implementing Defense +If you skip registration, `build_model(name="my_model", ...)` will fail. -To create a custom defense, you need to extend the abstract base class `BaseDefense`. Here’s the structure -of `BaseDefense`: +## 5. 
Match data format to your forward signature -```python -class BaseDefense(ABC): - supported_api_types = set() - supported_datasets = set() +`Trainer` supports two input patterns: - def __init__(self, dataset: Dataset, attack_node_fraction: float, - device: Optional[Union[str, torch.device]] = None): - self.device = torch.device(device) if device else get_device() - print(f"Using device: {self.device}") +- tensor pairs: `inputs` and `targets` as tensors +- dataset objects: `inputs` as `torch.utils.data.Dataset` (recommended for graph/structured inputs) - # graph data - self.dataset = dataset - self.graph_dataset = dataset.graph_dataset - self.graph_data = dataset.graph_data +For complex models (for example graph models), return dict-like batches from your dataset/collate function so `model(batch_dict)` works directly. - # meta data - self.num_nodes = dataset.num_nodes - self.num_features = dataset.num_features - self.num_classes = dataset.num_classes +Use `DataBundle` metadata to make construction explicit: - # params - self.attack_node_fraction = attack_node_fraction +- `FeatureSpec(input_dim=..., channels=...)` +- `LabelSpec(task_type="classification|regression|segmentation", num_targets=...)` - self._check_dataset_compatibility() -``` +## 6. Port training logic carefully -To implement your own defense: +Do not copy the paper repo training loop verbatim unless required. In most cases: -1. **Inherit from `BaseDefense`**: - Create a new class that inherits from `BaseDefense`. You’ll need to provide the following required parameters in the - constructor: +- keep model logic inside `nn.Module` +- use `pyhazards.engine.Trainer` for fit/evaluate/predict +- keep custom losses as separate classes in the model module -- `dataset`: An instance of the `Dataset` class (see below for details). -- `attack_node_fraction`: A float between 0 and 1 representing the fraction of nodes to attack. 
-- `model_path` (optional): A string specifying the path to a pre-trained model (defaults to `None`). +If the paper model needs custom multi-output behavior, document output shape and expected loss computation in the PR. -You need to implement following methods: +## 7. Add a reproducible smoke test -- `defense()`: Add main defense logic here. If multiple defense types are supported, define the defense type as an - optional argument to this function. - For each specific defense type, implement a corresponding helper function such as `_defense_type1()` - or `_defense_type2()`, - and call the appropriate helper inside `defense()` based on the given method name. -- `_load_model()`: Load victim model. -- `_train_target_model()`: Train victim model. -- `_train_defense_model()`: Train defense model. -- `_train_surrogate_model()`: Train attack model. -- `_helper_func()`(optional): Add your helper functions based on your needs, but keep the methods private. +At minimum, verify: +1. model builds from registry +2. one forward pass succeeds with realistic tensor shapes +3. one short `Trainer.fit(...)` + `evaluate(...)` run works -2. **Implement the `defense()` Method**: - Override the abstract `defense()` method with your defense logic, and return a dict of results. For example: +Use existing examples (`test.py`, `pyhazards/models/hydrographnet.py`) as reference for strict shape checks and integration behavior. 
-```python -class MyCustomDefense(BaseDefense): - supported_api_types = {"pyg"} # "pyg" or "dgl" - supported_datasets = {"Cora"} # you can leave this blank if your method supports all datasets - - def defend(self): - # Step 1: Train target model - target_model = self._train_target_model() - # Step 2: Attack target model - attack = MyCustomAttack(self.dataset, attack_node_fraction=0.3) - attack.attack(target_model) - # Step 3: Train defense model - defense_model = self._train_defense_model() - # Step 4: Test defense against attack - attack = MyCustomAttack(self.dataset, attack_node_fraction=0.3) - attack.attack(defense_model) - # Print performance metrics - - def _load_model(self): - # add your logic here - pass - - def _train_target_model(self): - # add your logic here - pass - - def _train_defense_model(self): - # add your logic here - pass - - def _train_surrogate_model(self): - # add your logic here - pass -``` +## 8. Document the new model + +Update docs so users can discover and run it: + +1. add or update `pyhazards/model_cards/.yaml` +2. keep the paper citation, usage snippet, and smoke-test spec in that card +3. set `include_in_public_catalog: false` in the card when a model should stay implemented but not appear in the public model table +4. run `python scripts/render_model_docs.py` if you want to preview the generated pages locally +5. when you need the published GitHub Pages site updated locally too, run: + ```bash + cd docs + sphinx-build -b html source build/html + cp -r build/html/* . + ``` + +The model page and per-model docs are generated automatically from the card, including new +hazard-scenario tables when needed. Keep the card focused on I/O contract, supported tasks, +and one runnable example. + +## 9. Recommended collaborator workflow + +For each new paper model contribution: + +1. open an issue with paper link + proposed API (`name`, `task`, required kwargs) +2. submit PR with model file, registry wiring, and smoke-test commands +3. 
include a short “paper parity note” listing intentional differences from the original repo (for example, optimizer, scheduler, or preprocessing changes) +4. complete the PR template so the automation bot can match the described model to the implementation + +This keeps implementations reviewable and scientifically traceable. + +## 10. Pre-PR checklist + +- [ ] model file added under `pyhazards/models/` +- [ ] builder validates task and returns `nn.Module` +- [ ] model registered in `pyhazards/models/__init__.py` +- [ ] `pyhazards/model_cards/.yaml` added or updated +- [ ] `build_model(name=..., task=...)` works +- [ ] forward pass shape checks and error messages are clear +- [ ] minimal train/eval smoke test executed +- [ ] PR template sections completed with paper/source, smoke-test, and parity notes + +## 11. Automation setup + +The PR automation added in `.github/workflows/` expects: -### Miscellaneous Tips +- repository workflow permissions that allow `contents: write` and `pull-requests: write` -- **Reference Implementation**: The `ModelExtractionAttack0` class is a fully implemented attack example. Study it for - inspiration or as a template. -- **Flexibility**: Add as many helper functions as needed within your class to keep your code clean and modular. -- **Backbone Models**: We provide several basic backbone models like `GCN, GraphSAGE`. You can use or add more - at `from models.nn import GraphSAGE`. -- **Example Scripts**: Please provide an example script in the `examples/` folder demonstrating how to run your code. This - will significantly speed up our code review process. +Once configured, the workflow does the following for catalog-backed model PRs: -By following these guidelines, you can seamlessly integrate your custom attack or defense strategies into PyGIP. Happy -coding! \ No newline at end of file +1. validate the PR against the model contract and smoke-test spec +2. comment with actionable blockers when the implementation is not ready +3. 
merge passing PRs automatically +4. regenerate the model page and module docs on the resulting push +5. rebuild the committed `docs/` HTML site so GitHub Pages reflects the new catalog diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index ce8a5ba7..28d3f33e 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,45 +1,23 @@ --- -name: 🐛 Bug Report -about: Report a bug or unexpected behavior -title: "[Bug] " +name: Bug report +about: Report a problem with PyHazards labels: bug -assignees: '' - --- -## 🐞 Describe the Bug - -A clear and concise description of what the bug is. - -## 📋 To Reproduce - -Steps to reproduce the behavior: -1. Go to '...' -2. Run '...' -3. Observe '...' - -## ✅ Expected Behavior - -A clear and concise description of what you expected to happen. +**Describe the bug** +A clear and concise description of what went wrong. -## 🖼️ Screenshots or Logs +**To Reproduce** +Steps or code to reproduce the behavior. -If applicable, add screenshots or log snippets to help explain your problem. +**Expected behavior** +What you expected to happen. -## 🧾 System Information - -Please complete the following information: -- OS: [e.g., Ubuntu 20.04 / macOS 13] -- Python version: [e.g., 3.8] -- Framework version (e.g., PyTorch, TensorFlow, etc.): -- Package version (if applicable): - -## 📎 Additional Context - -Add any other context about the problem here. - ---- +**Environment** +- Python version: +- OS: +- PyHazards version: +- Torch version: - \ No newline at end of file +**Additional context** +Add any other context, logs, or screenshots about the problem. 
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml deleted file mode 100644 index 4b1627da..00000000 --- a/.github/ISSUE_TEMPLATE/config.yml +++ /dev/null @@ -1,18 +0,0 @@ -blank_issues_enabled: false -contact_links: - - name: 💬 Ask a Question - url: https://github.com/your-org/your-repo/discussions - about: Please ask and answer questions here. - -issue_templates: - - name: 🐛 Bug Report - description: Report a reproducible bug or unexpected behavior. - title: "[Bug] " - labels: [bug] - file: bug_report.md - - - name: 🚀 Feature Request - description: Suggest a new feature or improvement. - title: "[Feature] " - labels: [enhancement] - file: feature_request.md \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 19ec93bf..86602650 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,34 +1,17 @@ --- -name: 🚀 Feature Request -about: Suggest a new feature or improvement -title: "[Feature] " +name: Feature request +about: Suggest an idea for PyHazards labels: enhancement -assignees: '' - --- -## 🚀 Describe the Feature - -A clear and concise description of the feature you are requesting. - -## 📈 Motivation - -Why do you need this feature? What problem does it solve or what use case does it support? - -## 🧩 Describe the Solution You'd Like +**Is your feature request related to a problem? Please describe.** +A clear description of the problem or user need. -Provide a clear description of what you want to happen, and how it might be implemented if you have ideas. +**Describe the solution you'd like** +What you want to add or change. -## 🔄 Alternatives Considered - -Have you considered any alternative approaches or solutions? If so, describe them here. - -## 📎 Additional Context - -Add any other context, mockups, or references here. 
- ---- +**Describe alternatives you've considered** +Any alternative solutions or workarounds. - \ No newline at end of file +**Additional context** +Any extra details, references, or examples. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index e06e22b8..b355265b 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,37 +1,27 @@ -# 📦 Pull Request Template + -Thank you for your contribution! Please complete the checklist and provide relevant details below to help us review your PR effectively. +## PR Type +- [ ] Model contribution +- [ ] Non-model change ---- +## Model Summary +Describe the model architecture, intended public API, and what was ported from the paper or source repository. +If this is not a model PR, write `N/A`. -## 📋 Summary +## Hazard Scenario +State the hazard family that should own the model table entry (for example, Wildfire or Flood). +If this introduces a new hazard scenario, name it explicitly here. - +## Registry Name +List the `build_model(name=...)` entrypoints added or changed in this PR. ---- +## Paper / Source +Link the paper, upstream repository, or technical reference used for the implementation. -## 🧪 Related Issues +## Smoke Test +Document the smoke test command(s) you ran locally, or reference the updated +`pyhazards/model_cards/.yaml` smoke-test spec. - - ---- - -## ✅ Checklist - -- [ ] My code follows the project's coding style -- [ ] I have tested the changes and verified that they work -- [ ] I have added necessary documentation (if applicable) -- [ ] I have linked related issues above (if any) -- [ ] The PR is made from a feature branch, not `main` - ---- - -## 🧠 Additional Context (Optional) - - - ---- - - \ No newline at end of file +## Parity Notes +List any intentional differences from the original implementation, especially if optimizer, +preprocessing, outputs, or training objectives changed. 
diff --git a/.github/ROADMAP_EXECUTION.md b/.github/ROADMAP_EXECUTION.md new file mode 100644 index 00000000..9eec4144 --- /dev/null +++ b/.github/ROADMAP_EXECUTION.md @@ -0,0 +1,747 @@ +# PyHazards Roadmap Execution Package + +This file turns `pyhazard_plan.pdf` into a repo-adjusted multi-agent execution +package that can be handed directly to parallel agents. + +## Repo-adjusted roadmap + +The PDF roadmap assumes a greenfield architecture. This repository already has +working dataset and model registries, model cards, generated model docs, and a +published `docs/` site. Use that existing structure as the foundation. + +Apply these adjustments before execution: + +1. Keep the current dataset and model registries and extend them. + Do not replace `register_dataset(...)`, `register_model(...)`, or + `build_model(name, task, **kwargs)`. +2. Introduce hazard taxonomy at the benchmark and config layer. + Keep current low-level model task labels such as `classification`, + `regression`, and `segmentation`. +3. Keep `Dataset.load()` and `DataBundle` as the runtime dataset contract. + Treat `download`, `prepare`, and split helpers as hazard-specific adapters, + not mandatory methods on every existing dataset class. +4. Add `pyhazards/benchmarks`, `pyhazards/configs`, and `pyhazards/reports` + incrementally. +5. Keep public model discovery driven by `pyhazards/model_cards/*.yaml` and + `pyhazards/model_catalog.py`. +6. Keep top-level docs and published `docs/` artifacts owned by the integrator. + +## Current audited baseline + +Use `docs/source/appendix_a_coverage.rst` as the checked-in audit of the +current repo against Appendix A in `pyhazard_plan.pdf`. + +Key corrections from the audit: + +- Wildfire is the largest gap. None of the Appendix A wildfire baselines are + implemented yet. The current FPA-FOD family and `CNN-ASPP` stay public, but + they are non-core variants and must not be counted as finished Appendix A + coverage. 
+- Earthquake has the main model adapters (`PhaseNet`, `EQTransformer`, `GPD`, + `EQNet`), but the SeisBench / pick-benchmark / pyCSEP / AEFA benchmark-data + stack is still missing. +- Flood has the main model adapters (`NeuralHydrology`, `FloodCast`, + `UrbanFloodCast`), but the Caravan / WaterBench / FloodCastBench / + HydroBench benchmark-data stack is still missing. +- Storm has the main model adapters, but `TCBench`, `IBTrACS`, and + `TropiCycloneNet-Dataset` are still missing. `GraphCast`, `Pangu`, and + `FourCastNet` remain experimental wrappers and must not be counted as + completed core baselines. +- Synthetic datasets remain smoke fixtures only. They are not evidence that the + Appendix A benchmark adapters are finished. + +## Staged delivery plan + +Run the corrective roadmap as six waves: + +### Wave 0: Truthful portfolio and audit enforcement + +Goal: +- keep the public catalog, benchmark page, and roadmap docs aligned with the + Appendix A audit, +- separate `core`, `variant`, and `experimental` entries, +- prevent future merges from over-counting same-paper variants or lightweight + wrappers as finished coverage. + +Exit criteria: +- generated docs show truthful status sections, +- Appendix A coverage page is in sync, +- CI checks the generated Appendix A page, +- worker agents use the audited gap list as their task queue. + +### Wave 1: Shared contracts + +Goal: +- land the shared task taxonomy, +- add benchmark core contracts and runner entrypoints, +- add report exporters, +- add config schema support, +- keep the existing public API intact. + +Exit criteria: +- shared benchmark registry exists, +- one dummy benchmark path can run end to end, +- shared tests for task taxonomy, benchmark registry, and report export pass. + +### Wave 2: First vertical slices + +Goal: +- land one credible benchmark path per hazard family on top of Wave 1. + +Hazard deliverables: +- Earthquake: first picking evaluator plus one credible baseline. 
+- Wildfire: first danger evaluator plus one credible baseline. +- Flood: first streamflow evaluator plus one credible baseline. +- Storm: first shared `tc.track_intensity` evaluator plus Hurricast. + +Exit criteria: +- each hazard agent passes its owned tests, +- each hazard agent returns a registration manifest, +- each hazard family has at least one smoke config. + +### Wave 3: Breadth expansion + +Goal: +- add the remaining 2-3 baselines per hazard family from the PDF plan. + +Exit criteria: +- all added baselines have model cards, +- smoke configs exist, +- evaluator contracts remain shared rather than duplicated inside adapters. + +### Wave 4: Foundation-weather adapters + +Goal: +- land GraphCast, Pangu-Weather, and FourCastNet style storm adapters only + after the shared TC evaluator is stable. + +Exit criteria: +- foundation adapters remain wrapper-style integrations, +- storm evaluator remains the single scoring entrypoint, +- dependencies are marked experimental where appropriate. + +### Wave 5: Integration and release polish + +Goal: +- finalize shared registry wiring, +- update generated docs and published docs, +- align CI and smoke-test scripts, +- write release-quality docs and examples. + +Exit criteria: +- CI passes on `main`, +- docs build cleanly, +- generated model docs are in sync, +- the published `docs/` site reflects the merged work. + +## Ownership model + +Use five worker agents plus one integrator. Worker ownership must not overlap. 
+ +### Agent 1: Core Platform + +Own: +- `pyhazards/tasks.py` +- `pyhazards/benchmarks/__init__.py` +- `pyhazards/benchmarks/base.py` +- `pyhazards/benchmarks/registry.py` +- `pyhazards/benchmarks/runner.py` +- `pyhazards/benchmarks/schemas.py` +- `pyhazards/configs/__init__.py` +- `pyhazards/configs/_schema.py` +- `pyhazards/reports/**` +- `pyhazards/engine/runner.py` +- `scripts/run_benchmark.py` +- `tests/test_tasks.py` +- `tests/test_benchmark_registry.py` +- `tests/test_benchmark_runner.py` +- `tests/test_report_exports.py` + +Do not edit: +- `pyhazards/__init__.py` +- `pyhazards/datasets/__init__.py` +- `pyhazards/datasets/registry.py` +- `pyhazards/models/__init__.py` +- `pyhazards/models/registry.py` +- `pyhazards/models/builder.py` +- `pyhazards/model_catalog.py` +- `docs/**` +- `.github/**` + +### Agent 2: Earthquake + +Own: +- `pyhazards/datasets/earthquake/**` +- `pyhazards/benchmarks/earthquake.py` +- `pyhazards/configs/earthquake/**` +- `pyhazards/models/wavecastnet.py` +- `pyhazards/models/phasenet.py` +- `pyhazards/models/eqtransformer.py` +- `pyhazards/models/gpd.py` +- `pyhazards/models/eqnet.py` +- `pyhazards/model_cards/wavecastnet.yaml` +- `pyhazards/model_cards/phasenet.yaml` +- `pyhazards/model_cards/eqtransformer.yaml` +- `pyhazards/model_cards/gpd.yaml` +- `pyhazards/model_cards/eqnet.yaml` +- `tests/test_earthquake_*.py` + +Do not edit: +- shared registries and package `__init__` files +- `pyhazards/model_catalog.py` +- `docs/**` +- `.github/**` + +### Agent 3: Wildfire + +Own: +- `pyhazards/benchmarks/wildfire.py` +- `pyhazards/configs/wildfire/**` +- `pyhazards/models/wildfire_*.py` +- `pyhazards/models/cnn_aspp.py` +- `pyhazards/model_cards/wildfire_*.yaml` +- `pyhazards/datasets/firms/**` +- `pyhazards/datasets/mtbs/**` +- `pyhazards/datasets/landfire/**` +- `pyhazards/datasets/wfigs/**` +- `pyhazards/datasets/fpa_fod.py` +- `pyhazards/datasets/fpa_fod_tabular/**` +- `pyhazards/datasets/fpa_fod_weekly/**` +- `tests/test_wildfire_*.py` 
+- `tests/test_fpa_fod_*.py` + +Do not edit: +- shared registries and package `__init__` files +- `pyhazards/model_catalog.py` +- `docs/**` +- `.github/**` + +### Agent 4: Flood + +Own: +- `pyhazards/datasets/flood/**` +- `pyhazards/datasets/noaa_flood/**` +- `pyhazards/benchmarks/flood.py` +- `pyhazards/configs/flood/**` +- `pyhazards/models/hydrographnet.py` +- `pyhazards/models/neuralhydrology_*.py` +- `pyhazards/models/floodcast.py` +- `pyhazards/models/urbanfloodcast.py` +- `pyhazards/model_cards/hydrographnet.yaml` +- `pyhazards/model_cards/neuralhydrology_*.yaml` +- `pyhazards/model_cards/floodcast.yaml` +- `pyhazards/model_cards/urbanfloodcast.yaml` +- `pyhazards/data/load_hydrograph_data.py` +- `tests/test_flood_*.py` + +Do not edit: +- shared registries and package `__init__` files +- `pyhazards/model_catalog.py` +- `docs/**` +- `.github/**` + +### Agent 5: Storm + +Own: +- `pyhazards/datasets/tc/**` +- `pyhazards/benchmarks/tc.py` +- `pyhazards/configs/tc/**` +- `pyhazards/models/hurricast.py` +- `pyhazards/models/tropicalcyclone_mlp.py` +- `pyhazards/models/tropicyclonenet.py` +- `pyhazards/models/saf_net.py` +- `pyhazards/models/tcif_fusion.py` +- `pyhazards/models/graphcast_tc.py` +- `pyhazards/models/pangu_tc.py` +- `pyhazards/models/fourcastnet_tc.py` +- `pyhazards/model_cards/hurricast.yaml` +- `pyhazards/model_cards/tropicalcyclone_mlp.yaml` +- `pyhazards/model_cards/tropicyclonenet.yaml` +- `pyhazards/model_cards/saf_net.yaml` +- `pyhazards/model_cards/tcif_fusion.yaml` +- `pyhazards/model_cards/graphcast_tc.yaml` +- `pyhazards/model_cards/pangu_tc.yaml` +- `pyhazards/model_cards/fourcastnet_tc.yaml` +- `tests/test_tc_*.py` + +Do not edit: +- shared registries and package `__init__` files +- `pyhazards/model_catalog.py` +- `docs/**` +- `.github/**` + +### Integrator + +Own exclusively: +- `pyhazards/__init__.py` +- `pyhazards/datasets/__init__.py` +- `pyhazards/datasets/registry.py` +- `pyhazards/models/__init__.py` +- 
`pyhazards/models/registry.py` +- `pyhazards/models/builder.py` +- `pyhazards/model_catalog.py` +- `scripts/render_model_docs.py` +- `scripts/smoke_test_models.py` +- `scripts/verify_table_entries.py` +- `.github/workflows/**` +- `.github/PULL_REQUEST_TEMPLATE.md` +- `.github/IMPLEMENTATION.md` +- `docs/source/*.rst` +- `docs/source/api/**` +- `docs/source/modules/**` +- `docs/**` + +Workers must not edit those files. They must instead return: +- registration manifests, +- model-card manifests, +- smoke-config names, +- doc notes for the integrator. + +## Worker prompts + +### Agent 1 prompt + +```text +You are Agent 1 for the PyHazards roadmap. + +Mission: +Implement the shared platform layer required by the roadmap without breaking the +current dataset/model registry API. + +You own: +- pyhazards/tasks.py +- pyhazards/benchmarks/{__init__.py,base.py,registry.py,runner.py,schemas.py} +- pyhazards/configs/{__init__.py,_schema.py} +- pyhazards/reports/** +- pyhazards/engine/runner.py +- scripts/run_benchmark.py +- tests/test_tasks.py +- tests/test_benchmark_registry.py +- tests/test_benchmark_runner.py +- tests/test_report_exports.py + +Do not edit: +- pyhazards/__init__.py +- pyhazards/models/__init__.py +- pyhazards/datasets/__init__.py +- pyhazards/models/registry.py +- pyhazards/datasets/registry.py +- pyhazards/model_catalog.py +- docs/** +- .github/** + +Implementation requirements: +- Keep the current build_model(name, task, **kwargs) contract intact. +- Add benchmark-level hazard taxonomy separately from low-level model task + values. +- Define benchmark contracts with evaluate(), aggregate_metrics(), and + export_report(). +- Add shared report exporters and a benchmark runner entrypoint. +- Add shared config schema support without requiring a whole-repo dataclass + rewrite. +- Make the core layer stable enough that hazard agents can implement evaluators + without inventing their own contracts. 
+ +Required outputs: +- code in owned files +- tests in owned files +- a short contract note for hazard agents +- a summary of any interface assumptions the integrator must expose publicly + +Validation: +- python -m pytest tests/test_tasks.py tests/test_benchmark_registry.py tests/test_benchmark_runner.py tests/test_report_exports.py +- python scripts/run_benchmark.py --help + +Escalate if: +- you need changes in shared current registry files or top-level docs +- the current engine lifecycle blocks a benchmark runner without changing shared exports +``` + +### Agent 2 prompt + +```text +You are Agent 2 for the PyHazards roadmap. + +Mission: +Own the earthquake workstream across the staged roadmap: first the vertical +slice, then breadth expansion. + +You own: +- pyhazards/datasets/earthquake/** +- pyhazards/benchmarks/earthquake.py +- pyhazards/configs/earthquake/** +- pyhazards/models/wavecastnet.py +- pyhazards/models/phasenet.py +- pyhazards/models/eqtransformer.py +- pyhazards/models/gpd.py +- pyhazards/models/eqnet.py +- pyhazards/model_cards/wavecastnet.yaml +- pyhazards/model_cards/phasenet.yaml +- pyhazards/model_cards/eqtransformer.yaml +- pyhazards/model_cards/gpd.yaml +- pyhazards/model_cards/eqnet.yaml +- tests/test_earthquake_*.py + +Do not edit: +- shared registries +- pyhazards/model_catalog.py +- docs/** +- .github/** + +Implementation requirements: +- Treat wavecastnet as an existing starting asset, not a new baseline. +- Land the first working slice around earthquake picking first. +- Standardize waveform tensor and metadata assumptions inside your owned module set. +- Implement one evaluator path for picking before breadth expansion. +- Add later baselines only after the first evaluator and one baseline pass + cleanly. +- Return a registration manifest for every dataset and model you add or change. 
+ +Required outputs: +- owned code and tests +- model-card files for public baselines +- config files for smoke and fuller runs +- a registration manifest for the integrator + +Validation: +- python -m pytest tests/test_earthquake_*.py + +Escalate if: +- a required change touches shared registries, top-level docs, or generated docs +- the benchmark contract from Agent 1 is insufficient for picking or forecasting reports +``` + +### Agent 3 prompt + +```text +You are Agent 3 for the PyHazards roadmap. + +Mission: +Own the wildfire workstream across the staged roadmap: danger first, then +spread, then breadth. + +You own: +- pyhazards/benchmarks/wildfire.py +- pyhazards/configs/wildfire/** +- pyhazards/models/wildfire_*.py +- pyhazards/models/cnn_aspp.py +- pyhazards/model_cards/wildfire_*.yaml +- pyhazards/datasets/firms/** +- pyhazards/datasets/mtbs/** +- pyhazards/datasets/landfire/** +- pyhazards/datasets/wfigs/** +- pyhazards/datasets/fpa_fod.py +- pyhazards/datasets/fpa_fod_tabular/** +- pyhazards/datasets/fpa_fod_weekly/** +- tests/test_wildfire_*.py +- tests/test_fpa_fod_*.py + +Do not edit: +- shared registries +- pyhazards/model_catalog.py +- docs/** +- .github/** + +Implementation requirements: +- Treat existing wildfire assets as the seed state. +- Separate wildfire.danger and wildfire.spread at the benchmark and config layer. +- Land one danger baseline before expanding the spread stack. +- Standardize raster/tile output conventions for spread evaluators. +- Keep physics-style simulator integration as an external adapter, not a deep + vendor import. +- Return a registration manifest for every dataset and model you add or change. 
+ +Required outputs: +- owned code and tests +- smoke and full configs under pyhazards/configs/wildfire/ +- model cards for public baselines +- a registration manifest for the integrator + +Validation: +- python -m pytest tests/test_fpa_fod_datasets.py tests/test_fpa_fod_models.py tests/test_fpa_fod_trainer_smoke.py tests/test_wildfire_*.py + +Escalate if: +- shared registry wiring is required +- generic dataset helpers outside your ownership must change +``` + +### Agent 4 prompt + +```text +You are Agent 4 for the PyHazards roadmap. + +Mission: +Own the flood workstream across the staged roadmap: streamflow first, then +inundation, then breadth. + +You own: +- pyhazards/datasets/flood/** +- pyhazards/datasets/noaa_flood/** +- pyhazards/benchmarks/flood.py +- pyhazards/configs/flood/** +- pyhazards/models/hydrographnet.py +- pyhazards/models/neuralhydrology_*.py +- pyhazards/models/floodcast.py +- pyhazards/models/urbanfloodcast.py +- pyhazards/model_cards/hydrographnet.yaml +- pyhazards/model_cards/neuralhydrology_*.yaml +- pyhazards/model_cards/floodcast.yaml +- pyhazards/model_cards/urbanfloodcast.yaml +- pyhazards/data/load_hydrograph_data.py +- tests/test_flood_*.py + +Do not edit: +- shared registries +- pyhazards/model_catalog.py +- docs/** +- .github/** + +Implementation requirements: +- Treat hydrographnet as an existing starting asset. +- Expose flood.streamflow and flood.inundation as separate benchmark and config + tracks. +- Land streamflow evaluation first. +- Use adapter-style integration for NeuralHydrology-family baselines. +- Add inundation evaluation only after streamflow is stable. +- Return a registration manifest for every dataset and model you add or change. 
+ +Required outputs: +- owned code and tests +- smoke and full configs under pyhazards/configs/flood/ +- model cards for public baselines +- a registration manifest for the integrator + +Validation: +- python -m pytest tests/test_flood_*.py + +Escalate if: +- shared registry wiring is required +- generic shared datasets outside your ownership must change +``` + +### Agent 5 prompt + +```text +You are Agent 5 for the PyHazards roadmap. + +Mission: +Own the combined hurricane and tropical-cyclone workstream across the staged +roadmap, including the late foundation-adapter phase. + +You own: +- pyhazards/datasets/tc/** +- pyhazards/benchmarks/tc.py +- pyhazards/configs/tc/** +- pyhazards/models/hurricast.py +- pyhazards/models/tropicalcyclone_mlp.py +- pyhazards/models/tropicyclonenet.py +- pyhazards/models/saf_net.py +- pyhazards/models/tcif_fusion.py +- pyhazards/models/graphcast_tc.py +- pyhazards/models/pangu_tc.py +- pyhazards/models/fourcastnet_tc.py +- pyhazards/model_cards/hurricast.yaml +- pyhazards/model_cards/tropicalcyclone_mlp.yaml +- pyhazards/model_cards/tropicyclonenet.yaml +- pyhazards/model_cards/saf_net.yaml +- pyhazards/model_cards/tcif_fusion.yaml +- pyhazards/model_cards/graphcast_tc.yaml +- pyhazards/model_cards/pangu_tc.yaml +- pyhazards/model_cards/fourcastnet_tc.yaml +- tests/test_tc_*.py + +Do not edit: +- shared registries +- pyhazards/model_catalog.py +- docs/** +- .github/** + +Implementation requirements: +- Implement one shared storm-centric evaluator for track and intensity first. +- Treat hurricane as basin presets layered on top of the shared TC module. +- Land Hurricast before the broader TC stack. +- Delay GraphCast, Pangu, and FourCastNet adapters until the shared TC evaluator + is stable. +- Keep foundation-model integrations as external-field adapters plus extraction + and evaluation wrappers. +- Return a registration manifest for every dataset and model you add or change. 
+ +Required outputs: +- owned code and tests +- smoke and full configs under pyhazards/configs/tc/ +- model cards for public baselines +- a registration manifest for the integrator + +Validation: +- python -m pytest tests/test_tc_*.py + +Escalate if: +- shared registry wiring is required +- the benchmark contract from Agent 1 is insufficient for track and intensity reports +``` + +## Integrator prompt + +```text +You are the integrator agent for the PyHazards roadmap. + +Mission: +Merge the staged outputs from Agents 1-5 into the existing repo without +breaking the current public API, docs generation flow, or CI. + +You exclusively own: +- pyhazards/__init__.py +- pyhazards/datasets/__init__.py +- pyhazards/datasets/registry.py +- pyhazards/models/__init__.py +- pyhazards/models/registry.py +- pyhazards/models/builder.py +- pyhazards/model_catalog.py +- scripts/render_model_docs.py +- scripts/smoke_test_models.py +- scripts/verify_table_entries.py +- .github/workflows/** +- .github/PULL_REQUEST_TEMPLATE.md +- .github/IMPLEMENTATION.md +- docs/source/*.rst +- docs/source/api/** +- docs/source/modules/** +- docs/** + +Responsibilities: +- merge Agent 1 first and expose shared contracts publicly only where needed +- wire dataset and model registrations from worker manifests +- keep existing build_model(name, task, **kwargs) behavior intact +- update model catalog generation and smoke scripts for new public baselines +- update top-level docs and implementation guidance +- regenerate model docs and published docs +- run final validation +- resolve conflicts without letting worker agents edit shared choke points directly + +Final validation: +- python -c "import pyhazards; print(pyhazards.__version__)" +- python -m pytest tests +- python scripts/render_model_docs.py +- python scripts/render_model_docs.py --check +- python scripts/verify_table_entries.py +- python scripts/smoke_test_models.py +- cd docs && sphinx-build -b html source build/html + +Do not: +- redesign 
worker-owned implementations unless integration requires it +- drop model cards or smoke specs from worker branches +- merge foundation adapters before the TC evaluator is stable +``` + +## Dependency and merge order + +1. Phase A: + - Agent 1 only. + - Integrator merges Agent 1 after its tests pass. +2. Phase B: + - Agents 2-5 branch from the integrated Phase A commit and work in parallel. + - Integrator merges in this order: + - Earthquake + - Wildfire + - Flood + - Storm +3. Phase C: + - Hazard agents continue breadth expansion on fresh branches from updated + `main`. + - Integrator merges in the same order: + - Earthquake + - Wildfire + - Flood + - Storm +4. Phase D: + - Agent 5 alone handles foundation-weather adapters. + - Integrator merges the storm foundation branch last. +5. Phase E: + - Integrator finalizes shared docs, CI, packaging, generated docs, and + release polish. + +## Validation by role + +### Worker validation + +- Agent 1: + - `python -m pytest tests/test_tasks.py tests/test_benchmark_registry.py tests/test_benchmark_runner.py tests/test_report_exports.py` + - `python scripts/run_benchmark.py --help` +- Agent 2: + - `python -m pytest tests/test_earthquake_*.py` +- Agent 3: + - `python -m pytest tests/test_fpa_fod_datasets.py tests/test_fpa_fod_models.py tests/test_fpa_fod_trainer_smoke.py tests/test_wildfire_*.py` +- Agent 4: + - `python -m pytest tests/test_flood_*.py` +- Agent 5: + - `python -m pytest tests/test_tc_*.py` + +### Integrator validation + +- `python -c "import pyhazards; print(pyhazards.__version__)"` +- `python -m pytest tests` +- `python scripts/render_model_docs.py` +- `python scripts/render_model_docs.py --check` +- `python scripts/verify_table_entries.py` +- `python scripts/smoke_test_models.py` +- `cd docs && sphinx-build -b html source build/html` + +## Likely conflict points + +### Shared registries and package exports + +Risk: +- every hazard wants to touch `__init__` files and registries. 
+ +Avoidance: +- only the integrator edits those files, +- workers return registration manifests instead. + +### Model catalog and generated docs + +Risk: +- public model additions naturally collide in generated docs and catalog logic. + +Avoidance: +- workers edit only model cards and their own model files, +- integrator runs `render_model_docs.py` and rebuilds the published site. + +### Top-level docs and implementation guidance + +Risk: +- every hazard can generate docs requests that overlap the same pages. + +Avoidance: +- workers provide doc notes only, +- integrator owns `docs/source/*.rst`, `docs/source/api/**`, + `docs/source/modules/**`, and `docs/**`. + +### Shared dataset helpers + +Risk: +- hazard agents may attempt to patch common helpers when adding loaders. + +Avoidance: +- add new hazard-specific loaders under owned directories first, +- escalate before touching shared generic helpers. + +### Foundation-weather adapters + +Risk: +- GraphCast and Pangu style integrations can destabilize the whole roadmap if + merged too early. + +Avoidance: +- hard phase gate, +- no foundation adapter merges before the shared TC evaluator is stable on + `main`. + +## How to use this package + +1. Decide the current wave. +2. Start the agents allowed in that wave. +3. Give each worker only the prompt from this file plus the current base commit. +4. Require a manifest of registrations, model cards, configs, tests, and open + issues from every worker. +5. Hand the manifests and branches to the integrator. +6. Require the integrator to run the full validation set before merge or push. 
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..09a073ee --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,50 @@ +name: CI + +on: + push: + branches: [main, master] + pull_request: + branches: [main, master] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install dependencies (CPU Torch) + run: | + python -m pip install --upgrade pip + python -m pip install torch --index-url https://download.pytorch.org/whl/cpu + python -m pip install pytest + python -m pip install -e . + + - name: Import smoke test + run: | + python - <<'PY' + import pyhazards + print("pyhazards version:", pyhazards.__version__) + PY + + - name: Full pytest suite + run: python -m pytest tests + + - name: Generated model docs check + run: python scripts/render_model_docs.py --check + + - name: Generated benchmark docs check + run: python scripts/render_benchmark_docs.py --check + + - name: Generated dataset docs check + run: python scripts/render_dataset_docs.py --check + + - name: Generated coverage audit docs check + run: python scripts/render_appendix_a_docs.py --check + + - name: Catalog smoke test + run: python scripts/verify_table_entries.py diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml deleted file mode 100644 index 95ae8279..00000000 --- a/.github/workflows/docs.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: build docs - -on: - push: - branches: [ main, feat/docs ] - workflow_dispatch: - -permissions: - contents: read - pages: write - id-token: write - -concurrency: - group: pages - cancel-in-progress: true - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - uses: actions/setup-python@v5 - with: - python-version: "3.8" - - - name: Install doc deps - run: | - if [ -f docs/requirements.txt ]; then - pip install -r docs/requirements.txt - else - 
pip install sphinx myst-parser sphinx-rtd-theme sphinx-autodoc-typehints sphinx-copybutton - fi - - - name: Install package - run: | - pip install -e . - - - name: Sanity check package discovery - run: | - python - <<'PY' - import pkgutil, pygip - print("pygip is at:", pygip.__file__) - print("submodules:", [m.name for m in pkgutil.walk_packages(pygip.__path__, pygip.__name__ + ".") if m.name.startswith("pygip.models")]) - PY - - - name: Build Sphinx html - run: | - sphinx-build -b html docs/source docs/build/html - - - name: Upload Pages artifact - uses: actions/upload-pages-artifact@v3 - with: - path: docs/build/html - - deploy: - environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - runs-on: ubuntu-latest - needs: build - steps: - - id: deployment - uses: actions/deploy-pages@v4 \ No newline at end of file diff --git a/.github/workflows/model-docs-sync.yml b/.github/workflows/model-docs-sync.yml new file mode 100644 index 00000000..cc8f207f --- /dev/null +++ b/.github/workflows/model-docs-sync.yml @@ -0,0 +1,61 @@ +name: Sync Generated Docs + +on: + push: + branches: [main, master] + paths: + - "pyhazards/model_cards/**" + - "pyhazards/dataset_cards/**" + - "pyhazards/benchmark_cards/**" + - "pyhazards/models/**" + - "pyhazards/model_catalog.py" + - "pyhazards/dataset_catalog.py" + - "pyhazards/benchmark_catalog.py" + - "pyhazards/appendix_a_catalog.py" + - "scripts/render_model_docs.py" + - "scripts/render_dataset_docs.py" + - "scripts/render_benchmark_docs.py" + - "scripts/render_appendix_a_docs.py" + +permissions: + contents: write + +jobs: + sync: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install dependencies (CPU Torch + docs) + run: | + python -m pip install --upgrade pip + python -m pip install torch --index-url https://download.pytorch.org/whl/cpu + python -m pip install -e . 
+ python -m pip install -r docs/requirements.txt + + - name: Regenerate and publish generated docs + run: | + python scripts/render_dataset_docs.py + python scripts/render_model_docs.py + python scripts/render_benchmark_docs.py + python scripts/render_appendix_a_docs.py + cd docs + sphinx-build -b html source build/html + cp -r build/html/* . + + - name: Commit generated docs + run: | + git config user.name "github-actions[bot]" + git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + if git diff --quiet -- docs; then + echo "Published docs already up to date." + exit 0 + fi + git add docs + git commit -m "docs(catalog): refresh published docs" + git push diff --git a/.github/workflows/model-pr-bot.yml b/.github/workflows/model-pr-bot.yml new file mode 100644 index 00000000..1c0c0657 --- /dev/null +++ b/.github/workflows/model-pr-bot.yml @@ -0,0 +1,141 @@ +name: Model PR Bot + +on: + workflow_run: + workflows: ["Model PR Validation"] + types: [completed] + +permissions: + actions: read + contents: write + pull-requests: write + +jobs: + process: + if: ${{ github.event.workflow_run.event == 'pull_request' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Resolve PR + id: pr + run: | + python - <<'PY' + import json + import os + + event = json.loads(open(os.environ["GITHUB_EVENT_PATH"], "r", encoding="utf-8").read()) + workflow_run = event["workflow_run"] + pull_requests = workflow_run.get("pull_requests", []) + output_path = os.environ["GITHUB_OUTPUT"] + with open(output_path, "a", encoding="utf-8") as handle: + if not pull_requests: + handle.write("skip=true\n") + else: + pr = pull_requests[0] + handle.write("skip=false\n") + handle.write(f"number={pr['number']}\n") + handle.write(f"url={pr['url']}\n") + PY + + - name: Download review artifact + id: artifact + if: steps.pr.outputs.skip != 'true' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + python - <<'PY' + import json + import os + import 
subprocess + import sys + + repo = os.environ["GITHUB_REPOSITORY"] + run_id = json.loads(open(os.environ["GITHUB_EVENT_PATH"], "r", encoding="utf-8").read())["workflow_run"]["id"] + result = subprocess.run( + ["gh", "api", f"repos/{repo}/actions/runs/{run_id}/artifacts"], + check=True, + capture_output=True, + text=True, + ) + artifacts = json.loads(result.stdout)["artifacts"] + artifact = next((item for item in artifacts if item["name"] == "model-pr-review"), None) + if artifact is None: + raise SystemExit("model-pr-review artifact not found") + with open(os.environ["GITHUB_OUTPUT"], "a", encoding="utf-8") as handle: + handle.write(f"artifact_id={artifact['id']}\n") + PY + + gh api "repos/${GITHUB_REPOSITORY}/actions/artifacts/${{ steps.artifact.outputs.artifact_id }}/zip" > /tmp/model-pr-review.zip + unzip -o /tmp/model-pr-review.zip -d /tmp/model-pr-review + + - name: Read review report + id: report + if: steps.pr.outputs.skip != 'true' + run: | + python - <<'PY' + import json + import os + + report = json.loads(open("/tmp/model-pr-review/model-pr-review.json", "r", encoding="utf-8").read()) + with open(os.environ["GITHUB_OUTPUT"], "a", encoding="utf-8") as handle: + handle.write(f"status={report['status']}\n") + handle.write(f"is_model_pr={str(report['is_model_pr']).lower()}\n") + handle.write(f"draft={str(report.get('draft', False)).lower()}\n") + handle.write("models< comment.user.type === "Bot" && comment.body && comment.body.includes(marker)); + if (existing) { + await github.rest.issues.updateComment({ + owner, + repo, + comment_id: existing.id, + body, + }); + } else { + await github.rest.issues.createComment({ + owner, + repo, + issue_number: prNumber, + body, + }); + } + + - name: Merge passing PR + id: merge + if: steps.report.outputs.status == 'pass' && steps.report.outputs.draft != 'true' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + set +e + gh pr merge "${{ steps.pr.outputs.number }}" --squash + exit_code=$? 
+ echo "exit_code=$exit_code" >> "$GITHUB_OUTPUT" + if [ "$exit_code" -eq 0 ]; then + echo "result=merged" >> "$GITHUB_OUTPUT" + else + echo "result=merge_failed" >> "$GITHUB_OUTPUT" + fi + exit 0 diff --git a/.github/workflows/model-pr-validation.yml b/.github/workflows/model-pr-validation.yml new file mode 100644 index 00000000..45515408 --- /dev/null +++ b/.github/workflows/model-pr-validation.yml @@ -0,0 +1,45 @@ +name: Model PR Validation + +on: + pull_request: + branches: [main, master] + types: [opened, synchronize, reopened, edited, ready_for_review] + +permissions: + contents: read + +jobs: + review: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 0 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.10" + + - name: Install dependencies (CPU Torch) + run: | + python -m pip install --upgrade pip + python -m pip install torch --index-url https://download.pytorch.org/whl/cpu + python -m pip install -e . 
+ + - name: Review model PR + run: | + python scripts/review_model_pr.py \ + --event "$GITHUB_EVENT_PATH" \ + --base-sha "${{ github.event.pull_request.base.sha }}" \ + --report-json .github/reports/model-pr-review.json \ + --report-md .github/reports/model-pr-review.md + + - name: Upload model review report + if: always() + uses: actions/upload-artifact@v4 + with: + name: model-pr-review + path: .github/reports + if-no-files-found: error diff --git a/.gitignore b/.gitignore index e0afa395..b57b888e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,20 +1,22 @@ -# Byte-compiled / optimized / DLL files +# Byte-compiled / cache __pycache__/ *.py[cod] -*$py.class +*.so -# build -PyGIP.egg-info/ -dist/ +# Virtual environments +.env/ +.venv/ +env/ +venv/ -# IDEs -.vscode/ -.idea/ +# Build artifacts +dist/ +build/ +*.egg-info/ -# OS +# Editor / OS .DS_Store -.AppleDouble -.LSOverride +.vscode/ -#virtual environments folder -.venv +# Docs build outputs +docs/build/ diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md new file mode 100644 index 00000000..6d7f7952 --- /dev/null +++ b/ARCHITECTURE.md @@ -0,0 +1,107 @@ +# PyHazards Architecture & Public API Sketch (Hazard-Centric) + +PyHazards targets hazard prediction (earthquake, wildfire, flood, hurricane, etc.) with an easy, batteries-included API. The design is hazard-first, GPU/multi-GPU ready, and keeps the API familiar to users of popular ML libraries. + +## Design Principles +- **Minimal onboarding**: one-liner load/train/evaluate for common hazards. +- **Backend-agnostic ML**: standard PyTorch models (tabular, image, sequence); plug-and-play custom models. +- **Hazard-aware datasets**: consistent interface across raster (remote sensing), tabular (climate/soil), time-series (buoys/stations), and vector/geospatial metadata. +- **GPU-first**: `device="auto"` everywhere; optional multi-GPU via DDP; mixed precision for speed. +- **Composable pipelines**: data → transforms → model → metrics → reporting with sensible defaults. 
+- **Extensible registry**: datasets, models, transforms, metrics, and pipelines are all discoverable by string names. + +## Proposed Package Layout (building on current `pyhazards/`) +- `pyhazards.datasets` + - `Dataset` base: unified API (`.load(split) -> DataBundle`, exposes `feature_spec`, `label_spec`, `splits`, `metadata`). + - `hazards/` (new): curated loaders + - `EarthquakeUSGS`, `WildfireMODIS`, `FloodCopernicus`, `HurricaneNOAA`, `LandslideNASA`, etc. + - Each handles download/cache, normalization, split logic, CRS handling for geospatial data, and returns tensors ready for models. + - `transforms/`: common preprocessing (standardize, log-scale precip, NDVI/NDWI indices, temporal windowing, patch extraction for rasters). + - `registry.py`: `load_dataset(name, split="train", cache_dir=None, **kwargs)`. +- `pyhazards.models` + - `backbones.py`: generic modules (MLP, CNN patch encoder, temporal encoder with GRU/Transformer-lite). + - `heads.py`: task heads (`ClassificationHead`, `RegressionHead`, `SegmentationHead` for raster masks). + - `builder.py`: `build_model(name="mlp"|"cnn"|"temporal", task="regression"|..., **kwargs)`. + - `registry.py`: map model names to builders + metadata (input types supported). +- `pyhazards.engine` + - `Trainer`: fit/eval/predict abstraction with callbacks, early stopping, checkpointing, mixed precision, gradient accumulation. + - `distributed`: thin wrapper for PyTorch DDP; `strategy="auto"|"ddp"|"dp"`. + - `inference.py`: batch/stream inference; sliding-window raster tiling for large scenes. +- `pyhazards.metrics` + - Classification: Acc/F1/Precision/Recall/AUROC. + - Regression: MAE/RMSE/R². + - Segmentation: IoU/Dice. + - Calibration: ECE/Brier. +- `pyhazards.utils` + - `hardware.py`: `auto_device(prefer="cuda")`, `num_devices()`, simple device validation. + - `seed_all`, logging helpers (stdout/JSON/CSV), timer/memory profilers. 
+- `pyhazards.cli` + - `pyhazards run --dataset wildfire_modis --model cnn --task segmentation --device auto --strategy ddp --mixed-precision`. + +## Core Python Flow +```python +from pyhazards import datasets, models +from pyhazards.engine import Trainer +from pyhazards.metrics import RegressionMetrics +from pyhazards.utils import auto_device + +# 1) Load data +data = datasets.load("flood_copernicus", split_config="standard", cache_dir="~/.pyhazards") + +# 2) Build model +model = models.build( + name="temporal", + task="regression", + in_dim=data.feature_spec.input_dim, + out_dim=data.label_spec.num_targets, + hidden_dim=256, +) + +# 3) Train & evaluate +trainer = Trainer( + model=model, + device=auto_device(), + strategy="auto", # promotes to DDP on multi-GPU + mixed_precision=True, + metrics=[RegressionMetrics()], +) +trainer.fit(data, max_epochs=50, train_split="train", val_split="val") +results = trainer.evaluate(data, split="test") +print(results) # {"RMSE": ..., "MAE": ...} + +# 4) Predict / export +preds = trainer.predict(data, split="test") +trainer.save_checkpoint("checkpoints/flood_temporal.pt") +``` + +## CLI Flow +```bash +pyhazards run \ + --dataset earthquake_usgs \ + --model mlp \ + --task classification \ + --device auto \ + --strategy auto \ + --mixed-precision +``` + +## Data Model +- `DataBundle`: typed container with `tensors`, `splits` (`train/val/test`), `spatial_meta` (CRS, bounding boxes), `temporal_index`, and `feature_spec/label_spec`. +- Supports tabular (climate/soil), raster patches (satellite), and time-series (sensors). +- Transform pipelines apply lazily (composition over datasets) and can be reused for inference. + +## GPU & Multi-GPU +- `auto_device()` picks `cuda:0` if available else CPU; `num_devices()` detects multi-GPU. +- `Trainer(strategy="auto")`: + - Single GPU: optional AMP for speed. + - Multi-GPU: PyTorch DDP (`torchrun`) with synchronized metrics and gradient accumulation. + - CPU fallback: same API, no code changes. 
+ +## Extensibility Checklist +- New dataset: subclass `Dataset`, implement `load(split, transforms=None)`, register via `datasets.registry`. +- New transform: add a callable, register via `datasets.transforms`. +- New model: implement `nn.Module`, expose via `models.registry` with metadata (`supported_inputs`, `task`). +- New metric: subclass `MetricBase`, add to `metrics` namespace. +- New pipeline/workflow: compose dataset + transforms + model + metrics in `pyhazards.workflows`. + +This design keeps the surface area small (load → build → train → evaluate) while being hazard-first, GPU-ready, and easy to extend as new hazards, data modalities, or models are added. diff --git a/LICENSE b/LICENSE index aac97e7a..f3f44329 100644 --- a/LICENSE +++ b/LICENSE @@ -1,24 +1,21 @@ -BSD 2-Clause License +MIT License -Copyright (c) 2025 Bolin Shen +Copyright (c) 2025 Xueqi Cheng -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/MANIFEST.in b/MANIFEST.in index e640dd9b..f7e7587b 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,9 +1,9 @@ include LICENSE include README.md -graft pygip +graft pyhazards prune .github prune tests -global-exclude __pycache__ *.py[cod] *.so *.dylib \ No newline at end of file +global-exclude __pycache__ *.py[cod] *.so *.dylib diff --git a/README.md b/README.md index 2b9fa53c..cf1481c8 100644 --- a/README.md +++ b/README.md @@ -1,118 +1,181 @@ -# PyGIP +

+ PyHazards logo +

+ +

PyHazards: A Python framework for AI-powered hazard prediction

+ +

+ Datasets · Models · Benchmarks · Training Pipelines · Evaluation +

+ +

+ + PyPI version + + + Build status + + + License + + + GitHub stars + + + GitHub forks + +

+ +

+ Documentation · + GitHub · + Slack +

+ +## Overview + +PyHazards is built for hazard-AI work that needs more than a single model or +paper reproduction. It unifies dataset discovery, model construction, +benchmark-aligned evaluation, and experiment plumbing so the same library can +support first-run baselines, comparative studies, and contributor extensions. + +Intended users: + +- **Researchers**: run benchmark-aligned experiments and compare baselines across hazard tasks. +- **Practitioners**: reuse hazard-specific workflows for data inspection, model building, and evaluation. +- **Contributors**: extend datasets, models, and benchmarks through registry and catalog patterns already used in the repo. + +## Why PyHazards + +- **Unified datasets**: public hazard datasets, forcing sources, and inspection entrypoints live in one curated catalog. +- **Benchmark-aligned evaluation**: shared benchmark families, smoke configs, and reports keep experiments comparable. +- **Registry-based models**: published baselines and adapters are built through a consistent model-registry surface. +- **Shared training and inference pipelines**: one engine layer supports fit, evaluate, predict, and benchmark execution workflows. + +## Hazard Coverage + +- **Wildfire**: danger forecasting, weekly forecasting, spread baselines, fuels, burn products, and active-fire sources. +- **Earthquake**: waveform picking, dense-grid forecasting adapters, and linked benchmark ecosystems for picking and forecasting. +- **Flood**: streamflow and inundation baselines with benchmark-backed evaluation paths. +- **Tropical Cyclone**: track-and-intensity forecasting baselines plus shared benchmark ecosystems and adapters. 
-[![PyPI - Version](https://img.shields.io/pypi/v/PyGIP)](https://pypi.org/project/PyGIP) -[![Build Status](https://img.shields.io/github/actions/workflow/status/LabRAI/PyGIP/docs.yml)](https://github.com/LabRAI/PyGIP/actions) -[![License](https://img.shields.io/github/license/LabRAI/PyGIP.svg)](https://github.com/LabRAI/PyGIP/blob/main/LICENSE) -[![PyPI - Downloads](https://img.shields.io/pypi/dm/pygip)](https://github.com/LabRAI/PyGIP) -[![Issues](https://img.shields.io/github/issues/LabRAI/PyGIP)](https://github.com/LabRAI/PyGIP) -[![Pull Requests](https://img.shields.io/github/issues-pr/LabRAI/PyGIP)](https://github.com/LabRAI/PyGIP) -[![Stars](https://img.shields.io/github/stars/LabRAI/PyGIP)](https://github.com/LabRAI/PyGIP) -[![GitHub forks](https://img.shields.io/github/forks/LabRAI/PyGIP)](https://github.com/LabRAI/PyGIP) - -PyGIP is a Python library designed for experimenting with graph-based model extraction attacks and defenses. It provides -a modular framework to implement and test attack and defense strategies on graph datasets. - -## How to Cite +## Installation -If you find it useful, please considering cite the following work: +Install PyHazards from PyPI: -```bibtex -@article{li2025intellectual, - title={Intellectual Property in Graph-Based Machine Learning as a Service: Attacks and Defenses}, - author={Li, Lincan and Shen, Bolin and Zhao, Chenxi and Sun, Yuxiang and Zhao, Kaixiang and Pan, Shirui and Dong, Yushun}, - journal={arXiv preprint arXiv:2508.19641}, - year={2025} -} +```bash +pip install pyhazards ``` +If you need GPU execution, install a compatible PyTorch build first and then +select the device as needed: -## Installation +```bash +export PYHAZARDS_DEVICE=cuda:0 +``` -PyGIP supports both CPU and GPU environments. Make sure you have Python installed (version >= 3.8, <3.13). 
+## Quick Start -### Base Installation +Use this as the shortest benchmark-aware starter path: verify the package, +build one registered model, and run one smoke benchmark config. -First, install the core package: +1. Verify the installation: ```bash -pip install PyGIP +python -c "import pyhazards; print(pyhazards.__version__)" ``` -This will install PyGIP with minimal dependencies. +2. Build a registered model: -### CPU Version - -```bash -pip install "PyGIP[torch,dgl]" \ - --index-url https://download.pytorch.org/whl/cpu \ - --extra-index-url https://pypi.org/simple \ - -f https://data.dgl.ai/wheels/repo.html +```python +from pyhazards.models import build_model + +model = build_model( + name="hydrographnet", + task="regression", + node_in_dim=2, + edge_in_dim=3, + out_dim=1, +) +print(type(model).__name__) ``` -### GPU Version (CUDA 12.1) +3. Run a benchmark-aligned smoke configuration: ```bash -pip install "PyGIP[torch,dgl]" \ - --index-url https://download.pytorch.org/whl/cu121 \ - --extra-index-url https://pypi.org/simple \ - -f https://data.dgl.ai/wheels/torch-2.3/cu121/repo.html +python scripts/run_benchmark.py --config pyhazards/configs/flood/hydrographnet_smoke.yaml ``` -## Quick Start +4. Continue with the full docs for dataset inspection, benchmark pages, and +training workflows. -Here’s a simple example to launch a Model Extraction Attack using PyGIP: +## Project Structure -```python -from datasets import Cora -from models.attack import ModelExtractionAttack0 +- `pyhazards.datasets` - dataset catalog, registry surfaces, and inspection entrypoints. +- `pyhazards.models` - model registry, builders, and reusable baseline implementations. +- `pyhazards.benchmarks` - benchmark families, ecosystem mappings, and evaluation contracts. +- `pyhazards.engine` - shared training, inference, runner, and experiment utilities. +- `pyhazards.configs` - smoke and example benchmark configurations. 
+- `docs/` and `docs/source/` - published documentation, generated catalogs, and contributor guides. -# Load the Cora dataset -dataset = Cora() +## Supported Workflows -# Initialize the attack with a sampling ratio of 0.25 -mea = ModelExtractionAttack0(dataset, 0.25) +- inspect hazard datasets and forcing sources before training, +- build baseline and adapter models through the unified registry, +- run smoke tests and benchmark configs for hazard-specific tasks, +- export benchmark reports and compare metrics across models, +- extend the library with new datasets, models, benchmarks, and catalog entries. -# Execute the attack -mea.attack() -``` +## Documentation -This code loads the Cora dataset, initializes a basic model extraction attack (`ModelExtractionAttack0`), and runs the -attack with a specified sampling ratio. +Full documentation: [https://labrai.github.io/PyHazards](https://labrai.github.io/PyHazards) -And a simple example to run a Defense method against Model Extraction Attack: +Recommended reading order: -```python -from datasets import Cora -from models.defense import RandomWM +1. [Installation](https://labrai.github.io/PyHazards/installation.html) +2. [Quick Start](https://labrai.github.io/PyHazards/quick_start.html) +3. [Datasets](https://labrai.github.io/PyHazards/pyhazards_datasets.html) +4. [Models](https://labrai.github.io/PyHazards/pyhazards_models.html) +5. [Benchmarks](https://labrai.github.io/PyHazards/pyhazards_benchmarks.html) +6. 
[Implementation Guide](https://labrai.github.io/PyHazards/implementation.html) -# Load the Cora dataset -dataset = Cora() +## Contributing -# Initialize the attack with a sampling ratio of 0.25 -med = RandomWM(dataset, 0.25) +If you want to extend PyHazards: -# Execute the defense -med.defend() -``` +- **Contributing guide**: [.github/CONTRIBUTING.md](.github/CONTRIBUTING.md) +- **Developer implementation guide**: [docs/source/implementation.rst](docs/source/implementation.rst) +- **Maintainer notes**: [.github/IMPLEMENTATION.md](.github/IMPLEMENTATION.md) -which runs the Random Watermarking Graph to defend against MEA. +Roadmap themes: -If you want to use cuda, please set environment variable: +- more benchmark ecosystems and external data adapters, +- more hazard-specific baselines and evaluation coverage, +- expanded reproducibility, report tooling, and smoke-test coverage, +- stronger examples, tutorials, and contributor automation. -```shell -export PYGIP_DEVICE=cuda:0 -``` +## Community -## Implementation & Contributors Guideline +- **Slack**: [RAI Lab Slack Channel](https://rai-lab-workspace.slack.com/archives/C0AKAJCTY4F) -Refer to [Implementation Guideline](.github/IMPLEMENTATION.md) +Project activity: -Refer to [Contributors Guideline](.github/CONTRIBUTING.md) +[![Star History Chart](https://api.star-history.com/svg?repos=LabRAI/PyHazards&type=Date&from=2026-01-01)](https://www.star-history.com/#LabRAI/PyHazards&Date) -## License +## Citation + +If you use PyHazards in your research, please cite: -[BSD 2-Clause License](LICENSE) +```bibtex +@misc{pyhazards2025, + title = {PyHazards: An Open-Source Library for AI-Powered Hazard Prediction}, + author = {Cheng et al.}, + year = {2025}, + howpublished = {\url{https://github.com/LabRAI/PyHazards}}, + note = {GitHub repository} +} +``` -## Contact +## License -For questions or contributions, please contact blshen@fsu.edu. 
+[MIT License](LICENSE) diff --git a/benchmark/HOWTORUN.md b/benchmark/HOWTORUN.md deleted file mode 100644 index fde22b8b..00000000 --- a/benchmark/HOWTORUN.md +++ /dev/null @@ -1,13 +0,0 @@ -chmod +x scripts/*.sh - -# RQ1: attacks (includes MEA 0..5) -bash scripts/RQ1_attacks.sh 0 - -# RQ2: defenses (best operating points) -bash scripts/RQ2_defenses.sh 0 - -# RQ3: dense sweeps for trade-off curves -bash scripts/RQ3_tradeoff.sh 0 - -# RQ4: overhead summaries -bash scripts/RQ4_overhead.sh diff --git a/benchmark/run/__init__.py b/benchmark/run/__init__.py deleted file mode 100644 index 8b137891..00000000 --- a/benchmark/run/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/benchmark/run/run_attack_track.py b/benchmark/run/run_attack_track.py deleted file mode 100644 index 8ef0b3e5..00000000 --- a/benchmark/run/run_attack_track.py +++ /dev/null @@ -1,152 +0,0 @@ -# run/run_attack_track.py -import argparse -import json -import os -import traceback -from itertools import product - -# Ensure this folder is importable for `utils_benchmark` -import sys -sys.path.insert(0, os.path.dirname(__file__)) - -import torch - -from utils_benchmark import ( - load_dataset, compute_fraction_for_budget, instantiate_attack, call_attack, - write_jsonl, normalize_attack_return, timestamp, ensure_dir -) - - -def parse_args(): - p = argparse.ArgumentParser(description="GraphIP-Bench: Attack track runner (RQ1 + grid search).") - p.add_argument("--datasets", nargs="+", required=True) - p.add_argument("--attacks", nargs="+", required=True, - help="Keys: mea0 mea1 mea2 mea3 mea4 mea5 advmea cega realistic dfea_i dfea_ii dfea_iii") - p.add_argument("--budgets", nargs="+", type=float, default=[0.25, 0.5, 1.0, 2.0, 4.0]) - p.add_argument("--regimes", nargs="+", default=["both", "x_only", "a_only", "data_free"]) - p.add_argument("--seeds", nargs="+", type=int, default=[0, 1, 2]) - p.add_argument("--attack_grid_json", type=str, default=None, - help="JSON file with per-attack grids of ctor/run 
kwargs.") - p.add_argument("--device", type=str, default="cuda:0") - p.add_argument("--outdir", type=str, default="outputs/RQ1") - p.add_argument("--root", type=str, default=None) - p.add_argument("--dry_run", action="store_true") - return p.parse_args() - - -def regime_to_ratios(regime: str, fraction: float): - if regime == "both": - return fraction, fraction - if regime == "x_only": - return fraction, 0.0 - if regime == "a_only": - return 0.0, fraction - if regime == "data_free": - return 0.0, 0.0 - raise ValueError(f"Unknown regime: {regime}") - - -def load_attack_grid(path: str, attacks): - if not path: - return {k: [{"ctor": {}, "run": {}}] for k in attacks} - with open(path, "r", encoding="utf-8") as f: - raw = json.load(f) - grid = {} - for k in attacks: - cfgs = raw.get(k, None) - if not cfgs: - cfgs = [{"ctor": {}, "run": {}}] - norm = [] - for item in cfgs: - if "ctor" in item or "run" in item: - norm.append({"ctor": item.get("ctor", {}), "run": item.get("run", {})}) - else: - norm.append({"ctor": item, "run": {}}) - grid[k] = norm - return grid - - -def main(): - args = parse_args() - os.environ["CUDA_VISIBLE_DEVICES"] = args.device.split(":")[-1] if "cuda" in args.device else "" - ensure_dir(args.outdir) - - header = { - "runner": "attack_track", - "timestamp": timestamp(), - "datasets": args.datasets, - "attacks": args.attacks, - "budgets": args.budgets, - "regimes": args.regimes, - "seeds": args.seeds, - "device": args.device, - } - print(header) - - grid = load_attack_grid(args.attack_grid_json, args.attacks) - - for ds_name in args.datasets: - dataset = load_dataset(ds_name, root=args.root) - for budget, regime, seed, attack_key in product(args.budgets, args.regimes, args.seeds, args.attacks): - fraction = compute_fraction_for_budget(dataset, budget) - x_ratio, a_ratio = regime_to_ratios(regime, fraction) - - cfg_list = grid[attack_key] - for cfg_idx, cfg in enumerate(cfg_list): - ctor_kwargs = cfg.get("ctor", {}) - run_kwargs = cfg.get("run", {}) - 
- print(f"[RUN] ds={ds_name} atk={attack_key} cfg#{cfg_idx} " - f"budget={budget} frac={fraction:.5f} regime={regime} " - f"x={x_ratio:.5f} a={a_ratio:.5f} seed={seed} ctor={ctor_kwargs} run={run_kwargs}") - - if args.dry_run: - continue - - try: - torch.manual_seed(seed) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(seed) - attack = instantiate_attack(attack_key, dataset, fraction, x_ratio, a_ratio, ctor_kwargs) - perf, comp = normalize_attack_return(call_attack(attack, run_kwargs, seed)) - - record = { - "track": "attack", - "dataset": ds_name, - "attack": attack_key, - "config_index": cfg_idx, - "config": cfg, - "budget_mult": budget, - "fraction": fraction, - "regime": regime, - "attack_x_ratio": x_ratio, - "attack_a_ratio": a_ratio, - "seed": seed, - "perf": perf, - "comp": comp, - } - except Exception as e: - record = { - "track": "attack", - "dataset": ds_name, - "attack": attack_key, - "config_index": cfg_idx, - "config": cfg, - "budget_mult": budget, - "fraction": fraction, - "regime": regime, - "attack_x_ratio": x_ratio, - "attack_a_ratio": a_ratio, - "seed": seed, - "error": str(e), - "traceback": traceback.format_exc(), - } - - out_jsonl = os.path.join(args.outdir, f"{ds_name}.jsonl") - write_jsonl(out_jsonl, record) - - print(f"[DONE] Results saved to: {args.outdir}") - - -if __name__ == "__main__": - main() diff --git a/benchmark/run/run_ownership_track.py b/benchmark/run/run_ownership_track.py deleted file mode 100644 index 547823fc..00000000 --- a/benchmark/run/run_ownership_track.py +++ /dev/null @@ -1,120 +0,0 @@ -# run/run_ownership_track.py -import argparse -import json -import os -import traceback -import sys - -# Ensure this folder is importable for `utils_benchmark` -sys.path.insert(0, os.path.dirname(__file__)) - -import torch - -from utils_benchmark import ( - load_dataset, instantiate_defense, call_defense, - write_jsonl, normalize_defense_return, timestamp, ensure_dir -) - - -def parse_args(): - p = 
argparse.ArgumentParser(description="GraphIP-Bench: Ownership track (RQ2/RQ3 + grid search).") - p.add_argument("--datasets", nargs="+", required=True) - p.add_argument("--defenses", nargs="+", required=True, - help="Keys: randomwm backdoorwm survivewm imperceptiblewm") - p.add_argument("--seeds", nargs="+", type=int, default=[0, 1, 2]) - p.add_argument("--defense_grid_json", type=str, default=None, - help="JSON file with per-defense grids of ctor/run kwargs.") - p.add_argument("--device", type=str, default="cuda:0") - p.add_argument("--outdir", type=str, default="outputs/RQ2_RQ3") - p.add_argument("--root", type=str, default=None) - p.add_argument("--dry_run", action="store_true") - return p.parse_args() - - -def load_defense_grid(path: str, defenses): - if not path: - return {k: [{"ctor": {}, "run": {}}] for k in defenses} - with open(path, "r", encoding="utf-8") as f: - raw = json.load(f) - grid = {} - for k in defenses: - cfgs = raw.get(k, None) - if not cfgs: - cfgs = [{"ctor": {}, "run": {}}] - norm = [] - for item in cfgs: - if "ctor" in item or "run" in item: - norm.append({"ctor": item.get("ctor", {}), "run": item.get("run", {})}) - else: - norm.append({"ctor": item, "run": {}}) - grid[k] = norm - return grid - - -def main(): - args = parse_args() - os.environ["CUDA_VISIBLE_DEVICES"] = args.device.split(":")[-1] if "cuda" in args.device else "" - ensure_dir(args.outdir) - - header = { - "runner": "ownership_track", - "timestamp": timestamp(), - "datasets": args.datasets, - "defenses": args.defenses, - "seeds": args.seeds, - "device": args.device, - } - print(header) - - grid = load_defense_grid(args.defense_grid_json, args.defenses) - - for ds_name in args.datasets: - dataset = load_dataset(ds_name, root=args.root) - for defense_key in args.defenses: - cfg_list = grid[defense_key] - for cfg_idx, cfg in enumerate(cfg_list): - ctor_kwargs = cfg.get("ctor", {}) - run_kwargs = cfg.get("run", {}) - for seed in args.seeds: - print(f"[RUN] ds={ds_name} 
defense={defense_key} cfg#{cfg_idx} ctor={ctor_kwargs} run={run_kwargs} seed={seed}") - - if args.dry_run: - continue - - try: - torch.manual_seed(seed) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(seed) - defense = instantiate_defense(defense_key, dataset, ctor_kwargs) - perf, comp = normalize_defense_return(call_defense(defense, run_kwargs, seed)) - - record = { - "track": "ownership", - "dataset": ds_name, - "defense": defense_key, - "config_index": cfg_idx, - "config": cfg, - "seed": seed, - "perf": perf, - "comp": comp, - } - except Exception as e: - record = { - "track": "ownership", - "dataset": ds_name, - "defense": defense_key, - "config_index": cfg_idx, - "config": cfg, - "seed": seed, - "error": str(e), - "traceback": traceback.format_exc(), - } - - out_jsonl = os.path.join(args.outdir, f"{ds_name}.jsonl") - write_jsonl(out_jsonl, record) - - print(f"[DONE] Results saved to: {args.outdir}") - - -if __name__ == "__main__": - main() diff --git a/benchmark/run/select_best.py b/benchmark/run/select_best.py deleted file mode 100644 index 6d1238eb..00000000 --- a/benchmark/run/select_best.py +++ /dev/null @@ -1,142 +0,0 @@ -# run/select_best.py -import argparse -import glob -import json -import os -from typing import Dict, Any, List - -import pandas as pd - - -def read_jsonl(folder: str) -> List[Dict[str, Any]]: - items = [] - for p in sorted(glob.glob(os.path.join(folder, "*.jsonl"))): - with open(p, "r", encoding="utf-8") as f: - for line in f: - line = line.strip() - if line: - items.append(json.loads(line)) - return items - - -def _score_attack(perf: Dict[str, Any]) -> float: - fid = perf.get("fid", None) or perf.get("fidelity", None) - if fid is not None: - return float(fid) - acc = perf.get("acc", None) or perf.get("accuracy", None) or perf.get("test_accuracy", None) - return float(acc) if acc is not None else float("nan") - - -def _score_defense(perf: Dict[str, Any]) -> float: - fid = perf.get("fid", None) or perf.get("fidelity", None) 
or perf.get("post_fidelity", None) - acc = perf.get("def_acc", None) or perf.get("acc", None) or perf.get("defense_accuracy", None) - if fid is None and acc is None: - return float("nan") - if fid is None: - fid = 1.0 - if acc is None: - acc = 1.0 - return (1.0 - float(fid)) * float(acc) - - -def select_best_attack(records: List[Dict[str, Any]]): - rows = [] - for r in records: - if r.get("track") != "attack" or r.get("error"): - continue - perf = r.get("perf", {}) - score = _score_attack(perf) - row = {**r} - row["score_attack"] = score - rows.append(row) - if not rows: - return None, None - df = pd.DataFrame(rows) - keys = ["dataset", "attack", "budget_mult", "regime"] - idx = df.groupby(keys)["score_attack"].idxmax() - best_df = df.loc[idx].reset_index(drop=True) - return df, best_df - - -def select_best_defense(records: List[Dict[str, Any]]): - rows = [] - for r in records: - if r.get("track") != "ownership" or r.get("error"): - continue - perf = r.get("perf", {}) - score = _score_defense(perf) - row = {**r} - row["score_defense"] = score - rows.append(row) - if not rows: - return None, None - df = pd.DataFrame(rows) - keys = ["dataset", "defense"] - idx = df.groupby(keys)["score_defense"].idxmax() - best_df = df.loc[idx].reset_index(drop=True) - return df, best_df - - -def main(): - ap = argparse.ArgumentParser() - ap.add_argument("--rq1_dir", type=str, default="outputs/RQ1") - ap.add_argument("--rq2_dir", type=str, default="outputs/RQ2_RQ3") - ap.add_argument("--outdir", type=str, default="outputs/leaderboards") - args = ap.parse_args() - - os.makedirs(args.outdir, exist_ok=True) - - rec1 = read_jsonl(args.rq1_dir) - rec2 = read_jsonl(args.rq2_dir) - - if rec1: - full1, best1 = select_best_attack(rec1) - if full1 is not None: - full1.to_csv(os.path.join(args.outdir, "RQ1_full.csv"), index=False) - best1.to_csv(os.path.join(args.outdir, "RQ1_best.csv"), index=False) - out = {} - for _, row in best1.iterrows(): - ds = row["dataset"]; atk = row["attack"] - key = 
f"{ds}::{atk}::budget{row['budget_mult']}::regime{row['regime']}" - out[key] = { - "dataset": ds, - "attack": atk, - "budget_mult": row["budget_mult"], - "regime": row["regime"], - "config_index": int(row["config_index"]), - "config": row["config"], - "seed": int(row["seed"]), - "score_attack": float(row["score_attack"]), - "perf": row["perf"], - "comp": row["comp"], - } - with open(os.path.join(args.outdir, "RQ1_best_configs.json"), "w", encoding="utf-8") as f: - json.dump(out, f, indent=2) - - if rec2: - full2, best2 = select_best_defense(rec2) - if full2 is not None: - full2.to_csv(os.path.join(args.outdir, "RQ2_RQ3_full.csv"), index=False) - best2.to_csv(os.path.join(args.outdir, "RQ2_RQ3_best.csv"), index=False) - out = {} - for _, row in best2.iterrows(): - ds = row["dataset"]; d = row["defense"] - key = f"{ds}::{d}" - out[key] = { - "dataset": ds, - "defense": d, - "config_index": int(row["config_index"]), - "config": row["config"], - "seed": int(row["seed"]), - "score_defense": float(row["score_defense"]), - "perf": row["perf"], - "comp": row["comp"], - } - with open(os.path.join(args.outdir, "RQ2_RQ3_best_configs.json"), "w", encoding="utf-8") as f: - json.dump(out, f, indent=2) - - print(f"[DONE] Leaderboards written under {args.outdir}") - - -if __name__ == "__main__": - main() diff --git a/benchmark/run/to_latex.py b/benchmark/run/to_latex.py deleted file mode 100644 index 103f79f0..00000000 --- a/benchmark/run/to_latex.py +++ /dev/null @@ -1,61 +0,0 @@ -# run/to_latex.py -import argparse -import json -import os -import pandas as pd - - -def to_percent(x, digits=1): - try: - return f"{float(x) * 100:.{digits}f}" - except Exception: - return "-" - - -def table_rq1(best_csv: str, out_tex: str): - df = pd.read_csv(best_csv) - keep = ["dataset", "attack", "budget_mult", "regime", "perf"] - df = df[keep].copy() - df["fid"] = df["perf"].apply(lambda s: json.loads(s.replace("'", '"')).get("fid", None) - or json.loads(s.replace("'", '"')).get("fidelity", None)) 
- df["acc"] = df["perf"].apply(lambda s: json.loads(s.replace("'", '"')).get("acc", None) - or json.loads(s.replace("'", '"')).get("accuracy", None) - or json.loads(s.replace("'", '"')).get("test_accuracy", None)) - df["Fidelity(%)"] = df["fid"].apply(lambda v: to_percent(v, 1)) - df["Accuracy(%)"] = df["acc"].apply(lambda v: to_percent(v, 1)) - df = df.drop(columns=["perf", "fid", "acc"]).sort_values(["dataset", "budget_mult", "regime", "attack"]) - with open(out_tex, "w", encoding="utf-8") as f: - f.write(df.to_latex(index=False, escape=False)) - print(f"[LaTeX] RQ1 table -> {out_tex}") - - -def table_rq2(best_csv: str, out_tex: str): - df = pd.read_csv(best_csv) - keep = ["dataset", "defense", "perf"] - df = df[keep].copy() - df["fid"] = df["perf"].apply(lambda s: json.loads(s.replace("'", '"')).get("fid", None) - or json.loads(s.replace("'", '"')).get("fidelity", None)) - df["def_acc"] = df["perf"].apply(lambda s: json.loads(s.replace("'", '"')).get("def_acc", None) - or json.loads(s.replace("'", '"')).get("acc", None) - or json.loads(s.replace("'", '"')).get("defense_accuracy", None)) - df["1-Fid(%)"] = df["fid"].apply(lambda v: to_percent(1.0 - float(v) if v is not None else None, 1)) - df["Utility(%)"] = df["def_acc"].apply(lambda v: to_percent(v, 1)) - df = df.drop(columns=["perf", "fid", "def_acc"]).sort_values(["dataset", "defense"]) - with open(out_tex, "w", encoding="utf-8") as f: - f.write(df.to_latex(index=False, escape=False)) - print(f"[LaTeX] RQ2/RQ3 table -> {out_tex}") - - -def main(): - ap = argparse.ArgumentParser() - ap.add_argument("--rq1_best", type=str, default="outputs/leaderboards/RQ1_best.csv") - ap.add_argument("--rq2_best", type=str, default="outputs/leaderboards/RQ2_RQ3_best.csv") - ap.add_argument("--outdir", type=str, default="outputs/leaderboards") - args = ap.parse_args() - os.makedirs(args.outdir, exist_ok=True) - table_rq1(args.rq1_best, os.path.join(args.outdir, "RQ1_best_table.tex")) - table_rq2(args.rq2_best, 
os.path.join(args.outdir, "RQ2_RQ3_best_table.tex")) - - -if __name__ == "__main__": - main() diff --git a/benchmark/run/utils_benchmark.py b/benchmark/run/utils_benchmark.py deleted file mode 100644 index c09a8f60..00000000 --- a/benchmark/run/utils_benchmark.py +++ /dev/null @@ -1,206 +0,0 @@ -# run/utils_benchmark.py -import importlib -import inspect -import json -import os -import sys -import time -from typing import Any, Dict, Tuple - -# Ensure repository root is on sys.path so imports like `pygip.*` always work -_REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) -if _REPO_ROOT not in sys.path: - sys.path.insert(0, _REPO_ROOT) - -import torch - - -def _try_import(path: str): - mod, cls = path.rsplit(".", 1) - m = importlib.import_module(mod) - return getattr(m, cls) - - -# ==== Registries (paths match your repo layout) ==== -ATTACK_REGISTRY = { - # MEA.py — six attacks (0..5) - "mea0": "pygip.models.attack.mea.MEA.ModelExtractionAttack0", - "mea1": "pygip.models.attack.mea.MEA.ModelExtractionAttack1", - "mea2": "pygip.models.attack.mea.MEA.ModelExtractionAttack2", - "mea3": "pygip.models.attack.mea.MEA.ModelExtractionAttack3", - "mea4": "pygip.models.attack.mea.MEA.ModelExtractionAttack4", - "mea5": "pygip.models.attack.mea.MEA.ModelExtractionAttack5", - - # Other attacks - "advmea": "pygip.models.attack.mea.AdvMEA.AdvMEA", - "cega": "pygip.models.attack.mea.CEGA.CEGA", - "realistic": "pygip.models.attack.mea.Realistic.RealisticAttack", - "dfea_i": "pygip.models.attack.mea.DataFreeMEA.DFEATypeI", - "dfea_ii": "pygip.models.attack.mea.DataFreeMEA.DFEATypeII", - "dfea_iii": "pygip.models.attack.mea.DataFreeMEA.DFEATypeIII", -} - -DEFENSE_REGISTRY = { - "randomwm": "pygip.models.defense.atom.RandomWM.RandomWM", - "backdoorwm": "pygip.models.defense.atom.BackdoorWM.BackdoorWM", - "survivewm": "pygip.models.defense.atom.SurviveWM.SurviveWM", - "imperceptiblewm": "pygip.models.defense.atom.ImperceptibleWM.ImperceptibleWM", - # Add more 
if needed (e.g., GROVE) using their import paths. -} - - -def load_dataset(name: str, root: str = None): - """Robust loader that tries several known entry points.""" - tried = [] - for mod, fn in [ - ("pygip.datasets.datasets", "get_dataset"), - ("pygip.datasets", "get_dataset"), - ("pygip.data", "get_dataset"), - ("pygip.data", "build_dataset"), - ]: - try: - m = importlib.import_module(mod) - f = getattr(m, fn) - if root is None: - ds = f(name) - else: - try: - ds = f(name, root=root) - except TypeError: - ds = f(name) - return ds - except Exception as e: - tried.append(f"{mod}.{fn}: {e}") - raise RuntimeError("Could not load dataset. Tried: " + " | ".join(tried)) - - -def get_masks(dataset): - g = dataset.graph_data - n = g.number_of_nodes() if hasattr(g, "number_of_nodes") else dataset.num_nodes - nd = g.ndata - test_mask = nd["test_mask"] - train_mask = nd.get("train_mask", torch.zeros(n, dtype=torch.bool)) - val_mask = nd.get("val_mask", torch.zeros(n, dtype=torch.bool)) - return train_mask, val_mask, test_mask - - -def get_nums(dataset) -> Tuple[int, int, int]: - n = getattr(dataset, "num_nodes", None) or dataset.graph_data.number_of_nodes() - d = getattr(dataset, "num_features", None) or dataset.graph_data.ndata["feat"].shape[1] - c = getattr(dataset, "num_classes", None) or int(dataset.graph_data.ndata["label"].max().item() + 1) - return n, d, c - - -def test_size(dataset) -> int: - _, _, tmask = get_masks(dataset) - return int(tmask.sum().item()) - - -def compute_fraction_for_budget(dataset, budget_mult: float) -> float: - """Convert a test-size–relative budget multiplier to a node fraction.""" - tsz = test_size(dataset) - n, _, _ = get_nums(dataset) - queries = max(1, int(round(budget_mult * tsz))) - return min(0.99, queries / float(n)) - - -def _has_var_kw(fn): - try: - sig = inspect.signature(fn) - for p in sig.parameters.values(): - if p.kind == inspect.Parameter.VAR_KEYWORD: - return True - return False - except (TypeError, ValueError): - return 
False - - -def _filter_kwargs(fn, cfg: Dict[str, Any]) -> Dict[str, Any]: - """Keep only kwargs accepted by `fn` unless it provides **kwargs.""" - if not cfg: - return {} - try: - sig = inspect.signature(fn) - except (TypeError, ValueError): - return {} - if _has_var_kw(fn): - return dict(cfg) - valid = set(sig.parameters.keys()) - return {k: v for k, v in cfg.items() if k in valid} - - -def instantiate_attack(key: str, dataset, fraction: float, x_ratio: float, a_ratio: float, ctor_kwargs: Dict[str, Any]): - cls = _try_import(ATTACK_REGISTRY[key]) - sig = inspect.signature(cls.__init__) - params = sig.parameters - if "attack_x_ratio" in params and "attack_a_ratio" in params: - safe_ctor = _filter_kwargs(cls.__init__, ctor_kwargs) - return cls(dataset=dataset, attack_x_ratio=x_ratio, attack_a_ratio=a_ratio, **safe_ctor) - if "attack_node_fraction" in params: - safe_ctor = _filter_kwargs(cls.__init__, ctor_kwargs) - return cls(dataset=dataset, attack_node_fraction=fraction, **safe_ctor) - if "attack_ratio" in params: - safe_ctor = _filter_kwargs(cls.__init__, ctor_kwargs) - return cls(dataset=dataset, attack_ratio=fraction, **safe_ctor) - safe_ctor = _filter_kwargs(cls.__init__, ctor_kwargs) - return cls(dataset=dataset, attack_node_fraction=fraction, **safe_ctor) - - -def call_attack(attack_obj, run_kwargs: Dict[str, Any], seed: int): - if not hasattr(attack_obj, "attack"): - raise RuntimeError("Attack object has no `.attack()` method.") - fn = getattr(attack_obj, "attack") - safe_run = _filter_kwargs(fn, dict(run_kwargs or {})) - safe_run["seed"] = seed - return fn(**safe_run) - - -def instantiate_defense(key: str, dataset, ctor_kwargs: Dict[str, Any]): - cls = _try_import(DEFENSE_REGISTRY[key]) - safe_ctor = _filter_kwargs(cls.__init__, ctor_kwargs or {}) - return cls(dataset=dataset, **safe_ctor) - - -def call_defense(defense_obj, run_kwargs: Dict[str, Any], seed: int): - for m in ["defend", "run", "fit", "train"]: - if hasattr(defense_obj, m) and 
callable(getattr(defense_obj, m)): - fn = getattr(defense_obj, m) - safe_run = _filter_kwargs(fn, dict(run_kwargs or {})) - safe_run["seed"] = seed - return fn(**safe_run) - if hasattr(defense_obj, "__call__"): - fn = getattr(defense_obj, "__call__") - safe_run = _filter_kwargs(fn, dict(run_kwargs or {})) - safe_run["seed"] = seed - return fn(**safe_run) - raise RuntimeError("No callable entrypoint found on defense object.") - - -def ensure_dir(path: str): - os.makedirs(path, exist_ok=True) - - -def write_jsonl(path: str, record: Dict[str, Any]): - ensure_dir(os.path.dirname(path)) - with open(path, "a", encoding="utf-8") as f: - f.write(json.dumps(record) + "\n") - - -def normalize_attack_return(ret) -> Tuple[Dict[str, Any], Dict[str, Any]]: - if isinstance(ret, tuple) and len(ret) == 2 and isinstance(ret[0], dict) and isinstance(ret[1], dict): - return ret[0], ret[1] - if isinstance(ret, dict): - return ret, {} - raise RuntimeError("attack.attack() did not return the expected (perf_dict, comp_dict) or dict.") - - -def normalize_defense_return(ret) -> Tuple[Dict[str, Any], Dict[str, Any]]: - if isinstance(ret, tuple) and len(ret) == 2 and isinstance(ret[0], dict) and isinstance(ret[1], dict): - return ret[0], ret[1] - if isinstance(ret, dict): - return ret, {} - raise RuntimeError("defense call did not return the expected (perf_dict, comp_dict) or dict.") - - -def timestamp() -> str: - return time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime()) diff --git a/benchmark/scripts/RQ1_attacks.sh b/benchmark/scripts/RQ1_attacks.sh deleted file mode 100644 index 20565f64..00000000 --- a/benchmark/scripts/RQ1_attacks.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -GPU_ID="${1:-0}" - -DATASETS=("Cora" "Citeseer" "PubMed" "Amazon-Photo" "Proteins") -ATTACKS=("mea0" "mea1" "mea2" "mea3" "mea4" "mea5" "advmea" "cega" "realistic" "dfea_i" "dfea_ii" "dfea_iii") -BUDGETS=(0.25 0.5 1.0 2.0 4.0) -REGIMES=("both" "x_only" "a_only" "data_free") -SEEDS=(0 
1 2) - -CONF_DIR="configs" -OUTDIR="outputs/RQ1" -LEADER_DIR="outputs/leaderboards" -mkdir -p "${CONF_DIR}" "${OUTDIR}" "${LEADER_DIR}" - -GRID_JSON="${CONF_DIR}/attack_grids_large.json" -cat > "${GRID_JSON}" <<'JSON' -{ - "mea0": [ { "ctor": {}, "run": {} } ], - "mea1": [ { "ctor": {}, "run": {} } ], - "mea2": [ { "ctor": {}, "run": {} } ], - "mea3": [ { "ctor": {}, "run": {} } ], - "mea4": [ { "ctor": {}, "run": {} } ], - "mea5": [ { "ctor": {}, "run": {} } ], - - "advmea": [ { "ctor": {}, "run": {} } ], - - "cega": [ - { "ctor": {}, "run": {"epochs_per_cycle": 1, "LR_CEGA": 0.01, "setup": "experiment"} }, - { "ctor": {}, "run": {"epochs_per_cycle": 2, "LR_CEGA": 0.01, "setup": "experiment"} }, - { "ctor": {}, "run": {"epochs_per_cycle": 1, "LR_CEGA": 0.005, "setup": "experiment"} }, - { "ctor": {}, "run": {"epochs_per_cycle": 1, "LR_CEGA": 0.01, "setup": "perturbation", "num_perturbations": 100, "noise_level": 0.05} } - ], - - "realistic": [ - { "ctor": {"hidden_dim": 32, "threshold_s": 0.60, "threshold_a": 0.40}, "run": {} }, - { "ctor": {"hidden_dim": 64, "threshold_s": 0.70, "threshold_a": 0.50}, "run": {} }, - { "ctor": {"hidden_dim": 128, "threshold_s": 0.75, "threshold_a": 0.60}, "run": {} }, - { "ctor": {"hidden_dim": 64, "threshold_s": 0.80, "threshold_a": 0.65}, "run": {} } - ], - - "dfea_i": [ { "ctor": {}, "run": {} } ], - "dfea_ii": [ { "ctor": {}, "run": {} } ], - "dfea_iii": [ { "ctor": {}, "run": {} } ] -} -JSON - -echo "[RQ1] Sweep attacks (with grids) on GPU ${GPU_ID} ..." -python run/run_attack_track.py \ - --datasets "${DATASETS[@]}" \ - --attacks "${ATTACKS[@]}" \ - --budgets "${BUDGETS[@]}" \ - --regimes "${REGIMES[@]}" \ - --seeds "${SEEDS[@]}" \ - --attack_grid_json "${GRID_JSON}" \ - --device "cuda:${GPU_ID}" \ - --outdir "${OUTDIR}" - -echo "[RQ1] Select best configs ..." -python run/select_best.py --rq1_dir "${OUTDIR}" --outdir "${LEADER_DIR}" - -echo "[RQ1] Export LaTeX tables ..." 
-python run/to_latex.py --outdir "${LEADER_DIR}" - -echo "[RQ1] Done. Raw -> ${OUTDIR}, Leaderboards & LaTeX -> ${LEADER_DIR}" diff --git a/benchmark/scripts/RQ2_defenses.sh b/benchmark/scripts/RQ2_defenses.sh deleted file mode 100644 index e3a3643c..00000000 --- a/benchmark/scripts/RQ2_defenses.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -GPU_ID="${1:-0}" - -DATASETS=("Cora" "Citeseer" "PubMed" "Amazon-Photo" "Proteins") -DEFENSES=("randomwm" "backdoorwm" "survivewm" "imperceptiblewm") -SEEDS=(0 1 2) - -CONF_DIR="configs" -OUTDIR="outputs/RQ2_RQ3" -LEADER_DIR="outputs/leaderboards" -mkdir -p "${CONF_DIR}" "${OUTDIR}" "${LEADER_DIR}" - -GRID_JSON="${CONF_DIR}/defense_grids_large.json" -cat > "${GRID_JSON}" <<'JSON' -{ - "randomwm": [ - {"ctor": {"wm_ratio": 0.002}}, - {"ctor": {"wm_ratio": 0.005}}, - {"ctor": {"wm_ratio": 0.010}}, - {"ctor": {"wm_ratio": 0.020}}, - {"ctor": {"wm_ratio": 0.050}} - ], - "backdoorwm": [ - {"ctor": {"trigger_density": 0.01}}, - {"ctor": {"trigger_density": 0.02}}, - {"ctor": {"trigger_density": 0.05}}, - {"ctor": {"trigger_density": 0.10}}, - {"ctor": {"trigger_density": 0.20}} - ], - "survivewm": [ - {"ctor": {"wm_strength": 0.25}}, - {"ctor": {"wm_strength": 0.50}}, - {"ctor": {"wm_strength": 1.00}}, - {"ctor": {"wm_strength": 1.50}}, - {"ctor": {"wm_strength": 2.00}} - ], - "imperceptiblewm": [ - {"ctor": {"epsilon": 0.25}}, - {"ctor": {"epsilon": 0.50}}, - {"ctor": {"epsilon": 1.00}}, - {"ctor": {"epsilon": 2.00}}, - {"ctor": {"epsilon": 4.00}} - ] -} -JSON - -echo "[RQ2] Sweep defenses on GPU ${GPU_ID} ..." -python run/run_ownership_track.py \ - --datasets "${DATASETS[@]}" \ - --defenses "${DEFENSES[@]}" \ - --defense_grid_json "${GRID_JSON}" \ - --seeds "${SEEDS[@]}" \ - --device "cuda:${GPU_ID}" \ - --outdir "${OUTDIR}" - -echo "[RQ2] Select best configs ..." -python run/select_best.py --rq2_dir "${OUTDIR}" --outdir "${LEADER_DIR}" - -echo "[RQ2] Export LaTeX tables ..." 
-python run/to_latex.py --outdir "${LEADER_DIR}" - -echo "[RQ2] Done. Raw -> ${OUTDIR}, Leaderboards & LaTeX -> ${LEADER_DIR}" diff --git a/benchmark/scripts/RQ3_tradeoff.sh b/benchmark/scripts/RQ3_tradeoff.sh deleted file mode 100644 index 6f8a2d93..00000000 --- a/benchmark/scripts/RQ3_tradeoff.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -GPU_ID="${1:-0}" - -DATASETS=("Cora" "Citeseer" "PubMed" "Amazon-Photo" "Proteins") -DEFENSES=("randomwm" "backdoorwm" "survivewm" "imperceptiblewm") -SEEDS=(0 1 2) - -CONF_DIR="configs" -OUTDIR="outputs/RQ2_RQ3" -LEADER_DIR="outputs/leaderboards" -mkdir -p "${CONF_DIR}" "${OUTDIR}" "${LEADER_DIR}" - -GRID_JSON="${CONF_DIR}/defense_grids_dense.json" -cat > "${GRID_JSON}" <<'JSON' -{ - "randomwm": [ - {"ctor": {"wm_ratio": 0.001}}, - {"ctor": {"wm_ratio": 0.002}}, - {"ctor": {"wm_ratio": 0.005}}, - {"ctor": {"wm_ratio": 0.010}}, - {"ctor": {"wm_ratio": 0.020}}, - {"ctor": {"wm_ratio": 0.050}} - ], - "backdoorwm": [ - {"ctor": {"trigger_density": 0.005}}, - {"ctor": {"trigger_density": 0.010}}, - {"ctor": {"trigger_density": 0.020}}, - {"ctor": {"trigger_density": 0.050}}, - {"ctor": {"trigger_density": 0.100}}, - {"ctor": {"trigger_density": 0.200}} - ], - "survivewm": [ - {"ctor": {"wm_strength": 0.10}}, - {"ctor": {"wm_strength": 0.25}}, - {"ctor": {"wm_strength": 0.50}}, - {"ctor": {"wm_strength": 1.00}}, - {"ctor": {"wm_strength": 1.50}}, - {"ctor": {"wm_strength": 2.00}} - ], - "imperceptiblewm": [ - {"ctor": {"epsilon": 0.10}}, - {"ctor": {"epsilon": 0.25}}, - {"ctor": {"epsilon": 0.50}}, - {"ctor": {"epsilon": 1.00}}, - {"ctor": {"epsilon": 2.00}}, - {"ctor": {"epsilon": 4.00}} - ] -} -JSON - -echo "[RQ3] Sweep defenses densely for trade-off curves ..." 
-python run/run_ownership_track.py \ - --datasets "${DATASETS[@]}" \ - --defenses "${DEFENSES[@]}" \ - --defense_grid_json "${GRID_JSON}" \ - --seeds "${SEEDS[@]}" \ - --device "cuda:${GPU_ID}" \ - --outdir "${OUTDIR}" - -python run/select_best.py --rq2_dir "${OUTDIR}" --outdir "${LEADER_DIR}" -python run/to_latex.py --outdir "${LEADER_DIR}" - -echo "[RQ3] Done. Raw -> ${OUTDIR}, Leaderboards & LaTeX -> ${LEADER_DIR}" diff --git a/benchmark/scripts/RQ4_overhead.sh b/benchmark/scripts/RQ4_overhead.sh deleted file mode 100644 index b447688e..00000000 --- a/benchmark/scripts/RQ4_overhead.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -RQ1_DIR="outputs/RQ1" -RQ2_DIR="outputs/RQ2_RQ3" -OUTDIR="outputs/RQ4_summary" -LEADER_DIR="outputs/leaderboards" -mkdir -p "${OUTDIR}" "${LEADER_DIR}" - -echo "[RQ4] Summarizing overhead ..." -python run/summarize_overhead.py \ - --rq1_dir "${RQ1_DIR}" \ - --rq2_dir "${RQ2_DIR}" \ - --outdir "${OUTDIR}" - -python run/select_best.py --rq1_dir "${RQ1_DIR}" --rq2_dir "${RQ2_DIR}" --outdir "${LEADER_DIR}" -python run/to_latex.py --outdir "${LEADER_DIR}" - -echo "[RQ4] Done. Summary CSVs -> ${OUTDIR}, Leaderboards & LaTeX -> ${LEADER_DIR}" diff --git a/docs/.buildinfo b/docs/.buildinfo new file mode 100644 index 00000000..98674ee1 --- /dev/null +++ b/docs/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file records the configuration used when building these files. When it is not found, a full rebuild will be done. +config: 30ff9d27638d808a9cd0c81bbb87b52b +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/.buildinfo.bak b/docs/.buildinfo.bak new file mode 100644 index 00000000..d399442b --- /dev/null +++ b/docs/.buildinfo.bak @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file records the configuration used when building these files. When it is not found, a full rebuild will be done. 
+config: 815519ab4f9c1d656cf63f30a8a47d13 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/.doctrees/api/modules.doctree b/docs/.doctrees/api/modules.doctree new file mode 100644 index 00000000..0446dd37 Binary files /dev/null and b/docs/.doctrees/api/modules.doctree differ diff --git a/docs/.doctrees/api/pyhazards.benchmarks.doctree b/docs/.doctrees/api/pyhazards.benchmarks.doctree new file mode 100644 index 00000000..70c03763 Binary files /dev/null and b/docs/.doctrees/api/pyhazards.benchmarks.doctree differ diff --git a/docs/.doctrees/api/pyhazards.configs.doctree b/docs/.doctrees/api/pyhazards.configs.doctree new file mode 100644 index 00000000..d4017b63 Binary files /dev/null and b/docs/.doctrees/api/pyhazards.configs.doctree differ diff --git a/docs/.doctrees/api/pyhazards.datasets.doctree b/docs/.doctrees/api/pyhazards.datasets.doctree new file mode 100644 index 00000000..7123e09c Binary files /dev/null and b/docs/.doctrees/api/pyhazards.datasets.doctree differ diff --git a/docs/.doctrees/api/pyhazards.doctree b/docs/.doctrees/api/pyhazards.doctree new file mode 100644 index 00000000..cd4fafd4 Binary files /dev/null and b/docs/.doctrees/api/pyhazards.doctree differ diff --git a/docs/.doctrees/api/pyhazards.engine.doctree b/docs/.doctrees/api/pyhazards.engine.doctree new file mode 100644 index 00000000..123ad106 Binary files /dev/null and b/docs/.doctrees/api/pyhazards.engine.doctree differ diff --git a/docs/.doctrees/api/pyhazards.metrics.doctree b/docs/.doctrees/api/pyhazards.metrics.doctree new file mode 100644 index 00000000..96316ae6 Binary files /dev/null and b/docs/.doctrees/api/pyhazards.metrics.doctree differ diff --git a/docs/.doctrees/api/pyhazards.models.doctree b/docs/.doctrees/api/pyhazards.models.doctree new file mode 100644 index 00000000..3b7e6402 Binary files /dev/null and b/docs/.doctrees/api/pyhazards.models.doctree differ diff --git a/docs/.doctrees/api/pyhazards.reports.doctree b/docs/.doctrees/api/pyhazards.reports.doctree new 
file mode 100644 index 00000000..258cd276 Binary files /dev/null and b/docs/.doctrees/api/pyhazards.reports.doctree differ diff --git a/docs/.doctrees/api/pyhazards.utils.doctree b/docs/.doctrees/api/pyhazards.utils.doctree new file mode 100644 index 00000000..1901a5c0 Binary files /dev/null and b/docs/.doctrees/api/pyhazards.utils.doctree differ diff --git a/docs/.doctrees/appendix_a_coverage.doctree b/docs/.doctrees/appendix_a_coverage.doctree new file mode 100644 index 00000000..8b2ec059 Binary files /dev/null and b/docs/.doctrees/appendix_a_coverage.doctree differ diff --git a/docs/.doctrees/benchmarks/aefa.doctree b/docs/.doctrees/benchmarks/aefa.doctree new file mode 100644 index 00000000..ca6cf780 Binary files /dev/null and b/docs/.doctrees/benchmarks/aefa.doctree differ diff --git a/docs/.doctrees/benchmarks/caravan.doctree b/docs/.doctrees/benchmarks/caravan.doctree new file mode 100644 index 00000000..686fa9a1 Binary files /dev/null and b/docs/.doctrees/benchmarks/caravan.doctree differ diff --git a/docs/.doctrees/benchmarks/earthquake_benchmark.doctree b/docs/.doctrees/benchmarks/earthquake_benchmark.doctree new file mode 100644 index 00000000..2051af51 Binary files /dev/null and b/docs/.doctrees/benchmarks/earthquake_benchmark.doctree differ diff --git a/docs/.doctrees/benchmarks/flood_benchmark.doctree b/docs/.doctrees/benchmarks/flood_benchmark.doctree new file mode 100644 index 00000000..c431e1ea Binary files /dev/null and b/docs/.doctrees/benchmarks/flood_benchmark.doctree differ diff --git a/docs/.doctrees/benchmarks/floodcastbench.doctree b/docs/.doctrees/benchmarks/floodcastbench.doctree new file mode 100644 index 00000000..558aa3f6 Binary files /dev/null and b/docs/.doctrees/benchmarks/floodcastbench.doctree differ diff --git a/docs/.doctrees/benchmarks/hydrobench.doctree b/docs/.doctrees/benchmarks/hydrobench.doctree new file mode 100644 index 00000000..10832d7a Binary files /dev/null and b/docs/.doctrees/benchmarks/hydrobench.doctree differ 
diff --git a/docs/.doctrees/benchmarks/ibtracs.doctree b/docs/.doctrees/benchmarks/ibtracs.doctree new file mode 100644 index 00000000..7adc811c Binary files /dev/null and b/docs/.doctrees/benchmarks/ibtracs.doctree differ diff --git a/docs/.doctrees/benchmarks/pick_benchmark.doctree b/docs/.doctrees/benchmarks/pick_benchmark.doctree new file mode 100644 index 00000000..fbe56257 Binary files /dev/null and b/docs/.doctrees/benchmarks/pick_benchmark.doctree differ diff --git a/docs/.doctrees/benchmarks/pycsep.doctree b/docs/.doctrees/benchmarks/pycsep.doctree new file mode 100644 index 00000000..f69240f6 Binary files /dev/null and b/docs/.doctrees/benchmarks/pycsep.doctree differ diff --git a/docs/.doctrees/benchmarks/seisbench.doctree b/docs/.doctrees/benchmarks/seisbench.doctree new file mode 100644 index 00000000..7e604061 Binary files /dev/null and b/docs/.doctrees/benchmarks/seisbench.doctree differ diff --git a/docs/.doctrees/benchmarks/tcbench_alpha.doctree b/docs/.doctrees/benchmarks/tcbench_alpha.doctree new file mode 100644 index 00000000..b765cc0d Binary files /dev/null and b/docs/.doctrees/benchmarks/tcbench_alpha.doctree differ diff --git a/docs/.doctrees/benchmarks/tropical_cyclone_benchmark.doctree b/docs/.doctrees/benchmarks/tropical_cyclone_benchmark.doctree new file mode 100644 index 00000000..ad1fe955 Binary files /dev/null and b/docs/.doctrees/benchmarks/tropical_cyclone_benchmark.doctree differ diff --git a/docs/.doctrees/benchmarks/tropicyclonenet_dataset.doctree b/docs/.doctrees/benchmarks/tropicyclonenet_dataset.doctree new file mode 100644 index 00000000..bf798a9f Binary files /dev/null and b/docs/.doctrees/benchmarks/tropicyclonenet_dataset.doctree differ diff --git a/docs/.doctrees/benchmarks/waterbench.doctree b/docs/.doctrees/benchmarks/waterbench.doctree new file mode 100644 index 00000000..831ba2b1 Binary files /dev/null and b/docs/.doctrees/benchmarks/waterbench.doctree differ diff --git 
a/docs/.doctrees/benchmarks/wildfire_benchmark.doctree b/docs/.doctrees/benchmarks/wildfire_benchmark.doctree new file mode 100644 index 00000000..175de03f Binary files /dev/null and b/docs/.doctrees/benchmarks/wildfire_benchmark.doctree differ diff --git a/docs/.doctrees/benchmarks/wildfirespreadts_ecosystem.doctree b/docs/.doctrees/benchmarks/wildfirespreadts_ecosystem.doctree new file mode 100644 index 00000000..4f0c907f Binary files /dev/null and b/docs/.doctrees/benchmarks/wildfirespreadts_ecosystem.doctree differ diff --git a/docs/.doctrees/cite.doctree b/docs/.doctrees/cite.doctree new file mode 100644 index 00000000..f784e4e8 Binary files /dev/null and b/docs/.doctrees/cite.doctree differ diff --git a/docs/.doctrees/datasets/aefa_forecast.doctree b/docs/.doctrees/datasets/aefa_forecast.doctree new file mode 100644 index 00000000..b6b9ef8c Binary files /dev/null and b/docs/.doctrees/datasets/aefa_forecast.doctree differ diff --git a/docs/.doctrees/datasets/caravan_streamflow.doctree b/docs/.doctrees/datasets/caravan_streamflow.doctree new file mode 100644 index 00000000..d51b56b3 Binary files /dev/null and b/docs/.doctrees/datasets/caravan_streamflow.doctree differ diff --git a/docs/.doctrees/datasets/era5.doctree b/docs/.doctrees/datasets/era5.doctree new file mode 100644 index 00000000..797cc8f8 Binary files /dev/null and b/docs/.doctrees/datasets/era5.doctree differ diff --git a/docs/.doctrees/datasets/firms.doctree b/docs/.doctrees/datasets/firms.doctree new file mode 100644 index 00000000..4ed9645a Binary files /dev/null and b/docs/.doctrees/datasets/firms.doctree differ diff --git a/docs/.doctrees/datasets/floodcastbench_inundation.doctree b/docs/.doctrees/datasets/floodcastbench_inundation.doctree new file mode 100644 index 00000000..54d61acb Binary files /dev/null and b/docs/.doctrees/datasets/floodcastbench_inundation.doctree differ diff --git a/docs/.doctrees/datasets/fpa_fod_tabular.doctree b/docs/.doctrees/datasets/fpa_fod_tabular.doctree new 
file mode 100644 index 00000000..1abee813 Binary files /dev/null and b/docs/.doctrees/datasets/fpa_fod_tabular.doctree differ diff --git a/docs/.doctrees/datasets/fpa_fod_weekly.doctree b/docs/.doctrees/datasets/fpa_fod_weekly.doctree new file mode 100644 index 00000000..57df31a7 Binary files /dev/null and b/docs/.doctrees/datasets/fpa_fod_weekly.doctree differ diff --git a/docs/.doctrees/datasets/goesr.doctree b/docs/.doctrees/datasets/goesr.doctree new file mode 100644 index 00000000..4280eeb3 Binary files /dev/null and b/docs/.doctrees/datasets/goesr.doctree differ diff --git a/docs/.doctrees/datasets/hydrobench_streamflow.doctree b/docs/.doctrees/datasets/hydrobench_streamflow.doctree new file mode 100644 index 00000000..ed5b9aa8 Binary files /dev/null and b/docs/.doctrees/datasets/hydrobench_streamflow.doctree differ diff --git a/docs/.doctrees/datasets/ibtracs_tracks.doctree b/docs/.doctrees/datasets/ibtracs_tracks.doctree new file mode 100644 index 00000000..8a6fef92 Binary files /dev/null and b/docs/.doctrees/datasets/ibtracs_tracks.doctree differ diff --git a/docs/.doctrees/datasets/landfire.doctree b/docs/.doctrees/datasets/landfire.doctree new file mode 100644 index 00000000..fad0d034 Binary files /dev/null and b/docs/.doctrees/datasets/landfire.doctree differ diff --git a/docs/.doctrees/datasets/merra2.doctree b/docs/.doctrees/datasets/merra2.doctree new file mode 100644 index 00000000..a5d2ad13 Binary files /dev/null and b/docs/.doctrees/datasets/merra2.doctree differ diff --git a/docs/.doctrees/datasets/mtbs.doctree b/docs/.doctrees/datasets/mtbs.doctree new file mode 100644 index 00000000..b79733ff Binary files /dev/null and b/docs/.doctrees/datasets/mtbs.doctree differ diff --git a/docs/.doctrees/datasets/noaa_flood.doctree b/docs/.doctrees/datasets/noaa_flood.doctree new file mode 100644 index 00000000..f5e7da08 Binary files /dev/null and b/docs/.doctrees/datasets/noaa_flood.doctree differ diff --git 
a/docs/.doctrees/datasets/pick_benchmark_waveforms.doctree b/docs/.doctrees/datasets/pick_benchmark_waveforms.doctree new file mode 100644 index 00000000..76512a96 Binary files /dev/null and b/docs/.doctrees/datasets/pick_benchmark_waveforms.doctree differ diff --git a/docs/.doctrees/datasets/seisbench_waveforms.doctree b/docs/.doctrees/datasets/seisbench_waveforms.doctree new file mode 100644 index 00000000..75ceae7c Binary files /dev/null and b/docs/.doctrees/datasets/seisbench_waveforms.doctree differ diff --git a/docs/.doctrees/datasets/tcbench_alpha.doctree b/docs/.doctrees/datasets/tcbench_alpha.doctree new file mode 100644 index 00000000..17c285bd Binary files /dev/null and b/docs/.doctrees/datasets/tcbench_alpha.doctree differ diff --git a/docs/.doctrees/datasets/tropicyclonenet_dataset.doctree b/docs/.doctrees/datasets/tropicyclonenet_dataset.doctree new file mode 100644 index 00000000..416c8a13 Binary files /dev/null and b/docs/.doctrees/datasets/tropicyclonenet_dataset.doctree differ diff --git a/docs/.doctrees/datasets/waterbench_streamflow.doctree b/docs/.doctrees/datasets/waterbench_streamflow.doctree new file mode 100644 index 00000000..1ca63294 Binary files /dev/null and b/docs/.doctrees/datasets/waterbench_streamflow.doctree differ diff --git a/docs/.doctrees/datasets/wfigs.doctree b/docs/.doctrees/datasets/wfigs.doctree new file mode 100644 index 00000000..154e9518 Binary files /dev/null and b/docs/.doctrees/datasets/wfigs.doctree differ diff --git a/docs/.doctrees/environment.pickle b/docs/.doctrees/environment.pickle new file mode 100644 index 00000000..08daeef9 Binary files /dev/null and b/docs/.doctrees/environment.pickle differ diff --git a/docs/.doctrees/implementation.doctree b/docs/.doctrees/implementation.doctree new file mode 100644 index 00000000..2807cdfa Binary files /dev/null and b/docs/.doctrees/implementation.doctree differ diff --git a/docs/.doctrees/index.doctree b/docs/.doctrees/index.doctree new file mode 100644 index 
00000000..4efc9936 Binary files /dev/null and b/docs/.doctrees/index.doctree differ diff --git a/docs/.doctrees/installation.doctree b/docs/.doctrees/installation.doctree new file mode 100644 index 00000000..71385aca Binary files /dev/null and b/docs/.doctrees/installation.doctree differ diff --git a/docs/.doctrees/interactive_map.doctree b/docs/.doctrees/interactive_map.doctree new file mode 100644 index 00000000..aea0f950 Binary files /dev/null and b/docs/.doctrees/interactive_map.doctree differ diff --git a/docs/.doctrees/modules/models_asufm.doctree b/docs/.doctrees/modules/models_asufm.doctree new file mode 100644 index 00000000..def7be46 Binary files /dev/null and b/docs/.doctrees/modules/models_asufm.doctree differ diff --git a/docs/.doctrees/modules/models_eqnet.doctree b/docs/.doctrees/modules/models_eqnet.doctree new file mode 100644 index 00000000..11a15619 Binary files /dev/null and b/docs/.doctrees/modules/models_eqnet.doctree differ diff --git a/docs/.doctrees/modules/models_eqtransformer.doctree b/docs/.doctrees/modules/models_eqtransformer.doctree new file mode 100644 index 00000000..90551336 Binary files /dev/null and b/docs/.doctrees/modules/models_eqtransformer.doctree differ diff --git a/docs/.doctrees/modules/models_firecastnet.doctree b/docs/.doctrees/modules/models_firecastnet.doctree new file mode 100644 index 00000000..6fa18c27 Binary files /dev/null and b/docs/.doctrees/modules/models_firecastnet.doctree differ diff --git a/docs/.doctrees/modules/models_floodcast.doctree b/docs/.doctrees/modules/models_floodcast.doctree new file mode 100644 index 00000000..9e5acbc4 Binary files /dev/null and b/docs/.doctrees/modules/models_floodcast.doctree differ diff --git a/docs/.doctrees/modules/models_forefire.doctree b/docs/.doctrees/modules/models_forefire.doctree new file mode 100644 index 00000000..efdc8755 Binary files /dev/null and b/docs/.doctrees/modules/models_forefire.doctree differ diff --git 
a/docs/.doctrees/modules/models_fourcastnet_tc.doctree b/docs/.doctrees/modules/models_fourcastnet_tc.doctree new file mode 100644 index 00000000..afa1fdfd Binary files /dev/null and b/docs/.doctrees/modules/models_fourcastnet_tc.doctree differ diff --git a/docs/.doctrees/modules/models_google_flood_forecasting.doctree b/docs/.doctrees/modules/models_google_flood_forecasting.doctree new file mode 100644 index 00000000..84f547d5 Binary files /dev/null and b/docs/.doctrees/modules/models_google_flood_forecasting.doctree differ diff --git a/docs/.doctrees/modules/models_gpd.doctree b/docs/.doctrees/modules/models_gpd.doctree new file mode 100644 index 00000000..b3a55bff Binary files /dev/null and b/docs/.doctrees/modules/models_gpd.doctree differ diff --git a/docs/.doctrees/modules/models_graphcast_tc.doctree b/docs/.doctrees/modules/models_graphcast_tc.doctree new file mode 100644 index 00000000..03137526 Binary files /dev/null and b/docs/.doctrees/modules/models_graphcast_tc.doctree differ diff --git a/docs/.doctrees/modules/models_hurricast.doctree b/docs/.doctrees/modules/models_hurricast.doctree new file mode 100644 index 00000000..e291b238 Binary files /dev/null and b/docs/.doctrees/modules/models_hurricast.doctree differ diff --git a/docs/.doctrees/modules/models_hydrographnet.doctree b/docs/.doctrees/modules/models_hydrographnet.doctree new file mode 100644 index 00000000..c961111e Binary files /dev/null and b/docs/.doctrees/modules/models_hydrographnet.doctree differ diff --git a/docs/.doctrees/modules/models_neuralhydrology_ealstm.doctree b/docs/.doctrees/modules/models_neuralhydrology_ealstm.doctree new file mode 100644 index 00000000..3ef3aac8 Binary files /dev/null and b/docs/.doctrees/modules/models_neuralhydrology_ealstm.doctree differ diff --git a/docs/.doctrees/modules/models_neuralhydrology_lstm.doctree b/docs/.doctrees/modules/models_neuralhydrology_lstm.doctree new file mode 100644 index 00000000..c535ab95 Binary files /dev/null and 
b/docs/.doctrees/modules/models_neuralhydrology_lstm.doctree differ diff --git a/docs/.doctrees/modules/models_pangu_tc.doctree b/docs/.doctrees/modules/models_pangu_tc.doctree new file mode 100644 index 00000000..7a132d63 Binary files /dev/null and b/docs/.doctrees/modules/models_pangu_tc.doctree differ diff --git a/docs/.doctrees/modules/models_phasenet.doctree b/docs/.doctrees/modules/models_phasenet.doctree new file mode 100644 index 00000000..f86749fc Binary files /dev/null and b/docs/.doctrees/modules/models_phasenet.doctree differ diff --git a/docs/.doctrees/modules/models_saf_net.doctree b/docs/.doctrees/modules/models_saf_net.doctree new file mode 100644 index 00000000..b3166f69 Binary files /dev/null and b/docs/.doctrees/modules/models_saf_net.doctree differ diff --git a/docs/.doctrees/modules/models_tcif_fusion.doctree b/docs/.doctrees/modules/models_tcif_fusion.doctree new file mode 100644 index 00000000..67e517ce Binary files /dev/null and b/docs/.doctrees/modules/models_tcif_fusion.doctree differ diff --git a/docs/.doctrees/modules/models_tropicalcyclone_mlp.doctree b/docs/.doctrees/modules/models_tropicalcyclone_mlp.doctree new file mode 100644 index 00000000..eb2d248e Binary files /dev/null and b/docs/.doctrees/modules/models_tropicalcyclone_mlp.doctree differ diff --git a/docs/.doctrees/modules/models_tropicyclonenet.doctree b/docs/.doctrees/modules/models_tropicyclonenet.doctree new file mode 100644 index 00000000..2661d593 Binary files /dev/null and b/docs/.doctrees/modules/models_tropicyclonenet.doctree differ diff --git a/docs/.doctrees/modules/models_urbanfloodcast.doctree b/docs/.doctrees/modules/models_urbanfloodcast.doctree new file mode 100644 index 00000000..6bb60b1c Binary files /dev/null and b/docs/.doctrees/modules/models_urbanfloodcast.doctree differ diff --git a/docs/.doctrees/modules/models_wavecastnet.doctree b/docs/.doctrees/modules/models_wavecastnet.doctree new file mode 100644 index 00000000..3db172c6 Binary files /dev/null and 
b/docs/.doctrees/modules/models_wavecastnet.doctree differ diff --git a/docs/.doctrees/modules/models_wildfire_aspp.doctree b/docs/.doctrees/modules/models_wildfire_aspp.doctree new file mode 100644 index 00000000..486df4c6 Binary files /dev/null and b/docs/.doctrees/modules/models_wildfire_aspp.doctree differ diff --git a/docs/.doctrees/modules/models_wildfire_forecasting.doctree b/docs/.doctrees/modules/models_wildfire_forecasting.doctree new file mode 100644 index 00000000..f5ae37ff Binary files /dev/null and b/docs/.doctrees/modules/models_wildfire_forecasting.doctree differ diff --git a/docs/.doctrees/modules/models_wildfire_fpa.doctree b/docs/.doctrees/modules/models_wildfire_fpa.doctree new file mode 100644 index 00000000..d20256f3 Binary files /dev/null and b/docs/.doctrees/modules/models_wildfire_fpa.doctree differ diff --git a/docs/.doctrees/modules/models_wildfire_fpa_dnn.doctree b/docs/.doctrees/modules/models_wildfire_fpa_dnn.doctree new file mode 100644 index 00000000..bcfd9f62 Binary files /dev/null and b/docs/.doctrees/modules/models_wildfire_fpa_dnn.doctree differ diff --git a/docs/.doctrees/modules/models_wildfire_fpa_forecast.doctree b/docs/.doctrees/modules/models_wildfire_fpa_forecast.doctree new file mode 100644 index 00000000..62cce82a Binary files /dev/null and b/docs/.doctrees/modules/models_wildfire_fpa_forecast.doctree differ diff --git a/docs/.doctrees/modules/models_wildfire_fpa_lstm.doctree b/docs/.doctrees/modules/models_wildfire_fpa_lstm.doctree new file mode 100644 index 00000000..f1cc5dbe Binary files /dev/null and b/docs/.doctrees/modules/models_wildfire_fpa_lstm.doctree differ diff --git a/docs/.doctrees/modules/models_wildfire_mamba.doctree b/docs/.doctrees/modules/models_wildfire_mamba.doctree new file mode 100644 index 00000000..5f1f8f72 Binary files /dev/null and b/docs/.doctrees/modules/models_wildfire_mamba.doctree differ diff --git a/docs/.doctrees/modules/models_wildfirespreadts.doctree 
b/docs/.doctrees/modules/models_wildfirespreadts.doctree new file mode 100644 index 00000000..3765ddad Binary files /dev/null and b/docs/.doctrees/modules/models_wildfirespreadts.doctree differ diff --git a/docs/.doctrees/modules/models_wrf_sfire.doctree b/docs/.doctrees/modules/models_wrf_sfire.doctree new file mode 100644 index 00000000..b2db2c7a Binary files /dev/null and b/docs/.doctrees/modules/models_wrf_sfire.doctree differ diff --git a/docs/.doctrees/pyhazards_benchmarks.doctree b/docs/.doctrees/pyhazards_benchmarks.doctree new file mode 100644 index 00000000..0ced5358 Binary files /dev/null and b/docs/.doctrees/pyhazards_benchmarks.doctree differ diff --git a/docs/.doctrees/pyhazards_configs.doctree b/docs/.doctrees/pyhazards_configs.doctree new file mode 100644 index 00000000..d5876f1e Binary files /dev/null and b/docs/.doctrees/pyhazards_configs.doctree differ diff --git a/docs/.doctrees/pyhazards_datasets.doctree b/docs/.doctrees/pyhazards_datasets.doctree new file mode 100644 index 00000000..4be62086 Binary files /dev/null and b/docs/.doctrees/pyhazards_datasets.doctree differ diff --git a/docs/.doctrees/pyhazards_engine.doctree b/docs/.doctrees/pyhazards_engine.doctree new file mode 100644 index 00000000..20b88012 Binary files /dev/null and b/docs/.doctrees/pyhazards_engine.doctree differ diff --git a/docs/.doctrees/pyhazards_metrics.doctree b/docs/.doctrees/pyhazards_metrics.doctree new file mode 100644 index 00000000..39d5d609 Binary files /dev/null and b/docs/.doctrees/pyhazards_metrics.doctree differ diff --git a/docs/.doctrees/pyhazards_models.doctree b/docs/.doctrees/pyhazards_models.doctree new file mode 100644 index 00000000..441fc810 Binary files /dev/null and b/docs/.doctrees/pyhazards_models.doctree differ diff --git a/docs/.doctrees/pyhazards_reports.doctree b/docs/.doctrees/pyhazards_reports.doctree new file mode 100644 index 00000000..a93353b3 Binary files /dev/null and b/docs/.doctrees/pyhazards_reports.doctree differ diff --git 
a/docs/.doctrees/pyhazards_utils.doctree b/docs/.doctrees/pyhazards_utils.doctree new file mode 100644 index 00000000..ed362f98 Binary files /dev/null and b/docs/.doctrees/pyhazards_utils.doctree differ diff --git a/docs/.doctrees/quick_start.doctree b/docs/.doctrees/quick_start.doctree new file mode 100644 index 00000000..1e1ab60a Binary files /dev/null and b/docs/.doctrees/quick_start.doctree differ diff --git a/docs/.doctrees/references.doctree b/docs/.doctrees/references.doctree new file mode 100644 index 00000000..d9066f0e Binary files /dev/null and b/docs/.doctrees/references.doctree differ diff --git a/docs/.doctrees/team.doctree b/docs/.doctrees/team.doctree new file mode 100644 index 00000000..537d0ec0 Binary files /dev/null and b/docs/.doctrees/team.doctree differ diff --git a/examples/__init__.py b/docs/.nojekyll similarity index 100% rename from examples/__init__.py rename to docs/.nojekyll diff --git a/docs/README.md b/docs/README.md index e4cbaf16..36a8a5bf 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,8 +1,10 @@ -# PyGIP Documentation +# PyHazards Documentation ## Build +cd docs sphinx-build -b html source build/html +cp -r build/html/* . 
## clean diff --git a/docs/_images/github.svg b/docs/_images/github.svg new file mode 100644 index 00000000..013e0253 --- /dev/null +++ b/docs/_images/github.svg @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/docs/_images/inspection1.png b/docs/_images/inspection1.png new file mode 100644 index 00000000..d697ad84 Binary files /dev/null and b/docs/_images/inspection1.png differ diff --git a/docs/_images/logo.png b/docs/_images/logo.png new file mode 100644 index 00000000..3a7451eb Binary files /dev/null and b/docs/_images/logo.png differ diff --git a/docs/_modules/index.html b/docs/_modules/index.html new file mode 100644 index 00000000..4f80c39d --- /dev/null +++ b/docs/_modules/index.html @@ -0,0 +1,424 @@ + + + + + + + + + + Overview: module code - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

All modules for which code is available

+ +
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/benchmarks/base.html b/docs/_modules/pyhazards/benchmarks/base.html new file mode 100644 index 00000000..6a4debe5 --- /dev/null +++ b/docs/_modules/pyhazards/benchmarks/base.html @@ -0,0 +1,430 @@ + + + + + + + + + + pyhazards.benchmarks.base - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.benchmarks.base

+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from typing import Dict, Iterable, Sequence
+
+import torch.nn as nn
+
+from ..configs import ExperimentConfig
+from ..datasets.base import DataBundle
+from ..reports import BenchmarkReport, export_report_bundle
+from .schemas import BenchmarkResult
+
+
+
+[docs] +class Benchmark(ABC): + """Shared benchmark contract for hazard evaluators.""" + + name: str = "benchmark" + hazard_task: str = "" + +
+[docs] + @abstractmethod + def evaluate( + self, + model: nn.Module, + data: DataBundle, + config: ExperimentConfig, + ) -> BenchmarkResult: + raise NotImplementedError
+ + +
+[docs] + def aggregate_metrics(self, results: Sequence[BenchmarkResult]) -> Dict[str, float]: + totals: Dict[str, float] = {} + counts: Dict[str, int] = {} + for result in results: + for key, value in result.metrics.items(): + totals[key] = totals.get(key, 0.0) + float(value) + counts[key] = counts.get(key, 0) + 1 + return { + key: totals[key] / counts[key] + for key in sorted(totals.keys()) + if counts[key] > 0 + }
+ + +
+[docs] + def export_report( + self, + result: BenchmarkResult, + output_dir: str, + formats: Iterable[str], + ) -> Dict[str, str]: + report = BenchmarkReport( + benchmark_name=result.benchmark_name, + hazard_task=result.hazard_task, + metrics=result.metrics, + metadata=result.metadata, + artifacts=result.artifacts, + ) + return export_report_bundle(report, output_dir=output_dir, formats=list(formats))
+
+ +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/benchmarks/earthquake.html b/docs/_modules/pyhazards/benchmarks/earthquake.html new file mode 100644 index 00000000..d71c608a --- /dev/null +++ b/docs/_modules/pyhazards/benchmarks/earthquake.html @@ -0,0 +1,469 @@ + + + + + + + + + + pyhazards.benchmarks.earthquake - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.benchmarks.earthquake

+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import Dict
+
+import torch
+import torch.nn as nn
+
+from ..configs import ExperimentConfig
+from ..datasets.base import DataBundle
+from .base import Benchmark
+from .registry import register_benchmark
+from .schemas import BenchmarkResult
+
+
+
+[docs] +class EarthquakeBenchmark(Benchmark): + name = "earthquake" + hazard_task = "earthquake.picking" + metric_names_by_task = { + "earthquake.picking": ["p_pick_mae", "s_pick_mae", "precision", "recall", "f1"], + "earthquake.forecasting": ["mae", "mse"], + } + +
+[docs] + def evaluate(self, model: nn.Module, data: DataBundle, config: ExperimentConfig) -> BenchmarkResult: + split = data.get_split(config.benchmark.eval_split) + x = split.inputs + y = split.targets + preds = model(x) + + if config.benchmark.hazard_task == "earthquake.picking": + mae = (preds - y).abs() + tolerances = config.benchmark.params.get("detection_tolerances", [4.0, 8.0, 12.0]) + threshold_curve: Dict[str, float] = {} + detection_rate = 0.0 + for tolerance in tolerances: + hits = ((preds - y).abs() <= float(tolerance)).all(dim=1).float() + hit_rate = float(hits.mean().detach().cpu()) + threshold_curve[str(tolerance)] = hit_rate + if float(tolerance) == 8.0: + detection_rate = hit_rate + + metrics = { + "p_pick_mae": float(mae[:, 0].mean().detach().cpu()), + "s_pick_mae": float(mae[:, 1].mean().detach().cpu()), + "mean_pick_mae": float(mae.mean().detach().cpu()), + "precision": detection_rate, + "recall": detection_rate, + "f1": detection_rate, + } + else: + mse = torch.mean((preds - y) ** 2) + mae = torch.mean(torch.abs(preds - y)) + threshold_curve = {} + metrics = { + "mae": float(mae.detach().cpu()), + "mse": float(mse.detach().cpu()), + } + + return BenchmarkResult( + benchmark_name=self.name, + hazard_task=config.benchmark.hazard_task, + metrics=metrics, + metadata={ + "split": config.benchmark.eval_split, + "threshold_curve": threshold_curve, + "dataset_name": data.metadata.get("dataset"), + "source_dataset": data.metadata.get("source_dataset", data.metadata.get("dataset")), + }, + )
+ + +
+[docs] + def export_report( + self, + result: BenchmarkResult, + output_dir: str, + formats, + ) -> Dict[str, str]: + paths = super().export_report(result, output_dir=output_dir, formats=formats) + if result.hazard_task == "earthquake.forecasting": + target = Path(output_dir) + target.mkdir(parents=True, exist_ok=True) + pycsep_path = target / "earthquake_pycsep.json" + payload = { + "adapter": "pyCSEP-style", + "benchmark_name": result.benchmark_name, + "hazard_task": result.hazard_task, + "metrics": result.metrics, + "metadata": result.metadata, + } + pycsep_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8") + paths["pycsep"] = str(pycsep_path) + return paths
+
+ + + +register_benchmark(EarthquakeBenchmark.name, EarthquakeBenchmark) + +__all__ = ["EarthquakeBenchmark"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/benchmarks/flood.html b/docs/_modules/pyhazards/benchmarks/flood.html new file mode 100644 index 00000000..86776bc9 --- /dev/null +++ b/docs/_modules/pyhazards/benchmarks/flood.html @@ -0,0 +1,464 @@ + + + + + + + + + + pyhazards.benchmarks.flood - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.benchmarks.flood

+from __future__ import annotations
+
+from typing import Dict
+
+import torch
+import torch.nn as nn
+from torch.utils.data import DataLoader
+
+from ..configs import ExperimentConfig
+from ..datasets.base import DataBundle
+from ..datasets.graph import graph_collate
+from .base import Benchmark
+from .registry import register_benchmark
+from .schemas import BenchmarkResult
+
+
+
+[docs] +class FloodBenchmark(Benchmark): + name = "flood" + hazard_task = "flood.streamflow" + metric_names_by_task = { + "flood.streamflow": ["mae", "rmse", "nse", "kge"], + "flood.inundation": ["pixel_mae", "iou", "f1"], + } + +
+[docs] + def evaluate(self, model: nn.Module, data: DataBundle, config: ExperimentConfig) -> BenchmarkResult: + split = data.get_split(config.benchmark.eval_split) + if ( + config.benchmark.hazard_task == "flood.streamflow" + and hasattr(split.inputs, "__len__") + and not isinstance(split.inputs, torch.Tensor) + ): + loader = DataLoader(split.inputs, batch_size=4, shuffle=False, collate_fn=graph_collate) + preds_all = [] + target_all = [] + with torch.no_grad(): + for batch, target in loader: + preds_all.append(model(batch)) + target_all.append(target) + preds = torch.cat(preds_all, dim=0) + targets = torch.cat(target_all, dim=0) + else: + preds = model(split.inputs) + targets = split.targets + + if config.benchmark.hazard_task == "flood.inundation": + pred_depth = preds.float() + target_depth = targets.float() + pred_mask = (pred_depth >= 0.5).float() + target_mask = (target_depth > 0).float() + intersection = (pred_mask * target_mask).sum() + union = pred_mask.sum() + target_mask.sum() - intersection + metrics: Dict[str, float] = { + "pixel_mae": float(torch.mean(torch.abs(pred_depth - target_depth)).detach().cpu()), + "iou": float((intersection / union.clamp(min=1.0)).detach().cpu()), + "f1": float( + ( + 2 * intersection + / (pred_mask.sum() + target_mask.sum()).clamp(min=1.0) + ).detach().cpu() + ), + } + else: + mae = torch.mean(torch.abs(preds - targets)) + rmse = torch.sqrt(torch.mean((preds - targets) ** 2)) + target_mean = torch.mean(targets) + denominator = torch.sum((targets - target_mean) ** 2).clamp(min=1e-6) + nse = 1.0 - torch.sum((preds - targets) ** 2) / denominator + pred_std = torch.std(preds).clamp(min=1e-6) + target_std = torch.std(targets).clamp(min=1e-6) + covariance = torch.mean((preds - torch.mean(preds)) * (targets - target_mean)) + correlation = covariance / (pred_std * target_std) + alpha = pred_std / target_std + beta = torch.mean(preds) / target_mean.clamp(min=1e-6) + kge = 1.0 - torch.sqrt((correlation - 1.0) ** 2 + (alpha - 1.0) ** 
2 + (beta - 1.0) ** 2) + metrics = { + "mae": float(mae.detach().cpu()), + "rmse": float(rmse.detach().cpu()), + "nse": float(nse.detach().cpu()), + "kge": float(kge.detach().cpu()), + } + return BenchmarkResult( + benchmark_name=self.name, + hazard_task=config.benchmark.hazard_task, + metrics=metrics, + metadata={ + "split": config.benchmark.eval_split, + "dataset_name": data.metadata.get("dataset"), + "source_dataset": data.metadata.get("source_dataset", data.metadata.get("dataset")), + }, + )
+
+ + + +register_benchmark(FloodBenchmark.name, FloodBenchmark) + +__all__ = ["FloodBenchmark"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/benchmarks/registry.html b/docs/_modules/pyhazards/benchmarks/registry.html new file mode 100644 index 00000000..12a27685 --- /dev/null +++ b/docs/_modules/pyhazards/benchmarks/registry.html @@ -0,0 +1,417 @@ + + + + + + + + + + pyhazards.benchmarks.registry - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.benchmarks.registry

+from __future__ import annotations
+
+from typing import Callable, Dict, Type
+
+from .base import Benchmark
+
+_BENCHMARK_REGISTRY: Dict[str, Type[Benchmark]] = {}
+
+
+
+[docs] +def register_benchmark(name: str, builder: Type[Benchmark]) -> None: + key = name.strip().lower() + if key in _BENCHMARK_REGISTRY: + raise ValueError("Benchmark '{name}' already registered.".format(name=name)) + _BENCHMARK_REGISTRY[key] = builder
+ + + +
+[docs] +def available_benchmarks(): + return sorted(_BENCHMARK_REGISTRY.keys())
+ + + +
+[docs] +def get_benchmark(name: str): + return _BENCHMARK_REGISTRY.get(name.strip().lower())
+ + + +
+[docs] +def build_benchmark(name: str) -> Benchmark: + builder = get_benchmark(name) + if builder is None: + raise KeyError( + "Benchmark '{name}' is not registered. Known: {known}".format( + name=name, + known=", ".join(available_benchmarks()), + ) + ) + return builder()
+ + + +__all__ = [ + "available_benchmarks", + "build_benchmark", + "get_benchmark", + "register_benchmark", +] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/benchmarks/runner.html b/docs/_modules/pyhazards/benchmarks/runner.html new file mode 100644 index 00000000..2dd521ff --- /dev/null +++ b/docs/_modules/pyhazards/benchmarks/runner.html @@ -0,0 +1,410 @@ + + + + + + + + + + pyhazards.benchmarks.runner - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.benchmarks.runner

+from __future__ import annotations
+
+from typing import Union
+
+import torch.nn as nn
+
+from ..configs import ExperimentConfig
+from ..datasets.base import DataBundle
+from .base import Benchmark
+from .registry import build_benchmark
+from .schemas import BenchmarkRunSummary
+
+
+
+[docs] +def resolve_benchmark(benchmark: Union[str, Benchmark]) -> Benchmark: + if isinstance(benchmark, Benchmark): + return benchmark + return build_benchmark(benchmark)
+ + + +
+[docs] +def run_benchmark( + benchmark: Union[str, Benchmark], + model: nn.Module, + data: DataBundle, + config: ExperimentConfig, + output_dir: str | None = None, +) -> BenchmarkRunSummary: + benchmark_obj = resolve_benchmark(benchmark) + result = benchmark_obj.evaluate(model=model, data=data, config=config) + metrics = benchmark_obj.aggregate_metrics([result]) + result.metrics = metrics + report_dir = output_dir or config.report.output_dir + report_paths = benchmark_obj.export_report(result, output_dir=report_dir, formats=config.report.formats) + metadata = dict(result.metadata) + metadata.setdefault("eval_split", config.benchmark.eval_split) + return BenchmarkRunSummary( + benchmark_name=result.benchmark_name, + hazard_task=result.hazard_task, + metrics=metrics, + report_paths=report_paths, + metadata=metadata, + )
+ +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/benchmarks/schemas.html b/docs/_modules/pyhazards/benchmarks/schemas.html new file mode 100644 index 00000000..1a8a8f84 --- /dev/null +++ b/docs/_modules/pyhazards/benchmarks/schemas.html @@ -0,0 +1,392 @@ + + + + + + + + + + pyhazards.benchmarks.schemas - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.benchmarks.schemas

+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import Any, Dict, List
+
+
+
+[docs] +@dataclass +class BenchmarkResult: + benchmark_name: str + hazard_task: str + metrics: Dict[str, float] + predictions: List[Any] = field(default_factory=list) + artifacts: Dict[str, str] = field(default_factory=dict) + metadata: Dict[str, Any] = field(default_factory=dict)
+ + + +
+[docs] +@dataclass +class BenchmarkRunSummary: + benchmark_name: str + hazard_task: str + metrics: Dict[str, float] + report_paths: Dict[str, str] = field(default_factory=dict) + metadata: Dict[str, Any] = field(default_factory=dict)
+ +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/benchmarks/tc.html b/docs/_modules/pyhazards/benchmarks/tc.html new file mode 100644 index 00000000..574e561b --- /dev/null +++ b/docs/_modules/pyhazards/benchmarks/tc.html @@ -0,0 +1,416 @@ + + + + + + + + + + pyhazards.benchmarks.tc - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.benchmarks.tc

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+from ..configs import ExperimentConfig
+from ..datasets.base import DataBundle
+from .base import Benchmark
+from .registry import register_benchmark
+from .schemas import BenchmarkResult
+
+
+
+[docs] +class TropicalCycloneBenchmark(Benchmark): + name = "tc" + hazard_task = "tc.track_intensity" + metric_names_by_task = { + "tc.track_intensity": ["track_error", "intensity_mae"], + } + +
+[docs] + def evaluate(self, model: nn.Module, data: DataBundle, config: ExperimentConfig) -> BenchmarkResult: + split = data.get_split(config.benchmark.eval_split) + preds = model(split.inputs) + targets = split.targets + + track_error = torch.norm(preds[..., :2] - targets[..., :2], dim=-1).mean() + intensity_mae = torch.mean(torch.abs(preds[..., 2] - targets[..., 2])) + metrics = { + "track_error": float(track_error.detach().cpu()), + "intensity_mae": float(intensity_mae.detach().cpu()), + } + return BenchmarkResult( + benchmark_name=self.name, + hazard_task=config.benchmark.hazard_task, + metrics=metrics, + metadata={ + "split": config.benchmark.eval_split, + "dataset_name": data.metadata.get("dataset"), + "source_dataset": data.metadata.get("source_dataset", data.metadata.get("dataset")), + "history": data.metadata.get("history"), + "horizon": data.feature_spec.extra.get("horizon") if data.feature_spec.extra else None, + }, + )
+
+ + + +register_benchmark(TropicalCycloneBenchmark.name, TropicalCycloneBenchmark) + +__all__ = ["TropicalCycloneBenchmark"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/benchmarks/wildfire.html b/docs/_modules/pyhazards/benchmarks/wildfire.html new file mode 100644 index 00000000..5f58a8a7 --- /dev/null +++ b/docs/_modules/pyhazards/benchmarks/wildfire.html @@ -0,0 +1,466 @@ + + + + + + + + + + pyhazards.benchmarks.wildfire - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.benchmarks.wildfire

+from __future__ import annotations
+
+from typing import Dict
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from sklearn.metrics import accuracy_score, average_precision_score, f1_score, roc_auc_score
+
+from ..configs import ExperimentConfig
+from ..datasets.base import DataBundle
+from .base import Benchmark
+from .registry import register_benchmark
+from .schemas import BenchmarkResult
+
+
+def _spread_metrics(logits: torch.Tensor, targets: torch.Tensor) -> Dict[str, float]:
+    probs = torch.sigmoid(logits)
+    preds = (probs >= 0.5).float()
+    targets = targets.float()
+    intersection = (preds * targets).sum()
+    union = preds.sum() + targets.sum() - intersection
+    iou = float((intersection / union.clamp(min=1.0)).detach().cpu())
+    f1 = float((2 * intersection / (preds.sum() + targets.sum()).clamp(min=1.0)).detach().cpu())
+    burned_area_mae = float(
+        torch.mean(torch.abs(preds.flatten(1).sum(dim=1) - targets.flatten(1).sum(dim=1))).detach().cpu()
+    )
+    return {"iou": iou, "f1": f1, "burned_area_mae": burned_area_mae}
+
+
+def _danger_metrics(logits: torch.Tensor, targets: torch.Tensor) -> Dict[str, float]:
+    if targets.dtype in {torch.int32, torch.int64} or targets.ndim == 1:
+        preds = logits.argmax(dim=1)
+        probs = F.softmax(logits, dim=1)
+        y_true = targets.detach().cpu().numpy()
+        y_pred = preds.detach().cpu().numpy()
+        y_score = probs.detach().cpu().numpy()
+        one_hot = F.one_hot(targets.long(), num_classes=logits.size(1)).detach().cpu().numpy()
+        try:
+            auc = float(roc_auc_score(one_hot, y_score, average="macro", multi_class="ovr"))
+        except ValueError:
+            auc = 0.0
+        try:
+            pr_auc = float(average_precision_score(one_hot, y_score, average="macro"))
+        except ValueError:
+            pr_auc = 0.0
+        return {
+            "accuracy": float(accuracy_score(y_true, y_pred)),
+            "macro_f1": float(f1_score(y_true, y_pred, average="macro")),
+            "auc": auc,
+            "pr_auc": pr_auc,
+        }
+
+    preds = logits.float()
+    targets = targets.float()
+    mae = torch.mean(torch.abs(preds - targets))
+    rmse = torch.sqrt(torch.mean((preds - targets) ** 2))
+    return {
+        "mae": float(mae.detach().cpu()),
+        "rmse": float(rmse.detach().cpu()),
+    }
+
+
+
+[docs] +class WildfireBenchmark(Benchmark): + name = "wildfire" + hazard_task = "wildfire.danger" + metric_names_by_task = { + "wildfire.danger": ["accuracy", "macro_f1", "auc", "pr_auc", "mae", "rmse"], + "wildfire.spread": ["iou", "f1", "burned_area_mae"], + } + +
+[docs] + def evaluate(self, model: nn.Module, data: DataBundle, config: ExperimentConfig) -> BenchmarkResult: + split = data.get_split(config.benchmark.eval_split) + x = split.inputs + y = split.targets + logits = model(x) + + if config.benchmark.hazard_task == "wildfire.danger": + metrics = _danger_metrics(logits, y) + else: + metrics = _spread_metrics(logits, y) + + return BenchmarkResult( + benchmark_name=self.name, + hazard_task=config.benchmark.hazard_task, + metrics=metrics, + metadata={ + "split": config.benchmark.eval_split, + "dataset_name": data.metadata.get("dataset"), + "source_dataset": data.metadata.get("source_dataset", data.metadata.get("dataset")), + }, + )
+
+ + + +register_benchmark(WildfireBenchmark.name, WildfireBenchmark) + +__all__ = ["WildfireBenchmark"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/configs/_schema.html b/docs/_modules/pyhazards/configs/_schema.html new file mode 100644 index 00000000..64714498 --- /dev/null +++ b/docs/_modules/pyhazards/configs/_schema.html @@ -0,0 +1,482 @@ + + + + + + + + + + pyhazards.configs._schema - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.configs._schema

+from __future__ import annotations
+
+from dataclasses import asdict, dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List
+
+import yaml
+
+from ..tasks import get_hazard_task
+
+_REPORT_FORMATS = {"json", "md", "csv"}
+
+
+
+[docs] +@dataclass +class DatasetRef: + name: str + params: Dict[str, Any] = field(default_factory=dict)
+ + + +
+[docs] +@dataclass +class ModelRef: + name: str + task: str + params: Dict[str, Any] = field(default_factory=dict)
+ + + +
+[docs] +@dataclass +class ReportConfig: + output_dir: str = "reports" + formats: List[str] = field(default_factory=lambda: ["json"]) + + def __post_init__(self) -> None: + normalized = [fmt.lower() for fmt in self.formats] + unknown = [fmt for fmt in normalized if fmt not in _REPORT_FORMATS] + if unknown: + raise ValueError( + "Unknown report format(s): {unknown}. Known: {known}".format( + unknown=", ".join(sorted(set(unknown))), + known=", ".join(sorted(_REPORT_FORMATS)), + ) + ) + self.formats = normalized
+ + + +
+[docs] +@dataclass +class BenchmarkConfig: + name: str + hazard_task: str + metrics: List[str] = field(default_factory=list) + eval_split: str = "test" + params: Dict[str, Any] = field(default_factory=dict) + + def __post_init__(self) -> None: + self.hazard_task = get_hazard_task(self.hazard_task).name
+ + + +
+[docs] +@dataclass +class ExperimentConfig: + benchmark: BenchmarkConfig + dataset: DatasetRef + model: ModelRef + report: ReportConfig = field(default_factory=ReportConfig) + seed: int = 0 + metadata: Dict[str, Any] = field(default_factory=dict) + +
+[docs] + def to_dict(self) -> Dict[str, Any]: + return asdict(self)
+
+ + + +
+[docs] +def load_experiment_config(path: str | Path) -> ExperimentConfig: + raw = yaml.safe_load(Path(path).read_text(encoding="utf-8")) or {} + return ExperimentConfig( + benchmark=BenchmarkConfig(**raw["benchmark"]), + dataset=DatasetRef(**raw["dataset"]), + model=ModelRef(**raw["model"]), + report=ReportConfig(**raw.get("report", {})), + seed=raw.get("seed", 0), + metadata=raw.get("metadata", {}), + )
+ + + +
+[docs] +def dump_experiment_config(config: ExperimentConfig, path: str | Path) -> None: + payload = config.to_dict() + Path(path).write_text(yaml.safe_dump(payload, sort_keys=False), encoding="utf-8")
+ + + +__all__ = [ + "BenchmarkConfig", + "DatasetRef", + "ExperimentConfig", + "ModelRef", + "ReportConfig", + "dump_experiment_config", + "load_experiment_config", +] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/datasets/base.html b/docs/_modules/pyhazards/datasets/base.html new file mode 100644 index 00000000..0248c298 --- /dev/null +++ b/docs/_modules/pyhazards/datasets/base.html @@ -0,0 +1,474 @@ + + + + + + + + + + pyhazards.datasets.base - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.datasets.base

+from dataclasses import dataclass, field
+from typing import Any, Dict, List, Optional, Protocol
+
+
+
+[docs] +@dataclass +class FeatureSpec: + """Describes input features (shapes, dtypes, normalization).""" + input_dim: Optional[int] = None + channels: Optional[int] = None + description: Optional[str] = None + extra: Dict[str, Any] = field(default_factory=dict)
+ + + +
+[docs] +@dataclass +class LabelSpec: + """Describes labels/targets for downstream tasks.""" + num_targets: Optional[int] = None + task_type: str = "regression" # classification|regression|segmentation + description: Optional[str] = None + extra: Dict[str, Any] = field(default_factory=dict)
+ + + +
+[docs] +@dataclass +class DataSplit: + """Container for a single split.""" + inputs: Any + targets: Any + metadata: Dict[str, Any] = field(default_factory=dict)
+ + + +
+[docs] +@dataclass +class DataBundle: + """ + Bundle of train/val/test splits plus metadata. + Keeps feature/label specs to make model construction easy. + """ + splits: Dict[str, DataSplit] + feature_spec: FeatureSpec + label_spec: LabelSpec + metadata: Dict[str, Any] = field(default_factory=dict) + +
+[docs] + def get_split(self, name: str) -> DataSplit: + if name not in self.splits: + raise KeyError(f"Split '{name}' not found. Available: {list(self.splits.keys())}") + return self.splits[name]
+
+ + + +
+[docs] +class Transform(Protocol): + """Callable data transform.""" + + def __call__(self, bundle: DataBundle) -> DataBundle: + ...
+ + + +
+[docs] +class Dataset: + """ + Base class for hazard datasets. + Subclasses should load data and return a DataBundle with splits ready for training. + """ + + name: str = "base" + + def __init__(self, cache_dir: Optional[str] = None): + self.cache_dir = cache_dir + +
+[docs] + def load(self, split: Optional[str] = None, transforms: Optional[List[Transform]] = None) -> DataBundle: + """ + Return a DataBundle. Optionally return a specific split if provided. + """ + bundle = self._load() + if transforms: + for t in transforms: + bundle = t(bundle) + if split: + return DataBundle( + splits={split: bundle.get_split(split)}, + feature_spec=bundle.feature_spec, + label_spec=bundle.label_spec, + metadata=bundle.metadata, + ) + return bundle
+ + +
+[docs] + def _load(self) -> DataBundle: + raise NotImplementedError("Subclasses must implement _load() to return a DataBundle.")
+
+ +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/datasets/earthquake.html b/docs/_modules/pyhazards/datasets/earthquake.html new file mode 100644 index 00000000..c1af6ed6 --- /dev/null +++ b/docs/_modules/pyhazards/datasets/earthquake.html @@ -0,0 +1,611 @@ + + + + + + + + + + pyhazards.datasets.earthquake - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.datasets.earthquake

+from __future__ import annotations
+
+import math
+
+import torch
+
+from ..base import DataBundle, DataSplit, Dataset, FeatureSpec, LabelSpec
+
+
+
+[docs] +class SyntheticEarthquakeWaveformDataset(Dataset): + """Synthetic waveform dataset for earthquake phase-picking smoke runs.""" + + name = "earthquake_waveforms" + + def __init__( + self, + cache_dir: str | None = None, + samples: int = 96, + channels: int = 3, + length: int = 256, + micro: bool = False, + ): + super().__init__(cache_dir=cache_dir) + self.samples = 24 if micro else int(samples) + self.channels = int(channels) + self.length = int(length) + +
+[docs] + def _load(self) -> DataBundle: + timeline = torch.linspace(0.0, 1.0, steps=self.length, dtype=torch.float32) + x = torch.zeros(self.samples, self.channels, self.length, dtype=torch.float32) + y = torch.zeros(self.samples, 2, dtype=torch.float32) + + for idx in range(self.samples): + p_pick = 32 + (idx % 40) + s_pick = min(self.length - 12, p_pick + 24 + (idx % 24)) + + for channel in range(self.channels): + phase = 0.5 * channel + base = torch.sin(2.0 * math.pi * (channel + 1) * timeline + phase) + pulse_p = torch.exp(-0.5 * ((torch.arange(self.length) - p_pick) / 6.0) ** 2) + pulse_s = 0.8 * torch.exp(-0.5 * ((torch.arange(self.length) - s_pick) / 8.0) ** 2) + x[idx, channel] = base + pulse_p + pulse_s + + y[idx, 0] = float(p_pick) + y[idx, 1] = float(s_pick) + + train_end = max(1, int(0.7 * self.samples)) + val_end = max(train_end + 1, int(0.85 * self.samples)) + splits = { + "train": DataSplit(x[:train_end], y[:train_end]), + "val": DataSplit(x[train_end:val_end], y[train_end:val_end]), + "test": DataSplit(x[val_end:], y[val_end:]), + } + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + channels=self.channels, + description="Synthetic multichannel seismic waveforms with Gaussian phase arrivals.", + extra={"length": self.length}, + ), + label_spec=LabelSpec( + num_targets=2, + task_type="regression", + description="P- and S-arrival sample indices.", + ), + metadata={ + "dataset": self.name, + "source_dataset": self.name, + "hazard_task": "earthquake.picking", + }, + )
+
+ + + +
+[docs] +class SyntheticEarthquakeForecastDataset(Dataset): + """Synthetic wavefield dataset for earthquake forecasting smoke runs.""" + + name = "earthquake_forecast_synthetic" + + def __init__( + self, + cache_dir: str | None = None, + samples: int = 40, + channels: int = 3, + temporal_in: int = 5, + temporal_out: int = 4, + height: int = 12, + width: int = 10, + micro: bool = False, + ): + super().__init__(cache_dir=cache_dir) + self.samples = 10 if micro else int(samples) + self.channels = int(channels) + self.temporal_in = int(temporal_in) + self.temporal_out = int(temporal_out) + self.height = int(height) + self.width = int(width) + +
+[docs] + def _load(self) -> DataBundle: + grid_y = torch.linspace(-1.0, 1.0, steps=self.height, dtype=torch.float32).view(self.height, 1) + grid_x = torch.linspace(-1.0, 1.0, steps=self.width, dtype=torch.float32).view(1, self.width) + total_steps = self.temporal_in + self.temporal_out + + x = torch.zeros( + self.samples, + self.channels, + self.temporal_in, + self.height, + self.width, + dtype=torch.float32, + ) + y = torch.zeros( + self.samples, + self.channels, + self.temporal_out, + self.height, + self.width, + dtype=torch.float32, + ) + + row_index = torch.arange(self.height, dtype=torch.float32).view(self.height, 1) + col_index = torch.arange(self.width, dtype=torch.float32).view(1, self.width) + + for idx in range(self.samples): + sequence = torch.zeros( + self.channels, + total_steps, + self.height, + self.width, + dtype=torch.float32, + ) + for step in range(total_steps): + center_r = 2.0 + ((idx + step) % max(3, self.height - 2)) + center_c = 1.0 + ((2 * idx + step) % max(2, self.width - 1)) + gaussian = torch.exp( + -0.18 * ((row_index - center_r) ** 2 + (col_index - center_c) ** 2) + ) + for channel in range(self.channels): + phase = 0.5 * channel + 0.2 * step + base = torch.sin( + math.pi * (channel + 1) * grid_y + phase + ) + torch.cos(math.pi * (channel + 1) * grid_x - phase) + sequence[channel, step] = base + (0.6 + 0.1 * channel) * gaussian + + x[idx] = sequence[:, : self.temporal_in] + y[idx] = sequence[:, self.temporal_in :] + + train_end = max(1, int(0.7 * self.samples)) + val_end = max(train_end + 1, int(0.85 * self.samples)) + splits = { + "train": DataSplit(x[:train_end], y[:train_end]), + "val": DataSplit(x[train_end:val_end], y[train_end:val_end]), + "test": DataSplit(x[val_end:], y[val_end:]), + } + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + channels=self.channels, + description="Synthetic dense-grid wavefield history tensors for forecasting benchmarks.", + extra={ + "temporal_in": self.temporal_in, + 
"temporal_out": self.temporal_out, + "height": self.height, + "width": self.width, + }, + ), + label_spec=LabelSpec( + num_targets=self.channels * self.temporal_out, + task_type="regression", + description="Future dense-grid wavefield frames over the forecast horizon.", + ), + metadata={ + "dataset": self.name, + "source_dataset": self.name, + "hazard_task": "earthquake.forecasting", + }, + )
+
+ + + +
+[docs] +class SeisBenchWaveformDataset(SyntheticEarthquakeWaveformDataset): + """Synthetic-backed adapter with the SeisBench public dataset surface.""" + + name = "seisbench_waveforms" + +
+[docs] + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "SeisBench", "source_dataset": self.name}) + return bundle
+
+ + + +
+[docs] +class PickBenchmarkWaveformDataset(SyntheticEarthquakeWaveformDataset): + """Synthetic-backed adapter with the pick-benchmark public dataset surface.""" + + name = "pick_benchmark_waveforms" + +
+[docs] + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "pick-benchmark", "source_dataset": self.name}) + return bundle
+
+ + + +
+[docs] +class AEFADataset(SyntheticEarthquakeForecastDataset): + """Synthetic-backed adapter for AEFA-style earthquake forecasting inputs.""" + + name = "aefa_forecast" + +
+[docs] + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "AEFA", "source_dataset": self.name}) + return bundle
+
+ + + +__all__ = [ + "AEFADataset", + "PickBenchmarkWaveformDataset", + "SeisBenchWaveformDataset", + "SyntheticEarthquakeForecastDataset", + "SyntheticEarthquakeWaveformDataset", +] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/datasets/flood.html b/docs/_modules/pyhazards/datasets/flood.html new file mode 100644 index 00000000..9b38e6fa --- /dev/null +++ b/docs/_modules/pyhazards/datasets/flood.html @@ -0,0 +1,595 @@ + + + + + + + + + + pyhazards.datasets.flood - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.datasets.flood

+from __future__ import annotations
+
+import torch
+
+from ..base import DataBundle, DataSplit, Dataset, FeatureSpec, LabelSpec
+from ..graph import GraphTemporalDataset
+
+
+
+[docs] +class SyntheticFloodStreamflowDataset(Dataset): + """Synthetic graph-temporal flood dataset for streamflow smoke runs.""" + + name = "flood_streamflow_synthetic" + + def __init__( + self, + cache_dir: str | None = None, + samples: int = 40, + history: int = 4, + nodes: int = 6, + features: int = 2, + micro: bool = False, + ): + super().__init__(cache_dir=cache_dir) + self.samples = 12 if micro else int(samples) + self.history = int(history) + self.nodes = int(nodes) + self.features = int(features) + +
+[docs] + def _make_split(self, x: torch.Tensor, y: torch.Tensor, adj: torch.Tensor) -> DataSplit: + dataset = GraphTemporalDataset(x, y, adjacency=adj) + return DataSplit(inputs=dataset, targets=None)
+ + +
+[docs] + def _load(self) -> DataBundle: + x = torch.randn(self.samples, self.history, self.nodes, self.features, dtype=torch.float32) + adjacency = torch.eye(self.nodes, dtype=torch.float32) + adjacency += torch.diag(torch.ones(self.nodes - 1), diagonal=1) + adjacency += torch.diag(torch.ones(self.nodes - 1), diagonal=-1) + y = x[:, -1, :, :1] * 0.7 + 0.1 + + train_end = max(1, int(0.7 * self.samples)) + val_end = max(train_end + 1, int(0.85 * self.samples)) + splits = { + "train": self._make_split(x[:train_end], y[:train_end], adjacency), + "val": self._make_split(x[train_end:val_end], y[train_end:val_end], adjacency), + "test": self._make_split(x[val_end:], y[val_end:], adjacency), + } + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + input_dim=self.features, + description="Synthetic node features for streamflow forecasting on a line graph.", + extra={"nodes": self.nodes, "history": self.history}, + ), + label_spec=LabelSpec( + num_targets=1, + task_type="regression", + description="Next-step nodewise streamflow target.", + ), + metadata={ + "dataset": self.name, + "source_dataset": self.name, + "hazard_task": "flood.streamflow", + }, + )
+
+ + + +
+[docs] +class SyntheticFloodInundationDataset(Dataset): + """Synthetic raster dataset for flood inundation smoke runs.""" + + name = "flood_inundation_synthetic" + + def __init__( + self, + cache_dir: str | None = None, + samples: int = 40, + history: int = 4, + channels: int = 3, + height: int = 16, + width: int = 16, + micro: bool = False, + ): + super().__init__(cache_dir=cache_dir) + self.samples = 12 if micro else int(samples) + self.history = int(history) + self.channels = int(channels) + self.height = int(height) + self.width = int(width) + +
+[docs] + def _load(self) -> DataBundle: + x = torch.randn( + self.samples, + self.history, + self.channels, + self.height, + self.width, + dtype=torch.float32, + ) + y = torch.zeros(self.samples, 1, self.height, self.width, dtype=torch.float32) + rows = torch.arange(self.height, dtype=torch.float32).view(self.height, 1) + cols = torch.arange(self.width, dtype=torch.float32).view(1, self.width) + + for idx in range(self.samples): + waterline = float(self.height // 3 + (idx % max(2, self.height // 3))) + slope = 0.25 + 0.05 * (idx % 4) + rain_band = rows >= (waterline - slope * cols) + depth = rain_band.float() * (0.4 + 0.1 * (idx % 3)) + y[idx, 0] = depth + x[idx, -1, 0] = x[idx, -1, 0] + depth + x[idx, :, 1] = x[idx, :, 1] + torch.linspace(0.0, 1.0, self.history).view(self.history, 1, 1) + + train_end = max(1, int(0.7 * self.samples)) + val_end = max(train_end + 1, int(0.85 * self.samples)) + splits = { + "train": DataSplit(x[:train_end], y[:train_end]), + "val": DataSplit(x[train_end:val_end], y[train_end:val_end]), + "test": DataSplit(x[val_end:], y[val_end:]), + } + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + channels=self.channels, + description="Synthetic rainfall, terrain, and antecedent-state tensors for inundation forecasting.", + extra={ + "history": self.history, + "height": self.height, + "width": self.width, + }, + ), + label_spec=LabelSpec( + num_targets=1, + task_type="regression", + description="Next-horizon inundation depth raster.", + ), + metadata={ + "dataset": self.name, + "source_dataset": self.name, + "hazard_task": "flood.inundation", + }, + )
+
+ + + +
+[docs] +class CaravanStreamflowDataset(SyntheticFloodStreamflowDataset): + """Synthetic-backed streamflow adapter for Caravan-style smoke runs.""" + + name = "caravan_streamflow" + +
+[docs] + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "Caravan", "source_dataset": self.name}) + return bundle
+
+ + + +
+[docs] +class WaterBenchStreamflowDataset(SyntheticFloodStreamflowDataset): + """Synthetic-backed streamflow adapter for WaterBench-style smoke runs.""" + + name = "waterbench_streamflow" + +
+[docs] + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "WaterBench", "source_dataset": self.name}) + return bundle
+
+ + + +
+[docs] +class HydroBenchStreamflowDataset(SyntheticFloodStreamflowDataset): + """Synthetic-backed streamflow adapter for HydroBench diagnostics.""" + + name = "hydrobench_streamflow" + +
+[docs] + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "HydroBench", "source_dataset": self.name}) + return bundle
+
+ + + +
+[docs] +class FloodCastBenchInundationDataset(SyntheticFloodInundationDataset): + """Synthetic-backed inundation adapter for FloodCastBench-style smoke runs.""" + + name = "floodcastbench_inundation" + +
+[docs] + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "FloodCastBench", "source_dataset": self.name}) + return bundle
+
+ + + +__all__ = [ + "CaravanStreamflowDataset", + "FloodCastBenchInundationDataset", + "HydroBenchStreamflowDataset", + "SyntheticFloodInundationDataset", + "SyntheticFloodStreamflowDataset", + "WaterBenchStreamflowDataset", +] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/datasets/fpa_fod.html b/docs/_modules/pyhazards/datasets/fpa_fod.html new file mode 100644 index 00000000..a44c8912 --- /dev/null +++ b/docs/_modules/pyhazards/datasets/fpa_fod.html @@ -0,0 +1,1032 @@ + + + + + + + + + + pyhazards.datasets.fpa_fod - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.datasets.fpa_fod

+from __future__ import annotations
+
+import argparse
+import math
+import os
+from pathlib import Path
+from typing import Any, Dict, List, Literal, Optional, Tuple
+
+import numpy as np
+import torch
+
+from .base import DataBundle, DataSplit, Dataset, FeatureSpec, LabelSpec
+
+CauseMode = Literal["paper5", "keep_all"]
+Region = Literal["US", "CA"]
+WeeklyFeatures = Literal["counts", "counts+time"]
+
+PAPER5_CAUSES = [
+    "Debris and open burning",
+    "Natural",
+    "Arson/incendiarism",
+    "Equipment and vehicle use",
+    "Recreation and ceremony",
+]
+
+CAUSE_SYNONYMS = {
+    "Debris/open burning": "Debris and open burning",
+    "Debris and Open Burning": "Debris and open burning",
+    "Arson": "Arson/incendiarism",
+    "Equipment/vehicle use": "Equipment and vehicle use",
+    "Recreation/ceremony": "Recreation and ceremony",
+}
+
+SIZE_GROUPS = ["A", "B", "C", "D", "EFG"]
+
+
+def _require_pandas():
+    try:
+        import pandas as pd
+    except ImportError as exc:
+        raise ImportError(
+            "FPA-FOD dataset support requires pandas. Install pandas or xarray's pandas dependency first."
+        ) from exc
+    return pd
+
+
+def _minmax_fit(x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+    mins = np.nanmin(x, axis=0)
+    maxs = np.nanmax(x, axis=0)
+    maxs = np.where(maxs == mins, mins + 1.0, maxs)
+    return mins, maxs
+
+
+def _minmax_apply(x: np.ndarray, mins: np.ndarray, maxs: np.ndarray) -> np.ndarray:
+    return (x - mins) / (maxs - mins)
+
+
+def _stratified_split_indices(
+    y: np.ndarray,
+    train_ratio: float,
+    val_ratio: float,
+    test_ratio: float,
+    seed: int,
+) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+    if not math.isclose(train_ratio + val_ratio + test_ratio, 1.0, rel_tol=0.0, abs_tol=1e-6):
+        raise ValueError("train_ratio + val_ratio + test_ratio must equal 1.0.")
+
+    rng = np.random.default_rng(seed)
+    train_idx: List[int] = []
+    val_idx: List[int] = []
+    test_idx: List[int] = []
+
+    for class_id in np.unique(y):
+        idx = np.where(y == class_id)[0]
+        rng.shuffle(idx)
+        n = len(idx)
+        n_train = int(round(train_ratio * n))
+        n_val = int(round(val_ratio * n))
+        n_test = max(0, n - n_train - n_val)
+
+        train_idx.extend(idx[:n_train].tolist())
+        val_idx.extend(idx[n_train : n_train + n_val].tolist())
+        test_idx.extend(idx[n_train + n_val : n_train + n_val + n_test].tolist())
+
+    train = np.array(train_idx, dtype=np.int64)
+    val = np.array(val_idx, dtype=np.int64)
+    test = np.array(test_idx, dtype=np.int64)
+    rng.shuffle(train)
+    rng.shuffle(val)
+    rng.shuffle(test)
+    return train, val, test
+
+
+def _chronological_split_indices(
+    n: int,
+    train_ratio: float,
+    val_ratio: float,
+    test_ratio: float,
+) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+    if not math.isclose(train_ratio + val_ratio + test_ratio, 1.0, rel_tol=0.0, abs_tol=1e-6):
+        raise ValueError("train_ratio + val_ratio + test_ratio must equal 1.0.")
+
+    n_train = int(math.floor(train_ratio * n))
+    n_val = int(math.floor(val_ratio * n))
+    n_test = n - n_train - n_val
+
+    train = np.arange(0, n_train, dtype=np.int64)
+    val = np.arange(n_train, n_train + n_val, dtype=np.int64)
+    test = np.arange(n_train + n_val, n_train + n_val + n_test, dtype=np.int64)
+    return train, val, test
+
+
+def _load_fpa_fod_table(path: str):
+    pd = _require_pandas()
+
+    if not os.path.exists(path):
+        raise FileNotFoundError(f"Data path not found: {path}")
+
+    ext = os.path.splitext(path)[1].lower()
+    if ext in {".sqlite", ".db"}:
+        import sqlite3
+
+        con = sqlite3.connect(path)
+        try:
+            return pd.read_sql_query("SELECT * FROM Fires", con)
+        finally:
+            con.close()
+    if ext == ".csv":
+        return pd.read_csv(path)
+    if ext == ".parquet":
+        return pd.read_parquet(path)
+    raise ValueError(f"Unsupported file extension for FPA-FOD data: {ext}")
+
+
+def _coerce_required_columns(df, required: List[str]):
+    missing = [column for column in required if column not in df.columns]
+    if missing:
+        raise ValueError(f"Missing required columns: {missing}")
+    return df
+
+
+def _encode_states(states) -> Tuple[np.ndarray, Dict[str, int]]:
+    values = sorted(states.dropna().astype(str).unique().tolist())
+    mapping = {value: index for index, value in enumerate(values)}
+    encoded = states.astype(str).map(mapping).astype("int64").to_numpy()
+    return encoded, mapping
+
+
+def _normalize_cause_strings(values):
+    return values.astype(str).str.strip().map(lambda value: CAUSE_SYNONYMS.get(value, value))
+
+
+def _impute_numeric(df, columns: List[str]):
+    pd = _require_pandas()
+    filled = df.copy()
+    medians: Dict[str, float] = {}
+    for column in columns:
+        series = pd.to_numeric(filled[column], errors="coerce")
+        median = float(series.median())
+        if math.isnan(median):
+            median = 0.0
+        medians[column] = median
+        filled[column] = series.fillna(median)
+    return filled, medians
+
+
+def _micro_tabular_df(seed: int = 1337, n: int = 200):
+    pd = _require_pandas()
+
+    rng = np.random.default_rng(seed)
+    states = np.array(["CA", "TX", "FL", "NY", "WA", "CO"])
+    causes = np.array(PAPER5_CAUSES)
+
+    years = rng.integers(2010, 2019, size=n)
+    discovery_doy = rng.integers(1, 366, size=n)
+    discovery_time = rng.integers(0, 2400, size=n)
+    containment_doy = np.clip(discovery_doy + rng.integers(0, 30, size=n), 1, 366)
+    containment_time = rng.integers(0, 2400, size=n)
+
+    state = rng.choice(states, size=n, replace=True)
+    latitude = rng.uniform(25.0, 49.0, size=n)
+    longitude = rng.uniform(-124.0, -67.0, size=n)
+    california_mask = state == "CA"
+    latitude[california_mask] = rng.uniform(32.0, 42.0, size=california_mask.sum())
+    longitude[california_mask] = rng.uniform(-124.5, -114.0, size=california_mask.sum())
+
+    cause = rng.choice(causes, size=n, replace=True)
+    size_class = rng.choice(
+        ["A", "B", "C", "D", "E", "F", "G"],
+        size=n,
+        p=[0.38, 0.42, 0.12, 0.04, 0.02, 0.01, 0.01],
+    )
+
+    return pd.DataFrame(
+        {
+            "FIRE_YEAR": years,
+            "STATE": state,
+            "DISCOVERY_DOY": discovery_doy,
+            "DISCOVERY_TIME": discovery_time,
+            "CONT_DOY": containment_doy,
+            "CONT_TIME": containment_time,
+            "LATITUDE": latitude,
+            "LONGITUDE": longitude,
+            "NWCG_GENERAL_CAUSE": cause,
+            "FIRE_SIZE_CLASS": size_class,
+        }
+    )
+
+
+def _micro_weekly_counts(seed: int = 1337, weeks: int = 120, region: Region = "US"):
+    pd = _require_pandas()
+
+    rng = np.random.default_rng(seed)
+    dates = pd.date_range(pd.Timestamp("2016-01-04"), periods=weeks, freq="W-MON")
+    time = np.arange(weeks)
+    base = 50 + 20 * np.sin(2 * np.pi * time / 52.0)
+    if region == "CA":
+        base = base * 1.3
+
+    a = np.maximum(0, base + rng.normal(0, 8, size=weeks)).astype(int)
+    b = np.maximum(0, base * 0.8 + rng.normal(0, 7, size=weeks)).astype(int)
+    c = np.maximum(0, base * 0.2 + rng.normal(0, 3, size=weeks)).astype(int)
+    d = np.maximum(0, base * 0.05 + rng.normal(0, 2, size=weeks)).astype(int)
+    efg = np.maximum(0, base * 0.03 + rng.normal(0, 2, size=weeks)).astype(int)
+
+    return pd.DataFrame({"week_start": dates, "A": a, "B": b, "C": c, "D": d, "EFG": efg})
+
+
+
+[docs] +class FPAFODTabularDataset(Dataset): + """Incident-level tabular dataset for wildfire cause or size classification.""" + + name = "fpa_fod_tabular" + + def __init__( + self, + task: Literal["cause", "size"] = "cause", + region: Region = "US", + cause_mode: CauseMode = "paper5", + data_path: Optional[str] = None, + micro: bool = False, + normalize: bool = False, + train_ratio: float = 0.6, + val_ratio: float = 0.2, + test_ratio: float = 0.2, + seed: int = 1337, + cache_dir: Optional[str] = None, + ): + super().__init__(cache_dir=cache_dir) + self.task = task + self.region = region + self.cause_mode = cause_mode + self.data_path = data_path + self.micro = micro + self.normalize = normalize + self.train_ratio = train_ratio + self.val_ratio = val_ratio + self.test_ratio = test_ratio + self.seed = seed + +
+[docs] + def _load(self) -> DataBundle: + if self.micro: + df = _micro_tabular_df(seed=self.seed) + source = "micro_synthetic" + else: + if not self.data_path: + raise ValueError("data_path is required when micro=False") + df = _load_fpa_fod_table(self.data_path) + source = self.data_path + + required = [ + "FIRE_YEAR", + "STATE", + "DISCOVERY_DOY", + "DISCOVERY_TIME", + "CONT_DOY", + "CONT_TIME", + "LATITUDE", + "LONGITUDE", + "NWCG_GENERAL_CAUSE", + "FIRE_SIZE_CLASS", + ] + df = _coerce_required_columns(df, required) + + if self.region == "CA": + df = df[df["STATE"].astype(str) == "CA"].copy() + + df, numeric_impute = _impute_numeric( + df, + columns=[ + "FIRE_YEAR", + "DISCOVERY_DOY", + "DISCOVERY_TIME", + "CONT_DOY", + "CONT_TIME", + "LATITUDE", + "LONGITUDE", + ], + ) + + state_encoded, state_mapping = _encode_states(df["STATE"]) + numeric_features = [ + "FIRE_YEAR", + "DISCOVERY_DOY", + "DISCOVERY_TIME", + "CONT_DOY", + "CONT_TIME", + "LATITUDE", + "LONGITUDE", + ] + feature_names = numeric_features + ["STATE_ID"] + x_numeric = df[numeric_features].to_numpy(dtype=np.float32) + x = np.concatenate([x_numeric, state_encoded.astype(np.float32).reshape(-1, 1)], axis=1) + + metadata: Dict[str, Any] = { + "dataset": self.name, + "source": source, + "region": self.region, + "task": self.task, + "micro": self.micro, + "seed": self.seed, + "state_mapping": state_mapping, + "numeric_impute_medians": numeric_impute, + } + + if self.task == "cause": + causes = _normalize_cause_strings(df["NWCG_GENERAL_CAUSE"]) + if self.cause_mode == "paper5": + mask = causes.isin(PAPER5_CAUSES) + metadata["dropped_non_paper5_causes"] = int((~mask).sum()) + if int(mask.sum()) == 0: + raise RuntimeError("cause_mode='paper5' kept zero rows after cause normalization.") + causes = causes.loc[mask] + x = x[mask.to_numpy()] + + classes = sorted(causes.unique().tolist()) + label_mapping = {label: index for index, label in enumerate(classes)} + y = 
causes.map(label_mapping).astype("int64").to_numpy() + train_idx, val_idx, test_idx = _stratified_split_indices( + y=y, + train_ratio=self.train_ratio, + val_ratio=self.val_ratio, + test_ratio=self.test_ratio, + seed=self.seed, + ) + label_spec = LabelSpec( + num_targets=len(classes), + task_type="classification", + description="NWCG_GENERAL_CAUSE mapped to class ids.", + extra={"classes": classes, "label_mapping": label_mapping}, + ) + metadata["label_mapping"] = label_mapping + elif self.task == "size": + grouped = df["FIRE_SIZE_CLASS"].astype(str).str.strip().replace({"E": "EFG", "F": "EFG", "G": "EFG"}) + mask = grouped.isin(SIZE_GROUPS) + metadata["dropped_unknown_size_class"] = int((~mask).sum()) + grouped = grouped.loc[mask] + x = x[mask.to_numpy()] + label_mapping = {label: index for index, label in enumerate(SIZE_GROUPS)} + y = grouped.map(label_mapping).astype("int64").to_numpy() + train_idx, val_idx, test_idx = _stratified_split_indices( + y=y, + train_ratio=self.train_ratio, + val_ratio=self.val_ratio, + test_ratio=self.test_ratio, + seed=self.seed, + ) + label_spec = LabelSpec( + num_targets=len(SIZE_GROUPS), + task_type="classification", + description="FIRE_SIZE_CLASS grouped into A/B/C/D/EFG and mapped to class ids.", + extra={"classes": SIZE_GROUPS, "label_mapping": label_mapping}, + ) + metadata["label_mapping"] = label_mapping + else: + raise ValueError(f"Unsupported tabular task: {self.task}") + + if self.normalize: + mins, maxs = _minmax_fit(x[train_idx]) + x = _minmax_apply(x, mins, maxs).astype(np.float32) + metadata["normalization"] = {"mins": mins.tolist(), "maxs": maxs.tolist()} + else: + metadata["normalization"] = None + + splits = { + "train": DataSplit( + inputs=torch.as_tensor(x[train_idx], dtype=torch.float32), + targets=torch.as_tensor(y[train_idx], dtype=torch.long), + metadata={"source": source}, + ), + "val": DataSplit( + inputs=torch.as_tensor(x[val_idx], dtype=torch.float32), + targets=torch.as_tensor(y[val_idx], 
dtype=torch.long), + metadata={"source": source}, + ), + "test": DataSplit( + inputs=torch.as_tensor(x[test_idx], dtype=torch.float32), + targets=torch.as_tensor(y[test_idx], dtype=torch.long), + metadata={"source": source}, + ), + } + + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + input_dim=int(splits["train"].inputs.shape[1]), + description="Incident-level FPA-FOD features for classification.", + extra={"feature_names": feature_names, "dtype": "float32"}, + ), + label_spec=label_spec, + metadata=metadata, + )
+
+ + + +
+[docs] +class FPAFODWeeklyDataset(Dataset): + """Weekly count forecasting dataset derived from FPA-FOD incident records.""" + + name = "fpa_fod_weekly" + + def __init__( + self, + region: Region = "US", + data_path: Optional[str] = None, + micro: bool = False, + lookback_weeks: int = 50, + features: WeeklyFeatures = "counts", + train_ratio: float = 0.6, + val_ratio: float = 0.2, + test_ratio: float = 0.2, + seed: int = 1337, + cache_dir: Optional[str] = None, + ): + super().__init__(cache_dir=cache_dir) + self.region = region + self.data_path = data_path + self.micro = micro + self.lookback_weeks = lookback_weeks + self.features = features + self.train_ratio = train_ratio + self.val_ratio = val_ratio + self.test_ratio = test_ratio + self.seed = seed + +
+[docs] + def _weekly_table(self): + pd = _require_pandas() + + if self.micro: + return _micro_weekly_counts(seed=self.seed, region=self.region), "micro_synthetic" + + if not self.data_path: + raise ValueError("data_path is required when micro=False") + + df = _load_fpa_fod_table(self.data_path) + required = ["FIRE_YEAR", "STATE", "DISCOVERY_DOY", "FIRE_SIZE_CLASS"] + df = _coerce_required_columns(df, required) + + if self.region == "CA": + df = df[df["STATE"].astype(str) == "CA"].copy() + + fire_year = pd.to_numeric(df["FIRE_YEAR"], errors="coerce") + discovery_doy = pd.to_numeric(df["DISCOVERY_DOY"], errors="coerce").fillna(1) + base = pd.to_datetime(fire_year.astype("Int64").astype(str) + "-01-01", errors="coerce") + discovery_dt = base + pd.to_timedelta(discovery_doy.astype(int) - 1, unit="D") + week_start = discovery_dt.dt.to_period("W-MON").dt.start_time + size_class = df["FIRE_SIZE_CLASS"].astype(str).str.strip().replace({"E": "EFG", "F": "EFG", "G": "EFG"}) + size_class = size_class.where(size_class.isin(SIZE_GROUPS), other=np.nan) + + weekly = ( + df.assign(_week_start=week_start, _size=size_class) + .dropna(subset=["_week_start", "_size"]) + .groupby(["_week_start", "_size"]) + .size() + .unstack("_size", fill_value=0) + .reset_index() + .rename(columns={"_week_start": "week_start"}) + .sort_values("week_start") + .reset_index(drop=True) + ) + for size_group in SIZE_GROUPS: + if size_group not in weekly.columns: + weekly[size_group] = 0 + return weekly, self.data_path
+ + +
+[docs] + def _load(self) -> DataBundle: + weekly, source = self._weekly_table() + lookback = int(self.lookback_weeks) + if len(weekly) <= lookback: + raise ValueError(f"Not enough weeks ({len(weekly)}) for lookback={lookback}") + + counts = weekly[SIZE_GROUPS].to_numpy(dtype=np.float32) + if self.features == "counts": + features = counts + feature_names = list(SIZE_GROUPS) + elif self.features == "counts+time": + week_of_year = weekly["week_start"].dt.isocalendar().week.to_numpy(dtype=np.float32) + sin = np.sin(2 * np.pi * week_of_year / 52.0).reshape(-1, 1).astype(np.float32) + cos = np.cos(2 * np.pi * week_of_year / 52.0).reshape(-1, 1).astype(np.float32) + features = np.concatenate([counts, sin, cos], axis=1) + feature_names = list(SIZE_GROUPS) + ["woy_sin", "woy_cos"] + else: + raise ValueError(f"Unsupported feature mode: {self.features}") + + x_windows: List[np.ndarray] = [] + y_targets: List[np.ndarray] = [] + sample_weeks: List[str] = [] + for index in range(lookback, len(weekly)): + x_windows.append(features[index - lookback : index]) + y_targets.append(counts[index]) + sample_weeks.append(str(weekly.loc[index, "week_start"])) + + x = np.stack(x_windows, axis=0).astype(np.float32) + y = np.stack(y_targets, axis=0).astype(np.float32) + train_idx, val_idx, test_idx = _chronological_split_indices( + n=int(x.shape[0]), + train_ratio=self.train_ratio, + val_ratio=self.val_ratio, + test_ratio=self.test_ratio, + ) + + splits = { + "train": DataSplit( + inputs=torch.as_tensor(x[train_idx], dtype=torch.float32), + targets=torch.as_tensor(y[train_idx], dtype=torch.float32), + metadata={"source": source, "region": self.region}, + ), + "val": DataSplit( + inputs=torch.as_tensor(x[val_idx], dtype=torch.float32), + targets=torch.as_tensor(y[val_idx], dtype=torch.float32), + metadata={"source": source, "region": self.region}, + ), + "test": DataSplit( + inputs=torch.as_tensor(x[test_idx], dtype=torch.float32), + targets=torch.as_tensor(y[test_idx], dtype=torch.float32), 
+ metadata={"source": source, "region": self.region}, + ), + } + + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + input_dim=int(splits["train"].inputs.shape[-1]), + description="Weekly FPA-FOD feature windows for next-week forecasting.", + extra={ + "feature_names": feature_names, + "lookback_weeks": lookback, + "dtype": "float32", + "region": self.region, + }, + ), + label_spec=LabelSpec( + num_targets=len(SIZE_GROUPS), + task_type="regression", + description="Next-week counts per size group (A, B, C, D, EFG).", + extra={"targets": list(SIZE_GROUPS), "dtype": "float32"}, + ), + metadata={ + "dataset": self.name, + "source": source, + "region": self.region, + "micro": self.micro, + "seed": self.seed, + "lookback_weeks": lookback, + "features_mode": self.features, + "week_start_for_each_sample": sample_weeks, + }, + )
+
+ + + +def _default_dataset_path() -> Path: + return Path("data/fpa_fod.sqlite") + + +def build_tabular_inspection_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + prog="python -m pyhazards.datasets.fpa_fod_tabular.inspection", + description="Inspect the FPA-FOD tabular dataset and print split/label summary.", + ) + parser.add_argument("--path", default=str(_default_dataset_path()), help="Path to the FPA-FOD sqlite/csv/parquet file.") + parser.add_argument("--task", choices=["cause", "size"], default="cause", help="Tabular classification target.") + parser.add_argument("--region", choices=["US", "CA"], default="US", help="Geographic subset.") + parser.add_argument("--cause-mode", choices=["paper5", "keep_all"], default="paper5", help="Cause label mapping mode.") + parser.add_argument("--micro", action="store_true", help="Use deterministic synthetic data instead of a real file.") + parser.add_argument("--normalize", action="store_true", help="Apply train-fit min/max normalization.") + return parser + + +def build_weekly_inspection_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + prog="python -m pyhazards.datasets.fpa_fod_weekly.inspection", + description="Inspect the FPA-FOD weekly forecasting dataset and print split/shape summary.", + ) + parser.add_argument("--path", default=str(_default_dataset_path()), help="Path to the FPA-FOD sqlite/csv/parquet file.") + parser.add_argument("--region", choices=["US", "CA"], default="US", help="Geographic subset.") + parser.add_argument("--features", choices=["counts", "counts+time"], default="counts", help="Weekly feature mode.") + parser.add_argument("--lookback-weeks", type=int, default=50, help="Sequence length used to predict the next week.") + parser.add_argument("--micro", action="store_true", help="Use deterministic synthetic data instead of a real file.") + return parser + + +def inspect_fpa_fod_tabular(argv: list[str] | None = None) -> int: + args = 
build_tabular_inspection_parser().parse_args(argv) + dataset = FPAFODTabularDataset( + task=args.task, + region=args.region, + cause_mode=args.cause_mode, + data_path=args.path, + micro=args.micro, + normalize=args.normalize, + ) + bundle = dataset.load() + print(f"[OK] Loaded dataset: {dataset.name}") + print(f"[OK] Source: {bundle.metadata['source']}") + print(f"[OK] Task: {bundle.metadata['task']}") + print(f"[OK] Input dim: {bundle.feature_spec.input_dim}") + print(f"[OK] Num targets: {bundle.label_spec.num_targets}") + for split_name, split in bundle.splits.items(): + print(f"[OK] {split_name}: inputs={tuple(split.inputs.shape)} targets={tuple(split.targets.shape)}") + mapping = bundle.metadata.get("label_mapping") + if mapping: + print(f"[OK] Label mapping: {mapping}") + return 0 + + +def inspect_fpa_fod_weekly(argv: list[str] | None = None) -> int: + args = build_weekly_inspection_parser().parse_args(argv) + dataset = FPAFODWeeklyDataset( + region=args.region, + data_path=args.path, + micro=args.micro, + features=args.features, + lookback_weeks=args.lookback_weeks, + ) + bundle = dataset.load() + print(f"[OK] Loaded dataset: {dataset.name}") + print(f"[OK] Source: {bundle.metadata['source']}") + print(f"[OK] Lookback weeks: {bundle.metadata['lookback_weeks']}") + print(f"[OK] Feature mode: {bundle.metadata['features_mode']}") + print(f"[OK] Input dim: {bundle.feature_spec.input_dim}") + print(f"[OK] Num targets: {bundle.label_spec.num_targets}") + for split_name, split in bundle.splits.items(): + print(f"[OK] {split_name}: inputs={tuple(split.inputs.shape)} targets={tuple(split.targets.shape)}") + return 0 + + +__all__ = [ + "CAUSE_SYNONYMS", + "PAPER5_CAUSES", + "SIZE_GROUPS", + "FPAFODTabularDataset", + "FPAFODWeeklyDataset", + "build_tabular_inspection_parser", + "build_weekly_inspection_parser", + "inspect_fpa_fod_tabular", + "inspect_fpa_fod_weekly", +] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/datasets/graph.html b/docs/_modules/pyhazards/datasets/graph.html new file mode 100644 index 00000000..19be97ca --- /dev/null +++ b/docs/_modules/pyhazards/datasets/graph.html @@ -0,0 +1,441 @@ + + + + + + + + + + pyhazards.datasets.graph - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.datasets.graph

+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional, Tuple
+
+import torch
+from torch.utils.data import Dataset
+
+
+
+[docs] +class GraphTemporalDataset(Dataset): + """ + Simple container for county/day style tensors with an optional adjacency. + + Each sample is a window of shape (past_days, num_counties, num_features) and a label + of shape (num_counties,). + """ + + def __init__( + self, + x: torch.Tensor, + y: torch.Tensor, + adjacency: Optional[torch.Tensor] = None, + ): + """ + Args: + x: Tensor (samples, past_days, num_counties, num_features) + y: Tensor (samples, num_counties) or (samples, num_counties, targets) + adjacency: Optional Tensor + - (num_counties, num_counties) global adjacency + - (samples, num_counties, num_counties) per-sample adjacency + """ + if x.ndim != 4: + raise ValueError("x must be (samples, past_days, num_counties, num_features)") + if y.ndim not in (2, 3): + raise ValueError("y must be (samples, num_counties) or (samples, num_counties, targets)") + if adjacency is not None and adjacency.ndim not in (2, 3): + raise ValueError("adjacency must be None, (N,N), or (B,N,N)") + if adjacency is not None and adjacency.ndim == 2 and adjacency.size(0) != x.size(2): + raise ValueError("adjacency size mismatch with num_counties") + if adjacency is not None and adjacency.ndim == 3 and adjacency.size(1) != x.size(2): + raise ValueError("adjacency size mismatch with num_counties") + + self.x = x + self.y = y + self.adj = adjacency + + def __len__(self) -> int: + return self.x.size(0) + + def __getitem__(self, idx: int) -> Tuple[Dict[str, Any], torch.Tensor]: + adj = None + if self.adj is not None: + adj = self.adj if self.adj.ndim == 2 else self.adj[idx] + return {"x": self.x[idx], "adj": adj}, self.y[idx]
+ + + +
+[docs] +def graph_collate(batch: List[Tuple[Dict[str, Any], torch.Tensor]]): + """ + Collate function that stacks x and adjacency if provided. + """ + xs, ys = zip(*batch) + x_tensor = torch.stack([item["x"] for item in xs], dim=0) + adj_list = [item["adj"] for item in xs] + adj = None + if any(a is not None for a in adj_list): + # If some entries are None, replace with first non-None + first = next(a for a in adj_list if a is not None) + adj = torch.stack([a if a is not None else first for a in adj_list], dim=0) + y_tensor = torch.stack(ys, dim=0) + return {"x": x_tensor, "adj": adj}, y_tensor
+ + + +__all__ = ["GraphTemporalDataset", "graph_collate"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/datasets/registry.html b/docs/_modules/pyhazards/datasets/registry.html new file mode 100644 index 00000000..5d51ca24 --- /dev/null +++ b/docs/_modules/pyhazards/datasets/registry.html @@ -0,0 +1,393 @@ + + + + + + + + + + pyhazards.datasets.registry - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.datasets.registry

+from typing import Any, Callable, Dict
+
+from .base import Dataset
+
+_DATASET_REGISTRY: Dict[str, Callable[..., Dataset]] = {}
+
+
+
+[docs] +def register_dataset(name: str, builder: Callable[..., Dataset]) -> None: + if name in _DATASET_REGISTRY: + raise ValueError(f"Dataset '{name}' already registered.") + _DATASET_REGISTRY[name] = builder
+ + + +
+[docs] +def available_datasets(): + return sorted(_DATASET_REGISTRY.keys())
+ + + +
+[docs] +def load_dataset(name: str, **kwargs: Any) -> Dataset: + if name not in _DATASET_REGISTRY: + raise KeyError(f"Dataset '{name}' is not registered. Known: {available_datasets()}") + return _DATASET_REGISTRY[name](**kwargs)
+ +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/datasets/tc.html b/docs/_modules/pyhazards/datasets/tc.html new file mode 100644 index 00000000..bc3fbd20 --- /dev/null +++ b/docs/_modules/pyhazards/datasets/tc.html @@ -0,0 +1,487 @@ + + + + + + + + + + pyhazards.datasets.tc - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.datasets.tc

+from __future__ import annotations
+
+import torch
+
+from ..base import DataBundle, DataSplit, Dataset, FeatureSpec, LabelSpec
+
+
+
+[docs] +class SyntheticTropicalCycloneDataset(Dataset): + """Synthetic storm-history dataset for track/intensity smoke runs.""" + + name = "tc_tracks_synthetic" + + def __init__( + self, + cache_dir: str | None = None, + samples: int = 64, + history: int = 6, + horizon: int = 5, + features: int = 8, + micro: bool = False, + ): + super().__init__(cache_dir=cache_dir) + self.samples = 20 if micro else int(samples) + self.history = int(history) + self.horizon = int(horizon) + self.features = int(features) + +
+[docs] + def _load(self) -> DataBundle: + x = torch.randn(self.samples, self.history, self.features, dtype=torch.float32) + last_state = x[:, -1, :3] + deltas = torch.linspace(0.2, 1.0, steps=self.horizon, dtype=torch.float32).view(1, self.horizon, 1) + direction = torch.tensor([0.4, 0.2, 1.5], dtype=torch.float32).view(1, 1, 3) + y = last_state.unsqueeze(1) + deltas * direction + + train_end = max(1, int(0.7 * self.samples)) + val_end = max(train_end + 1, int(0.85 * self.samples)) + splits = { + "train": DataSplit(x[:train_end], y[:train_end]), + "val": DataSplit(x[train_end:val_end], y[train_end:val_end]), + "test": DataSplit(x[val_end:], y[val_end:]), + } + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + input_dim=self.features, + description="Synthetic storm history with environmental context features.", + extra={"history": self.history, "horizon": self.horizon}, + ), + label_spec=LabelSpec( + num_targets=3, + task_type="regression", + description="Forecast track latitude/longitude and intensity trajectory.", + ), + metadata={ + "dataset": self.name, + "source_dataset": self.name, + "hazard_task": "tc.track_intensity", + }, + )
+
+ + + +
+[docs] +class IBTrACSTropicalCycloneDataset(SyntheticTropicalCycloneDataset): + """Synthetic-backed adapter for IBTrACS-style storm tracks.""" + + name = "ibtracs_tracks" + +
+[docs] + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "IBTrACS", "source_dataset": self.name}) + return bundle
+
+ + + +
+[docs] +class TCBenchAlphaDataset(SyntheticTropicalCycloneDataset): + """Synthetic-backed adapter for TCBench Alpha evaluation runs.""" + + name = "tcbench_alpha" + +
+[docs] + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "TCBench Alpha", "source_dataset": self.name}) + return bundle
+
+ + + +
+[docs] +class TropiCycloneNetDataset(SyntheticTropicalCycloneDataset): + """Synthetic-backed adapter for TropiCycloneNet-Dataset style smoke runs.""" + + name = "tropicyclonenet_dataset" + +
+[docs] + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "TropiCycloneNet-Dataset", "source_dataset": self.name}) + return bundle
+
+ + + +__all__ = [ + "IBTrACSTropicalCycloneDataset", + "SyntheticTropicalCycloneDataset", + "TCBenchAlphaDataset", + "TropiCycloneNetDataset", +] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/datasets/wildfire.html b/docs/_modules/pyhazards/datasets/wildfire.html new file mode 100644 index 00000000..eaf2e0ff --- /dev/null +++ b/docs/_modules/pyhazards/datasets/wildfire.html @@ -0,0 +1,522 @@ + + + + + + + + + + pyhazards.datasets.wildfire - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.datasets.wildfire

+from __future__ import annotations
+
+import torch
+
+from ..base import DataBundle, DataSplit, Dataset, FeatureSpec, LabelSpec
+
+
+
+[docs] +class SyntheticWildfireSpreadDataset(Dataset): + """Synthetic raster dataset for wildfire spread smoke runs.""" + + name = "wildfire_spread_synthetic" + + def __init__( + self, + cache_dir: str | None = None, + samples: int = 64, + channels: int = 12, + height: int = 32, + width: int = 32, + micro: bool = False, + ): + super().__init__(cache_dir=cache_dir) + self.samples = 16 if micro else int(samples) + self.channels = int(channels) + self.height = int(height) + self.width = int(width) + +
+[docs] + def _load(self) -> DataBundle: + x = torch.randn(self.samples, self.channels, self.height, self.width, dtype=torch.float32) + y = torch.zeros(self.samples, 1, self.height, self.width, dtype=torch.float32) + rows = torch.arange(self.height).view(1, self.height, 1) + cols = torch.arange(self.width).view(1, 1, self.width) + + for idx in range(self.samples): + center_r = (idx * 3) % self.height + center_c = (idx * 5) % self.width + radius = 4 + (idx % 5) + mask = ((rows - center_r).float().pow(2) + (cols - center_c).float().pow(2)) <= radius**2 + y[idx, 0] = mask.float() + x[idx, 0] = x[idx, 0] + 2.5 * mask.float() + + train_end = max(1, int(0.7 * self.samples)) + val_end = max(train_end + 1, int(0.85 * self.samples)) + splits = { + "train": DataSplit(x[:train_end], y[:train_end]), + "val": DataSplit(x[train_end:val_end], y[train_end:val_end]), + "test": DataSplit(x[val_end:], y[val_end:]), + } + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + channels=self.channels, + description="Synthetic raster weather and fuel covariates for wildfire spread.", + ), + label_spec=LabelSpec( + num_targets=1, + task_type="segmentation", + description="Binary spread mask for the next forecast horizon.", + ), + metadata={ + "dataset": self.name, + "source_dataset": self.name, + "hazard_task": "wildfire.spread", + }, + )
+
+ + + +
+[docs] +class SyntheticWildfireSpreadTemporalDataset(Dataset): + """Synthetic temporal wildfire spread dataset for sequence-based spread baselines.""" + + name = "wildfire_spread_temporal_synthetic" + + def __init__( + self, + cache_dir: str | None = None, + samples: int = 48, + history: int = 4, + channels: int = 6, + height: int = 16, + width: int = 16, + micro: bool = False, + ): + super().__init__(cache_dir=cache_dir) + self.samples = 12 if micro else int(samples) + self.history = int(history) + self.channels = int(channels) + self.height = int(height) + self.width = int(width) + +
+[docs] + def _load(self) -> DataBundle: + x = torch.randn( + self.samples, + self.history, + self.channels, + self.height, + self.width, + dtype=torch.float32, + ) + y = torch.zeros(self.samples, 1, self.height, self.width, dtype=torch.float32) + rows = torch.arange(self.height).view(1, self.height, 1) + cols = torch.arange(self.width).view(1, 1, self.width) + + for idx in range(self.samples): + center_r = (idx * 2 + 3) % self.height + center_c = (idx * 3 + 5) % self.width + radius = 3 + (idx % 4) + final_mask = ( + ((rows - center_r).float().pow(2) + (cols - center_c).float().pow(2)) + <= radius**2 + ).float() + y[idx, 0] = final_mask + for step in range(self.history): + inner_radius = max(1, radius - (self.history - step - 1)) + history_mask = ( + ((rows - center_r).float().pow(2) + (cols - center_c).float().pow(2)) + <= inner_radius**2 + ).float() + x[idx, step, 0] = x[idx, step, 0] + history_mask + + train_end = max(1, int(0.7 * self.samples)) + val_end = max(train_end + 1, int(0.85 * self.samples)) + splits = { + "train": DataSplit(x[:train_end], y[:train_end]), + "val": DataSplit(x[train_end:val_end], y[train_end:val_end]), + "test": DataSplit(x[val_end:], y[val_end:]), + } + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + channels=self.channels, + description="Synthetic temporal wildfire spread covariates over forecast history windows.", + extra={"history": self.history}, + ), + label_spec=LabelSpec( + num_targets=1, + task_type="segmentation", + description="Binary spread mask for the next forecast horizon.", + ), + metadata={ + "dataset": self.name, + "source_dataset": self.name, + "hazard_task": "wildfire.spread", + }, + )
+
+ + + +__all__ = ["SyntheticWildfireSpreadDataset", "SyntheticWildfireSpreadTemporalDataset"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/engine/distributed.html b/docs/_modules/pyhazards/engine/distributed.html new file mode 100644 index 00000000..8c759703 --- /dev/null +++ b/docs/_modules/pyhazards/engine/distributed.html @@ -0,0 +1,393 @@ + + + + + + + + + + pyhazards.engine.distributed - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.engine.distributed

+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Literal
+
+import torch
+
+Strategy = Literal["auto", "ddp", "dp", "none"]
+
+
+
+[docs] +@dataclass +class DistributedConfig: + strategy: Strategy = "auto" + devices: int | None = None
+ + + +
+[docs] +def select_strategy(prefer: Strategy = "auto") -> Strategy: + if prefer == "auto": + if torch.cuda.is_available() and torch.cuda.device_count() > 1: + return "ddp" + if torch.cuda.is_available(): + return "none" + return "none" + return prefer
+ +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/engine/inference.html b/docs/_modules/pyhazards/engine/inference.html new file mode 100644 index 00000000..14a604ea --- /dev/null +++ b/docs/_modules/pyhazards/engine/inference.html @@ -0,0 +1,392 @@ + + + + + + + + + + pyhazards.engine.inference - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.engine.inference

+from __future__ import annotations
+
+from typing import Any, Callable, Iterable, List
+
+import torch
+
+
+
+[docs] +class SlidingWindowInference: + """ + Placeholder for sliding-window inference over large rasters or grids. + Implement windowing logic and stitching as needed. + """ + + def __init__(self, model: torch.nn.Module, window_fn: Callable[..., Iterable[Any]] | None = None): + self.model = model + self.window_fn = window_fn + + def __call__(self, inputs: Any) -> List[torch.Tensor]: + if self.window_fn is None: + raise NotImplementedError("Provide a window_fn to generate windows from inputs.") + outputs: List[torch.Tensor] = [] + self.model.eval() + with torch.no_grad(): + for window in self.window_fn(inputs): + outputs.append(self.model(window)) + return outputs
+ +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/engine/runner.html b/docs/_modules/pyhazards/engine/runner.html new file mode 100644 index 00000000..828b8524 --- /dev/null +++ b/docs/_modules/pyhazards/engine/runner.html @@ -0,0 +1,424 @@ + + + + + + + + + + pyhazards.engine.runner - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.engine.runner

+from __future__ import annotations
+
+from typing import Optional, Union
+
+import torch.nn as nn
+
+from ..benchmarks import Benchmark, BenchmarkRunSummary, run_benchmark
+from ..configs import ExperimentConfig
+from ..datasets import load_dataset
+from ..datasets.base import DataBundle
+from ..models import build_model
+
+
+
+[docs] +class BenchmarkRunner: + """High-level runner that resolves datasets/models and executes a benchmark.""" + + def __init__(self, benchmark: Optional[Union[str, Benchmark]] = None): + self.benchmark = benchmark + +
+[docs] + def run( + self, + experiment: ExperimentConfig, + model: Optional[nn.Module] = None, + data: Optional[DataBundle] = None, + output_dir: Optional[str] = None, + ) -> BenchmarkRunSummary: + built_model = model or self._build_model(experiment) + bundle = data or self._load_data(experiment) + benchmark = self.benchmark or experiment.benchmark.name + return run_benchmark( + benchmark=benchmark, + model=built_model, + data=bundle, + config=experiment, + output_dir=output_dir, + )
+ + +
+[docs] + def _build_model(self, experiment: ExperimentConfig) -> nn.Module: + return build_model( + name=experiment.model.name, + task=experiment.model.task, + **experiment.model.params, + )
+ + +
+[docs] + def _load_data(self, experiment: ExperimentConfig) -> DataBundle: + return load_dataset( + experiment.dataset.name, + **experiment.dataset.params, + ).load()
+
+ +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/engine/trainer.html b/docs/_modules/pyhazards/engine/trainer.html new file mode 100644 index 00000000..bd3fd00e --- /dev/null +++ b/docs/_modules/pyhazards/engine/trainer.html @@ -0,0 +1,544 @@ + + + + + + + + + + pyhazards.engine.trainer - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.engine.trainer

+from __future__ import annotations
+
+from typing import Any, Callable, Dict, Iterable, List, Optional
+
+import torch
+import torch.nn as nn
+from torch.utils.data import DataLoader, TensorDataset, Dataset
+
+from ..datasets.base import DataBundle
+from ..metrics import MetricBase
+from ..utils.hardware import auto_device
+from .distributed import select_strategy
+
+
+
+[docs] +class Trainer: + """ + Lightweight training abstraction with a familiar API: + fit -> evaluate -> predict. + """ + + def __init__( + self, + model: nn.Module, + device: Optional[torch.device | str] = None, + metrics: Optional[List[MetricBase]] = None, + strategy: str = "auto", + mixed_precision: bool = False, + ): + self.model = model + self.device = torch.device(device) if device else auto_device() + self.metrics = metrics or [] + self.strategy = select_strategy(strategy) + self.mixed_precision = mixed_precision + self.model.to(self.device) + +
+[docs] + def fit( + self, + data: DataBundle, + train_split: str = "train", + val_split: Optional[str] = None, + max_epochs: int = 1, + optimizer: Optional[torch.optim.Optimizer] = None, + loss_fn: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, + batch_size: int = 32, + num_workers: int = 0, + collate_fn: Optional[Callable[[List[Any]], Any]] = None, + ) -> None: + """ + Minimal fit loop that works for tensor-based splits. + Extend/replace with custom DataLoaders for complex data. + """ + if optimizer is None or loss_fn is None: + raise ValueError("optimizer and loss_fn must be provided.") + + train_split_data = data.get_split(train_split) + train_loader = self._make_loader(train_split_data.inputs, train_split_data.targets, batch_size, num_workers, collate_fn) + amp_enabled = self.mixed_precision and self.device.type == "cuda" + try: + scaler = torch.amp.GradScaler("cuda", enabled=amp_enabled) + use_new_amp = True + except (AttributeError, TypeError): + scaler = torch.cuda.amp.GradScaler(enabled=amp_enabled) + use_new_amp = False + + self.model.train() + for _ in range(max_epochs): + for x, y in train_loader: + x = self._to_device(x) + y = self._to_device(y) + optimizer.zero_grad() + if use_new_amp: + with torch.amp.autocast("cuda", enabled=scaler.is_enabled()): + out = self.model(x) + loss = loss_fn(out, y) + else: + with torch.cuda.amp.autocast(enabled=scaler.is_enabled()): + out = self.model(x) + loss = loss_fn(out, y) + scaler.scale(loss).backward() + scaler.step(optimizer) + scaler.update() + + if val_split: + self.evaluate(data, split=val_split)
+ + +
+[docs] + def evaluate( + self, + data: DataBundle, + split: str = "test", + batch_size: int = 64, + num_workers: int = 0, + collate_fn: Optional[Callable[[List[Any]], Any]] = None, + ) -> Dict[str, float]: + split_data = data.get_split(split) + loader = self._make_loader(split_data.inputs, split_data.targets, batch_size, num_workers, collate_fn, shuffle=False) + self.model.eval() + for metric in self.metrics: + metric.reset() + with torch.no_grad(): + for x, y in loader: + x = self._to_device(x) + y = self._to_device(y) + preds = self.model(x) + for metric in self.metrics: + metric.update(preds, y) + results: Dict[str, float] = {} + for metric in self.metrics: + results.update(metric.compute()) + return results
+ + +
+[docs] + def predict( + self, + data: DataBundle, + split: str = "test", + batch_size: int = 64, + num_workers: int = 0, + collate_fn: Optional[Callable[[List[Any]], Any]] = None, + ) -> List[torch.Tensor]: + split_data = data.get_split(split) + loader = self._make_loader(split_data.inputs, split_data.targets, batch_size, num_workers, collate_fn, shuffle=False) + self.model.eval() + outputs: List[torch.Tensor] = [] + with torch.no_grad(): + for x, _ in loader: + x = self._to_device(x) + preds = self.model(x) + outputs.append(preds.cpu()) + return outputs
+ + +
+[docs] + def save_checkpoint(self, path: str) -> None: + torch.save({"model_state": self.model.state_dict()}, path)
+ + +
+[docs] + def _make_loader( + self, + inputs: Any, + targets: Any, + batch_size: int, + num_workers: int, + collate_fn: Optional[Callable[[List[Any]], Any]], + shuffle: bool = True, + ) -> Iterable: + # Accept torch tensors + if isinstance(inputs, torch.Tensor) and isinstance(targets, torch.Tensor): + dataset = TensorDataset(inputs, targets) + return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, collate_fn=collate_fn) + # Accept torch.utils.data.Dataset directly (for complex dict/graph batches) + if isinstance(inputs, Dataset): + return DataLoader(inputs, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, collate_fn=collate_fn) + raise TypeError("Trainer only supports tensor pairs or torch Dataset inputs. Wrap custom logic in a Dataset.")
+ + +
+[docs] + def _to_device(self, obj: Any) -> Any: + if obj is None: + return None + if isinstance(obj, torch.Tensor): + return obj.to(self.device) + if isinstance(obj, (list, tuple)): + return type(obj)(self._to_device(o) for o in obj) + if isinstance(obj, dict): + return {k: self._to_device(v) for k, v in obj.items()} + return obj
+
+ +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/interactive_map.html b/docs/_modules/pyhazards/interactive_map.html new file mode 100644 index 00000000..63ef100a --- /dev/null +++ b/docs/_modules/pyhazards/interactive_map.html @@ -0,0 +1,405 @@ + + + + + + + + + + pyhazards.interactive_map - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.interactive_map

+"""Helpers for the external RAI Fire interactive map."""
+
+from __future__ import annotations
+
+import os
+import sys
+import webbrowser
+
+
+#: Canonical URL for the external RAI Fire interactive map.
+RAI_FIRE_URL: str = "https://rai-fire.com/"
+
+
+def _can_launch_browser() -> bool:
+    if sys.platform.startswith("linux"):
+        return bool(os.environ.get("DISPLAY") or os.environ.get("WAYLAND_DISPLAY"))
+    return True
+
+
+
+[docs] +def open_interactive_map(open_browser: bool = True) -> str: + """Open the RAI Fire map in the user's browser when possible. + + Args: + open_browser: Whether to attempt to open the default browser. + + Returns: + The canonical RAI Fire URL. + """ + + if open_browser and _can_launch_browser(): + try: + webbrowser.open(RAI_FIRE_URL, new=2) + except Exception: + # Headless and restricted environments should still get the URL. + pass + return RAI_FIRE_URL
+ + + +__all__ = ["RAI_FIRE_URL", "open_interactive_map"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/metrics.html b/docs/_modules/pyhazards/metrics.html new file mode 100644 index 00000000..b7b432a4 --- /dev/null +++ b/docs/_modules/pyhazards/metrics.html @@ -0,0 +1,495 @@ + + + + + + + + + + pyhazards.metrics - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.metrics

+from abc import ABC, abstractmethod
+from typing import Dict, List, Optional
+
+import torch
+import torch.nn.functional as F
+
+
+
+[docs] +class MetricBase(ABC): +
+[docs] + @abstractmethod + def update(self, preds: torch.Tensor, targets: torch.Tensor) -> None: + ...
+ + +
+[docs] + @abstractmethod + def compute(self) -> Dict[str, float]: + ...
+ + +
+[docs] + @abstractmethod + def reset(self) -> None: + ...
+
+ + + +
+[docs] +class ClassificationMetrics(MetricBase): + def __init__(self): + self.reset() + +
+[docs] + def reset(self) -> None: + self._preds: List[torch.Tensor] = [] + self._targets: List[torch.Tensor] = []
+ + +
+[docs] + def update(self, preds: torch.Tensor, targets: torch.Tensor) -> None: + self._preds.append(preds.detach().cpu()) + self._targets.append(targets.detach().cpu())
+ + +
+[docs] + def compute(self) -> Dict[str, float]: + preds = torch.cat(self._preds) + targets = torch.cat(self._targets) + pred_labels = preds.argmax(dim=-1) + acc = (pred_labels == targets).float().mean().item() + return {"Acc": acc}
+
+ + + +
+[docs] +class RegressionMetrics(MetricBase): + def __init__(self): + self.reset() + +
+[docs] + def reset(self) -> None: + self._preds: List[torch.Tensor] = [] + self._targets: List[torch.Tensor] = []
+ + +
+[docs] + def update(self, preds: torch.Tensor, targets: torch.Tensor) -> None: + self._preds.append(preds.detach().cpu()) + self._targets.append(targets.detach().cpu())
+ + +
+[docs] + def compute(self) -> Dict[str, float]: + preds = torch.cat(self._preds) + targets = torch.cat(self._targets) + mae = F.l1_loss(preds, targets).item() + rmse = torch.sqrt(F.mse_loss(preds, targets)).item() + return {"MAE": mae, "RMSE": rmse}
+
+ + + +
+[docs] +class SegmentationMetrics(MetricBase): + def __init__(self, num_classes: Optional[int] = None): + self.num_classes = num_classes + self.reset() + +
+[docs] + def reset(self) -> None: + self._preds: List[torch.Tensor] = [] + self._targets: List[torch.Tensor] = []
+ + +
+[docs] + def update(self, preds: torch.Tensor, targets: torch.Tensor) -> None: + self._preds.append(preds.detach().cpu()) + self._targets.append(targets.detach().cpu())
+ + +
+[docs] + def compute(self) -> Dict[str, float]: + preds = torch.cat(self._preds) + targets = torch.cat(self._targets) + pred_labels = preds.argmax(dim=1) + # simple pixel accuracy; extend to IoU/Dice as needed + acc = (pred_labels == targets).float().mean().item() + return {"PixelAcc": acc}
+
+ + + +__all__ = ["MetricBase", "ClassificationMetrics", "RegressionMetrics", "SegmentationMetrics"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/asufm.html b/docs/_modules/pyhazards/models/asufm.html new file mode 100644 index 00000000..4ed85b9e --- /dev/null +++ b/docs/_modules/pyhazards/models/asufm.html @@ -0,0 +1,454 @@ + + + + + + + + + + pyhazards.models.asufm - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.asufm

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class ASUFM(nn.Module): + """Temporal convolution baseline for wildfire activity forecasting.""" + + def __init__( + self, + input_dim: int = 7, + hidden_dim: int = 64, + output_dim: int = 5, + lookback: int = 12, + dropout: float = 0.1, + ): + super().__init__() + if input_dim <= 0: + raise ValueError(f"input_dim must be positive, got {input_dim}") + if hidden_dim <= 0: + raise ValueError(f"hidden_dim must be positive, got {hidden_dim}") + if output_dim <= 0: + raise ValueError(f"output_dim must be positive, got {output_dim}") + if lookback <= 0: + raise ValueError(f"lookback must be positive, got {lookback}") + if not 0.0 <= dropout < 1.0: + raise ValueError(f"dropout must be in [0, 1), got {dropout}") + + self.lookback = int(lookback) + self.temporal = nn.Sequential( + nn.Conv1d(input_dim, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + nn.Conv1d(hidden_dim, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + ) + self.gate = nn.Sequential( + nn.AdaptiveAvgPool1d(1), + nn.Conv1d(hidden_dim, hidden_dim, kernel_size=1), + nn.Sigmoid(), + ) + self.head = nn.Sequential( + nn.Dropout(dropout), + nn.Linear(hidden_dim, output_dim), + ) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError( + "ASUFM expects input shape (batch, lookback, features), " + f"got {tuple(x.shape)}." + ) + if x.size(1) != self.lookback: + raise ValueError(f"ASUFM expected lookback={self.lookback}, got sequence length {x.size(1)}.") + encoded = self.temporal(x.transpose(1, 2)) + gated = encoded * self.gate(encoded) + pooled = torch.mean(gated, dim=-1) + return self.head(pooled)
+
+ + + +
+[docs] +def asufm_builder( + task: str, + input_dim: int = 7, + hidden_dim: int = 64, + output_dim: int = 5, + lookback: int = 12, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() not in {"forecasting", "regression"}: + raise ValueError(f"asufm supports task='forecasting' or 'regression', got {task!r}.") + return ASUFM( + input_dim=input_dim, + hidden_dim=hidden_dim, + output_dim=output_dim, + lookback=lookback, + dropout=dropout, + )
+ + + +__all__ = ["ASUFM", "asufm_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/backbones.html b/docs/_modules/pyhazards/models/backbones.html new file mode 100644 index 00000000..948d09b2 --- /dev/null +++ b/docs/_modules/pyhazards/models/backbones.html @@ -0,0 +1,433 @@ + + + + + + + + + + pyhazards.models.backbones - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.backbones

+import torch
+import torch.nn as nn
+
+
+
+[docs] +class MLPBackbone(nn.Module): + """Simple MLP for tabular features.""" + + def __init__(self, input_dim: int, hidden_dim: int = 256, depth: int = 2): + super().__init__() + layers = [] + dim = input_dim + for _ in range(depth): + layers.extend([nn.Linear(dim, hidden_dim), nn.ReLU()]) + dim = hidden_dim + self.net = nn.Sequential(*layers) + +
+[docs] + def forward(self, x): + return self.net(x)
+
+ + + +
+[docs] +class CNNPatchEncoder(nn.Module): + """Lightweight CNN encoder for raster patches.""" + + def __init__(self, in_channels: int = 3, hidden_dim: int = 64): + super().__init__() + self.features = nn.Sequential( + nn.Conv2d(in_channels, hidden_dim, kernel_size=3, padding=1), + nn.ReLU(), + nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, padding=1), + nn.ReLU(), + nn.AdaptiveAvgPool2d(1), + ) + +
+[docs] + def forward(self, x): + x = self.features(x) + return torch.flatten(x, 1)
+
+ + + +
+[docs] +class TemporalEncoder(nn.Module): + """GRU-based encoder for time-series signals.""" + + def __init__(self, input_dim: int, hidden_dim: int = 128, num_layers: int = 1): + super().__init__() + self.rnn = nn.GRU(input_dim, hidden_dim, num_layers=num_layers, batch_first=True) + +
+[docs] + def forward(self, x): + # x: (batch, seq, features) + out, _ = self.rnn(x) + return out[:, -1, :]
+
+ + + +__all__ = ["MLPBackbone", "CNNPatchEncoder", "TemporalEncoder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/builder.html b/docs/_modules/pyhazards/models/builder.html new file mode 100644 index 00000000..3938fad7 --- /dev/null +++ b/docs/_modules/pyhazards/models/builder.html @@ -0,0 +1,443 @@ + + + + + + + + + + pyhazards.models.builder - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.builder

+from __future__ import annotations
+
+import inspect
+from typing import Any, Dict
+
+import torch.nn as nn
+
+from .backbones import CNNPatchEncoder, MLPBackbone, TemporalEncoder
+from .heads import ClassificationHead, RegressionHead, SegmentationHead
+from .registry import get_model_config
+
+
+
+[docs] +def build_model(name: str, task: str, **kwargs: Any) -> nn.Module: + """ + Build a model by name and task. + This delegates to registry metadata to keep a consistent interface. + """ + cfg = get_model_config(name) + if cfg is None: + raise KeyError(f"Model '{name}' is not registered.") + + task = task.lower() + builder = cfg["builder"] + defaults: Dict[str, Any] = cfg.get("defaults", {}) + merged = {**defaults, **kwargs, "task": task} + + # Some builders (e.g., default_builder) require `name`, while others don't. + # Pass `name` only when the callable can accept it. + sig = inspect.signature(builder) + params = sig.parameters + accepts_kwargs = any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params.values()) + if "name" in params or accepts_kwargs: + return builder(**{**merged, "name": name}) + return builder(**merged)
+ + + +
+[docs] +def default_builder(name: str, task: str, **kwargs: Any) -> nn.Module: + """ + Generic builder for standard backbones + heads. + """ + task = task.lower() + if name == "mlp": + backbone = MLPBackbone(kwargs["in_dim"], hidden_dim=kwargs.get("hidden_dim", 256), depth=kwargs.get("depth", 2)) + head = _make_head(task, kwargs) + return _combine(backbone, head) + if name == "cnn": + backbone = CNNPatchEncoder(kwargs.get("in_channels", 3), hidden_dim=kwargs.get("hidden_dim", 64)) + head = _make_head(task, kwargs, backbone_out_dim=kwargs.get("hidden_dim", 64)) + return _combine(backbone, head) + if name == "temporal": + backbone = TemporalEncoder(kwargs["in_dim"], hidden_dim=kwargs.get("hidden_dim", 128), num_layers=kwargs.get("num_layers", 1)) + head = _make_head(task, kwargs) + return _combine(backbone, head) + raise ValueError(f"Unknown backbone '{name}'.")
+ + + +def _make_head(task: str, kwargs: Dict[str, Any], backbone_out_dim: int | None = None) -> nn.Module: + if task == "classification": + in_dim = backbone_out_dim or kwargs.get("hidden_dim") or kwargs["in_dim"] + return ClassificationHead(in_dim=in_dim, num_classes=kwargs["out_dim"]) + if task == "regression": + in_dim = backbone_out_dim or kwargs.get("hidden_dim") or kwargs["in_dim"] + return RegressionHead(in_dim=in_dim, out_dim=kwargs.get("out_dim", 1)) + if task == "segmentation": + in_channels = kwargs.get("hidden_dim") or backbone_out_dim or kwargs.get("in_channels", 1) + return SegmentationHead(in_channels=in_channels, num_classes=kwargs["out_dim"]) + raise ValueError(f"Unsupported task '{task}'.") + + +def _combine(backbone: nn.Module, head: nn.Module) -> nn.Module: + return nn.Sequential(backbone, head) + + +__all__ = ["build_model", "default_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/cnn_aspp.html b/docs/_modules/pyhazards/models/cnn_aspp.html new file mode 100644 index 00000000..7541d74e --- /dev/null +++ b/docs/_modules/pyhazards/models/cnn_aspp.html @@ -0,0 +1,544 @@ + + + + + + + + + + pyhazards.models.cnn_aspp - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.cnn_aspp

+from __future__ import annotations
+
+from typing import Sequence
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+# ---------------------------------------------------------------------
+# Basic blocks
+# ---------------------------------------------------------------------
+
+class ConvBNReLU(nn.Module):
+    def __init__(
+        self,
+        in_ch: int,
+        out_ch: int,
+        k: int = 3,
+        s: int = 1,
+        p: int = 1,
+        d: int = 1,
+    ):
+        super().__init__()
+        self.conv = nn.Conv2d(
+            in_ch,
+            out_ch,
+            kernel_size=k,
+            stride=s,
+            padding=p,
+            dilation=d,
+            bias=False,
+        )
+        self.bn = nn.BatchNorm2d(out_ch)
+        self.act = nn.ReLU(inplace=True)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return self.act(self.bn(self.conv(x)))
+
+
+# ---------------------------------------------------------------------
+# ASPP
+# ---------------------------------------------------------------------
+
+class ASPP(nn.Module):
+    """
+    Atrous Spatial Pyramid Pooling (ASPP).
+
+    Parallel atrous convolutions + image pooling branch,
+    followed by projection.
+    """
+
+    def __init__(
+        self,
+        in_ch: int,
+        out_ch: int,
+        dilations: Sequence[int] = (1, 3, 6, 12),
+    ):
+        super().__init__()
+
+        if len(dilations) != 4:
+            raise ValueError("ASPP expects exactly 4 dilation rates")
+
+        d1, d2, d3, d4 = dilations
+
+        self.b1 = ConvBNReLU(in_ch, out_ch, k=1, p=0, d=d1)
+        self.b2 = ConvBNReLU(in_ch, out_ch, k=3, p=d2, d=d2)
+        self.b3 = ConvBNReLU(in_ch, out_ch, k=3, p=d3, d=d3)
+        self.b4 = ConvBNReLU(in_ch, out_ch, k=3, p=d4, d=d4)
+
+        self.pool = nn.Sequential(
+            nn.AdaptiveAvgPool2d(1),
+            ConvBNReLU(in_ch, out_ch, k=1, p=0),
+        )
+
+        self.proj = ConvBNReLU(out_ch * 5, out_ch, k=1, p=0)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        b, c, h, w = x.shape
+
+        p = self.pool(x)
+        p = F.interpolate(p, size=(h, w), mode="bilinear", align_corners=False)
+
+        y = torch.cat(
+            [self.b1(x), self.b2(x), self.b3(x), self.b4(x), p],
+            dim=1,
+        )
+        return self.proj(y)
+
+
+# ---------------------------------------------------------------------
+# CNN + ASPP model
+# ---------------------------------------------------------------------
+
+
+[docs] +class WildfireCNNASPP(nn.Module): + """ + CNN + ASPP wildfire segmentation model. + + Input: + x : (B, C, H, W) float tensor + + Output: + logits : (B, 1, H, W) float tensor + (sigmoid applied externally) + """ + + def __init__( + self, + in_channels: int = 12, + base_channels: int = 32, + aspp_channels: int = 32, + dilations: Sequence[int] = (1, 3, 6, 12), + dropout: float = 0.0, + ): + super().__init__() + + self.stem = nn.Sequential( + ConvBNReLU(in_channels, base_channels, k=3, p=1), + ConvBNReLU(base_channels, base_channels, k=3, p=1), + ) + + self.aspp = ASPP( + in_ch=base_channels, + out_ch=aspp_channels, + dilations=dilations, + ) + + self.drop = nn.Dropout2d(dropout) if dropout > 0 else nn.Identity() + self.head = nn.Conv2d(aspp_channels, 1, kernel_size=1) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 4: + raise ValueError( + f"Expected input of shape (B,C,H,W), got {tuple(x.shape)}" + ) + + f = self.stem(x) + y = self.aspp(f) + y = self.drop(y) + return self.head(y)
+
+ + + +# --------------------------------------------------------------------- +# PyHazards model builder +# --------------------------------------------------------------------- + +
+[docs] +def cnn_aspp_builder( + task: str, + in_channels: int = 12, + base_channels: int = 32, + aspp_channels: int = 32, + dilations: Sequence[int] = (1, 3, 6, 12), + dropout: float = 0.0, + **kwargs, +) -> nn.Module: + """ + PyHazards-style model builder. + """ + _ = kwargs # explicitly ignore unused builder args + + if "segmentation" not in task: + raise ValueError( + f"WildfireCNNASPP is segmentation-only. Got task='{task}'" + ) + + return WildfireCNNASPP( + in_channels=in_channels, + base_channels=base_channels, + aspp_channels=aspp_channels, + dilations=dilations, + dropout=dropout, + )
+ +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/eqnet.html b/docs/_modules/pyhazards/models/eqnet.html new file mode 100644 index 00000000..f661b050 --- /dev/null +++ b/docs/_modules/pyhazards/models/eqnet.html @@ -0,0 +1,437 @@ + + + + + + + + + + pyhazards.models.eqnet - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.eqnet

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class EQNet(nn.Module): + """Transformer-style earthquake phase-picking baseline.""" + + def __init__( + self, + in_channels: int = 3, + hidden_dim: int = 48, + num_heads: int = 4, + num_layers: int = 2, + dropout: float = 0.1, + ): + super().__init__() + self.proj = nn.Conv1d(in_channels, hidden_dim, kernel_size=5, padding=2) + encoder_layer = nn.TransformerEncoderLayer( + d_model=hidden_dim, + nhead=num_heads, + dim_feedforward=2 * hidden_dim, + dropout=dropout, + batch_first=True, + activation="gelu", + ) + self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers) + self.head = nn.Sequential( + nn.Linear(hidden_dim, hidden_dim), + nn.GELU(), + nn.Linear(hidden_dim, 2), + ) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("EQNet expects inputs shaped (batch, channels, length).") + seq = self.proj(x).transpose(1, 2) + encoded = self.encoder(seq) + pooled = encoded.mean(dim=1) + return self.head(pooled)
+
+ + + +
+[docs] +def eqnet_builder( + task: str, + in_channels: int = 3, + hidden_dim: int = 48, + num_heads: int = 4, + num_layers: int = 2, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("EQNet only supports regression-style phase picking outputs.") + return EQNet( + in_channels=in_channels, + hidden_dim=hidden_dim, + num_heads=num_heads, + num_layers=num_layers, + dropout=dropout, + )
+ + + +__all__ = ["EQNet", "eqnet_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/eqtransformer.html b/docs/_modules/pyhazards/models/eqtransformer.html new file mode 100644 index 00000000..9cd0e093 --- /dev/null +++ b/docs/_modules/pyhazards/models/eqtransformer.html @@ -0,0 +1,440 @@ + + + + + + + + + + pyhazards.models.eqtransformer - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.eqtransformer

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class EQTransformer(nn.Module): + """Compact sequence model for joint earthquake phase picking.""" + + def __init__( + self, + in_channels: int = 3, + hidden_dim: int = 48, + num_layers: int = 2, + dropout: float = 0.1, + ): + super().__init__() + self.encoder = nn.Sequential( + nn.Conv1d(in_channels, hidden_dim, kernel_size=11, padding=5), + nn.ReLU(), + nn.Conv1d(hidden_dim, hidden_dim, kernel_size=7, padding=3), + nn.ReLU(), + ) + self.temporal = nn.LSTM( + input_size=hidden_dim, + hidden_size=hidden_dim, + num_layers=num_layers, + batch_first=True, + bidirectional=True, + dropout=dropout if num_layers > 1 else 0.0, + ) + self.attention = nn.Linear(2 * hidden_dim, 1) + self.head = nn.Sequential( + nn.Linear(2 * hidden_dim, hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, 2), + ) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("EQTransformer expects inputs shaped (batch, channels, length).") + encoded = self.encoder(x).transpose(1, 2) + temporal, _ = self.temporal(encoded) + weights = torch.softmax(self.attention(temporal), dim=1) + pooled = torch.sum(weights * temporal, dim=1) + return self.head(pooled)
+
+ + + +
+[docs] +def eqtransformer_builder( + task: str, + in_channels: int = 3, + hidden_dim: int = 48, + num_layers: int = 2, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("EQTransformer only supports regression-style phase picking outputs.") + return EQTransformer( + in_channels=in_channels, + hidden_dim=hidden_dim, + num_layers=num_layers, + dropout=dropout, + )
+ + + +__all__ = ["EQTransformer", "eqtransformer_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/firecastnet.html b/docs/_modules/pyhazards/models/firecastnet.html new file mode 100644 index 00000000..c2a14995 --- /dev/null +++ b/docs/_modules/pyhazards/models/firecastnet.html @@ -0,0 +1,444 @@ + + + + + + + + + + pyhazards.models.firecastnet - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.firecastnet

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class FireCastNet(nn.Module): + """Compact encoder-decoder wildfire spread network.""" + + def __init__( + self, + in_channels: int = 12, + hidden_dim: int = 32, + out_channels: int = 1, + dropout: float = 0.1, + ): + super().__init__() + if in_channels <= 0: + raise ValueError(f"in_channels must be positive, got {in_channels}") + if hidden_dim <= 0: + raise ValueError(f"hidden_dim must be positive, got {hidden_dim}") + if out_channels <= 0: + raise ValueError(f"out_channels must be positive, got {out_channels}") + if not 0.0 <= dropout < 1.0: + raise ValueError(f"dropout must be in [0, 1), got {dropout}") + + self.in_channels = int(in_channels) + self.encoder = nn.Sequential( + nn.Conv2d(in_channels, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + ) + self.decoder = nn.Sequential( + nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + nn.Dropout2d(dropout) if dropout > 0 else nn.Identity(), + nn.Conv2d(hidden_dim, out_channels, kernel_size=1), + ) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 4: + raise ValueError( + "FireCastNet expects input shape (batch, channels, height, width), " + f"got {tuple(x.shape)}." + ) + if x.size(1) != self.in_channels: + raise ValueError(f"FireCastNet expected in_channels={self.in_channels}, got {x.size(1)}.") + encoded = self.encoder(x) + return self.decoder(encoded)
+
+ + + +
+[docs] +def firecastnet_builder( + task: str, + in_channels: int = 12, + hidden_dim: int = 32, + out_channels: int = 1, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() not in {"segmentation", "regression"}: + raise ValueError(f"firecastnet supports task='segmentation' or 'regression', got {task!r}.") + return FireCastNet( + in_channels=in_channels, + hidden_dim=hidden_dim, + out_channels=out_channels, + dropout=dropout, + )
+ + + +__all__ = ["FireCastNet", "firecastnet_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/floodcast.html b/docs/_modules/pyhazards/models/floodcast.html new file mode 100644 index 00000000..5310b664 --- /dev/null +++ b/docs/_modules/pyhazards/models/floodcast.html @@ -0,0 +1,432 @@ + + + + + + + + + + pyhazards.models.floodcast - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.floodcast

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class FloodCast(nn.Module): + """Compact spatiotemporal inundation baseline.""" + + def __init__( + self, + in_channels: int = 3, + history: int = 4, + hidden_dim: int = 32, + out_channels: int = 1, + dropout: float = 0.1, + ): + super().__init__() + self.history = int(history) + self.encoder = nn.Sequential( + nn.Conv3d(in_channels, hidden_dim, kernel_size=(3, 3, 3), padding=1), + nn.ReLU(), + nn.Dropout3d(dropout) if dropout > 0 else nn.Identity(), + nn.Conv3d(hidden_dim, hidden_dim, kernel_size=(3, 3, 3), padding=1), + nn.ReLU(), + ) + self.head = nn.Conv2d(hidden_dim, out_channels, kernel_size=1) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 5: + raise ValueError("FloodCast expects inputs shaped (batch, history, channels, height, width).") + if x.size(1) != self.history: + raise ValueError(f"FloodCast expected history={self.history}, got {x.size(1)}.") + encoded = self.encoder(x.permute(0, 2, 1, 3, 4)) + fused = encoded.mean(dim=2) + return self.head(fused)
+
+ + + +
+[docs] +def floodcast_builder( + task: str, + in_channels: int = 3, + history: int = 4, + hidden_dim: int = 32, + out_channels: int = 1, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() not in {"regression", "segmentation"}: + raise ValueError("FloodCast only supports regression or segmentation-style inundation outputs.") + return FloodCast( + in_channels=in_channels, + history=history, + hidden_dim=hidden_dim, + out_channels=out_channels, + dropout=dropout, + )
+ + + +__all__ = ["FloodCast", "floodcast_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/forefire.html b/docs/_modules/pyhazards/models/forefire.html new file mode 100644 index 00000000..06bd859d --- /dev/null +++ b/docs/_modules/pyhazards/models/forefire.html @@ -0,0 +1,438 @@ + + + + + + + + + + pyhazards.models.forefire - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.forefire

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+
+[docs] +class ForeFireAdapter(nn.Module): + """Lightweight deterministic spread adapter inspired by simulator-style fronts.""" + + def __init__( + self, + in_channels: int = 12, + out_channels: int = 1, + diffusion_steps: int = 2, + ): + super().__init__() + if in_channels <= 0: + raise ValueError(f"in_channels must be positive, got {in_channels}") + if out_channels != 1: + raise ValueError(f"ForeFireAdapter only supports out_channels=1, got {out_channels}") + if diffusion_steps <= 0: + raise ValueError(f"diffusion_steps must be positive, got {diffusion_steps}") + self.in_channels = int(in_channels) + self.diffusion_steps = int(diffusion_steps) + kernel = torch.tensor( + [[0.05, 0.15, 0.05], [0.15, 0.20, 0.15], [0.05, 0.15, 0.05]], + dtype=torch.float32, + ).view(1, 1, 3, 3) + self.register_buffer("spread_kernel", kernel) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 4: + raise ValueError( + "ForeFireAdapter expects input shape (batch, channels, height, width), " + f"got {tuple(x.shape)}." + ) + if x.size(1) != self.in_channels: + raise ValueError(f"ForeFireAdapter expected in_channels={self.in_channels}, got {x.size(1)}.") + state = torch.sigmoid(x[:, :1]) + fuel = torch.sigmoid(x[:, 1:2]) + wind = torch.tanh(x[:, 2:3]).abs() + for _ in range(self.diffusion_steps): + neighborhood = F.conv2d(state, self.spread_kernel, padding=1) + state = torch.clamp(0.45 * state + 0.4 * neighborhood + 0.1 * fuel + 0.05 * wind, 0.0, 1.0) + return state
+
+ + + +
+[docs] +def forefire_builder( + task: str, + in_channels: int = 12, + out_channels: int = 1, + diffusion_steps: int = 2, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() not in {"segmentation", "regression"}: + raise ValueError(f"forefire supports task='segmentation' or 'regression', got {task!r}.") + return ForeFireAdapter( + in_channels=in_channels, + out_channels=out_channels, + diffusion_steps=diffusion_steps, + )
+ + + +__all__ = ["ForeFireAdapter", "forefire_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/fourcastnet_tc.html b/docs/_modules/pyhazards/models/fourcastnet_tc.html new file mode 100644 index 00000000..3a612c46 --- /dev/null +++ b/docs/_modules/pyhazards/models/fourcastnet_tc.html @@ -0,0 +1,436 @@ + + + + + + + + + + pyhazards.models.fourcastnet_tc - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.fourcastnet_tc

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class FourCastNetTC(nn.Module): + """Experimental wrapper-style FourCastNet storm adapter.""" + + def __init__( + self, + input_dim: int = 8, + history: int = 6, + hidden_dim: int = 96, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + ): + super().__init__() + self.history = int(history) + self.horizon = int(horizon) + self.output_dim = int(output_dim) + self.net = nn.Sequential( + nn.Linear(self.history * input_dim, hidden_dim), + nn.GELU(), + nn.Dropout(dropout) if dropout > 0 else nn.Identity(), + nn.Linear(hidden_dim, hidden_dim), + nn.GELU(), + nn.Linear(hidden_dim, self.horizon * self.output_dim), + ) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("FourCastNetTC expects inputs shaped (batch, history, features).") + if x.size(1) != self.history: + raise ValueError(f"FourCastNetTC expected history={self.history}, got {x.size(1)}.") + preds = self.net(x.reshape(x.size(0), -1)) + return preds.view(x.size(0), self.horizon, self.output_dim)
+
+ + + +
+[docs] +def fourcastnet_tc_builder( + task: str, + input_dim: int = 8, + history: int = 6, + hidden_dim: int = 96, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("FourCastNetTC only supports regression for track/intensity forecasting.") + return FourCastNetTC( + input_dim=input_dim, + history=history, + hidden_dim=hidden_dim, + horizon=horizon, + output_dim=output_dim, + dropout=dropout, + )
+ + + +__all__ = ["FourCastNetTC", "fourcastnet_tc_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/google_flood_forecasting.html b/docs/_modules/pyhazards/models/google_flood_forecasting.html new file mode 100644 index 00000000..a68c1e5a --- /dev/null +++ b/docs/_modules/pyhazards/models/google_flood_forecasting.html @@ -0,0 +1,464 @@ + + + + + + + + + + pyhazards.models.google_flood_forecasting - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.google_flood_forecasting

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class GoogleFloodForecasting(nn.Module): + """Sequence baseline for streamflow-style flood forecasting.""" + + def __init__( + self, + input_dim: int = 2, + hidden_dim: int = 64, + out_dim: int = 1, + history: int = 4, + dropout: float = 0.1, + ): + super().__init__() + if input_dim <= 0: + raise ValueError(f"input_dim must be positive, got {input_dim}") + if hidden_dim <= 0: + raise ValueError(f"hidden_dim must be positive, got {hidden_dim}") + if out_dim <= 0: + raise ValueError(f"out_dim must be positive, got {out_dim}") + if history <= 0: + raise ValueError(f"history must be positive, got {history}") + if not 0.0 <= dropout < 1.0: + raise ValueError(f"dropout must be in [0, 1), got {dropout}") + + self.history = int(history) + self.proj = nn.Linear(input_dim, hidden_dim) + self.temporal = nn.TransformerEncoder( + nn.TransformerEncoderLayer( + d_model=hidden_dim, + nhead=4, + dim_feedforward=hidden_dim * 2, + dropout=dropout, + batch_first=True, + activation="gelu", + ), + num_layers=2, + ) + self.head = nn.Sequential( + nn.Linear(hidden_dim, hidden_dim), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(hidden_dim, out_dim), + ) + +
+[docs] + def forward(self, batch) -> torch.Tensor: + if not isinstance(batch, dict) or "x" not in batch: + raise ValueError("GoogleFloodForecasting expects a mapping input with key 'x'.") + x = batch["x"] + if x.ndim != 4: + raise ValueError( + "GoogleFloodForecasting expects input shape (batch, history, nodes, features), " + f"got {tuple(x.shape)}." + ) + if x.size(1) != self.history: + raise ValueError(f"GoogleFloodForecasting expected history={self.history}, got {x.size(1)}.") + encoded = self.proj(x) + temporal = encoded.permute(0, 2, 1, 3).reshape(-1, self.history, encoded.size(-1)) + hidden = self.temporal(temporal)[:, -1] + preds = self.head(hidden) + return preds.view(x.size(0), x.size(2), -1)
+
+ + + +
+[docs] +def google_flood_forecasting_builder( + task: str, + input_dim: int = 2, + hidden_dim: int = 64, + out_dim: int = 1, + history: int = 4, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError( + "google_flood_forecasting only supports task='regression', " + f"got {task!r}." + ) + return GoogleFloodForecasting( + input_dim=input_dim, + hidden_dim=hidden_dim, + out_dim=out_dim, + history=history, + dropout=dropout, + )
+ + + +__all__ = ["GoogleFloodForecasting", "google_flood_forecasting_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/gpd.html b/docs/_modules/pyhazards/models/gpd.html new file mode 100644 index 00000000..7d407618 --- /dev/null +++ b/docs/_modules/pyhazards/models/gpd.html @@ -0,0 +1,422 @@ + + + + + + + + + + pyhazards.models.gpd - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.gpd

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class GPD(nn.Module): + """Simple CNN baseline for generalized phase detection style picking.""" + + def __init__(self, in_channels: int = 3, hidden_dim: int = 32, dropout: float = 0.1): + super().__init__() + self.features = nn.Sequential( + nn.Conv1d(in_channels, hidden_dim, kernel_size=9, padding=4), + nn.ReLU(), + nn.MaxPool1d(kernel_size=2), + nn.Conv1d(hidden_dim, hidden_dim, kernel_size=7, padding=3), + nn.ReLU(), + nn.MaxPool1d(kernel_size=2), + nn.Conv1d(hidden_dim, hidden_dim, kernel_size=5, padding=2), + nn.ReLU(), + nn.AdaptiveAvgPool1d(1), + ) + self.head = nn.Sequential( + nn.Flatten(), + nn.Dropout(dropout), + nn.Linear(hidden_dim, hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, 2), + ) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("GPD expects inputs shaped (batch, channels, length).") + return self.head(self.features(x))
+
+ + + +
+[docs] +def gpd_builder( + task: str, + in_channels: int = 3, + hidden_dim: int = 32, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("GPD only supports regression-style phase picking outputs.") + return GPD(in_channels=in_channels, hidden_dim=hidden_dim, dropout=dropout)
+ + + +__all__ = ["GPD", "gpd_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/graphcast_tc.html b/docs/_modules/pyhazards/models/graphcast_tc.html new file mode 100644 index 00000000..82726dfb --- /dev/null +++ b/docs/_modules/pyhazards/models/graphcast_tc.html @@ -0,0 +1,439 @@ + + + + + + + + + + pyhazards.models.graphcast_tc - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.graphcast_tc

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class GraphCastTC(nn.Module): + """Experimental wrapper-style GraphCast storm adapter.""" + + def __init__( + self, + input_dim: int = 8, + hidden_dim: int = 96, + horizon: int = 5, + output_dim: int = 3, + num_layers: int = 2, + num_heads: int = 4, + dropout: float = 0.1, + ): + super().__init__() + self.horizon = int(horizon) + self.output_dim = int(output_dim) + self.proj = nn.Linear(input_dim, hidden_dim) + encoder_layer = nn.TransformerEncoderLayer( + d_model=hidden_dim, + nhead=num_heads, + dim_feedforward=2 * hidden_dim, + dropout=dropout, + batch_first=True, + ) + self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers) + self.head = nn.Linear(hidden_dim, self.horizon * self.output_dim) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("GraphCastTC expects inputs shaped (batch, history, features).") + encoded = self.encoder(self.proj(x)) + preds = self.head(encoded.mean(dim=1)) + return preds.view(x.size(0), self.horizon, self.output_dim)
+
+ + + +
+[docs] +def graphcast_tc_builder( + task: str, + input_dim: int = 8, + hidden_dim: int = 96, + horizon: int = 5, + output_dim: int = 3, + num_layers: int = 2, + num_heads: int = 4, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("GraphCastTC only supports regression for track/intensity forecasting.") + return GraphCastTC( + input_dim=input_dim, + hidden_dim=hidden_dim, + horizon=horizon, + output_dim=output_dim, + num_layers=num_layers, + num_heads=num_heads, + dropout=dropout, + )
+ + + +__all__ = ["GraphCastTC", "graphcast_tc_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/heads.html b/docs/_modules/pyhazards/models/heads.html new file mode 100644 index 00000000..990901ec --- /dev/null +++ b/docs/_modules/pyhazards/models/heads.html @@ -0,0 +1,418 @@ + + + + + + + + + + pyhazards.models.heads - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.heads

+import torch.nn as nn
+
+
+
+[docs] +class ClassificationHead(nn.Module): + """Simple classification head.""" + + def __init__(self, in_dim: int, num_classes: int): + super().__init__() + self.fc = nn.Linear(in_dim, num_classes) + +
+[docs] + def forward(self, x): + return self.fc(x)
+
+ + + +
+[docs] +class RegressionHead(nn.Module): + """Regression head for scalar or multi-target outputs.""" + + def __init__(self, in_dim: int, out_dim: int = 1): + super().__init__() + self.fc = nn.Linear(in_dim, out_dim) + +
+[docs] + def forward(self, x): + return self.fc(x)
+
+ + + +
+[docs] +class SegmentationHead(nn.Module): + """Segmentation head for raster masks.""" + + def __init__(self, in_channels: int, num_classes: int): + super().__init__() + self.conv = nn.Conv2d(in_channels, num_classes, kernel_size=1) + +
+[docs] + def forward(self, x): + return self.conv(x)
+
+ + + +__all__ = ["ClassificationHead", "RegressionHead", "SegmentationHead"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/hurricast.html b/docs/_modules/pyhazards/models/hurricast.html new file mode 100644 index 00000000..b11dfb3e --- /dev/null +++ b/docs/_modules/pyhazards/models/hurricast.html @@ -0,0 +1,439 @@ + + + + + + + + + + pyhazards.models.hurricast - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.hurricast

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class Hurricast(nn.Module): + """Compact storm-track and intensity baseline for Wave 2 vertical slices.""" + + def __init__( + self, + input_dim: int = 8, + hidden_dim: int = 64, + num_layers: int = 2, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + ): + super().__init__() + self.horizon = int(horizon) + self.output_dim = int(output_dim) + self.encoder = nn.LSTM( + input_dim, + hidden_dim, + num_layers=num_layers, + batch_first=True, + dropout=dropout if num_layers > 1 else 0.0, + ) + self.head = nn.Sequential( + nn.Linear(hidden_dim, hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, self.horizon * self.output_dim), + ) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("Hurricast expects inputs shaped (batch, history, features).") + encoded, _ = self.encoder(x) + last = encoded[:, -1, :] + preds = self.head(last) + return preds.view(x.size(0), self.horizon, self.output_dim)
+
+ + + +
+[docs] +def hurricast_builder( + task: str, + input_dim: int = 8, + hidden_dim: int = 64, + num_layers: int = 2, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("Hurricast only supports regression for track/intensity forecasting.") + return Hurricast( + input_dim=input_dim, + hidden_dim=hidden_dim, + num_layers=num_layers, + horizon=horizon, + output_dim=output_dim, + dropout=dropout, + )
+ + + +__all__ = ["Hurricast", "hurricast_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/hydrographnet.html b/docs/_modules/pyhazards/models/hydrographnet.html new file mode 100644 index 00000000..401a22a6 --- /dev/null +++ b/docs/_modules/pyhazards/models/hydrographnet.html @@ -0,0 +1,772 @@ + + + + + + + + + + pyhazards.models.hydrographnet - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.hydrographnet

+from __future__ import annotations
+
+from typing import Dict, Optional, Tuple
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class MLP(nn.Module):
+    def __init__(self, in_dim: int, out_dim: int, hidden_dim: int = 64, dropout: float = 0.0):
+        super().__init__()
+        self.layers = nn.Sequential(
+            nn.Linear(in_dim, hidden_dim),
+            nn.ReLU(),
+            nn.Dropout(dropout) if dropout > 0 else nn.Identity(),
+            nn.Linear(hidden_dim, hidden_dim),
+            nn.ReLU(),
+            nn.Dropout(dropout) if dropout > 0 else nn.Identity(),
+            nn.Linear(hidden_dim, out_dim),
+        )
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return self.layers(x)
+
+
+class KAN(nn.Module):
+    """
+    Lightweight KAN-style harmonic basis encoder for node features.
+    """
+
+    def __init__(self, in_dim: int, harmonics: int = 5, hidden_dim: int = 64):
+        super().__init__()
+        self.in_dim = in_dim
+        self.harmonics = harmonics
+        self.feature_proj = nn.ModuleList(
+            [nn.Linear(2 * harmonics + 1, hidden_dim) for _ in range(in_dim)]
+        )
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        # x: (B, N, F)
+        outputs = []
+        for i in range(self.in_dim):
+            xi = x[:, :, i].unsqueeze(-1)
+            basis = [torch.ones_like(xi)]
+            for k in range(1, self.harmonics + 1):
+                basis.append(torch.sin(k * xi))
+                basis.append(torch.cos(k * xi))
+            basis = torch.cat(basis, dim=-1)
+            outputs.append(self.feature_proj[i](basis))
+        return torch.stack(outputs, dim=0).sum(dim=0)
+
+
+class GNBlock(nn.Module):
+    """
+    Message-passing block with residual edge and node updates.
+    """
+
+    def __init__(self, hidden_dim: int, dropout: float = 0.0):
+        super().__init__()
+        self.edge_mlp = MLP(3 * hidden_dim, hidden_dim, hidden_dim, dropout=dropout)
+        self.node_mlp = MLP(2 * hidden_dim, hidden_dim, hidden_dim, dropout=dropout)
+
+    def forward(
+        self,
+        node: torch.Tensor,
+        edge: torch.Tensor,
+        senders: torch.Tensor,
+        receivers: torch.Tensor,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        sender_feat = node[:, senders, :]
+        receiver_feat = node[:, receivers, :]
+
+        edge_input = torch.cat([edge, sender_feat, receiver_feat], dim=-1)
+        edge = edge + self.edge_mlp(edge_input)
+
+        agg = torch.zeros_like(node)
+        agg.index_add_(1, receivers, edge)
+
+        # Degree-normalized aggregation improves stability when graph density changes.
+        deg = torch.zeros(node.size(1), device=node.device, dtype=node.dtype)
+        deg.index_add_(0, receivers, torch.ones_like(receivers, dtype=node.dtype))
+        agg = agg / deg.clamp(min=1.0).view(1, -1, 1)
+
+        node_input = torch.cat([node, agg], dim=-1)
+        node = node + self.node_mlp(node_input)
+        return node, edge
+
+
+
+[docs] +class HydroGraphNet(nn.Module): + """ + PhysicsNeMo-inspired HydroGraphNet: + encoder -> message-passing processor -> residual delta-state decoder. + + Supports one-step forward prediction and autoregressive rollout. + """ + + def __init__( + self, + node_in_dim: int, + edge_in_dim: int, + out_dim: int, + hidden_dim: int = 64, + harmonics: int = 5, + num_gn_blocks: int = 5, + state_dim: Optional[int] = None, + rollout_steps: int = 1, + enforce_nonnegative: bool = False, + dropout: float = 0.0, + ): + super().__init__() + self.node_in_dim = int(node_in_dim) + self.edge_in_dim = int(edge_in_dim) + self.out_dim = int(out_dim) + self.state_dim = int(state_dim) if state_dim is not None else min(2, self.node_in_dim) + self.state_dim = max(1, min(self.state_dim, self.node_in_dim)) + if self.out_dim > self.state_dim: + raise ValueError( + f"out_dim={self.out_dim} cannot exceed residual state_dim={self.state_dim}." + ) + self.rollout_steps = max(1, int(rollout_steps)) + self.enforce_nonnegative = bool(enforce_nonnegative) + + # Encoder + self.node_encoder = KAN( + in_dim=self.node_in_dim, + hidden_dim=hidden_dim, + harmonics=harmonics, + ) + self.edge_encoder = MLP( + in_dim=self.edge_in_dim, + out_dim=hidden_dim, + hidden_dim=hidden_dim, + dropout=dropout, + ) + + # Processor + self.processor = nn.ModuleList( + [GNBlock(hidden_dim=hidden_dim, dropout=dropout) for _ in range(num_gn_blocks)] + ) + + # Decoder predicts delta of physically meaningful states. + self.decoder = MLP( + in_dim=hidden_dim, + out_dim=self.state_dim, + hidden_dim=hidden_dim, + dropout=dropout, + ) + +
+[docs] + def _edge_index(self, adj: torch.Tensor, batch_size: int) -> Tuple[torch.Tensor, torch.Tensor]: + if adj.dim() == 2: + a = adj + elif adj.dim() == 3: + if adj.size(0) != batch_size: + raise ValueError(f"adj batch size mismatch: got {adj.size(0)}, expected {batch_size}") + a = adj[0] + for i in range(1, batch_size): + if not torch.allclose(adj[i], a): + raise ValueError( + "Per-sample varying adjacency is not supported yet. " + "Provide a shared (N, N) adjacency or identical (B, N, N) adjacency." + ) + else: + raise ValueError("adj must be shaped (N, N) or (B, N, N).") + + a = (a > 0).to(dtype=torch.bool) + a.fill_diagonal_(True) + return a.nonzero(as_tuple=True)
+ + +
+[docs] + def _match_edge_dim(self, edge_feat: torch.Tensor) -> torch.Tensor: + # edge_feat: (B, E, F_edge_raw) + f_raw = edge_feat.size(-1) + if f_raw == self.edge_in_dim: + return edge_feat + if f_raw > self.edge_in_dim: + return edge_feat[..., : self.edge_in_dim] + pad = torch.zeros( + edge_feat.size(0), + edge_feat.size(1), + self.edge_in_dim - f_raw, + device=edge_feat.device, + dtype=edge_feat.dtype, + ) + return torch.cat([edge_feat, pad], dim=-1)
+ + +
+[docs] + def _prepare_edge_inputs( + self, + batch: Dict[str, torch.Tensor], + senders: torch.Tensor, + receivers: torch.Tensor, + batch_size: int, + device: torch.device, + dtype: torch.dtype, + ) -> torch.Tensor: + edge_attr = batch.get("edge_attr") + if edge_attr is not None: + edge_attr = edge_attr.to(device=device, dtype=dtype) + if edge_attr.dim() == 2: + edge_attr = edge_attr.unsqueeze(0).expand(batch_size, -1, -1) + elif edge_attr.dim() == 3 and edge_attr.size(0) == 1 and batch_size > 1: + edge_attr = edge_attr.expand(batch_size, -1, -1) + if edge_attr.dim() != 3: + raise ValueError("edge_attr must be shaped (E, F_edge) or (B, E, F_edge).") + if edge_attr.size(1) != senders.numel(): + raise ValueError( + f"edge_attr edge count mismatch: got {edge_attr.size(1)}, expected {senders.numel()}." + ) + return self._match_edge_dim(edge_attr) + + # Derive geometric edge features from coords: [dx, dy, distance] + coords = batch.get("coords") + if coords is None: + edge_feat = torch.zeros(batch_size, senders.numel(), 3, device=device, dtype=dtype) + return self._match_edge_dim(edge_feat) + + coords = coords.to(device=device, dtype=dtype) + if coords.dim() == 2: + coords = coords.unsqueeze(0).expand(batch_size, -1, -1) + elif coords.dim() == 3 and coords.size(0) == 1 and batch_size > 1: + coords = coords.expand(batch_size, -1, -1) + if coords.dim() != 3: + raise ValueError("coords must be shaped (N, 2) or (B, N, 2).") + + src = coords[:, senders, :] + dst = coords[:, receivers, :] + delta = src - dst + dist = torch.norm(delta, dim=-1, keepdim=True) + edge_feat = torch.cat([delta, dist], dim=-1) + return self._match_edge_dim(edge_feat)
+ + +
+[docs] + def _one_step( + self, + node_x: torch.Tensor, + batch: Dict[str, torch.Tensor], + ) -> torch.Tensor: + # node_x: (B, N, F) + if node_x.ndim != 3: + raise ValueError(f"Expected node_x with shape (B,N,F), got {tuple(node_x.shape)}") + if node_x.size(-1) < self.state_dim: + raise ValueError( + f"Input feature dim {node_x.size(-1)} is smaller than state_dim {self.state_dim}." + ) + + adj = batch.get("adj") + if adj is None: + raise ValueError("HydroGraphNet requires `adj` in the batch.") + adj = adj.to(device=node_x.device) + + senders, receivers = self._edge_index(adj, batch_size=node_x.size(0)) + + # ---- encoder ---- + node = self.node_encoder(node_x) + edge_in = self._prepare_edge_inputs( + batch=batch, + senders=senders, + receivers=receivers, + batch_size=node.size(0), + device=node.device, + dtype=node.dtype, + ) + edge = self.edge_encoder(edge_in) + + # ---- processor ---- + for gn in self.processor: + node, edge = gn(node, edge, senders, receivers) + + # ---- decoder: residual state update ---- + delta_state = self.decoder(node) # (B, N, state_dim) + prev_state = node_x[..., : self.state_dim] + next_state = prev_state + delta_state + if self.enforce_nonnegative: + next_state = next_state.clamp_min(0.0) + + # Return requested targets from the evolved state. + return next_state[..., : self.out_dim]
+ + +
+[docs] + def rollout(self, batch: Dict[str, torch.Tensor], predict_steps: int) -> torch.Tensor: + batch_roll = dict(batch) + batch_roll["predict_steps"] = int(predict_steps) + out = self.forward(batch_roll) + if out.ndim != 4: + raise RuntimeError("rollout expected stacked output with shape (B, S, N, out_dim).") + return out
+ + +
+[docs] + def forward(self, batch: Dict[str, torch.Tensor]) -> torch.Tensor: + # batch["x"]: (B, T, N, F) + x = batch["x"] + if x.ndim != 4: + raise ValueError( + f"HydroGraphNet expects x shaped (B, T, N, F), got {tuple(x.shape)}" + ) + + predict_steps = int(batch.get("predict_steps", self.rollout_steps)) + predict_steps = max(1, predict_steps) + + history = x + preds = [] + for _ in range(predict_steps): + node_x = history[:, -1] + y_next = self._one_step(node_x=node_x, batch=batch) # (B, N, out_dim) + preds.append(y_next) + + if predict_steps > 1: + next_frame = history[:, -1].clone() + next_frame[..., : self.out_dim] = y_next + history = torch.cat([history[:, 1:], next_frame.unsqueeze(1)], dim=1) + + if predict_steps == 1: + return preds[0] + return torch.stack(preds, dim=1)
+
+ + + +
+[docs] +class HydroGraphNetLoss(nn.Module): + """ + Supervised regression loss with optional continuity regularization. + """ + + def __init__(self, supervised_weight: float = 1.0, continuity_weight: float = 0.0): + super().__init__() + self.supervised_weight = float(supervised_weight) + self.continuity_weight = float(continuity_weight) + +
+[docs] + def forward( + self, + preds: torch.Tensor, + targets: torch.Tensor, + prev_state: Optional[torch.Tensor] = None, + cell_area: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, Dict[str, float]]: + supervised = F.mse_loss(preds, targets) + total = self.supervised_weight * supervised + metrics: Dict[str, float] = {"mse": float(supervised.detach().cpu())} + + if ( + self.continuity_weight > 0 + and prev_state is not None + and cell_area is not None + and preds.size(-1) >= 2 + and prev_state.size(-1) >= 2 + ): + # Approximate local continuity: depth-change * area ~= volume-change + depth_delta = preds[..., 0] - prev_state[..., 0] + volume_delta = preds[..., 1] - prev_state[..., 1] + area = cell_area.to(device=preds.device, dtype=preds.dtype) + if area.dim() == 1: + area = area.unsqueeze(0) + continuity = F.mse_loss(depth_delta * area, volume_delta) + total = total + self.continuity_weight * continuity + metrics["continuity"] = float(continuity.detach().cpu()) + + metrics["total"] = float(total.detach().cpu()) + return total, metrics
+
+ + + +
+[docs] +def hydrographnet_builder( + task: str, + node_in_dim: int, + edge_in_dim: int, + out_dim: int, + **kwargs, +) -> HydroGraphNet: + if task != "regression": + raise ValueError("HydroGraphNet only supports regression") + + return HydroGraphNet( + node_in_dim=node_in_dim, + edge_in_dim=edge_in_dim, + out_dim=out_dim, + hidden_dim=kwargs.get("hidden_dim", 64), + harmonics=kwargs.get("harmonics", 5), + num_gn_blocks=kwargs.get("num_gn_blocks", 5), + state_dim=kwargs.get("state_dim"), + rollout_steps=kwargs.get("rollout_steps", 1), + enforce_nonnegative=kwargs.get("enforce_nonnegative", False), + dropout=kwargs.get("dropout", 0.0), + )
+ + + +__all__ = ["HydroGraphNet", "HydroGraphNetLoss", "hydrographnet_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/neuralhydrology_ealstm.html b/docs/_modules/pyhazards/models/neuralhydrology_ealstm.html new file mode 100644 index 00000000..c516b841 --- /dev/null +++ b/docs/_modules/pyhazards/models/neuralhydrology_ealstm.html @@ -0,0 +1,447 @@ + + + + + + + + + + pyhazards.models.neuralhydrology_ealstm - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.neuralhydrology_ealstm

+from __future__ import annotations
+
+from typing import Any
+
+import torch
+import torch.nn as nn
+
+
+def _streamflow_inputs(batch: Any) -> torch.Tensor:
+    x = batch["x"] if isinstance(batch, dict) else batch
+    if x.ndim != 4:
+        raise ValueError("EA-LSTM expects inputs shaped (batch, history, nodes, features).")
+    return x
+
+
+
+[docs] +class NeuralHydrologyEALSTM(nn.Module): + """Entity-aware LSTM style streamflow baseline.""" + + def __init__( + self, + input_dim: int = 2, + hidden_dim: int = 64, + num_layers: int = 1, + out_dim: int = 1, + dropout: float = 0.1, + ): + super().__init__() + self.hidden_dim = int(hidden_dim) + self.out_dim = int(out_dim) + self.dynamic_encoder = nn.LSTM( + input_size=input_dim, + hidden_size=hidden_dim, + num_layers=num_layers, + batch_first=True, + dropout=dropout if num_layers > 1 else 0.0, + ) + self.static_gate = nn.Sequential( + nn.Linear(input_dim, hidden_dim), + nn.Sigmoid(), + ) + self.head = nn.Linear(hidden_dim, self.out_dim) + +
+[docs] + def forward(self, batch: Any) -> torch.Tensor: + x = _streamflow_inputs(batch) + bsz, history, nodes, features = x.shape + series = x.permute(0, 2, 1, 3).reshape(bsz * nodes, history, features) + encoded, _ = self.dynamic_encoder(series) + static_features = series.mean(dim=1) + gated = encoded[:, -1] * self.static_gate(static_features) + preds = self.head(gated) + return preds.view(bsz, nodes, self.out_dim)
+
+ + + +
+[docs] +def neuralhydrology_ealstm_builder( + task: str, + input_dim: int = 2, + hidden_dim: int = 64, + num_layers: int = 1, + out_dim: int = 1, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("NeuralHydrologyEALSTM only supports regression for streamflow forecasting.") + return NeuralHydrologyEALSTM( + input_dim=input_dim, + hidden_dim=hidden_dim, + num_layers=num_layers, + out_dim=out_dim, + dropout=dropout, + )
+ + + +__all__ = ["NeuralHydrologyEALSTM", "neuralhydrology_ealstm_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/neuralhydrology_lstm.html b/docs/_modules/pyhazards/models/neuralhydrology_lstm.html new file mode 100644 index 00000000..e8a97239 --- /dev/null +++ b/docs/_modules/pyhazards/models/neuralhydrology_lstm.html @@ -0,0 +1,440 @@ + + + + + + + + + + pyhazards.models.neuralhydrology_lstm - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.neuralhydrology_lstm

+from __future__ import annotations
+
+from typing import Any
+
+import torch
+import torch.nn as nn
+
+
+def _streamflow_inputs(batch: Any) -> torch.Tensor:
+    x = batch["x"] if isinstance(batch, dict) else batch
+    if x.ndim != 4:
+        raise ValueError("NeuralHydrology-style models expect inputs shaped (batch, history, nodes, features).")
+    return x
+
+
+
+[docs] +class NeuralHydrologyLSTM(nn.Module): + """Adapter-style LSTM streamflow baseline.""" + + def __init__( + self, + input_dim: int = 2, + hidden_dim: int = 64, + num_layers: int = 2, + out_dim: int = 1, + dropout: float = 0.1, + ): + super().__init__() + self.out_dim = int(out_dim) + self.encoder = nn.LSTM( + input_size=input_dim, + hidden_size=hidden_dim, + num_layers=num_layers, + batch_first=True, + dropout=dropout if num_layers > 1 else 0.0, + ) + self.head = nn.Linear(hidden_dim, self.out_dim) + +
+[docs] + def forward(self, batch: Any) -> torch.Tensor: + x = _streamflow_inputs(batch) + bsz, history, nodes, features = x.shape + series = x.permute(0, 2, 1, 3).reshape(bsz * nodes, history, features) + encoded, _ = self.encoder(series) + preds = self.head(encoded[:, -1]) + return preds.view(bsz, nodes, self.out_dim)
+
+ + + +
+[docs] +def neuralhydrology_lstm_builder( + task: str, + input_dim: int = 2, + hidden_dim: int = 64, + num_layers: int = 2, + out_dim: int = 1, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("NeuralHydrologyLSTM only supports regression for streamflow forecasting.") + return NeuralHydrologyLSTM( + input_dim=input_dim, + hidden_dim=hidden_dim, + num_layers=num_layers, + out_dim=out_dim, + dropout=dropout, + )
+ + + +__all__ = ["NeuralHydrologyLSTM", "neuralhydrology_lstm_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/pangu_tc.html b/docs/_modules/pyhazards/models/pangu_tc.html new file mode 100644 index 00000000..1ce41c31 --- /dev/null +++ b/docs/_modules/pyhazards/models/pangu_tc.html @@ -0,0 +1,435 @@ + + + + + + + + + + pyhazards.models.pangu_tc - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.pangu_tc

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class PanguTC(nn.Module): + """Experimental wrapper-style Pangu-Weather storm adapter.""" + + def __init__( + self, + input_dim: int = 8, + hidden_dim: int = 96, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + ): + super().__init__() + self.horizon = int(horizon) + self.output_dim = int(output_dim) + self.temporal = nn.Sequential( + nn.Conv1d(input_dim, hidden_dim, kernel_size=5, padding=2), + nn.GELU(), + nn.Conv1d(hidden_dim, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + ) + self.head = nn.Sequential( + nn.Dropout(dropout) if dropout > 0 else nn.Identity(), + nn.Linear(hidden_dim, hidden_dim), + nn.GELU(), + nn.Linear(hidden_dim, self.horizon * self.output_dim), + ) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("PanguTC expects inputs shaped (batch, history, features).") + encoded = self.temporal(x.transpose(1, 2)).mean(dim=-1) + preds = self.head(encoded) + return preds.view(x.size(0), self.horizon, self.output_dim)
+
+ + + +
+[docs] +def pangu_tc_builder( + task: str, + input_dim: int = 8, + hidden_dim: int = 96, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("PanguTC only supports regression for track/intensity forecasting.") + return PanguTC( + input_dim=input_dim, + hidden_dim=hidden_dim, + horizon=horizon, + output_dim=output_dim, + dropout=dropout, + )
+ + + +__all__ = ["PanguTC", "pangu_tc_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/phasenet.html b/docs/_modules/pyhazards/models/phasenet.html new file mode 100644 index 00000000..620e9e92 --- /dev/null +++ b/docs/_modules/pyhazards/models/phasenet.html @@ -0,0 +1,418 @@ + + + + + + + + + + pyhazards.models.phasenet - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.phasenet

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class PhaseNet(nn.Module): + """Lightweight phase-picking network for synthetic waveform smoke runs.""" + + def __init__(self, in_channels: int = 3, hidden_dim: int = 32): + super().__init__() + self.encoder = nn.Sequential( + nn.Conv1d(in_channels, hidden_dim, kernel_size=9, padding=4), + nn.ReLU(), + nn.Conv1d(hidden_dim, hidden_dim, kernel_size=7, padding=3), + nn.ReLU(), + nn.Conv1d(hidden_dim, hidden_dim, kernel_size=5, padding=2), + nn.ReLU(), + ) + self.head = nn.Sequential( + nn.AdaptiveAvgPool1d(1), + nn.Flatten(), + nn.Linear(hidden_dim, hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, 2), + ) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("PhaseNet expects inputs shaped (batch, channels, length).") + return self.head(self.encoder(x))
+
+ + + +
+[docs] +def phasenet_builder( + task: str, + in_channels: int = 3, + hidden_dim: int = 32, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("PhaseNet only supports regression-style phase picking outputs.") + return PhaseNet(in_channels=in_channels, hidden_dim=hidden_dim)
+ + + +__all__ = ["PhaseNet", "phasenet_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/registry.html b/docs/_modules/pyhazards/models/registry.html new file mode 100644 index 00000000..46ddd76f --- /dev/null +++ b/docs/_modules/pyhazards/models/registry.html @@ -0,0 +1,394 @@ + + + + + + + + + + pyhazards.models.registry - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.registry

+from typing import Any, Callable, Dict, Optional
+
+import torch.nn as nn
+
+_MODEL_REGISTRY: Dict[str, Dict[str, Any]] = {}
+
+
+
+[docs] +def register_model(name: str, builder: Callable[..., nn.Module], defaults: Optional[Dict[str, Any]] = None) -> None: + if name in _MODEL_REGISTRY: + raise ValueError(f"Model '{name}' already registered.") + _MODEL_REGISTRY[name] = {"builder": builder, "defaults": defaults or {}}
+ + + +
+[docs] +def available_models(): + return sorted(_MODEL_REGISTRY.keys())
+ + + +
+[docs] +def get_model_config(name: str) -> Optional[Dict[str, Any]]: + return _MODEL_REGISTRY.get(name)
+ + + +__all__ = ["register_model", "available_models", "get_model_config"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/saf_net.html b/docs/_modules/pyhazards/models/saf_net.html new file mode 100644 index 00000000..e2616a70 --- /dev/null +++ b/docs/_modules/pyhazards/models/saf_net.html @@ -0,0 +1,430 @@ + + + + + + + + + + pyhazards.models.saf_net - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.saf_net

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class SAFNet(nn.Module): + """Spatiotemporal intensity-focused storm baseline.""" + + def __init__( + self, + input_dim: int = 8, + hidden_dim: int = 64, + horizon: int = 5, + dropout: float = 0.1, + ): + super().__init__() + self.horizon = int(horizon) + self.temporal = nn.Sequential( + nn.Conv1d(input_dim, hidden_dim, kernel_size=3, padding=1), + nn.ReLU(), + nn.Conv1d(hidden_dim, hidden_dim, kernel_size=3, padding=1), + nn.ReLU(), + ) + self.dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity() + self.track_head = nn.Linear(hidden_dim, 2 * self.horizon) + self.intensity_head = nn.Linear(hidden_dim, self.horizon) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("SAFNet expects inputs shaped (batch, history, features).") + encoded = self.temporal(x.transpose(1, 2)).mean(dim=-1) + encoded = self.dropout(encoded) + track = self.track_head(encoded).view(x.size(0), self.horizon, 2) + intensity = self.intensity_head(encoded).view(x.size(0), self.horizon, 1) + return torch.cat([track, intensity], dim=-1)
+
+ + + +
+[docs] +def saf_net_builder( + task: str, + input_dim: int = 8, + hidden_dim: int = 64, + horizon: int = 5, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("SAFNet only supports regression for track/intensity forecasting.") + return SAFNet( + input_dim=input_dim, + hidden_dim=hidden_dim, + horizon=horizon, + dropout=dropout, + )
+ + + +__all__ = ["SAFNet", "saf_net_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/tcif_fusion.html b/docs/_modules/pyhazards/models/tcif_fusion.html new file mode 100644 index 00000000..e8aed6a4 --- /dev/null +++ b/docs/_modules/pyhazards/models/tcif_fusion.html @@ -0,0 +1,440 @@ + + + + + + + + + + pyhazards.models.tcif_fusion - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.tcif_fusion

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class TCIFFusion(nn.Module): + """Knowledge-guided fusion baseline for tropical cyclone forecasting.""" + + def __init__( + self, + input_dim: int = 8, + hidden_dim: int = 64, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + ): + super().__init__() + self.horizon = int(horizon) + self.output_dim = int(output_dim) + left_dim = max(1, input_dim // 2) + right_dim = input_dim - left_dim + self.left_dim = left_dim + self.left_encoder = nn.GRU(left_dim, hidden_dim, batch_first=True) + self.right_encoder = nn.GRU(max(1, right_dim), hidden_dim, batch_first=True) + self.fusion = nn.Sequential( + nn.Linear(2 * hidden_dim, hidden_dim), + nn.ReLU(), + nn.Dropout(dropout) if dropout > 0 else nn.Identity(), + nn.Linear(hidden_dim, self.horizon * self.output_dim), + ) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("TCIFFusion expects inputs shaped (batch, history, features).") + left = x[:, :, : self.left_dim] + right = x[:, :, self.left_dim :] + if right.size(-1) == 0: + right = x[:, :, :1] + _, left_hidden = self.left_encoder(left) + _, right_hidden = self.right_encoder(right) + fused = torch.cat([left_hidden[-1], right_hidden[-1]], dim=-1) + preds = self.fusion(fused) + return preds.view(x.size(0), self.horizon, self.output_dim)
+
+ + + +
+[docs] +def tcif_fusion_builder( + task: str, + input_dim: int = 8, + hidden_dim: int = 64, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("TCIFFusion only supports regression for track/intensity forecasting.") + return TCIFFusion( + input_dim=input_dim, + hidden_dim=hidden_dim, + horizon=horizon, + output_dim=output_dim, + dropout=dropout, + )
+ + + +__all__ = ["TCIFFusion", "tcif_fusion_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/tropicalcyclone_mlp.html b/docs/_modules/pyhazards/models/tropicalcyclone_mlp.html new file mode 100644 index 00000000..91c3f15c --- /dev/null +++ b/docs/_modules/pyhazards/models/tropicalcyclone_mlp.html @@ -0,0 +1,436 @@ + + + + + + + + + + pyhazards.models.tropicalcyclone_mlp - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.tropicalcyclone_mlp

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class TropicalCycloneMLP(nn.Module): + """Compact MLP baseline for storm track and intensity forecasting.""" + + def __init__( + self, + input_dim: int = 8, + history: int = 6, + hidden_dim: int = 64, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + ): + super().__init__() + self.history = int(history) + self.horizon = int(horizon) + self.output_dim = int(output_dim) + self.net = nn.Sequential( + nn.Linear(self.history * input_dim, hidden_dim), + nn.ReLU(), + nn.Dropout(dropout) if dropout > 0 else nn.Identity(), + nn.Linear(hidden_dim, hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, self.horizon * self.output_dim), + ) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("TropicalCycloneMLP expects inputs shaped (batch, history, features).") + if x.size(1) != self.history: + raise ValueError(f"TropicalCycloneMLP expected history={self.history}, got {x.size(1)}.") + preds = self.net(x.reshape(x.size(0), -1)) + return preds.view(x.size(0), self.horizon, self.output_dim)
+
+ + + +
+[docs] +def tropicalcyclone_mlp_builder( + task: str, + input_dim: int = 8, + history: int = 6, + hidden_dim: int = 64, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("TropicalCycloneMLP only supports regression for track/intensity forecasting.") + return TropicalCycloneMLP( + input_dim=input_dim, + history=history, + hidden_dim=hidden_dim, + horizon=horizon, + output_dim=output_dim, + dropout=dropout, + )
+ + + +__all__ = ["TropicalCycloneMLP", "tropicalcyclone_mlp_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/tropicyclonenet.html b/docs/_modules/pyhazards/models/tropicyclonenet.html new file mode 100644 index 00000000..5224d00a --- /dev/null +++ b/docs/_modules/pyhazards/models/tropicyclonenet.html @@ -0,0 +1,442 @@ + + + + + + + + + + pyhazards.models.tropicyclonenet - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.tropicyclonenet

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class TropiCycloneNet(nn.Module): + """GRU + attention baseline for all-basin tropical cyclone forecasting.""" + + def __init__( + self, + input_dim: int = 8, + hidden_dim: int = 64, + horizon: int = 5, + output_dim: int = 3, + num_layers: int = 2, + dropout: float = 0.1, + ): + super().__init__() + self.horizon = int(horizon) + self.output_dim = int(output_dim) + self.encoder = nn.GRU( + input_size=input_dim, + hidden_size=hidden_dim, + num_layers=num_layers, + batch_first=True, + dropout=dropout if num_layers > 1 else 0.0, + bidirectional=True, + ) + self.attention = nn.Linear(2 * hidden_dim, 1) + self.head = nn.Sequential( + nn.Linear(2 * hidden_dim, hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, self.horizon * self.output_dim), + ) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("TropiCycloneNet expects inputs shaped (batch, history, features).") + encoded, _ = self.encoder(x) + weights = torch.softmax(self.attention(encoded), dim=1) + pooled = torch.sum(weights * encoded, dim=1) + preds = self.head(pooled) + return preds.view(x.size(0), self.horizon, self.output_dim)
+
+ + + +
+[docs] +def tropicyclonenet_builder( + task: str, + input_dim: int = 8, + hidden_dim: int = 64, + horizon: int = 5, + output_dim: int = 3, + num_layers: int = 2, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("TropiCycloneNet only supports regression for track/intensity forecasting.") + return TropiCycloneNet( + input_dim=input_dim, + hidden_dim=hidden_dim, + horizon=horizon, + output_dim=output_dim, + num_layers=num_layers, + dropout=dropout, + )
+ + + +__all__ = ["TropiCycloneNet", "tropicyclonenet_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/urbanfloodcast.html b/docs/_modules/pyhazards/models/urbanfloodcast.html new file mode 100644 index 00000000..be04c035 --- /dev/null +++ b/docs/_modules/pyhazards/models/urbanfloodcast.html @@ -0,0 +1,434 @@ + + + + + + + + + + pyhazards.models.urbanfloodcast - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.urbanfloodcast

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
+[docs] +class UrbanFloodCast(nn.Module): + """U-Net style urban inundation baseline.""" + + def __init__( + self, + in_channels: int = 3, + history: int = 4, + base_channels: int = 32, + out_channels: int = 1, + ): + super().__init__() + self.history = int(history) + merged_channels = in_channels * history + self.encoder = nn.Sequential( + nn.Conv2d(merged_channels, base_channels, kernel_size=3, padding=1), + nn.ReLU(), + nn.Conv2d(base_channels, base_channels, kernel_size=3, padding=1), + nn.ReLU(), + ) + self.decoder = nn.Sequential( + nn.Conv2d(base_channels, base_channels, kernel_size=3, padding=1), + nn.ReLU(), + nn.Conv2d(base_channels, out_channels, kernel_size=1), + ) + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 5: + raise ValueError("UrbanFloodCast expects inputs shaped (batch, history, channels, height, width).") + if x.size(1) != self.history: + raise ValueError(f"UrbanFloodCast expected history={self.history}, got {x.size(1)}.") + bsz, history, channels, height, width = x.shape + merged = x.reshape(bsz, history * channels, height, width) + features = self.encoder(merged) + return self.decoder(features)
+
+ + + +
+[docs] +def urbanfloodcast_builder( + task: str, + in_channels: int = 3, + history: int = 4, + base_channels: int = 32, + out_channels: int = 1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() not in {"regression", "segmentation"}: + raise ValueError("UrbanFloodCast only supports regression or segmentation-style inundation outputs.") + return UrbanFloodCast( + in_channels=in_channels, + history=history, + base_channels=base_channels, + out_channels=out_channels, + )
+ + + +__all__ = ["UrbanFloodCast", "urbanfloodcast_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/wavecastnet.html b/docs/_modules/pyhazards/models/wavecastnet.html new file mode 100644 index 00000000..dca066ab --- /dev/null +++ b/docs/_modules/pyhazards/models/wavecastnet.html @@ -0,0 +1,754 @@ + + + + + + + + + + pyhazards.models.wavecastnet - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.wavecastnet

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+import torch.nn.init as init
+
+
+
+[docs] +class ConvLEMCell(nn.Module): + """ + Convolutional Long Expressive Memory (ConvLEM) cell used by WaveCastNet. + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int = 3, + dt: float = 1.0, + activation: str = "tanh", + use_reset_gate: bool = False, + ): + super().__init__() + + if activation == "tanh": + self.activation = torch.tanh + elif activation == "relu": + self.activation = torch.relu + else: + raise ValueError( + "Unsupported activation: {activation}. Use 'tanh' or 'relu'.".format( + activation=activation + ) + ) + + self.dt = float(dt) + self.use_reset_gate = bool(use_reset_gate) + self.out_channels = int(out_channels) + + padding = (kernel_size - 1) // 2 + if self.use_reset_gate: + self.conv_x = nn.Conv2d( + in_channels, + 5 * out_channels, + kernel_size, + padding=padding, + ) + self.conv_h = nn.Conv2d( + out_channels, + 4 * out_channels, + kernel_size, + padding=padding, + ) + else: + self.conv_x = nn.Conv2d( + in_channels, + 4 * out_channels, + kernel_size, + padding=padding, + ) + self.conv_h = nn.Conv2d( + out_channels, + 3 * out_channels, + kernel_size, + padding=padding, + ) + + self.conv_c = nn.Conv2d(out_channels, out_channels, kernel_size, padding=padding) + self.W_c1 = nn.Parameter(torch.empty(out_channels, 1, 1)) + self.W_c2 = nn.Parameter(torch.empty(out_channels, 1, 1)) + if self.use_reset_gate: + self.W_c4 = nn.Parameter(torch.empty(out_channels, 1, 1)) + + self.reset_parameters() + +
+[docs] + def reset_parameters(self) -> None: + for name, param in self.named_parameters(): + if "W_c" in name: + nn.init.constant_(param, 0.0) + elif param.ndim > 1: + init.xavier_uniform_(param) + else: + nn.init.constant_(param, 0.0)
+ + +
+[docs] + def forward( + self, + x: torch.Tensor, + h: torch.Tensor, + c: torch.Tensor, + ) -> tuple[torch.Tensor, torch.Tensor]: + if x.ndim != 4 or h.ndim != 4 or c.ndim != 4: + raise ValueError("ConvLEMCell expects x, h, c shaped (B, C, H, W).") + + conv_x_out = self.conv_x(x) + conv_h_out = self.conv_h(h) + + if self.use_reset_gate: + i_dt1, i_dt2, g_dx2, i_c, i_h = torch.chunk(conv_x_out, chunks=5, dim=1) + h_dt1, h_dt2, h_h, g_dh2 = torch.chunk(conv_h_out, chunks=4, dim=1) + + ms_dt = self.dt * torch.sigmoid(i_dt2 + h_dt2 + self.W_c2 * c) + c = (1.0 - ms_dt) * c + ms_dt * self.activation(i_h + h_h) + + gate2 = self.dt * torch.sigmoid(g_dx2 + g_dh2 + self.W_c4 * c) + conv_c_out = gate2 * self.conv_c(c) + + ms_dt_bar = self.dt * torch.sigmoid(i_dt1 + h_dt1 + self.W_c1 * c) + h = (1.0 - ms_dt_bar) * h + ms_dt_bar * self.activation(conv_c_out + i_c) + else: + i_dt1, i_dt2, i_c, i_h = torch.chunk(conv_x_out, chunks=4, dim=1) + h_dt1, h_dt2, h_h = torch.chunk(conv_h_out, chunks=3, dim=1) + + ms_dt = self.dt * torch.sigmoid(i_dt2 + h_dt2 + self.W_c2 * c) + c = (1.0 - ms_dt) * c + ms_dt * self.activation(i_h + h_h) + + conv_c_out = self.conv_c(c) + ms_dt_bar = self.dt * torch.sigmoid(i_dt1 + h_dt1 + self.W_c1 * c) + h = (1.0 - ms_dt_bar) * h + ms_dt_bar * self.activation(conv_c_out + i_c) + + return h, c
+
+ + + +
+[docs] +class WaveCastNet(nn.Module): + """ + Sequence-to-sequence wavefield forecasting model based on ConvLEM cells. + + Input shape: (B, C, T_in, H, W) + Output shape: (B, C, T_out, H, W) + """ + + def __init__( + self, + in_channels: int, + height: int, + width: int, + temporal_in: int, + temporal_out: int, + hidden_dim: int = 144, + num_layers: int = 2, + kernel_size: int = 3, + dt: float = 1.0, + activation: str = "tanh", + dropout: float = 0.1, + ): + super().__init__() + + self.in_channels = int(in_channels) + self.height = int(height) + self.width = int(width) + self.temporal_in = int(temporal_in) + self.temporal_out = int(temporal_out) + self.hidden_dim = int(hidden_dim) + self.num_layers = int(num_layers) + + padding = (kernel_size - 1) // 2 + proj_dim = max(1, self.hidden_dim // 2) + + self.input_embed = nn.Sequential( + nn.Conv2d(self.in_channels, self.hidden_dim, kernel_size, padding=padding), + nn.BatchNorm2d(self.hidden_dim), + nn.ReLU(), + nn.Dropout2d(dropout), + ) + + self.encoder_layers = nn.ModuleList( + [ + ConvLEMCell( + in_channels=self.hidden_dim, + out_channels=self.hidden_dim, + kernel_size=kernel_size, + dt=dt, + activation=activation, + use_reset_gate=False, + ) + for _ in range(self.num_layers) + ] + ) + self.decoder_layers = nn.ModuleList( + [ + ConvLEMCell( + in_channels=self.hidden_dim, + out_channels=self.hidden_dim, + kernel_size=kernel_size, + dt=dt, + activation=activation, + use_reset_gate=False, + ) + for _ in range(self.num_layers) + ] + ) + + self.output_proj = nn.Sequential( + nn.Conv2d(self.hidden_dim, proj_dim, kernel_size, padding=padding), + nn.ReLU(), + nn.Dropout2d(dropout), + nn.Conv2d(proj_dim, self.in_channels, kernel_size, padding=padding), + ) + self.dropout = nn.Dropout2d(dropout) + +
+[docs] + def _init_states(self, x: torch.Tensor) -> tuple[list[torch.Tensor], list[torch.Tensor]]: + hidden = [ + x.new_zeros(x.size(0), self.hidden_dim, self.height, self.width) + for _ in range(self.num_layers) + ] + memory = [ + x.new_zeros(x.size(0), self.hidden_dim, self.height, self.width) + for _ in range(self.num_layers) + ] + return hidden, memory
+ + +
+[docs] + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 5: + raise ValueError( + "WaveCastNet expects x shaped (B, C, T, H, W), got {shape}".format( + shape=tuple(x.shape) + ) + ) + + batch_size, channels, temporal_in, height, width = x.shape + if channels != self.in_channels: + raise ValueError( + "Expected in_channels={expected}, got {actual}".format( + expected=self.in_channels, + actual=channels, + ) + ) + if temporal_in != self.temporal_in: + raise ValueError( + "Expected temporal_in={expected}, got {actual}".format( + expected=self.temporal_in, + actual=temporal_in, + ) + ) + if height != self.height or width != self.width: + raise ValueError( + "Expected spatial size ({h}, {w}), got ({actual_h}, {actual_w})".format( + h=self.height, + w=self.width, + actual_h=height, + actual_w=width, + ) + ) + + encoder_h, encoder_c = self._init_states(x) + for t in range(self.temporal_in): + encoded = self.input_embed(x[:, :, t, :, :]) + for i, layer in enumerate(self.encoder_layers): + layer_input = encoded if i == 0 else encoder_h[i - 1] + encoder_h[i], encoder_c[i] = layer(layer_input, encoder_h[i], encoder_c[i]) + + decoder_h = [state.clone() for state in encoder_h] + decoder_c = [state.clone() for state in encoder_c] + + outputs = [] + for t in range(self.temporal_out): + decoder_input = encoder_h[-1] if t == 0 else decoder_h[-1] + for i, layer in enumerate(self.decoder_layers): + layer_input = decoder_input if i == 0 else decoder_h[i - 1] + decoder_h[i], decoder_c[i] = layer(layer_input, decoder_h[i], decoder_c[i]) + output_t = self.output_proj(self.dropout(decoder_h[-1])) + outputs.append(output_t) + + if len(outputs) != self.temporal_out: + raise RuntimeError( + "Decoder generated {actual} steps, expected {expected}".format( + actual=len(outputs), + expected=self.temporal_out, + ) + ) + return torch.stack(outputs, dim=2)
+
+ + + +
def wavecastnet_builder(
    task: str,
    in_channels: int,
    height: int,
    width: int,
    temporal_in: int,
    temporal_out: int,
    **kwargs,
) -> WaveCastNet:
    """Registry builder for :class:`WaveCastNet`; only regression is supported."""
    if task.lower() != "regression":
        raise ValueError("WaveCastNet only supports regression tasks.")

    # Optional hyperparameters with their paper defaults.
    defaults = {
        "hidden_dim": 144,
        "num_layers": 2,
        "kernel_size": 3,
        "dt": 1.0,
        "activation": "tanh",
        "dropout": 0.1,
    }
    options = {name: kwargs.get(name, value) for name, value in defaults.items()}
    return WaveCastNet(
        in_channels=in_channels,
        height=height,
        width=width,
        temporal_in=temporal_in,
        temporal_out=temporal_out,
        **options,
    )
+ + + +
class WaveCastNetLoss(nn.Module):
    """
    Huber loss used in the WaveCastNet paper.

    Quadratic within ``delta`` of the target and linear beyond it, averaged
    over every element.
    """

    def __init__(self, delta: float = 1.0):
        super().__init__()
        self.delta = float(delta)

    def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """Return the scalar mean Huber loss between ``pred`` and ``target``."""
        residual = pred - target
        magnitude = residual.abs()
        inside = magnitude <= self.delta
        quadratic_branch = 0.5 * residual.square()
        linear_branch = self.delta * magnitude - 0.5 * self.delta**2
        return torch.where(inside, quadratic_branch, linear_branch).mean()
+
+ + + +
class WavefieldMetrics:
    """
    ACC and RFNE metrics reported in the WaveCastNet paper.
    """

    @staticmethod
    def accuracy(pred: torch.Tensor, target: torch.Tensor) -> float:
        """Mean per-sample cosine similarity between flattened fields (ACC)."""
        p = pred.reshape(pred.size(0), -1)
        t = target.reshape(target.size(0), -1)
        dots = (p * t).sum(dim=1)
        # Clamp the norm product to avoid division by zero on all-zero fields.
        norms = (p.square().sum(dim=1).sqrt() * t.square().sum(dim=1).sqrt()).clamp(min=1e-8)
        return float((dots / norms).mean().detach().cpu())

    @staticmethod
    def rfne(pred: torch.Tensor, target: torch.Tensor) -> float:
        """Mean per-sample relative (Frobenius) norm error."""
        residual = (pred - target).reshape(pred.size(0), -1)
        denom = target.reshape(target.size(0), -1).square().sum(dim=1).sqrt().clamp(min=1e-8)
        rel = residual.square().sum(dim=1).sqrt() / denom
        return float(rel.mean().detach().cpu())

    @staticmethod
    def compute_all(pred: torch.Tensor, target: torch.Tensor) -> dict[str, float]:
        """Return both metrics keyed by their paper names."""
        return {
            "ACC": WavefieldMetrics.accuracy(pred, target),
            "RFNE": WavefieldMetrics.rfne(pred, target),
        }
+
+ + + +__all__ = [ + "ConvLEMCell", + "WaveCastNet", + "WaveCastNetLoss", + "WavefieldMetrics", + "wavecastnet_builder", +] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/wildfire_aspp.html b/docs/_modules/pyhazards/models/wildfire_aspp.html new file mode 100644 index 00000000..a4882186 --- /dev/null +++ b/docs/_modules/pyhazards/models/wildfire_aspp.html @@ -0,0 +1,431 @@ + + + + + + + + + + pyhazards.models.wildfire_aspp - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.wildfire_aspp

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+from .cnn_aspp import WildfireCNNASPP, cnn_aspp_builder
+
+
+
class WildfireASPP(WildfireCNNASPP):
    """
    Backward-compatible name for the CNN + ASPP wildfire model.

    Behaves exactly like :class:`WildfireCNNASPP`; kept so existing code that
    references ``WildfireASPP`` continues to work.
    """
+ + + +
def wildfire_aspp_builder(task: str, **kwargs) -> nn.Module:
    """Alias builder that delegates straight to :func:`cnn_aspp_builder`."""
    options = dict(kwargs)
    return cnn_aspp_builder(task=task, **options)
+ + + +
class TverskyLoss(nn.Module):
    """
    Tversky loss for binary segmentation.

    ``alpha`` weights false positives and ``beta`` weights false negatives in
    the denominator; with ``alpha == beta == 0.5`` the index reduces to the
    Dice coefficient.
    """

    def __init__(
        self,
        alpha: float = 0.5,
        beta: float = 0.5,
        smooth: float = 1e-6,
        from_logits: bool = True,
    ):
        super().__init__()
        self.alpha = float(alpha)
        self.beta = float(beta)
        self.smooth = float(smooth)
        self.from_logits = bool(from_logits)

    def forward(self, logits: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
        """Return ``1 - mean Tversky index`` over the batch."""
        probs = torch.sigmoid(logits) if self.from_logits else logits

        flat_probs = probs.view(probs.size(0), -1)
        flat_targets = targets.float().view(targets.size(0), -1)

        true_pos = (flat_probs * flat_targets).sum(dim=1)
        false_pos = (flat_probs * (1 - flat_targets)).sum(dim=1)
        false_neg = ((1 - flat_probs) * flat_targets).sum(dim=1)

        index = (true_pos + self.smooth) / (
            true_pos + self.alpha * false_pos + self.beta * false_neg + self.smooth
        )
        return (1.0 - index).mean()
+
+ +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/wildfire_forecasting.html b/docs/_modules/pyhazards/models/wildfire_forecasting.html new file mode 100644 index 00000000..b71c9251 --- /dev/null +++ b/docs/_modules/pyhazards/models/wildfire_forecasting.html @@ -0,0 +1,463 @@ + + + + + + + + + + pyhazards.models.wildfire_forecasting - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.wildfire_forecasting

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
class WildfireForecasting(nn.Module):
    """Sequence forecaster for weekly wildfire size-group activity.

    Encodes a (batch, lookback, input_dim) sequence with a GRU, pools the
    timesteps with learned attention weights, and maps the pooled vector to
    ``output_dim`` scores with a small MLP head.
    """

    def __init__(
        self,
        input_dim: int = 7,
        hidden_dim: int = 64,
        output_dim: int = 5,
        lookback: int = 12,
        num_layers: int = 2,
        dropout: float = 0.1,
    ):
        super().__init__()
        # Fail fast on invalid hyperparameters.
        if input_dim <= 0:
            raise ValueError(f"input_dim must be positive, got {input_dim}")
        if hidden_dim <= 0:
            raise ValueError(f"hidden_dim must be positive, got {hidden_dim}")
        if output_dim <= 0:
            raise ValueError(f"output_dim must be positive, got {output_dim}")
        if lookback <= 0:
            raise ValueError(f"lookback must be positive, got {lookback}")
        if num_layers <= 0:
            raise ValueError(f"num_layers must be positive, got {num_layers}")
        if not 0.0 <= dropout < 1.0:
            raise ValueError(f"dropout must be in [0, 1), got {dropout}")

        self.lookback = int(lookback)
        self.encoder = nn.GRU(
            input_size=input_dim,
            hidden_size=hidden_dim,
            num_layers=num_layers,
            batch_first=True,
            # nn.GRU only applies dropout between stacked layers.
            dropout=dropout if num_layers > 1 else 0.0,
        )
        # Produces one scalar score per timestep; softmaxed over time in forward().
        self.attention = nn.Linear(hidden_dim, 1)
        self.head = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, output_dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return (batch, output_dim) scores for a (batch, lookback, input_dim) input.

        Raises:
            ValueError: If the input is not rank-3 or its sequence length
                differs from the configured ``lookback``.
        """
        if x.ndim != 3:
            raise ValueError(
                "WildfireForecasting expects input shape (batch, lookback, features), "
                f"got {tuple(x.shape)}."
            )
        if x.size(1) != self.lookback:
            raise ValueError(
                f"WildfireForecasting expected lookback={self.lookback}, got sequence length {x.size(1)}."
            )
        encoded, _ = self.encoder(x)
        # Attention pooling over the time axis.
        weights = torch.softmax(self.attention(encoded), dim=1)
        pooled = torch.sum(weights * encoded, dim=1)
        return self.head(pooled)
+
+ + + +
def wildfire_forecasting_builder(
    task: str,
    input_dim: int = 7,
    hidden_dim: int = 64,
    output_dim: int = 5,
    lookback: int = 12,
    num_layers: int = 2,
    dropout: float = 0.1,
    **kwargs,
) -> nn.Module:
    """Registry builder for WildfireForecasting (forecasting/regression only)."""
    _ = kwargs  # extra registry options are accepted but ignored
    if task.lower() not in {"forecasting", "regression"}:
        raise ValueError(
            "wildfire_forecasting supports task='forecasting' or 'regression', "
            f"got {task!r}."
        )
    model_kwargs = dict(
        input_dim=input_dim,
        hidden_dim=hidden_dim,
        output_dim=output_dim,
        lookback=lookback,
        num_layers=num_layers,
        dropout=dropout,
    )
    return WildfireForecasting(**model_kwargs)
+ + + +__all__ = ["WildfireForecasting", "wildfire_forecasting_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/wildfire_fpa.html b/docs/_modules/pyhazards/models/wildfire_fpa.html new file mode 100644 index 00000000..6d55e8c8 --- /dev/null +++ b/docs/_modules/pyhazards/models/wildfire_fpa.html @@ -0,0 +1,470 @@ + + + + + + + + + + pyhazards.models.wildfire_fpa - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.wildfire_fpa

+from __future__ import annotations
+
+from typing import Callable, Union
+
+import torch
+import torch.nn as nn
+
+from .wildfire_fpa_dnn import WildfireFPADNN
+from .wildfire_fpa_forecast import WildfireFPAForecast
+
+
+
class WildfireFPA(nn.Module):
    """Paper-facing wrapper for the two-stage FPA-FOD wildfire framework.

    Wraps one stage component (classification DNN or forecasting
    LSTM + autoencoder — see ``wildfire_fpa_builder``) behind a uniform
    module interface.
    """

    def __init__(self, stage: str, component: nn.Module):
        super().__init__()
        normalized_stage = stage.lower()
        if normalized_stage not in {"classification", "forecasting", "regression"}:
            raise ValueError(f"Unsupported wildfire_fpa stage: {stage!r}")

        # "regression" is stored as an alias for the forecasting stage.
        self.stage = "forecasting" if normalized_stage == "regression" else normalized_stage
        self.component = component

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Delegate straight to the wrapped stage component."""
        return self.component(x)

    def forward_with_reconstruction(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Return (forecast, reconstruction); only the forecasting component provides it."""
        if not hasattr(self.component, "forward_with_reconstruction"):
            raise AttributeError(
                "forward_with_reconstruction is only available for the forecasting stage "
                "of wildfire_fpa."
            )
        return self.component.forward_with_reconstruction(x)
+ + + +
def wildfire_fpa_builder(
    task: str,
    in_dim: int | None = None,
    input_dim: int | None = None,
    out_dim: int | None = None,
    output_dim: int | None = None,
    depth: int = 2,
    hidden_dim: int = 64,
    activation: Union[str, Callable[[], nn.Module]] = "relu",
    dropout: float | None = None,
    latent_dim: int = 32,
    num_layers: int = 1,
    ae_hidden_dim: int | None = None,
    ae_num_layers: int | None = None,
    lookback: int = 50,
    **kwargs,
) -> nn.Module:
    """Build a :class:`WildfireFPA` wrapper for the requested stage.

    ``in_dim``/``input_dim`` and ``out_dim``/``output_dim`` are accepted as
    aliases; classification prefers ``in_dim``/``out_dim`` while forecasting
    prefers ``input_dim``/``output_dim``. ``dropout=None`` selects a
    per-stage default (0.0 for the DNN, 0.2 for the forecaster).

    Raises:
        TypeError: If the required feature dimension is missing.
        ValueError: If ``task`` names an unsupported stage.
    """
    _ = kwargs
    normalized_task = task.lower()

    if normalized_task == "classification":
        # Classification stage: prefer in_dim, fall back to input_dim.
        feature_dim = in_dim if in_dim is not None else input_dim
        if feature_dim is None:
            raise TypeError("wildfire_fpa classification requires in_dim (or input_dim).")

        component = WildfireFPADNN(
            in_dim=feature_dim,
            out_dim=out_dim if out_dim is not None else (output_dim if output_dim is not None else 5),
            depth=depth,
            hidden_dim=hidden_dim,
            activation=activation,
            dropout=0.0 if dropout is None else dropout,
        )
        return WildfireFPA(stage="classification", component=component)

    if normalized_task in {"forecasting", "regression"}:
        # Forecasting stage: prefer input_dim, fall back to in_dim.
        sequence_dim = input_dim if input_dim is not None else in_dim
        if sequence_dim is None:
            raise TypeError("wildfire_fpa forecasting requires input_dim (or in_dim).")

        component = WildfireFPAForecast(
            input_dim=sequence_dim,
            hidden_dim=hidden_dim,
            output_dim=output_dim if output_dim is not None else (out_dim if out_dim is not None else 5),
            latent_dim=latent_dim,
            num_layers=num_layers,
            ae_hidden_dim=ae_hidden_dim,
            ae_num_layers=ae_num_layers,
            dropout=0.2 if dropout is None else dropout,
            lookback=lookback,
        )
        return WildfireFPA(stage=normalized_task, component=component)

    raise ValueError(
        "wildfire_fpa supports task='classification' for the DNN stage and "
        "task in {'forecasting', 'regression'} for the LSTM + autoencoder stage."
    )
+ + + +__all__ = ["WildfireFPA", "wildfire_fpa_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/wildfire_mamba.html b/docs/_modules/pyhazards/models/wildfire_mamba.html new file mode 100644 index 00000000..3f9835a5 --- /dev/null +++ b/docs/_modules/pyhazards/models/wildfire_mamba.html @@ -0,0 +1,617 @@ + + + + + + + + + + pyhazards.models.wildfire_mamba - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.wildfire_mamba

+from __future__ import annotations
+
+from typing import Optional
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+def _normalize_adjacency(adj: torch.Tensor) -> torch.Tensor:
+    """
+    Row-normalize an adjacency matrix and ensure self-loops.
+    Accepts (N, N) or (B, N, N) and returns the same rank.
+    """
+    if adj.dim() == 2:
+        adj = adj.unsqueeze(0)
+    eye = torch.eye(adj.size(-1), device=adj.device, dtype=adj.dtype)
+    adj = adj.float() + eye.unsqueeze(0)
+    return adj / adj.sum(-1, keepdim=True).clamp(min=1e-6)
+
+
class SelectiveSSMBlock(nn.Module):
    """
    Lightweight selective state-space block inspired by Mamba.

    Operates over a single temporal stream: (batch, time, features) -> (batch, time, hidden_dim).
    """

    def __init__(self, in_dim: int, hidden_dim: int, state_dim: int = 64, conv_kernel: int = 5, dropout: float = 0.1):
        super().__init__()
        self.in_proj = nn.Linear(in_dim, hidden_dim)
        # Depthwise temporal conv (groups == channels) mixes nearby timesteps.
        self.dwconv = nn.Conv1d(hidden_dim, hidden_dim, kernel_size=conv_kernel, padding=conv_kernel // 2, groups=hidden_dim)
        self.gate = nn.Linear(hidden_dim, hidden_dim)
        # Low-rank state-transition factors: (H, S) @ (S, H) maps hidden -> hidden.
        self.A = nn.Parameter(torch.randn(hidden_dim, state_dim) * 0.02)
        self.B = nn.Parameter(torch.randn(state_dim, hidden_dim) * 0.02)
        self.out_proj = nn.Linear(hidden_dim, hidden_dim)
        self.norm = nn.LayerNorm(hidden_dim)
        self.drop = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (B, T, F)
        h = self.in_proj(x)  # (B, T, H)
        # Conv1d expects (B, H, T); transpose in and back out.
        h_conv = self.dwconv(h.transpose(1, 2)).transpose(1, 2)
        g = torch.sigmoid(self.gate(h_conv))
        B, T, H = h_conv.shape
        state = torch.zeros(B, H, device=h_conv.device, dtype=h_conv.dtype)
        outputs = []
        # Sequential scan over time; the recurrence is O(T) Python steps.
        for t in range(T):
            # selective update: gates decide how much new signal to mix into the running state
            state = g[:, t, :] * (state @ self.A @ self.B + h_conv[:, t, :]) + (1 - g[:, t, :]) * state
            outputs.append(state)
        y = torch.stack(outputs, dim=1)
        # Residual connection around the recurrence, then post-norm.
        y = self.out_proj(self.drop(y)) + h_conv
        return self.norm(y)
+
+
class MambaTemporalEncoder(nn.Module):
    """Stack of selective SSM blocks; returns the last hidden state."""

    def __init__(self, in_dim: int, hidden_dim: int = 128, num_layers: int = 2, state_dim: int = 64, conv_kernel: int = 5, dropout: float = 0.1):
        super().__init__()
        # The first block adapts in_dim -> hidden_dim; later blocks stay at hidden_dim.
        self.blocks = nn.ModuleList(
            [
                SelectiveSSMBlock(
                    in_dim=in_dim if i == 0 else hidden_dim,
                    hidden_dim=hidden_dim,
                    state_dim=state_dim,
                    conv_kernel=conv_kernel,
                    dropout=dropout,
                )
                for i in range(num_layers)
            ]
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (B, T, in_dim) -> last-timestep encoding (B, hidden_dim).
        h = x
        for block in self.blocks:
            h = block(h)
        return h[:, -1, :]
+
+
class SimpleGCN(nn.Module):
    """Two-layer GCN that mixes counties with a fixed adjacency."""

    def __init__(self, in_dim: int, hidden_dim: int = 64, out_dim: int = 64, dropout: float = 0.1):
        super().__init__()
        self.lin1 = nn.Linear(in_dim, hidden_dim)
        self.lin2 = nn.Linear(hidden_dim, out_dim)
        self.drop = nn.Dropout(dropout)

    def forward(self, H: torch.Tensor, adj: torch.Tensor) -> torch.Tensor:
        # H: (B, N, D); adj: (B, N, N)
        # Aggregate -> transform -> dropout -> aggregate -> transform.
        z = torch.matmul(adj, H)
        z = F.relu(self.lin1(z))
        z = self.drop(z)
        z = torch.matmul(adj, z)
        return F.relu(self.lin2(z))
+
+
+
class WildfireMamba(nn.Module):
    """
    Mamba-based spatio-temporal wildfire model for county-day ERA5 features.

    Input shape: (batch, past_days, num_counties, num_features)
    Output: logits per county for the next day (use sigmoid for probabilities)
    """

    def __init__(
        self,
        in_dim: int,
        num_counties: int,
        past_days: int,
        hidden_dim: int = 128,
        gcn_hidden: int = 64,
        mamba_layers: int = 2,
        state_dim: int = 64,
        conv_kernel: int = 5,
        dropout: float = 0.1,
        adjacency: Optional[torch.Tensor] = None,
        with_count_head: bool = False,
    ):
        super().__init__()
        self.num_counties = num_counties
        self.past_days = past_days
        self.with_count_head = with_count_head
        # Main temporal branch over raw per-county feature sequences.
        self.temporal = MambaTemporalEncoder(
            in_dim=in_dim,
            hidden_dim=hidden_dim,
            num_layers=mamba_layers,
            state_dim=state_dim,
            conv_kernel=conv_kernel,
            dropout=dropout,
        )
        # differential branch is shallower and gates how much change to inject
        self.delta_temporal = MambaTemporalEncoder(
            in_dim=in_dim,
            hidden_dim=hidden_dim,
            num_layers=max(1, mamba_layers - 1),
            state_dim=state_dim,
            conv_kernel=conv_kernel,
            dropout=dropout,
        )
        self.delta_gate = nn.Linear(hidden_dim, hidden_dim)
        self.gcn = SimpleGCN(hidden_dim, hidden_dim=gcn_hidden, out_dim=gcn_hidden, dropout=dropout)
        self.cls_head = nn.Linear(gcn_hidden, 1)
        if self.with_count_head:
            self.count_head = nn.Linear(gcn_hidden, 1)
        self.dropout = nn.Dropout(dropout)
        # Placeholder buffer; set_adjacency replaces it with the real graph.
        self.register_buffer("_adjacency", None)
        if adjacency is not None:
            self.set_adjacency(adjacency)

    def set_adjacency(self, adj: torch.Tensor) -> None:
        """Set/override the spatial adjacency."""
        # detach(): the adjacency is treated as data, not a trainable parameter.
        adj = _normalize_adjacency(adj.detach())
        self._adjacency = adj

    def _get_adjacency(self, batch_size: int) -> torch.Tensor:
        # Fall back to an identity graph (self-loops only) when none was set;
        # the cls_head weight supplies the model's current device.
        if self._adjacency is None:
            eye = torch.eye(self.num_counties, device=self.cls_head.weight.device)
            adj = _normalize_adjacency(eye)
        else:
            adj = self._adjacency
        if adj.dim() == 2:
            adj = adj.unsqueeze(0)
        if adj.size(0) == 1 and batch_size > 1:
            # expand() broadcasts without copying; the adjacency is only read downstream.
            adj = adj.expand(batch_size, -1, -1)
        return adj

    @staticmethod
    def _temporal_delta(x: torch.Tensor) -> torch.Tensor:
        # prepend zeros so delta has the same length as the input sequence
        zeros = torch.zeros(x.size(0), 1, x.size(2), device=x.device, dtype=x.dtype)
        return torch.cat([zeros, x[:, 1:] - x[:, :-1]], dim=1)
+[docs] + def forward(self, x: torch.Tensor, adjacency: Optional[torch.Tensor] = None): + """ + Args: + x: Tensor shaped (batch, past_days, num_counties, in_dim) + adjacency: Optional (N, N) or (B, N, N) adjacency override. + Returns: + - logits: (batch, num_counties) + - optional counts: (batch, num_counties) if with_count_head is enabled. + """ + B, T, N, F = x.shape + if T != self.past_days: + raise ValueError(f"Expected past_days={self.past_days}, got {T}.") + if N != self.num_counties: + raise ValueError(f"Expected num_counties={self.num_counties}, got {N}.") + + # flatten counties into the batch for temporal encoding + x_flat = x.permute(0, 2, 1, 3).reshape(B * N, T, F) + base = self.temporal(x_flat) + delta = self.delta_temporal(self._temporal_delta(x_flat)) + gate = torch.sigmoid(self.delta_gate(delta)) + fused = base * gate + delta + fused = fused.view(B, N, -1) + + adj = _normalize_adjacency(adjacency) if adjacency is not None else self._get_adjacency(B) + spatial = self.gcn(fused, adj) + spatial = self.dropout(spatial) + logits = self.cls_head(spatial).squeeze(-1) + if self.with_count_head: + counts = F.relu(self.count_head(spatial)).squeeze(-1) + return logits, counts + return logits
+
+ + + +
def wildfire_mamba_builder(
    task: str,
    in_dim: int,
    num_counties: int,
    past_days: int,
    **kwargs,
) -> WildfireMamba:
    """
    Builder used by the model registry.
    """
    if task.lower() not in {"classification", "binary_classification"}:
        raise ValueError("WildfireMamba is designed for binary per-county classification.")
    # Optional hyperparameters and their defaults.
    defaults = dict(
        hidden_dim=128,
        gcn_hidden=64,
        mamba_layers=2,
        state_dim=64,
        conv_kernel=5,
        dropout=0.1,
        adjacency=None,
        with_count_head=False,
    )
    options = {name: kwargs.get(name, value) for name, value in defaults.items()}
    return WildfireMamba(
        in_dim=in_dim,
        num_counties=num_counties,
        past_days=past_days,
        **options,
    )
+ + + +__all__ = ["WildfireMamba", "wildfire_mamba_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/wildfirespreadts.html b/docs/_modules/pyhazards/models/wildfirespreadts.html new file mode 100644 index 00000000..c89dc84a --- /dev/null +++ b/docs/_modules/pyhazards/models/wildfirespreadts.html @@ -0,0 +1,451 @@ + + + + + + + + + + pyhazards.models.wildfirespreadts - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.wildfirespreadts

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+
+
+
class WildfireSpreadTS(nn.Module):
    """Temporal convolution baseline for wildfire spread masks.

    Encodes a (batch, history, channels, H, W) stack with two 3-D
    convolutions, mean-pools the temporal axis, and projects to
    ``out_channels`` maps with a 1x1 convolution.
    """

    def __init__(
        self,
        history: int = 4,
        in_channels: int = 6,
        hidden_dim: int = 32,
        out_channels: int = 1,
        dropout: float = 0.1,
    ):
        super().__init__()
        # Fail fast on invalid hyperparameters.
        if history <= 0:
            raise ValueError(f"history must be positive, got {history}")
        if in_channels <= 0:
            raise ValueError(f"in_channels must be positive, got {in_channels}")
        if hidden_dim <= 0:
            raise ValueError(f"hidden_dim must be positive, got {hidden_dim}")
        if out_channels <= 0:
            raise ValueError(f"out_channels must be positive, got {out_channels}")
        if not 0.0 <= dropout < 1.0:
            raise ValueError(f"dropout must be in [0, 1), got {dropout}")

        self.history = int(history)
        self.in_channels = int(in_channels)
        self.encoder = nn.Sequential(
            nn.Conv3d(in_channels, hidden_dim, kernel_size=(3, 3, 3), padding=1),
            nn.GELU(),
            # Skip the dropout module entirely when dropout is disabled.
            nn.Dropout3d(dropout) if dropout > 0 else nn.Identity(),
            nn.Conv3d(hidden_dim, hidden_dim, kernel_size=(3, 3, 3), padding=1),
            nn.GELU(),
        )
        self.head = nn.Conv2d(hidden_dim, out_channels, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return (batch, out_channels, H, W) maps for a (batch, history, channels, H, W) input."""
        if x.ndim != 5:
            raise ValueError(
                "WildfireSpreadTS expects input shape (batch, history, channels, height, width), "
                f"got {tuple(x.shape)}."
            )
        if x.size(1) != self.history:
            raise ValueError(f"WildfireSpreadTS expected history={self.history}, got {x.size(1)}.")
        if x.size(2) != self.in_channels:
            raise ValueError(f"WildfireSpreadTS expected in_channels={self.in_channels}, got {x.size(2)}.")
        # Conv3d expects (batch, channels, time, H, W); mean-pool time afterwards.
        encoded = self.encoder(x.permute(0, 2, 1, 3, 4))
        return self.head(torch.mean(encoded, dim=2))
+
+ + + +
def wildfirespreadts_builder(
    task: str,
    history: int = 4,
    in_channels: int = 6,
    hidden_dim: int = 32,
    out_channels: int = 1,
    dropout: float = 0.1,
    **kwargs,
) -> nn.Module:
    """Registry builder for WildfireSpreadTS (segmentation/regression only)."""
    _ = kwargs  # extra registry options are accepted but ignored
    supported = {"segmentation", "regression"}
    if task.lower() not in supported:
        raise ValueError(
            "wildfirespreadts supports task='segmentation' or 'regression', "
            f"got {task!r}."
        )
    return WildfireSpreadTS(
        history=history,
        in_channels=in_channels,
        hidden_dim=hidden_dim,
        out_channels=out_channels,
        dropout=dropout,
    )
+ + + +__all__ = ["WildfireSpreadTS", "wildfirespreadts_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/models/wrf_sfire.html b/docs/_modules/pyhazards/models/wrf_sfire.html new file mode 100644 index 00000000..dacef464 --- /dev/null +++ b/docs/_modules/pyhazards/models/wrf_sfire.html @@ -0,0 +1,438 @@ + + + + + + + + + + pyhazards.models.wrf_sfire - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.models.wrf_sfire

+from __future__ import annotations
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+
class WRFSFireAdapter(nn.Module):
    """Lightweight raster adapter inspired by WRF-SFIRE style spread diffusion.

    Channel 0 is treated as the fireline, channel 1 as terrain, channel 2 as
    moisture; the fireline is repeatedly blurred with a fixed transport
    kernel and modulated by terrain/moisture.
    """

    def __init__(
        self,
        in_channels: int = 12,
        out_channels: int = 1,
        diffusion_steps: int = 3,
    ):
        super().__init__()
        # Validate eagerly so registry misconfiguration fails at build time.
        if in_channels <= 0:
            raise ValueError(f"in_channels must be positive, got {in_channels}")
        if out_channels != 1:
            raise ValueError(f"WRFSFireAdapter only supports out_channels=1, got {out_channels}")
        if diffusion_steps <= 0:
            raise ValueError(f"diffusion_steps must be positive, got {diffusion_steps}")
        self.in_channels = int(in_channels)
        self.diffusion_steps = int(diffusion_steps)
        # Fixed, non-trainable 3x3 transport kernel (registered as a buffer).
        weights = [[0.02, 0.08, 0.02], [0.08, 0.60, 0.08], [0.02, 0.08, 0.02]]
        kernel = torch.tensor(weights, dtype=torch.float32).view(1, 1, 3, 3)
        self.register_buffer("transport_kernel", kernel)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return a (batch, 1, H, W) spread map clamped to [0, 1]."""
        if x.ndim != 4:
            raise ValueError(
                "WRFSFireAdapter expects input shape (batch, channels, height, width), "
                f"got {tuple(x.shape)}."
            )
        if x.size(1) != self.in_channels:
            raise ValueError(f"WRFSFireAdapter expected in_channels={self.in_channels}, got {x.size(1)}.")
        front = torch.sigmoid(x[:, :1])
        slope = torch.sigmoid(x[:, 1:2])
        wetness = torch.sigmoid(x[:, 2:3])
        for _step in range(self.diffusion_steps):
            front = F.conv2d(front, self.transport_kernel, padding=1)
            front = torch.clamp(front * (0.9 + 0.1 * slope) * (1.0 - 0.15 * wetness), 0.0, 1.0)
        return front
+
+ + + +
def wrf_sfire_builder(
    task: str,
    in_channels: int = 12,
    out_channels: int = 1,
    diffusion_steps: int = 3,
    **kwargs,
) -> nn.Module:
    """Registry builder for WRFSFireAdapter (segmentation/regression only)."""
    _ = kwargs  # extra registry options are accepted but ignored
    supported = {"segmentation", "regression"}
    if task.lower() not in supported:
        raise ValueError(f"wrf_sfire supports task='segmentation' or 'regression', got {task!r}.")
    return WRFSFireAdapter(
        in_channels=in_channels,
        out_channels=out_channels,
        diffusion_steps=diffusion_steps,
    )
+ + + +__all__ = ["WRFSFireAdapter", "wrf_sfire_builder"] +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/reports/base.html b/docs/_modules/pyhazards/reports/base.html new file mode 100644 index 00000000..adb12c67 --- /dev/null +++ b/docs/_modules/pyhazards/reports/base.html @@ -0,0 +1,465 @@ + + + + + + + + + + pyhazards.reports.base - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.reports.base

+from __future__ import annotations
+
+import csv
+import json
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, Mapping, Sequence
+
+
+
@dataclass
class BenchmarkReport:
    """Container for one benchmark run: metrics plus optional metadata/artifacts."""

    benchmark_name: str
    hazard_task: str
    metrics: Dict[str, float]
    metadata: Dict[str, Any] = field(default_factory=dict)
    artifacts: Dict[str, str] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Return a plain-dict view (the nested dicts are shared, not copied)."""
        payload: Dict[str, Any] = {
            "benchmark_name": self.benchmark_name,
            "hazard_task": self.hazard_task,
        }
        payload["metrics"] = self.metrics
        payload["metadata"] = self.metadata
        payload["artifacts"] = self.artifacts
        return payload
+
+ + + +
def export_report_bundle(
    report: BenchmarkReport,
    output_dir: str | Path,
    formats: Sequence[str],
) -> Dict[str, str]:
    """Write the report in each requested format and return {format: path}.

    Supported formats (case-insensitive): json, md, csv.

    Raises:
        ValueError: On an unsupported format (files for earlier formats in
            the sequence will already have been written).
    """
    target = Path(output_dir)
    target.mkdir(parents=True, exist_ok=True)
    written: Dict[str, str] = {}
    for requested in formats:
        fmt = requested.lower()
        path = target / "{name}.{fmt}".format(name=report.benchmark_name, fmt=fmt)
        if fmt == "json":
            path.write_text(json.dumps(report.to_dict(), indent=2, sort_keys=True), encoding="utf-8")
        elif fmt == "md":
            path.write_text(_markdown_report(report), encoding="utf-8")
        elif fmt == "csv":
            _write_csv(path, report.metrics, report.metadata)
        else:
            raise ValueError("Unsupported report format: {fmt}".format(fmt=fmt))
        written[fmt] = str(path)
    return written
+ + + +
+[docs] +def _markdown_report(report: BenchmarkReport) -> str: + lines = [ + "# {name}".format(name=report.benchmark_name), + "", + "- Hazard task: `{task}`".format(task=report.hazard_task), + "", + "## Metrics", + "", + ] + if report.metrics: + for key, value in sorted(report.metrics.items()): + lines.append("- `{key}`: {value}".format(key=key, value=value)) + else: + lines.append("- No metrics recorded.") + if report.metadata: + lines.extend(["", "## Metadata", ""]) + for key, value in sorted(report.metadata.items()): + lines.append("- `{key}`: {value}".format(key=key, value=value)) + if report.artifacts: + lines.extend(["", "## Artifacts", ""]) + for key, value in sorted(report.artifacts.items()): + lines.append("- `{key}`: {value}".format(key=key, value=value)) + lines.append("") + return "\n".join(lines)
+ + + +
+[docs] +def _write_csv(path: Path, metrics: Mapping[str, float], metadata: Mapping[str, Any]) -> None: + row: Dict[str, Any] = {} + row.update(metrics) + row.update({"metadata.{key}".format(key=key): value for key, value in metadata.items()}) + fieldnames = list(row.keys()) or ["status"] + if not row: + row = {"status": "empty"} + with path.open("w", encoding="utf-8", newline="") as handle: + writer = csv.DictWriter(handle, fieldnames=fieldnames) + writer.writeheader() + writer.writerow(row)
+ +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/tasks.html b/docs/_modules/pyhazards/tasks.html new file mode 100644 index 00000000..b92829ba --- /dev/null +++ b/docs/_modules/pyhazards/tasks.html @@ -0,0 +1,463 @@ + + + + + + + + + + pyhazards.tasks - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.tasks

+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Dict, List
+
+
+
@dataclass(frozen=True)
class HazardTask:
    """Canonical hazard task label used by benchmark and config layers."""

    # Full task identifier, e.g. "earthquake.picking"; matches the registry key.
    name: str
    # Hazard family component of the name, e.g. "earthquake".
    hazard: str
    # Prediction target component of the name, e.g. "picking".
    target: str
    # One-line human-readable summary of the task.
    description: str
# Registry of canonical hazard tasks, keyed by the "<hazard>.<target>" name.
# Keys are lowercase; get_hazard_task()/has_hazard_task() lowercase their input
# before lookup, so matching is effectively case-insensitive.
_HAZARD_TASKS: Dict[str, HazardTask] = {
    "earthquake.picking": HazardTask(
        name="earthquake.picking",
        hazard="earthquake",
        target="picking",
        description="Waveform-based earthquake phase detection and P/S picking.",
    ),
    "earthquake.forecasting": HazardTask(
        name="earthquake.forecasting",
        hazard="earthquake",
        target="forecasting",
        description="Earthquake forecasting over spatial or temporal forecast windows.",
    ),
    "wildfire.danger": HazardTask(
        name="wildfire.danger",
        hazard="wildfire",
        target="danger",
        description="Wildfire danger or risk prediction over a region and horizon.",
    ),
    "wildfire.spread": HazardTask(
        name="wildfire.spread",
        hazard="wildfire",
        target="spread",
        description="Wildfire spread forecasting over raster masks or burned-area grids.",
    ),
    "flood.streamflow": HazardTask(
        name="flood.streamflow",
        hazard="flood",
        target="streamflow",
        description="Riverine discharge or streamflow forecasting.",
    ),
    "flood.inundation": HazardTask(
        name="flood.inundation",
        hazard="flood",
        target="inundation",
        description="Flood inundation and water-extent forecasting over spatial grids.",
    ),
    "tc.track_intensity": HazardTask(
        name="tc.track_intensity",
        hazard="tc",
        target="track_intensity",
        description="Storm-track and intensity forecasting over lead-time horizons.",
    ),
}
def available_hazard_tasks() -> List[str]:
    """Return the registered hazard task names in sorted order."""
    return sorted(_HAZARD_TASKS)
+ + + +
def get_hazard_task(name: str) -> HazardTask:
    """Look up a :class:`HazardTask` by name, ignoring case and whitespace.

    Raises:
        KeyError: If *name* does not match a registered hazard task; the
            message lists all known task names.
    """
    key = name.strip().lower()
    task = _HAZARD_TASKS.get(key)
    if task is None:
        known = ", ".join(available_hazard_tasks())
        raise KeyError(f"Unknown hazard task '{name}'. Known: {known}")
    return task
+ + + +
def has_hazard_task(name: str) -> bool:
    """Return True when *name* (case-insensitively, trimmed) is registered."""
    normalized = name.strip().lower()
    return normalized in _HAZARD_TASKS
# Public API of the hazard-task registry module.
__all__ = [
    "HazardTask",
    "available_hazard_tasks",
    "get_hazard_task",
    "has_hazard_task",
]
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/utils/common.html b/docs/_modules/pyhazards/utils/common.html new file mode 100644 index 00000000..434e4fd5 --- /dev/null +++ b/docs/_modules/pyhazards/utils/common.html @@ -0,0 +1,392 @@ + + + + + + + + + + pyhazards.utils.common - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.utils.common

+import logging
+import os
+import random
+from typing import Optional
+
+import numpy as np
+import torch
+
+
+
def seed_all(seed: int = 42, deterministic: bool = False) -> None:
    """Seed Python, NumPy, and torch RNGs for reproducible runs.

    Args:
        seed: Value fed to every generator (Python ``random``, NumPy, torch,
            and all CUDA devices when CUDA is available).
        deterministic: When True, also force deterministic cuDNN kernels and
            disable the cuDNN autotuner (trades throughput for repeatability).
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
+ + + +
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a package logger, configuring root logging on first use.

    The log level is read from ``PYHAZARDS_LOGLEVEL`` (preferred, matching the
    ``PYHAZARDS_DEVICE`` naming convention used elsewhere in the package) with
    a backward-compatible fallback to the legacy ``PYHAZARD_LOGLEVEL``
    spelling, defaulting to ``INFO``.

    Args:
        name: Logger name; defaults to the package-wide "pyhazards" logger.

    Returns:
        The named :class:`logging.Logger`.
    """
    level = os.getenv("PYHAZARDS_LOGLEVEL") or os.getenv("PYHAZARD_LOGLEVEL", "INFO")
    # basicConfig is a no-op when the root logger already has handlers.
    logging.basicConfig(level=level)
    return logging.getLogger(name or "pyhazards")
+ +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_modules/pyhazards/utils/hardware.html b/docs/_modules/pyhazards/utils/hardware.html new file mode 100644 index 00000000..e07f1972 --- /dev/null +++ b/docs/_modules/pyhazards/utils/hardware.html @@ -0,0 +1,408 @@ + + + + + + + + + + pyhazards.utils.hardware - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+
+ +
+ +
+
+

Source code for pyhazards.utils.hardware

+from __future__ import annotations
+
+import os
+from typing import Optional
+
+import torch
+
# Resolve the process-wide default device string once at import time:
# PYHAZARDS_DEVICE overrides auto-detection; otherwise prefer the first CUDA
# device when available, falling back to CPU.
_DEFAULT_DEVICE_STR = os.getenv("PYHAZARDS_DEVICE") or ("cuda:0" if torch.cuda.is_available() else "cpu")
# Mutable module state: read by auto_device()/get_device(), replaced by set_device().
_default_device = torch.device(_DEFAULT_DEVICE_STR)
+
+
+
def auto_device(prefer: str | None = None) -> torch.device:
    """Choose a torch device automatically.

    A non-empty *prefer* string wins outright; otherwise the module-wide
    default (which honors the PYHAZARDS_DEVICE environment variable) is used.
    """
    if not prefer:
        return _default_device
    return torch.device(prefer)
+ + + +
def num_devices() -> int:
    """Return the number of visible CUDA devices (0 when CUDA is unavailable)."""
    return torch.cuda.device_count() if torch.cuda.is_available() else 0
+ + + +
def get_device() -> torch.device:
    """Return the current process-wide default torch device."""
    return _default_device
+ + + +
def set_device(device_str: str | torch.device) -> None:
    """Replace the process-wide default device returned by get_device().

    Accepts either a device string (e.g. "cuda:1") or an existing
    :class:`torch.device` instance.
    """
    global _default_device
    resolved = torch.device(device_str)
    _default_device = resolved
+ +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/docs/_sources/api/modules.rst.txt b/docs/_sources/api/modules.rst.txt new file mode 100644 index 00000000..b35db4b4 --- /dev/null +++ b/docs/_sources/api/modules.rst.txt @@ -0,0 +1,9 @@ +:orphan: + +pyhazards +========= + +.. toctree:: + :maxdepth: 4 + + pyhazards diff --git a/docs/_sources/api/pyhazards.benchmarks.rst.txt b/docs/_sources/api/pyhazards.benchmarks.rst.txt new file mode 100644 index 00000000..728efa28 --- /dev/null +++ b/docs/_sources/api/pyhazards.benchmarks.rst.txt @@ -0,0 +1,77 @@ +pyhazards.benchmarks package +============================ + +Submodules +---------- + +pyhazards.benchmarks.base module +-------------------------------- + +.. automodule:: pyhazards.benchmarks.base + :members: + :undoc-members: + :show-inheritance: + +pyhazards.benchmarks.registry module +------------------------------------ + +.. automodule:: pyhazards.benchmarks.registry + :members: + :undoc-members: + :show-inheritance: + +pyhazards.benchmarks.runner module +---------------------------------- + +.. automodule:: pyhazards.benchmarks.runner + :members: + :undoc-members: + :show-inheritance: + +pyhazards.benchmarks.schemas module +----------------------------------- + +.. automodule:: pyhazards.benchmarks.schemas + :members: + :undoc-members: + :show-inheritance: + +pyhazards.benchmarks.earthquake module +-------------------------------------- + +.. automodule:: pyhazards.benchmarks.earthquake + :members: + :undoc-members: + :show-inheritance: + +pyhazards.benchmarks.wildfire module +------------------------------------ + +.. automodule:: pyhazards.benchmarks.wildfire + :members: + :undoc-members: + :show-inheritance: + +pyhazards.benchmarks.flood module +--------------------------------- + +.. automodule:: pyhazards.benchmarks.flood + :members: + :undoc-members: + :show-inheritance: + +pyhazards.benchmarks.tc module +------------------------------ + +.. 
automodule:: pyhazards.benchmarks.tc + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: pyhazards.benchmarks + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api/pygip.models.nn.rst b/docs/_sources/api/pyhazards.configs.rst.txt similarity index 55% rename from docs/source/api/pygip.models.nn.rst rename to docs/_sources/api/pyhazards.configs.rst.txt index 00f9a52a..821ea1fb 100644 --- a/docs/source/api/pygip.models.nn.rst +++ b/docs/_sources/api/pyhazards.configs.rst.txt @@ -1,13 +1,13 @@ -pygip.models.nn package -======================= +pyhazards.configs package +========================= Submodules ---------- -pygip.models.nn.backbones module +pyhazards.configs._schema module -------------------------------- -.. automodule:: pygip.models.nn.backbones +.. automodule:: pyhazards.configs._schema :members: :undoc-members: :show-inheritance: @@ -15,7 +15,7 @@ pygip.models.nn.backbones module Module contents --------------- -.. automodule:: pygip.models.nn +.. automodule:: pyhazards.configs :members: :undoc-members: :show-inheritance: diff --git a/docs/_sources/api/pyhazards.datasets.rst.txt b/docs/_sources/api/pyhazards.datasets.rst.txt new file mode 100644 index 00000000..1a7c06fc --- /dev/null +++ b/docs/_sources/api/pyhazards.datasets.rst.txt @@ -0,0 +1,136 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +pyhazards.datasets package +========================== + +Catalog Summary +--------------- + +This page links the public dataset catalog, the developer dataset +workflow, and the package submodules used to register or inspect datasets. + +For the curated browsing experience, use :doc:`/pyhazards_datasets`. + +Shared Forcing +~~~~~~~~~~~~~~ + +:doc:`ERA5 `, :doc:`GOES-R `, :doc:`MERRA-2 `. + +Wildfire +~~~~~~~~ + +:doc:`FIRMS `, :doc:`FPA-FOD Tabular `, :doc:`FPA-FOD Weekly `, :doc:`LANDFIRE `, :doc:`MTBS `, :doc:`WFIGS `. 
+ +Flood +~~~~~ + +:doc:`Caravan `, :doc:`FloodCastBench `, :doc:`HydroBench `, :doc:`NOAA Flood Events `, :doc:`WaterBench `. + +Earthquake +~~~~~~~~~~ + +:doc:`AEFA Forecast `, :doc:`pick-benchmark `, :doc:`SeisBench `. + +Tropical Cyclone +~~~~~~~~~~~~~~~~ + +:doc:`IBTrACS `, :doc:`TCBench Alpha `, :doc:`TropiCycloneNet-Dataset `. + +Developer Dataset Workflow +-------------------------- + +Use this section when you need the package-level registry and dataset +builder interface rather than the public catalog presentation. + +Inspect an External Dataset Source +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: bash + + python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10 + +Load a Registered Dataset +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + from pyhazards.datasets import available_datasets, load_dataset + + print(available_datasets()) + data = load_dataset( + "seisbench_waveforms", + micro=True, + ).load() + print(sorted(data.splits.keys())) + +Register a Custom Dataset +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + from pyhazards.datasets import ( + DataBundle, + DataSplit, + Dataset, + FeatureSpec, + LabelSpec, + register_dataset, + ) + + class MyDataset(Dataset): + name = "my_dataset" + + def _load(self) -> DataBundle: + raise NotImplementedError("Return a populated DataBundle here.") + + register_dataset("my_dataset", MyDataset) + +Notes +~~~~~ + +- Public dataset docs are generated from cards in ``pyhazards/dataset_cards``. +- Run ``python scripts/render_dataset_docs.py`` after editing cards or generated dataset docs. +- Use :doc:`/implementation` for the full contributor workflow. + +Submodules +---------- + +pyhazards.datasets.base module +------------------------------ + +.. automodule:: pyhazards.datasets.base + :members: + :undoc-members: + :show-inheritance: + +pyhazards.datasets.registry module +----------------------------------- + +.. 
automodule:: pyhazards.datasets.registry + :members: + :undoc-members: + :show-inheritance: + +pyhazards.datasets.transforms package +------------------------------------- + +.. automodule:: pyhazards.datasets.transforms + :members: + :undoc-members: + :show-inheritance: + +pyhazards.datasets.hazards package +----------------------------------- + +.. automodule:: pyhazards.datasets.hazards + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: pyhazards.datasets + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_sources/api/pyhazards.engine.rst.txt b/docs/_sources/api/pyhazards.engine.rst.txt new file mode 100644 index 00000000..a4ad03da --- /dev/null +++ b/docs/_sources/api/pyhazards.engine.rst.txt @@ -0,0 +1,37 @@ +pyhazards.engine package +======================== + +Submodules +---------- + +pyhazards.engine.trainer module +------------------------------- + +.. automodule:: pyhazards.engine.trainer + :members: + :undoc-members: + :show-inheritance: + +pyhazards.engine.distributed module +------------------------------------ + +.. automodule:: pyhazards.engine.distributed + :members: + :undoc-members: + :show-inheritance: + +pyhazards.engine.inference module +---------------------------------- + +.. automodule:: pyhazards.engine.inference + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: pyhazards.engine + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_sources/api/pyhazards.metrics.rst.txt b/docs/_sources/api/pyhazards.metrics.rst.txt new file mode 100644 index 00000000..a79a09ac --- /dev/null +++ b/docs/_sources/api/pyhazards.metrics.rst.txt @@ -0,0 +1,7 @@ +pyhazards.metrics package +========================= + +.. 
automodule:: pyhazards.metrics + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_sources/api/pyhazards.models.rst.txt b/docs/_sources/api/pyhazards.models.rst.txt new file mode 100644 index 00000000..f2ee6f21 --- /dev/null +++ b/docs/_sources/api/pyhazards.models.rst.txt @@ -0,0 +1,137 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +pyhazards.models package +======================== + +Catalog Summary +--------------- + +This page links the public model catalog, the developer registry +workflow, and the package submodules used to implement model builders. + +For the curated browsing experience, use :doc:`/pyhazards_models`. + +Wildfire +~~~~~~~~ + +Implemented Models +++++++++++++++++++ + +:doc:`ASUFM `, :doc:`DNN-LSTM-AutoEncoder `, :doc:`FireCastNet `, :doc:`ForeFire Adapter `, :doc:`Wildfire Forecasting `, :doc:`WildfireSpreadTS `, :doc:`WRF-SFIRE Adapter `, :doc:`CNN-ASPP `. + +Earthquake +~~~~~~~~~~ + +Implemented Models +++++++++++++++++++ + +:doc:`EQNet `, :doc:`EQTransformer `, :doc:`GPD `, :doc:`PhaseNet `, :doc:`WaveCastNet `. + +Flood +~~~~~ + +Implemented Models +++++++++++++++++++ + +:doc:`EA-LSTM `, :doc:`FloodCast `, :doc:`Google Flood Forecasting `, :doc:`NeuralHydrology LSTM `, :doc:`UrbanFloodCast `, :doc:`HydroGraphNet `. + +Tropical Cyclone +~~~~~~~~~~~~~~~~ + +Implemented Models +++++++++++++++++++ + +:doc:`Hurricast `, :doc:`SAF-Net `, :doc:`TCIF-fusion `, :doc:`Tropical Cyclone MLP `, :doc:`TropiCycloneNet `. + +Experimental Adapters ++++++++++++++++++++++ + +:doc:`FourCastNet TC Adapter `, :doc:`GraphCast TC Adapter `, :doc:`Pangu TC Adapter `. + +Developer Registry Workflow +--------------------------- + +Use this section when you need the package-level builder and registry +interface rather than the public catalog presentation. + +Build a Registered Model +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
code-block:: python + + from pyhazards.models import build_model + + model = build_model( + name="phasenet", + task="regression", + in_channels=3, + ) + +Register a Custom Model +~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + import torch.nn as nn + from pyhazards.models import build_model, register_model + + def my_custom_builder(task: str, in_dim: int, out_dim: int, **kwargs) -> nn.Module: + hidden = kwargs.get("hidden_dim", 128) + return nn.Sequential( + nn.Linear(in_dim, hidden), + nn.ReLU(), + nn.Linear(hidden, out_dim), + ) + + register_model("my_mlp", my_custom_builder, defaults={"hidden_dim": 128}) + model = build_model(name="my_mlp", task="regression", in_dim=16, out_dim=1) + +Notes +~~~~~ + +- Builders receive ``task`` plus any kwargs you pass. +- ``register_model`` stores optional defaults so configs can stay small. +- Use :doc:`/implementation` for the full contributor workflow. + +Submodules +---------- + +pyhazards.models.backbones module +---------------------------------- + +.. automodule:: pyhazards.models.backbones + :members: + :undoc-members: + :show-inheritance: + +pyhazards.models.heads module +------------------------------ + +.. automodule:: pyhazards.models.heads + :members: + :undoc-members: + :show-inheritance: + +pyhazards.models.builder module +------------------------------- + +.. automodule:: pyhazards.models.builder + :members: + :undoc-members: + :show-inheritance: + +pyhazards.models.registry module +-------------------------------- + +.. automodule:: pyhazards.models.registry + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: pyhazards.models + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_sources/api/pyhazards.reports.rst.txt b/docs/_sources/api/pyhazards.reports.rst.txt new file mode 100644 index 00000000..ed2243b4 --- /dev/null +++ b/docs/_sources/api/pyhazards.reports.rst.txt @@ -0,0 +1,21 @@ +pyhazards.reports package +========================= + +Submodules +---------- + +pyhazards.reports.base module +----------------------------- + +.. automodule:: pyhazards.reports.base + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: pyhazards.reports + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_sources/api/pyhazards.rst.txt b/docs/_sources/api/pyhazards.rst.txt new file mode 100644 index 00000000..385a8563 --- /dev/null +++ b/docs/_sources/api/pyhazards.rst.txt @@ -0,0 +1,37 @@ +pyhazards package +================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + pyhazards.datasets + pyhazards.models + pyhazards.benchmarks + pyhazards.configs + pyhazards.reports + pyhazards.engine + pyhazards.metrics + pyhazards.utils + +Submodules +---------- + +pyhazards.interactive_map module +-------------------------------- + +.. automodule:: pyhazards.interactive_map + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: pyhazards + :members: + :undoc-members: + :show-inheritance: + :exclude-members: BenchmarkRunner, GraphTemporalDataset, graph_collate, WildfireMamba, wildfire_mamba_builder diff --git a/docs/_sources/api/pyhazards.utils.rst.txt b/docs/_sources/api/pyhazards.utils.rst.txt new file mode 100644 index 00000000..5b552aa3 --- /dev/null +++ b/docs/_sources/api/pyhazards.utils.rst.txt @@ -0,0 +1,29 @@ +pyhazards.utils package +======================= + +Submodules +---------- + +pyhazards.utils.hardware module +-------------------------------- + +.. 
automodule:: pyhazards.utils.hardware + :members: + :undoc-members: + :show-inheritance: + +pyhazards.utils.common module +------------------------------ + +.. automodule:: pyhazards.utils.common + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: pyhazards.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_sources/appendix_a_coverage.rst.txt b/docs/_sources/appendix_a_coverage.rst.txt new file mode 100644 index 00000000..10610e49 --- /dev/null +++ b/docs/_sources/appendix_a_coverage.rst.txt @@ -0,0 +1,306 @@ +.. This file is generated by scripts/render_appendix_a_docs.py. Do not edit by hand. + +Coverage Audit +============== + +Overview +-------- + +This page audits the current PyHazards implementation against the +planned methods, benchmarks, and datasets listed in ``pyhazard_plan.pdf``. +It separates implemented public entries from variant-only entries, +experimental wrappers, and items that are still missing. + +Status meanings: + +- ``Implemented``: a public PyHazards adapter exists for the named method or resource. +- ``Experimental``: a lightweight wrapper exists, but it should not be counted as stable core coverage. +- ``Missing``: no aligned adapter or benchmark integration is present yet. + +Hazard Summary +-------------- + +.. list-table:: + :widths: 26 18 18 18 + :header-rows: 1 + :class: dataset-list + + * - Hazard Family + - Implemented + - Experimental + - Missing + * - Earthquake + - 8 + - 0 + - 0 + * - Wildfire + - 6 + - 0 + - 0 + * - Flood + - 8 + - 0 + - 0 + * - Hurricane / Tropical Cyclone + - 8 + - 3 + - 0 + +Method and Resource Matrix +-------------------------- + +.. 
list-table:: + :widths: 22 22 16 14 24 34 + :header-rows: 1 + :class: dataset-list + + * - Hazard Family + - Method / Resource + - Type + - Status + - PyHazards Mapping + - Notes + * - Earthquake + - `PhaseNet `_ + - Baseline + - ``Implemented`` + - :doc:`PhaseNet ` + - Model adapter is implemented, but the SeisBench / pick-benchmark data path is still missing. + * - Earthquake + - `EQTransformer `_ + - Baseline + - ``Implemented`` + - :doc:`EQTransformer ` + - Model adapter is implemented, but the benchmark stack remains lighter than the PDF target. + * - Earthquake + - `GPD `_ + - Baseline + - ``Implemented`` + - :doc:`GPD ` + - Model adapter is implemented behind the shared picking interface. + * - Earthquake + - `EQNet `_ + - Baseline + - ``Implemented`` + - :doc:`EQNet ` + - Model adapter is implemented behind the shared picking interface. + * - Earthquake + - `SeisBench `_ + - Benchmark / Data Ecosystem + - ``Implemented`` + - None + - A synthetic-backed SeisBench-compatible waveform adapter is registered for smoke benchmarking. + * - Earthquake + - `pick-benchmark `_ + - Benchmark + - ``Implemented`` + - None + - A synthetic-backed pick-benchmark-compatible waveform adapter is registered for smoke benchmarking. + * - Earthquake + - `pyCSEP `_ + - Benchmark / Reports + - ``Implemented`` + - None + - The forecasting smoke benchmark exports a pyCSEP-style JSON artifact. + * - Earthquake + - `AEFA `_ + - Dataset / Forecast Benchmark + - ``Implemented`` + - None + - A synthetic-backed AEFA-style forecasting dataset adapter is registered. 
+ * - Wildfire + - `wildfire_forecasting `_ + - Baseline + - ``Implemented`` + - :doc:`Wildfire Forecasting ` + - + * - Wildfire + - `WildfireSpreadTS `_ + - Baseline / Benchmark + - ``Implemented`` + - :doc:`WildfireSpreadTS ` + - + * - Wildfire + - `ASUFM `_ + - Baseline + - ``Implemented`` + - :doc:`ASUFM ` + - + * - Wildfire + - `WRF-SFIRE `_ + - Simulator Adapter + - ``Implemented`` + - :doc:`WRF-SFIRE Adapter ` + - The current adapter is lightweight and synthetic-backed rather than a full external simulator binding. + * - Wildfire + - `ForeFire `_ + - Simulator Adapter + - ``Implemented`` + - :doc:`ForeFire Adapter ` + - The current adapter is lightweight and synthetic-backed rather than a full external simulator binding. + * - Wildfire + - `FireCastNet `_ + - Optional Baseline + - ``Implemented`` + - :doc:`FireCastNet ` + - + * - Flood + - `NeuralHydrology `_ + - Baseline Family + - ``Implemented`` + - :doc:`NeuralHydrology LSTM `, :doc:`EA-LSTM ` + - The LSTM and EA-LSTM adapters are implemented, but Caravan / WaterBench benchmark backing is still missing. + * - Flood + - `Caravan `_ + - Dataset + - ``Implemented`` + - None + - A synthetic-backed Caravan adapter is registered for streamflow smoke benchmarking. + * - Flood + - `WaterBench `_ + - Dataset + - ``Implemented`` + - None + - A synthetic-backed WaterBench adapter is registered for streamflow smoke benchmarking. + * - Flood + - `FloodCast `_ + - Baseline + - ``Implemented`` + - :doc:`FloodCast ` + - The model adapter is implemented, but FloodCastBench-backed evaluation is not wired yet. + * - Flood + - `FloodCastBench `_ + - Benchmark + - ``Implemented`` + - None + - A synthetic-backed FloodCastBench-style inundation adapter is registered. + * - Flood + - `UrbanFloodCast `_ + - Baseline + - ``Implemented`` + - :doc:`UrbanFloodCast ` + - The model adapter is implemented on synthetic inundation fixtures today. 
+ * - Flood + - `HydroBench `_ + - Benchmark / Diagnostics + - ``Implemented`` + - None + - A synthetic-backed HydroBench adapter is registered for streamflow smoke benchmarking. + * - Flood + - `google-research/flood-forecasting `_ + - Reference Baseline + - ``Implemented`` + - :doc:`Google Flood Forecasting ` + - + * - Hurricane / Tropical Cyclone + - `Hurricast `_ + - Baseline + - ``Implemented`` + - :doc:`Hurricast ` + - The model adapter is implemented, but the real TCBench / IBTrACS data path is still missing. + * - Hurricane / Tropical Cyclone + - `tropicalcyclone_MLP `_ + - Baseline + - ``Implemented`` + - :doc:`Tropical Cyclone MLP ` + - The model adapter is implemented as a basin-filtered storm baseline. + * - Hurricane / Tropical Cyclone + - `TCIF-fusion `_ + - Baseline + - ``Implemented`` + - :doc:`TCIF-fusion ` + - The model adapter is implemented behind the shared storm evaluator. + * - Hurricane / Tropical Cyclone + - `SAF-Net `_ + - Baseline + - ``Implemented`` + - :doc:`SAF-Net ` + - The model adapter is implemented behind the shared storm evaluator. + * - Hurricane / Tropical Cyclone + - `TropiCycloneNet `_ + - Baseline + - ``Implemented`` + - :doc:`TropiCycloneNet ` + - The model adapter is implemented, but the public benchmark/data track remains synthetic-first. + * - Hurricane / Tropical Cyclone + - `TropiCycloneNet-Dataset `_ + - Dataset + - ``Implemented`` + - None + - A synthetic-backed TropiCycloneNet-Dataset adapter is registered. + * - Hurricane / Tropical Cyclone + - `TCBench Alpha `_ + - Benchmark + - ``Implemented`` + - None + - A synthetic-backed TCBench Alpha adapter is registered. + * - Hurricane / Tropical Cyclone + - `IBTrACS `_ + - Dataset + - ``Implemented`` + - None + - A synthetic-backed IBTrACS adapter is registered. 
+ * - Hurricane / Tropical Cyclone + - `GraphCast / GenCast `_ + - Foundation Adapter + - ``Experimental`` + - :doc:`GraphCast TC Adapter ` + - The current wrapper is intentionally lightweight and should not be counted as stable core coverage. + * - Hurricane / Tropical Cyclone + - `Pangu-Weather `_ + - Foundation Adapter + - ``Experimental`` + - :doc:`Pangu TC Adapter ` + - The current wrapper is intentionally lightweight and should not be counted as stable core coverage. + * - Hurricane / Tropical Cyclone + - `FourCastNet `_ + - Foundation Adapter + - ``Experimental`` + - :doc:`FourCastNet TC Adapter ` + - The current wrapper is intentionally lightweight and should not be counted as stable core coverage. + +Current Public Non-Core Implementations +--------------------------------------- + +These entries remain in the public catalog, but they are not counted as +part of the current core method set. + +.. list-table:: + :widths: 18 18 28 36 + :header-rows: 1 + :class: dataset-list + + * - Hazard Family + - Catalog Status + - Public Entry + - Why it is non-core + * - Wildfire + - ``variant`` + - :doc:`CNN-ASPP ` + - Implemented outside the current core method set and kept public as an additional model. + * - Earthquake + - ``variant`` + - :doc:`WaveCastNet ` + - Implemented outside the current core method set and kept public as an additional model. + * - Flood + - ``variant`` + - :doc:`HydroGraphNet ` + - Implemented outside the current core method set and kept public as an additional model. + * - Tropical Cyclone + - ``experimental`` + - :doc:`FourCastNet TC Adapter ` + - Wrapper-style experimental adapter pending stronger benchmark and dataset support. + * - Tropical Cyclone + - ``experimental`` + - :doc:`GraphCast TC Adapter ` + - Wrapper-style experimental adapter pending stronger benchmark and dataset support. 
+ * - Tropical Cyclone + - ``experimental`` + - :doc:`Pangu TC Adapter ` + - Wrapper-style experimental adapter pending stronger benchmark and dataset support. + +Execution Note +-------------- + +Use `.github/ROADMAP_EXECUTION.md `_ +as the checked-in multi-agent handoff for finishing the remaining roadmap work. diff --git a/docs/_sources/benchmarks/aefa.rst.txt b/docs/_sources/benchmarks/aefa.rst.txt new file mode 100644 index 00000000..725ed707 --- /dev/null +++ b/docs/_sources/benchmarks/aefa.rst.txt @@ -0,0 +1,101 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +AEFA +==== + +Overview +-------- + +The AEFA alignment is implemented as a synthetic-backed dense-grid forecasting adapter used by the WaveCastNet benchmark config. + +It keeps the forecasting task and metric shape aligned without claiming a full AEFA data pipeline. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. 
container:: catalog-stat-note + + 1 model + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``earthquake`` + +**Registered class:** ``EarthquakeBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Earthquake Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`AEFA `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Wavefield Forecasting + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``mae`` + - ``mse`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``wavecastnet_benchmark_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`WaveCastNet `. diff --git a/docs/_sources/benchmarks/caravan.rst.txt b/docs/_sources/benchmarks/caravan.rst.txt new file mode 100644 index 00000000..291aecb3 --- /dev/null +++ b/docs/_sources/benchmarks/caravan.rst.txt @@ -0,0 +1,104 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +Caravan +======= + +Overview +-------- + +The current Caravan alignment is a metadata-backed streamflow adapter layered on top of the shared synthetic graph-temporal flood dataset. + +It currently drives the public smoke runs for NeuralHydrology LSTM and Google Flood Forecasting. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. 
container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 2 + + .. container:: catalog-stat-note + + 2 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``flood`` + +**Registered class:** ``FloodBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Flood Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`Caravan - A global community dataset for large-sample hydrology `_ (`repo `__). + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Streamflow + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``mae`` + - ``rmse`` + - ``nse`` + - ``kge`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``neuralhydrology_lstm_smoke.yaml`` + - ``google_flood_forecasting_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`NeuralHydrology LSTM `, :doc:`Google Flood Forecasting `. diff --git a/docs/_sources/benchmarks/earthquake_benchmark.rst.txt b/docs/_sources/benchmarks/earthquake_benchmark.rst.txt new file mode 100644 index 00000000..c1beb12f --- /dev/null +++ b/docs/_sources/benchmarks/earthquake_benchmark.rst.txt @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +Earthquake Benchmark +==================== + +Overview +-------- + +The earthquake benchmark family groups the picking and forecasting paths under one registered evaluator and benchmark runner entrypoint. + +Current public coverage is synthetic-backed but already exposes the same task and report shape used across the earthquake smoke configs. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Family + + .. 
container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 5 + + .. container:: catalog-stat-note + + 5 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``earthquake`` + +**Registered class:** ``EarthquakeBenchmark`` + +Mapped benchmark ecosystems +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`AEFA `, :doc:`pick-benchmark `, :doc:`pyCSEP `, :doc:`SeisBench `. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Phase Picking + - Wavefield Forecasting + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``p_pick_mae`` + - ``s_pick_mae`` + - ``precision`` + - ``recall`` + - ``f1`` + - ``mae`` + - ``mse`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``phasenet_smoke.yaml`` + - ``eqtransformer_smoke.yaml`` + - ``gpd_smoke.yaml`` + - ``eqnet_smoke.yaml`` + - ``wavecastnet_benchmark_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`PhaseNet `, :doc:`EQTransformer `, :doc:`GPD `, :doc:`EQNet `, :doc:`WaveCastNet `. + +.. dropdown:: Notes + :class-container: catalog-dropdown + + - Forecasting runs export a pyCSEP-style report artifact through the shared earthquake benchmark. 
diff --git a/docs/_sources/benchmarks/flood_benchmark.rst.txt b/docs/_sources/benchmarks/flood_benchmark.rst.txt new file mode 100644 index 00000000..f21f03a3 --- /dev/null +++ b/docs/_sources/benchmarks/flood_benchmark.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +Flood Benchmark +=============== + +Overview +-------- + +The flood benchmark family keeps streamflow and inundation scoring under one shared evaluator contract while preserving hazard-task-specific metrics. + +Current public coverage is synthetic-backed, but the same family already drives the streamflow and inundation smoke configs used across the flood models. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Family + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 6 + + .. container:: catalog-stat-note + + 6 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``flood`` + +**Registered class:** ``FloodBenchmark`` + +Mapped benchmark ecosystems +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Caravan `, :doc:`FloodCastBench `, :doc:`HydroBench `, :doc:`WaterBench `. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Streamflow + - Inundation + +.. 
dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``mae`` + - ``rmse`` + - ``nse`` + - ``kge`` + - ``pixel_mae`` + - ``iou`` + - ``f1`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``hydrographnet_smoke.yaml`` + - ``neuralhydrology_lstm_smoke.yaml`` + - ``neuralhydrology_ealstm_smoke.yaml`` + - ``google_flood_forecasting_smoke.yaml`` + - ``floodcast_smoke.yaml`` + - ``urbanfloodcast_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`HydroGraphNet `, :doc:`NeuralHydrology LSTM `, :doc:`EA-LSTM `, :doc:`Google Flood Forecasting `, :doc:`FloodCast `, :doc:`UrbanFloodCast `. diff --git a/docs/_sources/benchmarks/floodcastbench.rst.txt b/docs/_sources/benchmarks/floodcastbench.rst.txt new file mode 100644 index 00000000..70297a31 --- /dev/null +++ b/docs/_sources/benchmarks/floodcastbench.rst.txt @@ -0,0 +1,103 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +FloodCastBench +============== + +Overview +-------- + +The current FloodCastBench alignment is implemented as a synthetic raster inundation adapter used by the public inundation smoke configs. + +It documents the benchmark/data protocol behind the FloodCast and UrbanFloodCast smoke paths. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. 
container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 2 + + .. container:: catalog-stat-note + + 2 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``flood`` + +**Registered class:** ``FloodBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Flood Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`FloodCastBench `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Inundation + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``pixel_mae`` + - ``iou`` + - ``f1`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``floodcast_smoke.yaml`` + - ``urbanfloodcast_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`FloodCast `, :doc:`UrbanFloodCast `. diff --git a/docs/_sources/benchmarks/hydrobench.rst.txt b/docs/_sources/benchmarks/hydrobench.rst.txt new file mode 100644 index 00000000..7e4213b7 --- /dev/null +++ b/docs/_sources/benchmarks/hydrobench.rst.txt @@ -0,0 +1,103 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +HydroBench +========== + +Overview +-------- + +The current HydroBench alignment uses a metadata-backed streamflow adapter over the shared synthetic flood streamflow dataset. + +It is currently exercised through the HydroGraphNet smoke benchmark path. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. 
container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + 1 model + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``flood`` + +**Registered class:** ``FloodBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Flood Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`HydroBench `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Streamflow + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``mae`` + - ``rmse`` + - ``nse`` + - ``kge`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``hydrographnet_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`HydroGraphNet `. diff --git a/docs/_sources/benchmarks/ibtracs.rst.txt b/docs/_sources/benchmarks/ibtracs.rst.txt new file mode 100644 index 00000000..0279ab5d --- /dev/null +++ b/docs/_sources/benchmarks/ibtracs.rst.txt @@ -0,0 +1,104 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +IBTrACS +======= + +Overview +-------- + +The current IBTrACS alignment uses a metadata-backed storm-history adapter over the shared synthetic tropical-cyclone dataset. + +It is the benchmark ecosystem currently used by Hurricast and the experimental weather-model adapter smoke configs. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. 
container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 4 + + .. container:: catalog-stat-note + + 4 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``tc`` + +**Registered class:** ``TropicalCycloneBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Tropical Cyclone Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`IBTrACS `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Track + Intensity + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``track_error`` + - ``intensity_mae`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``hurricast_smoke.yaml`` + - ``graphcast_tc_smoke.yaml`` + - ``pangu_tc_smoke.yaml`` + - ``fourcastnet_tc_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`Hurricast `, :doc:`GraphCast TC Adapter `, :doc:`Pangu TC Adapter `, :doc:`FourCastNet TC Adapter `. diff --git a/docs/_sources/benchmarks/pick_benchmark.rst.txt b/docs/_sources/benchmarks/pick_benchmark.rst.txt new file mode 100644 index 00000000..bf9da111 --- /dev/null +++ b/docs/_sources/benchmarks/pick_benchmark.rst.txt @@ -0,0 +1,105 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. 
+ +pick-benchmark +============== + +Overview +-------- + +The current pick-benchmark path reuses the synthetic waveform picking bundle and tags it as a pick-benchmark-style benchmark adapter. + +It supports the earthquake picking smoke path for the transformer and CNN picking baselines. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 2 + + .. container:: catalog-stat-note + + 2 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``earthquake`` + +**Registered class:** ``EarthquakeBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Earthquake Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`pick-benchmark `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Phase Picking + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``p_pick_mae`` + - ``s_pick_mae`` + - ``precision`` + - ``recall`` + - ``f1`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``eqtransformer_smoke.yaml`` + - ``gpd_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`EQTransformer `, :doc:`GPD `. 
diff --git a/docs/_sources/benchmarks/pycsep.rst.txt b/docs/_sources/benchmarks/pycsep.rst.txt new file mode 100644 index 00000000..5150e19e --- /dev/null +++ b/docs/_sources/benchmarks/pycsep.rst.txt @@ -0,0 +1,106 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +pyCSEP +====== + +Overview +-------- + +The current pyCSEP alignment is implemented as a report export contract inside the shared earthquake benchmark rather than as a standalone benchmark family. + +It documents the forecasting artifact shape used by the WaveCastNet smoke config. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + 1 model + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``earthquake`` + +**Registered class:** ``EarthquakeBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Earthquake Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`pyCSEP `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Wavefield Forecasting + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``mae`` + - ``mse`` + +.. 
dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``wavecastnet_benchmark_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`WaveCastNet `. + +.. dropdown:: Notes + :class-container: catalog-dropdown + + - Current repo support is report-export alignment, not a separate pyCSEP benchmark runner. diff --git a/docs/_sources/benchmarks/seisbench.rst.txt b/docs/_sources/benchmarks/seisbench.rst.txt new file mode 100644 index 00000000..9fc1ba8e --- /dev/null +++ b/docs/_sources/benchmarks/seisbench.rst.txt @@ -0,0 +1,105 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +SeisBench +========= + +Overview +-------- + +The current SeisBench path uses a synthetic waveform adapter that preserves the same picking task shape expected by the shared earthquake benchmark. + +It exists today as a benchmark-compatible smoke path rather than a full external SeisBench ingestion pipeline. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 2 + + .. 
container:: catalog-stat-note + + 2 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``earthquake`` + +**Registered class:** ``EarthquakeBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Earthquake Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`SeisBench - A Toolbox for Machine Learning in Seismology `_ (`repo `__). + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Phase Picking + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``p_pick_mae`` + - ``s_pick_mae`` + - ``precision`` + - ``recall`` + - ``f1`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``phasenet_smoke.yaml`` + - ``eqnet_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`PhaseNet `, :doc:`EQNet `. diff --git a/docs/_sources/benchmarks/tcbench_alpha.rst.txt b/docs/_sources/benchmarks/tcbench_alpha.rst.txt new file mode 100644 index 00000000..4943e4f7 --- /dev/null +++ b/docs/_sources/benchmarks/tcbench_alpha.rst.txt @@ -0,0 +1,103 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +TCBench Alpha +============= + +Overview +-------- + +The current TCBench Alpha alignment uses a metadata-backed storm-history adapter over the shared synthetic tropical-cyclone dataset. + +It currently drives the tropicalcyclone_MLP, SAF-Net, and TCIF-fusion smoke configs. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. 
grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 3 + + .. container:: catalog-stat-note + + 3 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``tc`` + +**Registered class:** ``TropicalCycloneBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Tropical Cyclone Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`TCBench Alpha `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Track + Intensity + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``track_error`` + - ``intensity_mae`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``tropicalcyclone_mlp_smoke.yaml`` + - ``saf_net_smoke.yaml`` + - ``tcif_fusion_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`Tropical Cyclone MLP `, :doc:`SAF-Net `, :doc:`TCIF-fusion `. diff --git a/docs/_sources/benchmarks/tropical_cyclone_benchmark.rst.txt b/docs/_sources/benchmarks/tropical_cyclone_benchmark.rst.txt new file mode 100644 index 00000000..a9aa8309 --- /dev/null +++ b/docs/_sources/benchmarks/tropical_cyclone_benchmark.rst.txt @@ -0,0 +1,108 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +Tropical Cyclone Benchmark +========================== + +Overview +-------- + +The tropical cyclone benchmark family is the single storm evaluator used by the hurricane-specific and all-basin tropical-cyclone smoke configs. + +Current coverage is synthetic-backed, but the same evaluator contract already scores core storm baselines and experimental weather-model adapters. + +At a Glance +----------- + +.. 
grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Family + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 8 + + .. container:: catalog-stat-note + + 8 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``tc`` + +**Registered class:** ``TropicalCycloneBenchmark`` + +Mapped benchmark ecosystems +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`IBTrACS `, :doc:`TCBench Alpha `, :doc:`TropiCycloneNet-Dataset `. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Track + Intensity + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``track_error`` + - ``intensity_mae`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``hurricast_smoke.yaml`` + - ``tropicalcyclone_mlp_smoke.yaml`` + - ``tropicyclonenet_smoke.yaml`` + - ``saf_net_smoke.yaml`` + - ``tcif_fusion_smoke.yaml`` + - ``graphcast_tc_smoke.yaml`` + - ``pangu_tc_smoke.yaml`` + - ``fourcastnet_tc_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`Hurricast `, :doc:`Tropical Cyclone MLP `, :doc:`TropiCycloneNet `, :doc:`SAF-Net `, :doc:`TCIF-fusion `, :doc:`GraphCast TC Adapter `, :doc:`Pangu TC Adapter `, :doc:`FourCastNet TC Adapter `. + +.. 
dropdown:: Notes + :class-container: catalog-dropdown + + - IBTrACS, TCBench Alpha, and TropiCycloneNet-Dataset are surfaced as the public storm benchmark ecosystems. diff --git a/docs/_sources/benchmarks/tropicyclonenet_dataset.rst.txt b/docs/_sources/benchmarks/tropicyclonenet_dataset.rst.txt new file mode 100644 index 00000000..50b9f92b --- /dev/null +++ b/docs/_sources/benchmarks/tropicyclonenet_dataset.rst.txt @@ -0,0 +1,101 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +TropiCycloneNet-Dataset +======================= + +Overview +-------- + +The current TropiCycloneNet-Dataset alignment uses a metadata-backed storm-history adapter over the shared synthetic tropical-cyclone dataset. + +It exists today to support the public TropiCycloneNet smoke benchmark path. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. 
container:: catalog-stat-note + + 1 model + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``tc`` + +**Registered class:** ``TropicalCycloneBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Tropical Cyclone Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`TropiCycloneNet-Dataset `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Track + Intensity + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``track_error`` + - ``intensity_mae`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``tropicyclonenet_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`TropiCycloneNet `. diff --git a/docs/_sources/benchmarks/waterbench.rst.txt b/docs/_sources/benchmarks/waterbench.rst.txt new file mode 100644 index 00000000..73c5ca68 --- /dev/null +++ b/docs/_sources/benchmarks/waterbench.rst.txt @@ -0,0 +1,103 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +WaterBench +========== + +Overview +-------- + +The current WaterBench alignment uses a metadata-only adapter over the shared synthetic streamflow bundle and preserves the streamflow task contract. + +It is currently exercised by the EA-LSTM smoke benchmark path. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. 
container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + 1 model + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``flood`` + +**Registered class:** ``FloodBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Flood Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`WaterBench: A Large-scale Benchmark Dataset for Data-driven Streamflow Forecasting `_ (`repo `__). + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Streamflow + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``mae`` + - ``rmse`` + - ``nse`` + - ``kge`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``neuralhydrology_ealstm_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`EA-LSTM `. diff --git a/docs/_sources/benchmarks/wildfire_benchmark.rst.txt b/docs/_sources/benchmarks/wildfire_benchmark.rst.txt new file mode 100644 index 00000000..24016f64 --- /dev/null +++ b/docs/_sources/benchmarks/wildfire_benchmark.rst.txt @@ -0,0 +1,116 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +Wildfire Benchmark +================== + +Overview +-------- + +The wildfire benchmark family is the single scoring layer for tabular danger tasks, weekly forecasting tasks, and raster spread tasks. + +Current coverage is synthetic-backed, but it already exposes a single hazard-level evaluator contract across wildfire danger and wildfire spread smoke configs. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Family + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. 
grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 8 + + .. container:: catalog-stat-note + + 8 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``wildfire`` + +**Registered class:** ``WildfireBenchmark`` + +Mapped benchmark ecosystems +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`WildfireSpreadTS `. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Danger + - Spread + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``accuracy`` + - ``macro_f1`` + - ``auc`` + - ``pr_auc`` + - ``mae`` + - ``rmse`` + - ``iou`` + - ``f1`` + - ``burned_area_mae`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``wildfire_danger_smoke.yaml`` + - ``wildfire_forecasting_smoke.yaml`` + - ``asufm_smoke.yaml`` + - ``wildfire_spread_smoke.yaml`` + - ``wildfirespreadts_smoke.yaml`` + - ``forefire_smoke.yaml`` + - ``wrf_sfire_smoke.yaml`` + - ``firecastnet_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`DNN-LSTM-AutoEncoder `, :doc:`Wildfire Forecasting `, :doc:`ASUFM `, :doc:`CNN-ASPP `, :doc:`WildfireSpreadTS `, :doc:`ForeFire Adapter `, :doc:`WRF-SFIRE Adapter `, :doc:`FireCastNet `. + +.. dropdown:: Notes + :class-container: catalog-dropdown + + - WildfireSpreadTS is the public Appendix-A benchmark ecosystem surfaced on this page. 
diff --git a/docs/_sources/benchmarks/wildfirespreadts_ecosystem.rst.txt b/docs/_sources/benchmarks/wildfirespreadts_ecosystem.rst.txt new file mode 100644 index 00000000..73f88b7c --- /dev/null +++ b/docs/_sources/benchmarks/wildfirespreadts_ecosystem.rst.txt @@ -0,0 +1,106 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +WildfireSpreadTS +================ + +Overview +-------- + +WildfireSpreadTS is the public wildfire benchmark ecosystem surfaced from Appendix A. + +The current repo uses a synthetic temporal spread dataset to exercise the same spread-task contract for WildfireSpreadTS-style evaluation. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 5 + + .. container:: catalog-stat-note + + 5 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``wildfire`` + +**Registered class:** ``WildfireBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Wildfire Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`WildfireSpreadTS: A Dataset of Multi-Modal Time Series for Wildfire Spread Prediction `_ (`repo `__). + +.. 
dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Spread + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``iou`` + - ``f1`` + - ``burned_area_mae`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``wildfire_spread_smoke.yaml`` + - ``wildfirespreadts_smoke.yaml`` + - ``forefire_smoke.yaml`` + - ``wrf_sfire_smoke.yaml`` + - ``firecastnet_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`CNN-ASPP `, :doc:`WildfireSpreadTS `, :doc:`ForeFire Adapter `, :doc:`WRF-SFIRE Adapter `, :doc:`FireCastNet `. diff --git a/docs/_sources/cite.rst.txt b/docs/_sources/cite.rst.txt new file mode 100644 index 00000000..6e53ea01 --- /dev/null +++ b/docs/_sources/cite.rst.txt @@ -0,0 +1,19 @@ +How to Cite +=========== + +Use the following citation for the PyHazards software package itself. If you are +also relying on specific datasets or model papers, cite those sources from +:doc:`references` as well. + +Library Citation +---------------- + +.. code-block:: bibtex + + @misc{pyhazards2025, + title = {PyHazards: An Open-Source Library for AI-Powered Hazard Prediction}, + author = {Cheng et al.}, + year = {2025}, + howpublished = {\url{https://github.com/LabRAI/PyHazards}}, + note = {GitHub repository} + } diff --git a/docs/_sources/datasets/aefa_forecast.rst.txt b/docs/_sources/datasets/aefa_forecast.rst.txt new file mode 100644 index 00000000..e116a389 --- /dev/null +++ b/docs/_sources/datasets/aefa_forecast.rst.txt @@ -0,0 +1,113 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +AEFA Forecast +============= + +Synthetic-backed dense-grid forecasting adapter aligned to the AEFA earthquake forecasting workflow. + +Overview +-------- + +AEFA Forecast is the public forecasting adapter used by the earthquake benchmark when exercising dense-grid wavefield forecasting models. 
+ +The current implementation is synthetic-backed, but it preserves the task shape, tensor layout, and reporting surface used by the shared earthquake evaluator. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - AEFA forecasting ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Earthquake + * - Source Role + - Forecast Benchmark + * - Coverage + - Benchmark-aligned earthquake forecasting samples + * - Geometry + - Dense-grid wavefield tensors + * - Spatial Resolution + - Benchmark-defined dense sensor grid + * - Temporal Resolution + - Short history and forecast windows + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch tensors via the dataset registry + * - Registry Entry + - ``aefa_forecast`` + +Data Characteristics +-------------------- + +- Multichannel dense-grid history tensors paired with future dense-grid targets. +- Registry-backed benchmark adapter rather than a raw external archive loader. +- Intended for forecasting-path validation and report generation. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Smoke tests for WaveCastNet-style earthquake forecasting. +- Shared forecasting benchmark runs under the earthquake evaluator. +- Validation of report exports aligned to the forecasting path. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `AEFA repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public earthquake forecasting benchmark surface rather than the private synthetic dataset name. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``aefa_forecast`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "aefa_forecast", + micro=True, + temporal_in=5, + temporal_out=4, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +- micro=True keeps the synthetic-backed forecasting path lightweight for validation. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Earthquake Benchmark `, :doc:`AEFA ` + +**Representative Models:** :doc:`WaveCastNet ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a benchmark adapter, not a full external AEFA ingestion pipeline. + +Reference +--------- + +- `AEFA `_. diff --git a/docs/_sources/datasets/caravan_streamflow.rst.txt b/docs/_sources/datasets/caravan_streamflow.rst.txt new file mode 100644 index 00000000..414bef72 --- /dev/null +++ b/docs/_sources/datasets/caravan_streamflow.rst.txt @@ -0,0 +1,112 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +Caravan +======= + +Synthetic-backed streamflow benchmark adapter aligned to the Caravan large-sample hydrology ecosystem. + +Overview +-------- + +Caravan is the public flood streamflow adapter used to align PyHazards with a large-sample hydrology benchmark surface. + +The current implementation is synthetic-backed, but it preserves the streamflow forecasting contract used by the shared flood benchmark. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - Caravan community dataset surfaced through a PyHazards adapter + * - Hazard Family + - Flood + * - Source Role + - Streamflow Benchmark + * - Coverage + - Benchmark-aligned streamflow forecasting samples + * - Geometry + - Graph-temporal basin or node sequences + * - Spatial Resolution + - Basin or gauge nodes represented as graph elements + * - Temporal Resolution + - Rolling history windows for streamflow prediction + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch graph-temporal dataset objects via the dataset registry + * - Registry Entry + - ``caravan_streamflow`` + +Data Characteristics +-------------------- + +- Graph-temporal sequences with node-level targets for next-step streamflow prediction. +- Registry-backed benchmark adapter instead of a raw Caravan ingestion pipeline. +- Supports the public streamflow smoke path for NeuralHydrology LSTM and Google Flood Forecasting. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Streamflow smoke tests for benchmark-linked flood models. +- Shared flood benchmark runs with streamflow metrics such as NSE and KGE. +- Regression checks for graph-temporal basin workflows. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `Caravan paper `_ +- `Caravan repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public Caravan-aligned streamflow surface exposed by the flood benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``caravan_streamflow`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "caravan_streamflow", + micro=True, + history=4, + nodes=6, + ).load() + + train = data.get_split("train") + print(len(train.inputs), train.inputs[0].x.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Flood Benchmark `, :doc:`Caravan ` + +**Representative Models:** :doc:`NeuralHydrology LSTM `, :doc:`Google Flood Forecasting ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full Caravan downloader. + +Reference +--------- + +- `Caravan - A global community dataset for large-sample hydrology `_ (`repo `__). diff --git a/docs/_sources/datasets/era5.rst.txt b/docs/_sources/datasets/era5.rst.txt new file mode 100644 index 00000000..e816179a --- /dev/null +++ b/docs/_sources/datasets/era5.rst.txt @@ -0,0 +1,98 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +ERA5 +==== + +ECMWF's global reanalysis used as a high-resolution meteorological baseline for hazard experiments. + +Overview +-------- + +ERA5 is ECMWF's fifth-generation global reanalysis, combining historical observations with a modern data assimilation system to produce temporally consistent atmospheric fields. + +PyHazards uses ERA5 as a shared meteorological baseline for flood, wildfire, and weather-aware graph workflows, including the HydroGraphNet example path. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - ECMWF / Copernicus Climate Change Service (C3S) + * - Hazard Family + - Shared Forcing + * - Source Role + - Reanalysis + * - Coverage + - Global + * - Geometry + - Regular latitude-longitude grid + * - Spatial Resolution + - ~0.25 deg x 0.25 deg + * - Temporal Resolution + - Hourly + * - Update Cadence + - Daily ERA5T updates with about 5-day latency, followed by final validated releases after 2-3 months + * - Period of Record + - 1940-present + * - Formats + - GRIB and NetCDF + * - Inspection CLI + - ``python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10`` + +Data Characteristics +-------------------- + +- Global hourly fields on a regular latitude-longitude grid. +- Single-level products with optional pressure-level and model-level variables. +- Common variables include near-surface meteorology, precipitation, radiation, and atmospheric state variables. +- Recent dates may mix validated ERA5 with preliminary ERA5T data. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Meteorological forcing for flood, wildfire, and extreme-weather prediction models. +- Climate variability analysis and environmental feature engineering. +- Shared reanalysis input for graph and spatiotemporal benchmark pipelines. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `ERA5 single levels `_ +- `Copernicus Climate Data Store `_ + +PyHazards Usage +--------------- + +Use the inspection command for direct file validation, then feed local ERA5 files into HydroGraphNet-style helper loaders when you need graph-temporal training inputs. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. 
+ +Related Coverage +~~~~~~~~~~~~~~~~ + +**Representative Models:** :doc:`HydroGraphNet ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10 + +Notes +----- + +- ERA5 is inspection-first in the public catalog; the downstream HydroGraphNet helper is documented here for convenience but is not a ``load_dataset(...)`` registry entry. + +Reference +--------- + +- `Hersbach et al. (2020). The ERA5 global reanalysis. `_. diff --git a/docs/_sources/datasets/firms.rst.txt b/docs/_sources/datasets/firms.rst.txt new file mode 100644 index 00000000..8199627c --- /dev/null +++ b/docs/_sources/datasets/firms.rst.txt @@ -0,0 +1,95 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +FIRMS +===== + +NASA's near-real-time active fire detections used for operational wildfire monitoring and event labeling. + +Overview +-------- + +FIRMS distributes active fire and thermal anomaly detections derived from MODIS and VIIRS satellite sensors, with each record corresponding to a time-stamped hotspot observation. + +PyHazards uses FIRMS as a wildfire occurrence signal for operational monitoring and label construction when combined with weather and land-surface context. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NASA LANCE / FIRMS + * - Hazard Family + - Wildfire + * - Source Role + - Active Fire Detections + * - Coverage + - Global + * - Geometry + - Event-based point detections + * - Spatial Resolution + - ~375 m for VIIRS, ~1 km for MODIS + * - Temporal Resolution + - Event-based detections with multiple updates per day + * - Update Cadence + - Fire maps refresh about every 5 minutes and downloadable files refresh about hourly + * - Period of Record + - Near-real-time archive with later standard-science replacements + * - Formats + - CSV, Shapefile, GeoJSON, KML + * - Inspection CLI + - ``python -m pyhazards.datasets.firms.inspection --path /path/to/firms_data --max-items 10`` + +Data Characteristics +-------------------- + +- Global event-based point detections rather than gridded tensors. +- Latency is typically under 3 hours globally and faster for some U.S. and Canada products. +- Common attributes include location, detection time, fire radiative power, and confidence indicators. +- Near-real-time detections are later replaced by standard or science-quality products. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Operational wildfire monitoring and early detection. +- Event labeling for wildfire prediction pipelines. +- Spatiotemporal analysis of fire occurrence and activity patterns. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `FIRMS portal `_ +- `NASA Earthdata `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. 
+ +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.firms.inspection --path /path/to/firms_data --max-items 10 + +- Some archive and bulk-download routes require Earthdata login credentials. + +Reference +--------- + +- `Schroeder et al. (2014). The New VIIRS 375 m active fire detection data product. `_. diff --git a/docs/_sources/datasets/floodcastbench_inundation.rst.txt b/docs/_sources/datasets/floodcastbench_inundation.rst.txt new file mode 100644 index 00000000..77a95f66 --- /dev/null +++ b/docs/_sources/datasets/floodcastbench_inundation.rst.txt @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +FloodCastBench +============== + +Synthetic-backed inundation benchmark adapter aligned to the FloodCastBench evaluation ecosystem. + +Overview +-------- + +FloodCastBench is the public inundation adapter used by PyHazards for raster flood prediction benchmarks. + +The current implementation is synthetic-backed, but it preserves the raster task and metric surface used by the shared flood evaluator. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - FloodCastBench ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Flood + * - Source Role + - Inundation Benchmark + * - Coverage + - Benchmark-aligned flood inundation samples + * - Geometry + - Raster inundation sequences + * - Spatial Resolution + - Benchmark-defined raster tiles + * - Temporal Resolution + - Short history windows with next-horizon inundation targets + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch tensors via the dataset registry + * - Registry Entry + - ``floodcastbench_inundation`` + +Data Characteristics +-------------------- + +- Multi-step raster inputs paired with next-horizon inundation targets. +- Registry-backed benchmark adapter rather than a raw external dataset ingestion path. +- Intended for pixel-level evaluation such as IoU and pixel MAE. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Smoke tests for FloodCast and UrbanFloodCast. +- Shared flood benchmark runs on inundation tasks. +- Regression checks for raster flood prediction outputs. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `FloodCastBench repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public FloodCastBench-aligned inundation surface exposed by the flood benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``floodcastbench_inundation`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "floodcastbench_inundation", + micro=True, + history=4, + channels=3, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Flood Benchmark `, :doc:`FloodCastBench ` + +**Representative Models:** :doc:`FloodCast `, :doc:`UrbanFloodCast ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full FloodCastBench ingestion pipeline. + +Reference +--------- + +- `FloodCastBench `_. diff --git a/docs/_sources/datasets/fpa_fod_tabular.rst.txt b/docs/_sources/datasets/fpa_fod_tabular.rst.txt new file mode 100644 index 00000000..3f0a549d --- /dev/null +++ b/docs/_sources/datasets/fpa_fod_tabular.rst.txt @@ -0,0 +1,115 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +FPA-FOD Tabular +=============== + +Incident-level FPA-FOD features packaged for wildfire cause and size classification. + +Overview +-------- + +FPA-FOD Tabular converts one wildfire incident record into one feature vector for classification tasks such as incident cause prediction and grouped size prediction. + +PyHazards exposes it as a loadable dataset with a deterministic micro mode so the full source database is not required for smoke tests or quick experimentation. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - Fire Program Analysis Fire-Occurrence Database (FPA-FOD) adaptation in PyHazards + * - Hazard Family + - Wildfire + * - Source Role + - Incident Tabular + * - Coverage + - User-provided FPA-FOD coverage + * - Geometry + - Tabular feature vectors + * - Spatial Resolution + - Incident-level records + * - Temporal Resolution + - Event-based + * - Update Cadence + - User-managed local inputs or deterministic micro mode + * - Period of Record + - Depends on the supplied FPA-FOD source files + * - Formats + - SQLite, DB, CSV, and Parquet inputs + * - Inspection CLI + - ``python -m pyhazards.datasets.fpa_fod_tabular.inspection --task cause --micro`` + * - Registry Entry + - ``fpa_fod_tabular`` + +Data Characteristics +-------------------- + +- Supports task='cause' and task='size' classification targets. +- Accepts SQLite, DB, CSV, and Parquet sources. +- Micro mode keeps the path deterministic and lightweight for validation. +- Returned splits follow the standard DataBundle contract with tabular inputs and integer targets. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Wildfire cause classification experiments. +- Grouped fire size classification from incident records. +- Lightweight smoke and regression tests for the wildfire tabular path. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `PyHazards public dataset catalog `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``fpa_fod_tabular`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "fpa_fod_tabular", + task="cause", + micro=True, + normalize=True, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +- region='US' uses all available states, while region='CA' restricts to California incidents. +- cause_mode='paper5' preserves the five consolidated cause groups used by the public wildfire tabular path. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +**Representative Models:** :doc:`DNN-LSTM-AutoEncoder ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.fpa_fod_tabular.inspection --task cause --micro + +Reference +--------- + +- `PyHazards FPA-FOD tabular adaptation for the wildfire incident classification path. `_. diff --git a/docs/_sources/datasets/fpa_fod_weekly.rst.txt b/docs/_sources/datasets/fpa_fod_weekly.rst.txt new file mode 100644 index 00000000..6dfa3f7f --- /dev/null +++ b/docs/_sources/datasets/fpa_fod_weekly.rst.txt @@ -0,0 +1,115 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +FPA-FOD Weekly +============== + +Weekly FPA-FOD aggregates packaged for next-week wildfire count forecasting by size group. + +Overview +-------- + +FPA-FOD Weekly builds rolling lookback windows from weekly wildfire incident counts and predicts next-week counts for grouped size classes. + +PyHazards exposes it as a loadable forecasting dataset with a micro mode so sequence models can be validated without the full source archive. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - Fire Program Analysis Fire-Occurrence Database (FPA-FOD) adaptation in PyHazards + * - Hazard Family + - Wildfire + * - Source Role + - Weekly Forecasting + * - Coverage + - User-provided FPA-FOD coverage + * - Geometry + - Temporal tabular sequences + * - Spatial Resolution + - Weekly aggregate windows + * - Temporal Resolution + - Weekly + * - Update Cadence + - User-managed local inputs or deterministic micro mode + * - Period of Record + - Depends on the supplied FPA-FOD source files + * - Formats + - SQLite, DB, CSV, and Parquet inputs + * - Inspection CLI + - ``python -m pyhazards.datasets.fpa_fod_weekly.inspection --micro --lookback-weeks 12`` + * - Registry Entry + - ``fpa_fod_weekly`` + +Data Characteristics +-------------------- + +- Predicts next-week counts for grouped size classes A/B/C/D/EFG. +- Supports feature modes with counts only or counts plus seasonal time features. +- Uses chronological splits to preserve the forecasting setting. +- Returned splits follow the DataBundle contract with sequence inputs and floating-point targets. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Weekly wildfire forecasting experiments. +- Sequence-model smoke tests for wildfire activity prediction. +- Lightweight benchmarking of tabular temporal wildfire baselines. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `PyHazards public dataset catalog `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``fpa_fod_weekly`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "fpa_fod_weekly", + micro=True, + features="counts+time", + lookback_weeks=12, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +- features='counts' uses only the five weekly count channels. +- features='counts+time' adds sinusoidal week-of-year features for seasonality. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +**Representative Models:** :doc:`DNN-LSTM-AutoEncoder `, :doc:`Wildfire Forecasting `, :doc:`ASUFM ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.fpa_fod_weekly.inspection --micro --lookback-weeks 12 + +Reference +--------- + +- `PyHazards FPA-FOD weekly adaptation for the wildfire forecasting path. `_. diff --git a/docs/_sources/datasets/goesr.rst.txt b/docs/_sources/datasets/goesr.rst.txt new file mode 100644 index 00000000..a496cacb --- /dev/null +++ b/docs/_sources/datasets/goesr.rst.txt @@ -0,0 +1,88 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +GOES-R +====== + +Rapid-refresh GOES-R satellite imagery used for smoke, fire, and weather monitoring workflows. + +Overview +-------- + +GOES-R provides high-frequency geostationary observations from the Advanced Baseline Imager, enabling continuous monitoring of atmospheric and surface processes across the Americas. + +PyHazards uses it as rapid-refresh imagery for smoke, fire evolution, ignition monitoring, and operational situational awareness workflows. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NOAA GOES-R Program + * - Hazard Family + - Shared Forcing + * - Source Role + - Geostationary Imagery + * - Coverage + - Western Hemisphere / Americas geostationary view + * - Geometry + - Raster imagery time series on the ABI fixed grid + * - Spatial Resolution + - ~0.5-2 km depending on spectral band + * - Temporal Resolution + - 1-10 minute refresh depending on sector and mode + * - Update Cadence + - Continuous ingest as new files become available + * - Period of Record + - Ongoing operational satellite archive + * - Formats + - NetCDF + * - Inspection CLI + - ``python -m pyhazards.datasets.goesr.inspection --path /path/to/goesr_data --max-items 10`` + +Data Characteristics +-------------------- + +- Raster time series rather than event records. +- Typical Mode 6 scan cadence is 10 minutes for Full Disk, 5 minutes for CONUS, and 1 minute for mesoscale sectors. +- Common products include visible and infrared imagery, brightness temperature, and fire-related thermal context. +- Distribution latency depends on the access route even when observations are near real time. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Early detection and monitoring of wildfire ignition and growth. +- Smoke and fire evolution analysis at high temporal resolution. +- Real-time situational awareness workflows. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `GOES-R Program `_ +- `NOAA Open Data Dissemination `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. 
code-block:: bash + + python -m pyhazards.datasets.goesr.inspection --path /path/to/goesr_data --max-items 10 + +Reference +--------- + +- `Schmit et al. (2017). A closer look at the ABI on the GOES-R series. `_. diff --git a/docs/_sources/datasets/hydrobench_streamflow.rst.txt b/docs/_sources/datasets/hydrobench_streamflow.rst.txt new file mode 100644 index 00000000..7bcc3e1b --- /dev/null +++ b/docs/_sources/datasets/hydrobench_streamflow.rst.txt @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +HydroBench +========== + +Synthetic-backed streamflow diagnostics adapter aligned to the HydroBench ecosystem. + +Overview +-------- + +HydroBench is the public flood adapter used for streamflow diagnostics and HydroGraphNet-aligned benchmark runs. + +The current implementation is synthetic-backed, but it preserves the streamflow task and metric contract exposed by the shared flood benchmark. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - HydroBench ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Flood + * - Source Role + - Streamflow Benchmark + * - Coverage + - Benchmark-aligned streamflow forecasting samples + * - Geometry + - Graph-temporal basin or node sequences + * - Spatial Resolution + - Basin or gauge nodes represented as graph elements + * - Temporal Resolution + - Rolling history windows for streamflow prediction + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch graph-temporal dataset objects via the dataset registry + * - Registry Entry + - ``hydrobench_streamflow`` + +Data Characteristics +-------------------- + +- Graph-temporal sequences with node-level targets for next-step streamflow prediction. +- Registry-backed benchmark adapter rather than a raw HydroBench dataset ingestion path. 
+- Intended for HydroGraphNet smoke runs and flood benchmark diagnostics. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- HydroGraphNet smoke tests. +- Shared flood benchmark runs with HydroBench-aligned metrics. +- Diagnostics for graph-based flood forecasting experiments. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `HydroBench repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public HydroBench-aligned streamflow surface exposed by the flood benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``hydrobench_streamflow`` + +.. code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "hydrobench_streamflow", + micro=True, + history=4, + nodes=6, + ).load() + + train = data.get_split("train") + print(len(train.inputs), train.inputs[0].x.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Flood Benchmark `, :doc:`HydroBench ` + +**Representative Models:** :doc:`HydroGraphNet ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full HydroBench downloader. + +Reference +--------- + +- `HydroBench `_. diff --git a/docs/_sources/datasets/ibtracs_tracks.rst.txt b/docs/_sources/datasets/ibtracs_tracks.rst.txt new file mode 100644 index 00000000..a3dbf0e3 --- /dev/null +++ b/docs/_sources/datasets/ibtracs_tracks.rst.txt @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +IBTrACS +======= + +Synthetic-backed storm-track adapter aligned to the IBTrACS tropical cyclone archive. + +Overview +-------- + +IBTrACS is the public storm-track adapter used by PyHazards for shared tropical cyclone benchmark runs. 
+ +The current implementation is synthetic-backed, but it preserves the track-intensity forecasting surface used by the shared tropical cyclone evaluator. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NOAA NCEI International Best Track Archive for Climate Stewardship surfaced through a PyHazards adapter + * - Hazard Family + - Tropical Cyclone + * - Source Role + - Track Archive + * - Coverage + - Benchmark-aligned tropical cyclone track and intensity samples + * - Geometry + - Storm-track history sequences + * - Spatial Resolution + - Storm-centered best-track sequences + * - Temporal Resolution + - Historical track windows with forecast horizons + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch tensors via the dataset registry + * - Registry Entry + - ``ibtracs_tracks`` + +Data Characteristics +-------------------- + +- Storm-history sequences with future latitude, longitude, and intensity targets. +- Registry-backed benchmark adapter rather than a raw IBTrACS archive loader. +- Supports both basin-specific hurricane models and broader tropical cyclone adapters. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Hurricast smoke tests. +- Shared tropical cyclone benchmark runs for track and intensity prediction. +- Benchmark-aligned validation for weather-model storm adapters. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `IBTrACS product page `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public IBTrACS-aligned storm-track surface exposed by the tropical cyclone benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``ibtracs_tracks`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "ibtracs_tracks", + micro=True, + history=6, + horizon=5, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Tropical Cyclone Benchmark `, :doc:`IBTrACS ` + +**Representative Models:** :doc:`Hurricast `, :doc:`GraphCast TC Adapter `, :doc:`Pangu TC Adapter `, :doc:`FourCastNet TC Adapter ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full IBTrACS ingestion pipeline. + +Reference +--------- + +- `IBTrACS `_. diff --git a/docs/_sources/datasets/landfire.rst.txt b/docs/_sources/datasets/landfire.rst.txt new file mode 100644 index 00000000..42569a64 --- /dev/null +++ b/docs/_sources/datasets/landfire.rst.txt @@ -0,0 +1,93 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +LANDFIRE +======== + +Nationwide fuels, vegetation, and canopy layers used as static wildfire covariates. + +Overview +-------- + +LANDFIRE provides nationwide maps of vegetation, fuels, canopy structure, and fire regime information derived from remote sensing, field observations, and ecological modeling. + +PyHazards uses it as static landscape context for wildfire spread, behavior, and risk-oriented workflows. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - U.S. 
Forest Service LANDFIRE Program + * - Hazard Family + - Wildfire + * - Source Role + - Fuels and Vegetation + * - Coverage + - United States + * - Geometry + - Gridded raster layers + * - Spatial Resolution + - ~30 m + * - Temporal Resolution + - Static or slowly varying versioned releases + * - Update Cadence + - Annual versioned update suites + * - Period of Record + - Versioned annual releases + * - Formats + - GeoTIFF and related GIS packages + * - Inspection CLI + - ``python -m pyhazards.datasets.landfire.inspection --path /path/to/landfire_data --max-items 10`` + +Data Characteristics +-------------------- + +- Raster covariates rather than event records. +- Versioned annual releases intended to stay current to the previous year. +- Common layers include fuel models, vegetation type, canopy metrics, and fire regime products. +- Distributed in projected coordinate systems with product-specific metadata. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Fuel characterization for wildfire behavior and spread modeling. +- Landscape-scale wildfire risk assessment. +- Static feature layers for machine-learning wildfire models. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `LANDFIRE data access `_ +- `LANDFIRE program overview `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.landfire.inspection --path /path/to/landfire_data --max-items 10 + +Reference +--------- + +- `Rollins (2009). 
LANDFIRE: A nationally consistent vegetation, wildland fire, and fuel assessment. `_. diff --git a/docs/_sources/datasets/merra2.rst.txt b/docs/_sources/datasets/merra2.rst.txt new file mode 100644 index 00000000..a141c69d --- /dev/null +++ b/docs/_sources/datasets/merra2.rst.txt @@ -0,0 +1,90 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +MERRA-2 +======= + +Global atmospheric reanalysis from NASA GMAO used as a shared meteorological backbone for hazard modeling. + +Overview +-------- + +MERRA-2 is a global atmospheric reanalysis that assimilates satellite and conventional observations into a numerical weather prediction system to produce gridded, time-continuous estimates of the atmospheric state. + +In PyHazards it serves as a shared forcing and covariate source for weather-aware hazard workflows, especially when a project needs a stable long historical archive. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NASA Global Modeling and Assimilation Office (GMAO) + * - Hazard Family + - Shared Forcing + * - Source Role + - Reanalysis + * - Coverage + - Global + * - Geometry + - Regular latitude-longitude grid + * - Spatial Resolution + - ~0.5 deg x 0.625 deg + * - Temporal Resolution + - Hourly + * - Update Cadence + - Published monthly with typical 2-3 week latency after month end + * - Period of Record + - 1980-present + * - Formats + - NetCDF4 + * - Inspection CLI + - ``python -m pyhazards.datasets.merra2.inspection 20260101`` + +Data Characteristics +-------------------- + +- Global coverage on a regular latitude-longitude grid. +- Hourly meteorology with derived 3-hourly, daily, and monthly products. +- Surface fields plus multi-level atmospheric profiles. +- Common variables include near-surface temperature, humidity, wind, precipitation, and surface fluxes. 
+ +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Meteorological forcing for wildfire and multi-hazard prediction models. +- Climate diagnostics and long-horizon environmental covariates. +- Shared weather backbone for weather-climate benchmark pipelines. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `MERRA-2 overview `_ +- `NASA Earthdata `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.merra2.inspection 20260101 + +- Earthdata credentials are required when raw files are not already available locally. + +Reference +--------- + +- `Gelaro et al. (2017). The Modern-Era Retrospective Analysis for Research and Applications, Version 2 (MERRA-2). `_. diff --git a/docs/_sources/datasets/mtbs.rst.txt b/docs/_sources/datasets/mtbs.rst.txt new file mode 100644 index 00000000..e1daddf0 --- /dev/null +++ b/docs/_sources/datasets/mtbs.rst.txt @@ -0,0 +1,93 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +MTBS +==== + +U.S. burn severity and fire perimeter products used for post-fire analysis and wildfire evaluation. + +Overview +-------- + +MTBS maps wildfire perimeters and burn severity across the United States using Landsat imagery and standardized spectral change products such as dNBR and RdNBR. + +In PyHazards it acts as a post-fire assessment source for burn extent, severity, and long-term wildfire regime studies. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - U.S. 
Geological Survey and USDA Forest Service MTBS program + * - Hazard Family + - Wildfire + * - Source Role + - Burn Severity + * - Coverage + - United States + * - Geometry + - Per-fire rasters with associated vector perimeters + * - Spatial Resolution + - 30 m + * - Temporal Resolution + - Fire-event and fire-year products + * - Update Cadence + - Continuous mapping with quarterly releases + * - Period of Record + - 1984-near present + * - Formats + - GeoTIFF, Shapefile, File Geodatabase + * - Inspection CLI + - ``python -m pyhazards.datasets.mtbs.inspection --path /path/to/mtbs_data --max-items 10`` + +Data Characteristics +-------------------- + +- Event-based raster layers with vector perimeters for individual fires. +- Historical archive from 1984 onward, expanded through quarterly releases. +- Includes burn severity classes and supporting spectral severity products. +- Product availability depends on Landsat imagery timing and production workflow rather than near-real-time ingest. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Post-fire burn severity and impact assessment. +- Long-term wildfire regime and trend analysis. +- Model evaluation for fire extent and severity prediction. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `MTBS data portal `_ +- `USGS MTBS overview `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. 
code-block:: bash + + python -m pyhazards.datasets.mtbs.inspection --path /path/to/mtbs_data --max-items 10 + +Reference +--------- + +- `Eidenshink et al. (2007). A project for monitoring trends in burn severity. `_. diff --git a/docs/_sources/datasets/noaa_flood.rst.txt b/docs/_sources/datasets/noaa_flood.rst.txt new file mode 100644 index 00000000..a9c79ccc --- /dev/null +++ b/docs/_sources/datasets/noaa_flood.rst.txt @@ -0,0 +1,94 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +NOAA Flood Events +================= + +Historical NOAA storm-event flood records used as event labels and impact targets for flood studies. + +Overview +-------- + +NOAA Flood Events are derived from the NOAA Storm Events Database and document the timing, location, and impacts of severe flood-related events across the United States. + +In PyHazards they function as event-level labels or targets for flood occurrence and impact analysis, especially when paired with meteorological drivers. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NOAA National Centers for Environmental Information (NCEI) + * - Hazard Family + - Flood + * - Source Role + - Event Records + * - Coverage + - United States + * - Geometry + - Tabular event records with administrative regions and optional point coordinates + * - Spatial Resolution + - County or zone level reporting, with points when available + * - Temporal Resolution + - Event-based + * - Update Cadence + - Updated monthly, typically 75-90 days after the end of a data month + * - Period of Record + - 1950-present + * - Formats + - Web query, bulk CSV, and database extracts + * - Inspection CLI + - ``python -m pyhazards.datasets.noaa_flood.inspection --path /path/to/noaa_flood_data --max-items 10`` + +Data Characteristics +-------------------- + +- Event-based tabular records rather than gridded tensors. 
+- Historical archive appended as new months are processed and validated. +- Typical attributes include event timing, location, narratives, and reported damages. +- Very recent months may be unavailable because of reporting and validation lag. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Flood occurrence and frequency analysis. +- Impact and damage assessment studies. +- Supervised learning with event records as flood targets. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `Storm Events Database `_ +- `Storm Events bulk download `_ +- `NOAA NCEI `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Flood Benchmark ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.noaa_flood.inspection --path /path/to/noaa_flood_data --max-items 10 + +Reference +--------- + +- `NOAA National Centers for Environmental Information. Storm Events Database Documentation. `_. diff --git a/docs/_sources/datasets/pick_benchmark_waveforms.rst.txt b/docs/_sources/datasets/pick_benchmark_waveforms.rst.txt new file mode 100644 index 00000000..008adadf --- /dev/null +++ b/docs/_sources/datasets/pick_benchmark_waveforms.rst.txt @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +pick-benchmark +============== + +Synthetic-backed waveform picking adapter aligned to the pick-benchmark evaluation ecosystem. + +Overview +-------- + +pick-benchmark is the public waveform adapter used by the earthquake benchmark for transformer and CNN picking baselines. 
+ +The current implementation is synthetic-backed, but it preserves the phase-picking task shape, labels, and metrics expected by the shared earthquake evaluator. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - pick-benchmark ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Earthquake + * - Source Role + - Waveform Benchmark + * - Coverage + - Benchmark-aligned earthquake phase-picking samples + * - Geometry + - Multichannel waveform windows + * - Spatial Resolution + - Benchmark-defined waveform channels and sample windows + * - Temporal Resolution + - Short waveform windows with phase-pick targets + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch tensors via the dataset registry + * - Registry Entry + - ``pick_benchmark_waveforms`` + +Data Characteristics +-------------------- + +- Multichannel waveform windows paired with P- and S-arrival sample targets. +- Registry-backed benchmark adapter rather than a raw external waveform ingestion path. +- Intended for phase-picking validation and smoke tests. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- EQTransformer and GPD smoke tests. +- Shared earthquake picking benchmark runs. +- Regression checks for waveform-based picking models. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `pick-benchmark repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public pick-benchmark-aligned waveform surface exposed by the earthquake benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``pick_benchmark_waveforms`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "pick_benchmark_waveforms", + micro=True, + channels=3, + length=256, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Earthquake Benchmark `, :doc:`pick-benchmark ` + +**Representative Models:** :doc:`EQTransformer `, :doc:`GPD ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full pick-benchmark downloader. + +Reference +--------- + +- `pick-benchmark `_. diff --git a/docs/_sources/datasets/seisbench_waveforms.rst.txt b/docs/_sources/datasets/seisbench_waveforms.rst.txt new file mode 100644 index 00000000..5fb54c8f --- /dev/null +++ b/docs/_sources/datasets/seisbench_waveforms.rst.txt @@ -0,0 +1,112 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +SeisBench +========= + +Synthetic-backed waveform picking adapter aligned to the SeisBench ecosystem. + +Overview +-------- + +SeisBench is the public waveform adapter used by PyHazards for the earthquake picking path. + +The current implementation is synthetic-backed, but it preserves the picking task shape, labels, and metrics expected by the shared earthquake evaluator. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - SeisBench ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Earthquake + * - Source Role + - Waveform Benchmark + * - Coverage + - Benchmark-aligned earthquake phase-picking samples + * - Geometry + - Multichannel waveform windows + * - Spatial Resolution + - Benchmark-defined waveform channels and sample windows + * - Temporal Resolution + - Short waveform windows with phase-pick targets + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch tensors via the dataset registry + * - Registry Entry + - ``seisbench_waveforms`` + +Data Characteristics +-------------------- + +- Multichannel waveform windows paired with P- and S-arrival sample targets. +- Registry-backed benchmark adapter rather than a raw external waveform ingestion path. +- Intended for phase-picking validation and smoke tests. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- PhaseNet and EQNet smoke tests. +- Shared earthquake benchmark runs on picking tasks. +- Regression checks for waveform-based seismic models. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `SeisBench paper `_ +- `SeisBench repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public SeisBench-aligned waveform surface exposed by the earthquake benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``seisbench_waveforms`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "seisbench_waveforms", + micro=True, + channels=3, + length=256, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Earthquake Benchmark `, :doc:`SeisBench ` + +**Representative Models:** :doc:`PhaseNet `, :doc:`EQNet ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full SeisBench ingestion pipeline. + +Reference +--------- + +- `SeisBench - A Toolbox for Machine Learning in Seismology `_ (`repo `__). diff --git a/docs/_sources/datasets/tcbench_alpha.rst.txt b/docs/_sources/datasets/tcbench_alpha.rst.txt new file mode 100644 index 00000000..a0989b29 --- /dev/null +++ b/docs/_sources/datasets/tcbench_alpha.rst.txt @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +TCBench Alpha +============= + +Synthetic-backed storm-track benchmark adapter aligned to the TCBench Alpha ecosystem. + +Overview +-------- + +TCBench Alpha is the public storm adapter used by several tropical cyclone baselines on the shared track-intensity evaluator. + +The current implementation is synthetic-backed, but it preserves the task, metric, and reporting surface used by the shared tropical cyclone benchmark. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - TCBench Alpha ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Tropical Cyclone + * - Source Role + - Track Benchmark + * - Coverage + - Benchmark-aligned tropical cyclone track and intensity samples + * - Geometry + - Storm-track history sequences + * - Spatial Resolution + - Storm-centered best-track sequences + * - Temporal Resolution + - Historical track windows with forecast horizons + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch tensors via the dataset registry + * - Registry Entry + - ``tcbench_alpha`` + +Data Characteristics +-------------------- + +- Storm-history sequences with future latitude, longitude, and intensity targets. +- Registry-backed benchmark adapter rather than a raw external benchmark ingestion path. +- Intended for benchmark-linked track-intensity forecasting runs. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Tropical Cyclone MLP, SAF-Net, and TCIF-fusion smoke tests. +- Shared tropical cyclone benchmark runs. +- Regression checks for storm-track baselines. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `TCBench Alpha repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public TCBench Alpha-aligned storm surface exposed by the tropical cyclone benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``tcbench_alpha`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "tcbench_alpha", + micro=True, + history=6, + horizon=5, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Tropical Cyclone Benchmark `, :doc:`TCBench Alpha ` + +**Representative Models:** :doc:`Tropical Cyclone MLP `, :doc:`SAF-Net `, :doc:`TCIF-fusion ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full TCBench Alpha ingestion pipeline. + +Reference +--------- + +- `TCBench Alpha `_. diff --git a/docs/_sources/datasets/tropicyclonenet_dataset.rst.txt b/docs/_sources/datasets/tropicyclonenet_dataset.rst.txt new file mode 100644 index 00000000..41508b57 --- /dev/null +++ b/docs/_sources/datasets/tropicyclonenet_dataset.rst.txt @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +TropiCycloneNet-Dataset +======================= + +Synthetic-backed storm-track benchmark adapter aligned to the TropiCycloneNet-Dataset ecosystem. + +Overview +-------- + +TropiCycloneNet-Dataset is the public storm adapter used by the TropiCycloneNet model path on the shared track-intensity evaluator. + +The current implementation is synthetic-backed, but it preserves the task, metric, and reporting surface used by the shared tropical cyclone benchmark. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - TropiCycloneNet-Dataset ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Tropical Cyclone + * - Source Role + - Track Benchmark + * - Coverage + - Benchmark-aligned tropical cyclone track and intensity samples + * - Geometry + - Storm-track history sequences + * - Spatial Resolution + - Storm-centered best-track sequences + * - Temporal Resolution + - Historical track windows with forecast horizons + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch tensors via the dataset registry + * - Registry Entry + - ``tropicyclonenet_dataset`` + +Data Characteristics +-------------------- + +- Storm-history sequences with future latitude, longitude, and intensity targets. +- Registry-backed benchmark adapter rather than a raw external dataset ingestion path. +- Intended for benchmark-linked storm forecasting smoke runs. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- TropiCycloneNet smoke tests. +- Shared tropical cyclone benchmark runs. +- Regression checks for track-intensity prediction models. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `TropiCycloneNet-Dataset repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public TropiCycloneNet-Dataset-aligned storm surface exposed by the tropical cyclone benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``tropicyclonenet_dataset`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "tropicyclonenet_dataset", + micro=True, + history=6, + horizon=5, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Tropical Cyclone Benchmark `, :doc:`TropiCycloneNet-Dataset ` + +**Representative Models:** :doc:`TropiCycloneNet ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full TropiCycloneNet-Dataset downloader. + +Reference +--------- + +- `TropiCycloneNet-Dataset `_. diff --git a/docs/_sources/datasets/waterbench_streamflow.rst.txt b/docs/_sources/datasets/waterbench_streamflow.rst.txt new file mode 100644 index 00000000..e9530966 --- /dev/null +++ b/docs/_sources/datasets/waterbench_streamflow.rst.txt @@ -0,0 +1,112 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +WaterBench +========== + +Synthetic-backed streamflow benchmark adapter aligned to the WaterBench ecosystem. + +Overview +-------- + +WaterBench is the public flood streamflow adapter used by the EA-LSTM path on the shared flood benchmark. + +The current implementation is synthetic-backed, but it preserves the streamflow forecasting contract expected by the shared evaluator. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - WaterBench ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Flood + * - Source Role + - Streamflow Benchmark + * - Coverage + - Benchmark-aligned streamflow forecasting samples + * - Geometry + - Graph-temporal basin or node sequences + * - Spatial Resolution + - Basin or gauge nodes represented as graph elements + * - Temporal Resolution + - Rolling history windows for streamflow prediction + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch graph-temporal dataset objects via the dataset registry + * - Registry Entry + - ``waterbench_streamflow`` + +Data Characteristics +-------------------- + +- Graph-temporal sequences with node-level targets for next-step streamflow prediction. +- Registry-backed benchmark adapter rather than a raw WaterBench ingestion pipeline. +- Intended for benchmark-linked streamflow smoke runs. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- EA-LSTM smoke tests. +- Shared flood benchmark runs with streamflow metrics. +- Regression checks for basin-scale forecasting experiments. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `WaterBench abstract `_ +- `WaterBench repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public WaterBench-aligned streamflow surface exposed by the flood benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``waterbench_streamflow`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "waterbench_streamflow", + micro=True, + history=4, + nodes=6, + ).load() + + train = data.get_split("train") + print(len(train.inputs), train.inputs[0].x.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Flood Benchmark `, :doc:`WaterBench ` + +**Representative Models:** :doc:`EA-LSTM ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full WaterBench downloader. + +Reference +--------- + +- `WaterBench: A Large-scale Benchmark Dataset for Data-driven Streamflow Forecasting `_ (`repo `__). diff --git a/docs/_sources/datasets/wfigs.rst.txt b/docs/_sources/datasets/wfigs.rst.txt new file mode 100644 index 00000000..1193d849 --- /dev/null +++ b/docs/_sources/datasets/wfigs.rst.txt @@ -0,0 +1,93 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +WFIGS +===== + +Interagency wildfire incident records used as authoritative wildfire ground truth across the United States. + +Overview +-------- + +WFIGS aggregates geospatial information on active and historical wildland fire incidents, representing officially reported incidents rather than satellite-detected hotspots. + +In PyHazards it acts as an authoritative wildfire ground-truth source for validation, labeling, and comparison against remote-sensing detections. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - National Interagency Fire Center (NIFC) / interagency WFIGS + * - Hazard Family + - Wildfire + * - Source Role + - Incident Records + * - Coverage + - United States + * - Geometry + - Incident points and perimeters + * - Spatial Resolution + - Event-level vector geometries + * - Temporal Resolution + - Event-based with live operational updates + * - Update Cadence + - Refreshed from IRWIN roughly every 5 minutes, with perimeter changes often appearing within 15 minutes + * - Period of Record + - Historical archive plus ongoing incidents + * - Formats + - ArcGIS REST services, GeoJSON, and Shapefile downloads + * - Inspection CLI + - ``python -m pyhazards.datasets.wfigs.inspection --path /path/to/wfigs_data --max-items 10`` + +Data Characteristics +-------------------- + +- Event-based incident records with point and polygon geometries. +- Operational data that can change as incidents evolve and records are reconciled. +- Common fields include incident identifiers, timing, status, location, and fire size. +- Current and year-to-date layers follow different retention rules. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Ground-truth labeling of wildfire occurrence. +- Validation of satellite-based fire detection products. +- Analysis of ignition timing and incident geography. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `NIFC Open Data WFIGS layers `_ +- `National Interagency Fire Center `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. 
+ +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.wfigs.inspection --path /path/to/wfigs_data --max-items 10 + +Reference +--------- + +- `National Interagency Fire Center. Wildland Fire Incident Geospatial Services (WFIGS). `_. diff --git a/docs/_sources/implementation.rst.txt b/docs/_sources/implementation.rst.txt new file mode 100644 index 00000000..36af24da --- /dev/null +++ b/docs/_sources/implementation.rst.txt @@ -0,0 +1,497 @@ +Implementation Guide +==================== + +Use this guide when you want to extend PyHazards itself. It is written for +contributors who are adding new datasets, new models, smoke tests, catalog +cards, or documentation updates for the public site. + +This page explains the public contributor workflow. For repository operations +and maintainer automation details, also see ``.github/IMPLEMENTATION.md``. + +Who This Guide Is For +--------------------- + +This guide assumes you already know Python and PyTorch, but you have not yet +worked inside the PyHazards codebase. It is most useful when you are doing one +of the following: + +- adding a new dataset loader or dataset inspection entrypoint, +- porting a paper or external implementation into ``pyhazards.models``, +- updating the public dataset or model catalogs and generated documentation, +- preparing a pull request that should be easy to review and merge. + +If you only want to install the library and run a first example, use +:doc:`installation` and :doc:`quick_start` instead. + +Repository Mental Model +----------------------- + +PyHazards is organized around a small set of extension points: + +- ``pyhazards.datasets`` contains dataset abstractions, the dataset registry, + and inspection entrypoints for supported data sources. 
+- ``pyhazards.models`` contains model builders, reusable components, and the + model registry used by ``build_model(...)``. +- ``pyhazards.engine`` contains the shared training and evaluation workflow. +- ``pyhazards/dataset_cards`` contains YAML cards used to generate the public + dataset catalog and per-dataset documentation pages. +- ``pyhazards/model_cards`` contains YAML cards used to generate the public + model tables and per-model documentation pages. +- ``docs/source`` contains handwritten Sphinx pages, while the committed + ``docs/`` directory contains the rendered HTML published on GitHub Pages. + +There are three separate layers to keep in mind: + +1. registry availability: + a dataset or model can be constructed from Python once it is registered; +2. catalog visibility: + a public dataset or model only appears on the website when it also has a + matching catalog card; +3. published website output: + GitHub Pages only changes after the rendered HTML in ``docs/`` is rebuilt. + +Typical Contribution Workflow +----------------------------- + +Most changes should follow the same sequence: + +1. decide whether you are extending a dataset, a model, or both; +2. implement the code in ``pyhazards/datasets`` or ``pyhazards/models``; +3. register the new entrypoint so it is discoverable from the library API; +4. add or update smoke-test coverage for the new behavior; +5. update the relevant docs source and, for public datasets or models, the + matching catalog cards; +6. run the smallest local validation commands that match the change; +7. rebuild the published docs HTML if the website output changed; +8. open a pull request with the required metadata and validation notes. + +Treat code, validation, generated docs, and published docs as one contribution. +A public dataset or model implementation is not complete if users cannot +discover it or if the website catalog still describes the old state of the +library. 
+ +Adding a Dataset +---------------- + +Datasets are built around ``Dataset`` and ``DataBundle``. A dataset subclass +implements ``_load()`` and returns train/validation/test splits plus feature and +label metadata. + +The minimum pattern looks like this: + +.. code-block:: python + + import torch + from pyhazards.datasets import ( + DataBundle, + DataSplit, + Dataset, + FeatureSpec, + LabelSpec, + register_dataset, + ) + + class MyHazardDataset(Dataset): + name = "my_hazard" + + def _load(self) -> DataBundle: + x = torch.randn(1000, 16) + y = torch.randint(0, 2, (1000,)) + + splits = { + "train": DataSplit(x[:800], y[:800]), + "val": DataSplit(x[800:900], y[800:900]), + "test": DataSplit(x[900:], y[900:]), + } + + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + input_dim=16, + description="Example tabular hazard features.", + ), + label_spec=LabelSpec( + num_targets=2, + task_type="classification", + description="Binary hazard label.", + ), + ) + + register_dataset(MyHazardDataset.name, MyHazardDataset) + +Keep the following expectations in mind when you add a dataset: + +- use ``DataBundle`` to make split names, feature dimensions, and target + semantics explicit; +- keep the builder/import path lightweight so the dataset can be imported + without triggering heavy side effects; +- register the dataset with ``register_dataset(...)`` so + ``load_dataset(name=...)`` can construct it; +- if the dataset belongs in the public catalog, add or update a card in + ``pyhazards/dataset_cards`` and regenerate the dataset docs; +- prefer clear metadata over implicit conventions, especially when a model + depends on shapes, channels, graph structure, or task type. + +Dataset Inspection Entry Points +------------------------------- + +PyHazards also includes inspection modules under ``pyhazards.datasets`` for +supported external data sources. 
If you add a new dataset family, keep the
+inspection module consistent with the existing ones:
+
+- it should be importable as ``python -m pyhazards.datasets.<name>.inspection``;
+- ``--help`` should exit cleanly;
+- argument parsing should work without requiring optional plotting or network
+  dependencies at import time;
+- if the dataset belongs in the public dataset table, its inspection workflow
+  should be stable enough for ``scripts/verify_table_entries.py``.
+
+The goal is simple: users should be able to discover the dataset from the docs,
+inspect it from the command line, and load it from Python through the registry.
+
+Dataset Cards and Generated Docs
+--------------------------------
+
+Public datasets are documented through cards in ``pyhazards/dataset_cards``.
+These cards are the source of truth for the public dataset catalog and the
+generated per-dataset detail pages.
+
+A typical dataset card includes:
+
+- the public display name and hazard family,
+- a one-sentence summary and source role,
+- provider, geometry, cadence, and period-of-record metadata,
+- the primary source or product reference,
+- the inspection command when the dataset is inspection-first,
+- the registry name and example when it is public through
+  ``load_dataset(...)``,
+- related model and benchmark links when those cross-links help users navigate
+  the library.
+
+After updating dataset cards, refresh the generated docs:
+
+.. code-block:: bash
+
+   python scripts/render_dataset_docs.py
+
+Use the ``--check`` mode when you want to confirm the generated files are
+already up to date:
+
+.. code-block:: bash
+
+   python scripts/render_dataset_docs.py --check
+
+Adding a Model
+--------------
+
+Models are registered builders that can be constructed through:
+
+.. code-block:: python
+
+   from pyhazards.models import build_model
+
+   model = build_model(name="<model_name>", task="<task>", **kwargs)
+
+When you port a paper or external repository into PyHazards, define the library
+contract first. 
Your builder should: + +- accept ``task: str``, +- accept the shape and hyperparameter arguments needed to construct the model, +- return an ``nn.Module``, +- validate unsupported tasks early with a clear error, +- accept ``**kwargs`` so extra configuration keys do not break the call path. + +The minimum pattern looks like this: + +.. code-block:: python + + from __future__ import annotations + + import torch + import torch.nn as nn + from pyhazards.models import register_model + + + class MyModel(nn.Module): + def __init__(self, in_dim: int, out_dim: int, hidden_dim: int = 128): + super().__init__() + self.net = nn.Sequential( + nn.Linear(in_dim, hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, out_dim), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 2: + raise ValueError(f"Expected input of shape (batch, features), got {tuple(x.shape)}") + return self.net(x) + + + def my_model_builder( + task: str, + in_dim: int, + out_dim: int, + hidden_dim: int = 128, + **kwargs, + ) -> nn.Module: + _ = kwargs + if task.lower() not in {"classification", "regression"}: + raise ValueError(f"MyModel does not support task={task!r}") + return MyModel(in_dim=in_dim, out_dim=out_dim, hidden_dim=hidden_dim) + + + register_model( + "my_model", + my_model_builder, + defaults={"hidden_dim": 128}, + ) + +In practice, good model ports also include: + +- a short paper-to-library mapping from the original repository into the new + PyHazards module and builder kwargs; +- explicit input-shape validation in ``forward()`` so integration failures are + easy to diagnose; +- clear task handling when the same architecture can be used for different + objectives; +- minimal defaults in the registry so ``build_model(...)`` is predictable. + +Match the Forward Signature to the Data Path +-------------------------------------------- + +PyHazards supports more than one input style. 
Some models work with plain tensor +pairs, while others expect mappings, graph batches, or custom dataset objects. +Make that contract explicit. + +As a rule: + +- if your model expects ``Tensor -> Tensor``, keep the shape assumptions simple + and document them in the model card; +- if your model expects graph or structured inputs, prefer dataset and collate + behavior that produces the mapping your ``forward()`` already consumes; +- use ``FeatureSpec``, ``LabelSpec``, and split metadata to record dimensions, + channels, and task semantics instead of burying them in comments. + +Porting Training Logic +---------------------- + +Do not copy an upstream training loop into PyHazards unless the architecture +truly depends on custom runtime behavior. In most cases you should: + +- keep the architecture inside ``nn.Module``, +- keep custom losses or helper blocks close to the model implementation, +- use ``pyhazards.engine.Trainer`` for fit, evaluate, and predict workflows, +- document intentional differences from the paper repository in the pull request. + +If the PyHazards port changes preprocessing, outputs, or optimization behavior, +state that clearly in the PR's parity notes. Review is much faster when the +intended differences are explicit. + +Model Cards and Generated Docs +------------------------------ + +Public models are documented through cards in ``pyhazards/model_cards``. A model +card is not optional when you want a model to appear on the website. + +A typical card includes: + +- the public model name and display name, +- the hazard family used for the model table, +- the source file and builder name, +- a short summary and description, +- the paper citation or technical reference, +- supported tasks, +- one runnable example, +- a synthetic smoke-test specification. + +For example: + +.. 
code-block:: yaml + + model_name: my_model + display_name: My Model + hazard: Flood + source_file: pyhazards/models/my_model.py + builder_name: my_model_builder + summary: > + Short description of the public model entrypoint. + paper: + title: Example paper title + url: https://example.com/paper + tasks: + - regression + smoke_test: + task: regression + build_kwargs: + in_dim: 16 + out_dim: 1 + input: + kind: tensor + shape: [4, 16] + expected_output: + kind: tensor + shape: [4, 1] + +Model cards drive the generated pages in :doc:`pyhazards_models`. They also +control public visibility: + +- if a model is registered but has no card, it can still be used from Python but + it will not appear in the public model tables; +- if a card sets ``include_in_public_catalog: false``, the implementation stays + in the library but is hidden from the public catalog; +- if the hazard name in the card is new, the generated model page creates a new + hazard section automatically. + +After updating a card, refresh the generated docs: + +.. code-block:: bash + + python scripts/render_model_docs.py + +Use the ``--check`` mode when you want to confirm the generated files are +already up to date: + +.. code-block:: bash + + python scripts/render_model_docs.py --check + +Validation Workflow +------------------- + +Run the smallest set of checks that covers your change. The core validation +commands in this repository are: + +.. 
code-block:: bash
+
+   python -c "import pyhazards; print(pyhazards.__version__)"
+   python scripts/render_dataset_docs.py --check
+   python scripts/render_model_docs.py --check
+   python scripts/verify_table_entries.py
+
+Use them for the following purposes:
+
+- ``python -c "import pyhazards; print(pyhazards.__version__)"``
+  verifies that the package still imports cleanly;
+- ``python scripts/render_dataset_docs.py --check``
+  verifies that generated dataset docs and catalog pages are in sync with the
+  current dataset cards;
+- ``python scripts/render_model_docs.py --check``
+  verifies that generated model docs and catalog pages are in sync with the
+  current model cards;
+- ``python scripts/verify_table_entries.py``
+  exercises dataset inspection entrypoints and runs smoke tests for cataloged
+  public models.
+
+When you changed a specific model, also run the model-scoped smoke test:
+
+.. code-block:: bash
+
+   python scripts/smoke_test_models.py --models <model_name>
+
+This uses the model card's smoke-test spec, so it is the fastest way to confirm
+that a new public model can build and run with synthetic inputs.
+
+If your change touched the model catalog or its generation logic, also run:
+
+.. code-block:: bash
+
+   python -m pytest tests/test_model_catalog.py
+
+If you changed runtime behavior in the training path and you have the required
+hardware available, run the broader smoke path described in ``test.py`` as well.
+
+Preparing a Model Pull Request
+------------------------------
+
+Model PRs should make the implementation easy to review against the original
+paper or upstream repository. 
The PR template asks for a few specific fields for +that reason: + +- ``Model Summary`` should describe the architecture and public API you are + adding, not just the file names you changed; +- ``Hazard Scenario`` should name the model table that owns the entry, and it + should explicitly call out when the PR introduces a new hazard family; +- ``Registry Name`` should list the exact ``build_model(name=...)`` entrypoints + added or changed in the PR; +- ``Paper / Source`` should link the scientific paper, source repository, or + technical reference that the implementation follows; +- ``Smoke Test`` should list the commands you ran or point to the card's + smoke-test specification; +- ``Parity Notes`` should explain intentional differences from the upstream + implementation, especially around preprocessing, outputs, or objectives. + +PR automation can only help when this metadata is present and accurate. A +catalog-backed model PR is expected to include the implementation, the registry +wiring, the model card, the smoke-test path, and refreshed generated docs. + +Registration, Catalog, and Published HTML +----------------------------------------- + +It is easy to update one layer of the repo and forget the others. Keep this +distinction in mind: + +- code registration makes a dataset or model usable from Python; +- dataset cards make a public dataset discoverable in the generated docs; +- model cards make a public model discoverable in the generated docs; +- Sphinx source updates change the documentation source tree; +- rebuilding ``docs/`` updates the committed HTML published on GitHub Pages. + +If the website output changed, rebuild the site locally: + +.. code-block:: bash + + cd docs + sphinx-build -b html source build/html + cp -r build/html/* . + +That final copy step matters in this repository because the published website is +served from the committed ``docs/`` directory, not from ``docs/source``. 
+ +Common Mistakes +--------------- + +These are the issues that most often block review: + +- the new dataset or model exists in code but was never registered; +- a public dataset changed, but ``pyhazards/dataset_cards`` or the generated + dataset docs were not updated; +- a public model was implemented without a matching card in + ``pyhazards/model_cards``; +- generated docs were not refreshed after the model card changed; +- ``docs/source`` was updated but the committed ``docs/`` HTML was not rebuilt; +- the builder does not validate unsupported tasks or accepts the wrong shape + arguments for the intended use; +- a hidden or internal model was accidentally left visible in the public + catalog; +- an inspection module imports optional heavy dependencies at module import time, + which breaks ``python -m ... --help`` in clean environments. + +Contributor Checklist +--------------------- + +Before you open a pull request, confirm all of the following: + +- the implementation lives in the correct dataset or model module; +- the new entrypoint is registered and can be constructed from the public API; +- task handling and input-shape validation are clear and actionable; +- public datasets have a complete card when they belong in the public catalog; +- generated dataset docs are refreshed and pass ``render_dataset_docs.py --check``; +- public models have a complete card with a runnable smoke-test spec; +- generated model docs are refreshed and pass ``render_model_docs.py --check``; +- dataset inspection entrypoints and public tables pass + ``scripts/verify_table_entries.py``; +- the published docs HTML in ``docs/`` was rebuilt if the visible website output + changed; +- the pull request explains the source paper, registry name, hazard scenario, + smoke-test commands, and parity notes. 
+ +Next Steps +---------- + +After you finish a contributor-oriented change: + +- browse the public catalogs in :doc:`pyhazards_datasets` and + :doc:`pyhazards_models` to confirm the new entry is discoverable; +- use :doc:`quick_start` to check that the user path still feels coherent; +- keep ``.github/IMPLEMENTATION.md`` and this page aligned when the repository + workflow changes. diff --git a/docs/_sources/index.rst.txt b/docs/_sources/index.rst.txt new file mode 100644 index 00000000..7804de80 --- /dev/null +++ b/docs/_sources/index.rst.txt @@ -0,0 +1,307 @@ +.. title:: PyHazards + +.. image:: _static/logo.png + :alt: PyHazards Icon + :width: 220px + :align: center + :class: landing-hero-logo + +.. raw:: html + + + +Overview +-------- + +PyHazards brings together public dataset catalogs, registry-based models, +benchmark families, experiment configs, and shared training or reporting +workflows across wildfire, earthquake, flood, and tropical cyclone tasks. + +It is designed for researchers and practitioners who need one coherent library +for reproducing baselines, comparing methods, and extending hazard-ML +workflows without rebuilding the software stack for each hazard family. + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid home-kicker-grid home-hero-stats + + .. grid-item-card:: Hazard Families + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 4 + + .. container:: catalog-stat-note + + Wildfire, earthquake, flood, and tropical cyclone workflows under one library. + + .. grid-item-card:: Public Datasets + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 20 + + .. container:: catalog-stat-note + + Curated dataset pages covering forcing sources and hazard-specific benchmark adapters. + + .. grid-item-card:: Implemented Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 24 + + .. 
container:: catalog-stat-note + + Public implemented baselines and variants surfaced through the model catalog. + + .. grid-item-card:: Benchmark Families + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 4 + + .. container:: catalog-stat-note + + Shared evaluator families with linked ecosystems, smoke configs, and reports. + +Start Here +---------- + +.. container:: home-section-note + + Use one of these four paths to move from overview to action quickly. + +.. grid:: 1 1 2 4 + :gutter: 2 + :class-container: catalog-recommend-grid home-link-grid + + .. grid-item-card:: Quick Start + :class-card: catalog-detail-card + + Run the first benchmark-aware workflow and verify the package. + + **Open:** :doc:`Quick Start ` + + .. grid-item-card:: Browse Datasets + :class-card: catalog-detail-card + + Explore forcing sources, benchmark adapters, and inspection entrypoints. + + **Open:** :doc:`Datasets ` + + .. grid-item-card:: Browse Models + :class-card: catalog-detail-card + + Compare implemented baselines, variants, and benchmark-linked model detail pages. + + **Open:** :doc:`Models ` + + .. grid-item-card:: Browse Benchmarks + :class-card: catalog-detail-card + + Compare hazard benchmark families, ecosystem mappings, and smoke coverage. + + **Open:** :doc:`Benchmarks ` + +Why PyHazards +------------- + +.. grid:: 1 1 2 4 + :gutter: 2 + :class-container: catalog-grid home-pillar-grid + + .. grid-item-card:: Unified Datasets + :class-card: catalog-detail-card + + Public datasets, forcing sources, and inspection surfaces are documented through one hazard-first catalog. + + .. grid-item-card:: Benchmark-aligned Evaluation + :class-card: catalog-detail-card + + Shared benchmark families, smoke configs, and report exports make model comparisons more reproducible. + + .. grid-item-card:: Registry-based Models + :class-card: catalog-detail-card + + Baselines and adapters are exposed through a consistent build surface instead of one-off scripts. + + .. 
grid-item-card:: Shared Training and Inference + :class-card: catalog-detail-card + + One engine layer supports training, evaluation, prediction, and benchmark execution across hazard tasks. + +Hazard Coverage +--------------- + +.. container:: home-section-note + + PyHazards spans four hazard families with public datasets, models, and benchmark pages designed to work together. + +.. grid:: 1 1 2 4 + :gutter: 2 + :class-container: catalog-recommend-grid home-hazard-grid + + .. grid-item-card:: Wildfire + :class-card: catalog-detail-card + + Danger forecasting, weekly forecasting, spread baselines, fuels, burn products, and active-fire sources. + + **Explore:** :doc:`Datasets ` | :doc:`Models ` + + .. grid-item-card:: Earthquake + :class-card: catalog-detail-card + + Waveform picking, dense-grid forecasting adapters, and linked benchmark ecosystems for phase-picking workflows. + + **Explore:** :doc:`Models ` | :doc:`Benchmarks ` + + .. grid-item-card:: Flood + :class-card: catalog-detail-card + + Streamflow and inundation baselines with benchmark-backed datasets, configs, and evaluation coverage. + + **Explore:** :doc:`Datasets ` | :doc:`Benchmarks ` + + .. grid-item-card:: Tropical Cyclone + :class-card: catalog-detail-card + + Track-and-intensity forecasting baselines plus shared benchmark ecosystems and experimental weather-model adapters. + + **Explore:** :doc:`Models ` | :doc:`Benchmarks ` + +Featured Example +---------------- + +.. container:: home-section-note + + Run a benchmark-aligned smoke configuration with one command, then move into the full Quick Start for model building and training workflows. + +.. code-block:: bash + + python scripts/run_benchmark.py --config pyhazards/configs/flood/hydrographnet_smoke.yaml + +.. container:: catalog-link-row + + **Next step:** :doc:`Quick Start ` for the first full workflow, or :doc:`Models ` to browse benchmark-linked baselines. + +Explore the Docs +---------------- + +.. 
grid:: 1 1 2 3 + :gutter: 2 + :class-container: catalog-recommend-grid home-link-grid + + .. grid-item-card:: Installation + :class-card: catalog-detail-card + + Set up PyHazards from PyPI or source and verify the environment. + + **Open:** :doc:`installation` + + .. grid-item-card:: Quick Start + :class-card: catalog-detail-card + + Run the shortest end-to-end workflow in the library. + + **Open:** :doc:`quick_start` + + .. grid-item-card:: Datasets + :class-card: catalog-detail-card + + Browse hazard-grouped dataset cards, detail pages, and inspection entrypoints. + + **Open:** :doc:`pyhazards_datasets` + + .. grid-item-card:: Models + :class-card: catalog-detail-card + + Compare implemented models, variants, and benchmark-linked detail pages. + + **Open:** :doc:`pyhazards_models` + + .. grid-item-card:: Benchmarks + :class-card: catalog-detail-card + + Review benchmark families, ecosystem mappings, and smoke-config coverage. + + **Open:** :doc:`pyhazards_benchmarks` + + .. grid-item-card:: Reports and Configs + :class-card: catalog-detail-card + + Load reproducible experiment YAML files and export benchmark summaries. + + **Open:** :doc:`pyhazards_configs` | :doc:`pyhazards_reports` + +For Contributors +---------------- + +PyHazards is registry-driven and uses dataset cards, model cards, and benchmark +cards to generate the public catalogs. If you plan to extend the library, use +:doc:`implementation` for the contributor workflow and :doc:`appendix_a_coverage` +for the audited gap list behind the current roadmap work. + +Citation +-------- + +If you use PyHazards in your research, please cite: + +.. code-block:: bibtex + + @misc{pyhazards2025, + title = {PyHazards: An Open-Source Library for AI-Powered Hazard Prediction}, + author = {Cheng et al.}, + year = {2025}, + howpublished = {\url{https://github.com/LabRAI/PyHazards}}, + note = {GitHub repository} + } + +Community +--------- + +Use the `RAI Lab Slack channel `_ +for project discussion and coordination. 
+ + +.. toctree:: + :maxdepth: 2 + :caption: Getting Started + :hidden: + + installation + quick_start + +.. toctree:: + :maxdepth: 1 + :caption: API Reference + :hidden: + + pyhazards_datasets + pyhazards_models + pyhazards_benchmarks + pyhazards_configs + pyhazards_reports + pyhazards_engine + pyhazards_metrics + pyhazards_utils + interactive_map + +.. toctree:: + :maxdepth: 2 + :caption: Additional Information + :hidden: + + implementation + appendix_a_coverage + cite + references + team diff --git a/docs/_sources/installation.rst.txt b/docs/_sources/installation.rst.txt new file mode 100644 index 00000000..9bd47237 --- /dev/null +++ b/docs/_sources/installation.rst.txt @@ -0,0 +1,63 @@ +Installation +============ + +Use this page to install PyHazards, verify that the package imports correctly, +and choose the right setup path for local use or contribution. PyHazards +supports Python 3.8 through 3.12 and installs with ``pip``. + +Requirements +------------ + +- Python ``>=3.8, <3.13`` +- PyTorch ``>=2.3, <3.0`` + +Install from PyPI +----------------- + +Install from PyPI: + +.. code-block:: bash + + pip install pyhazards + +GPU Install +----------- + +If you plan to run on GPU, install a matching PyTorch build first and then +install PyHazards. + +Example for CUDA 12.6: + +.. code-block:: bash + + pip install torch --index-url https://download.pytorch.org/whl/cu126 + pip install pyhazards + +Install from Source +------------------- + +Use an editable install when you are contributing code or documentation: + +.. code-block:: bash + + git clone https://github.com/LabRAI/PyHazards.git + cd PyHazards + python -m pip install -e . + +Verify the Installation +----------------------- + +Run a small import check to confirm that the package is available in the +environment: + +.. code-block:: bash + + python -c "import pyhazards; print(pyhazards.__version__)" + +You should see the installed package version printed to stdout. 
+ +Next Steps +---------- + +- Continue to :doc:`quick_start` for the first end-to-end workflow. +- See :doc:`implementation` if you are setting up a contributor workflow. diff --git a/docs/_sources/interactive_map.rst.txt b/docs/_sources/interactive_map.rst.txt new file mode 100644 index 00000000..b3b5798e --- /dev/null +++ b/docs/_sources/interactive_map.rst.txt @@ -0,0 +1,60 @@ +Wildfire Interactive Map +======================== + +PyHazards includes a lightweight launcher for the external **RAI Fire** +interactive wildfire map. This companion site is specific to wildfire use +cases; it is not a general interactive map for every hazard domain in +PyHazards. + +Use it when you want a browser-based wildfire view without leaving the broader +PyHazards workflow. + +What This Page Covers +--------------------- + +- the live wildfire-focused RAI Fire website, +- the built-in launcher command, +- the small Python helper exposed by the package. + +Live Website +------------ + +- `RAI Fire `_ +- `Source repository `_ + +Command Line +------------ + +Open the website from the library with: + +.. code-block:: bash + + python -m pyhazards map + +The command prints the URL and, when possible, opens it in your default browser. + +Python API +---------- + +.. code-block:: python + + from pyhazards import open_interactive_map + + url = open_interactive_map() + print(url) + +Notes +----- + +The interactive map is an external companion application for wildfire +visualization. PyHazards links to it and provides a launcher, but it does not +host the web application inside the Python package itself. + +Module Reference +---------------- + +.. 
automodule:: pyhazards.interactive_map + :members: + :undoc-members: + :show-inheritance: + :no-index: diff --git a/docs/_sources/modules/datasets_era5_inspection.rst.txt b/docs/_sources/modules/datasets_era5_inspection.rst.txt new file mode 100644 index 00000000..13f1db5c --- /dev/null +++ b/docs/_sources/modules/datasets_era5_inspection.rst.txt @@ -0,0 +1,16 @@ +pyhazards.datasets.era5.inspection +================================== + +Description +----------- + +CLI entrypoint for ERA5 file inspection. It validates local NetCDF files, prints discovered dimensions/variables, +and falls back to HDF5-level inspection when optional xarray NetCDF backends are unavailable. + +Example usage +------------- + +.. code-block:: bash + + python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10 + diff --git a/docs/_sources/modules/datasets_firms_inspection.rst.txt b/docs/_sources/modules/datasets_firms_inspection.rst.txt new file mode 100644 index 00000000..0701063c --- /dev/null +++ b/docs/_sources/modules/datasets_firms_inspection.rst.txt @@ -0,0 +1,16 @@ +pyhazards.datasets.firms.inspection +=================================== + +Description +----------- + +CLI entrypoint for FIRMS dataset path validation. It verifies local paths and reports file counts/sample files +for quick sanity checks. + +Example usage +------------- + +.. code-block:: bash + + python -m pyhazards.datasets.firms.inspection --path /path/to/firms_data + diff --git a/docs/_sources/modules/datasets_goesr_inspection.rst.txt b/docs/_sources/modules/datasets_goesr_inspection.rst.txt new file mode 100644 index 00000000..a12c5d55 --- /dev/null +++ b/docs/_sources/modules/datasets_goesr_inspection.rst.txt @@ -0,0 +1,16 @@ +pyhazards.datasets.goesr.inspection +=================================== + +Description +----------- + +CLI entrypoint for GOES-R dataset path validation. It validates local file/folder paths and prints concise +inventory details. 
+ +Example usage +------------- + +.. code-block:: bash + + python -m pyhazards.datasets.goesr.inspection --path /path/to/goesr_data + diff --git a/docs/_sources/modules/datasets_landfire_inspection.rst.txt b/docs/_sources/modules/datasets_landfire_inspection.rst.txt new file mode 100644 index 00000000..67d219a4 --- /dev/null +++ b/docs/_sources/modules/datasets_landfire_inspection.rst.txt @@ -0,0 +1,16 @@ +pyhazards.datasets.landfire.inspection +====================================== + +Description +----------- + +CLI entrypoint for LANDFIRE dataset path validation. It provides a lightweight check that required local assets +exist and can be enumerated. + +Example usage +------------- + +.. code-block:: bash + + python -m pyhazards.datasets.landfire.inspection --path /path/to/landfire_data + diff --git a/docs/_sources/modules/datasets_merra2_inspection.rst.txt b/docs/_sources/modules/datasets_merra2_inspection.rst.txt new file mode 100644 index 00000000..cbc2b565 --- /dev/null +++ b/docs/_sources/modules/datasets_merra2_inspection.rst.txt @@ -0,0 +1,17 @@ +pyhazards.datasets.merra2.inspection +==================================== + +Description +----------- + +CLI entrypoint for the MERRA-2 inspection workflow in PyHazards. It wraps the full pipeline in +``pyhazards.datasets.inspection`` (download/preprocess/inspect/outputs). + +Example usage +------------- + +.. code-block:: bash + + # Full pipeline for one date key + python -m pyhazards.datasets.merra2.inspection 20260101 + diff --git a/docs/_sources/modules/datasets_mtbs_inspection.rst.txt b/docs/_sources/modules/datasets_mtbs_inspection.rst.txt new file mode 100644 index 00000000..bf58e08d --- /dev/null +++ b/docs/_sources/modules/datasets_mtbs_inspection.rst.txt @@ -0,0 +1,16 @@ +pyhazards.datasets.mtbs.inspection +================================== + +Description +----------- + +CLI entrypoint for MTBS dataset path validation. 
It checks local file/folder availability and prints lightweight +inventory information. + +Example usage +------------- + +.. code-block:: bash + + python -m pyhazards.datasets.mtbs.inspection --path /path/to/mtbs_data + diff --git a/docs/_sources/modules/datasets_noaa_flood_inspection.rst.txt b/docs/_sources/modules/datasets_noaa_flood_inspection.rst.txt new file mode 100644 index 00000000..85c3905c --- /dev/null +++ b/docs/_sources/modules/datasets_noaa_flood_inspection.rst.txt @@ -0,0 +1,16 @@ +pyhazards.datasets.noaa_flood.inspection +======================================== + +Description +----------- + +CLI entrypoint for NOAA flood dataset path validation. It checks whether a local file/folder exists and prints +a compact summary of detected files. + +Example usage +------------- + +.. code-block:: bash + + python -m pyhazards.datasets.noaa_flood.inspection --path /path/to/noaa_flood_data + diff --git a/docs/_sources/modules/datasets_wfigs_inspection.rst.txt b/docs/_sources/modules/datasets_wfigs_inspection.rst.txt new file mode 100644 index 00000000..bcdc85e0 --- /dev/null +++ b/docs/_sources/modules/datasets_wfigs_inspection.rst.txt @@ -0,0 +1,16 @@ +pyhazards.datasets.wfigs.inspection +=================================== + +Description +----------- + +CLI entrypoint for WFIGS dataset path validation. It checks local path existence and prints simple file inventory +for quick validation. + +Example usage +------------- + +.. code-block:: bash + + python -m pyhazards.datasets.wfigs.inspection --path /path/to/wfigs_data + diff --git a/docs/_sources/modules/models_asufm.rst.txt b/docs/_sources/modules/models_asufm.rst.txt new file mode 100644 index 00000000..388e3e7f --- /dev/null +++ b/docs/_sources/modules/models_asufm.rst.txt @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. 
+ +ASUFM +===== + +Overview +-------- + +``asufm`` is a compact temporal convolution baseline for next-window wildfire activity prediction. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Forecasting + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Wildfire Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``asufm`` is a compact temporal convolution baseline for next-window wildfire activity prediction. + +PyHazards exposes it through the shared wildfire benchmark and config workflow. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Wildfire Benchmark ` + +External References +------------------- + +**Paper:** `Wildfire Spread Prediction in North America Using Satellite Imagery and Vision Transformer `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``asufm`` + +Supported Tasks +--------------- + +- Forecasting + +Programmatic Use +---------------- + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="asufm", + task="forecasting", + input_dim=7, + output_dim=5, + lookback=12, + ) + preds = model(torch.randn(2, 12, 7)) + print(preds.shape) + +Notes +----- + +- The smoke path uses weekly wildfire count windows with seasonal time features. diff --git a/docs/_sources/modules/models_eqnet.rst.txt b/docs/_sources/modules/models_eqnet.rst.txt new file mode 100644 index 00000000..649727af --- /dev/null +++ b/docs/_sources/modules/models_eqnet.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +EQNet +===== + +Overview +-------- + +``eqnet`` extends the PyHazards earthquake benchmark stack with a lightweight attention-based picking model. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Phase Picking + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Earthquake Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``eqnet`` extends the PyHazards earthquake benchmark stack with a lightweight attention-based picking model. 
+ +The implementation keeps the shared waveform input and two-pick output contract so it can be evaluated alongside ``phasenet`` and ``eqtransformer``. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Earthquake Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`SeisBench ` + +External References +------------------- + +**Paper:** `An End-To-End Earthquake Detection Method for Joint Phase Picking and Association Using Deep Learning `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``eqnet`` + +Supported Tasks +--------------- + +- Phase Picking + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="eqnet", task="regression", in_channels=3) + picks = model(torch.randn(4, 3, 256)) + print(picks.shape) + +Notes +----- + +- Outputs are P- and S-arrival sample indices. diff --git a/docs/_sources/modules/models_eqtransformer.rst.txt b/docs/_sources/modules/models_eqtransformer.rst.txt new file mode 100644 index 00000000..001e9916 --- /dev/null +++ b/docs/_sources/modules/models_eqtransformer.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +EQTransformer +============= + +Overview +-------- + +``eqtransformer`` is the second earthquake picking baseline in the staged roadmap and shares the synthetic waveform contract used by ``phasenet``. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. 
container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Phase Picking + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Earthquake Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``eqtransformer`` is the second earthquake picking baseline in the staged roadmap and shares the synthetic waveform contract used by ``phasenet``. + +The PyHazards adapter focuses on the shared picking interface rather than a full reproduction of the original multitask training pipeline. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Earthquake Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`pick-benchmark ` + +External References +------------------- + +**Paper:** `Earthquake Transformer-An attentive deep-learning model for simultaneous earthquake detection and phase picking `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``eqtransformer`` + +Supported Tasks +--------------- + +- Phase Picking + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="eqtransformer", task="regression", in_channels=3) + picks = model(torch.randn(4, 3, 256)) + print(picks.shape) + +Notes +----- + +- Outputs are P- and S-arrival sample indices. diff --git a/docs/_sources/modules/models_firecastnet.rst.txt b/docs/_sources/modules/models_firecastnet.rst.txt new file mode 100644 index 00000000..26970365 --- /dev/null +++ b/docs/_sources/modules/models_firecastnet.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. 
+ +FireCastNet +=========== + +Overview +-------- + +``firecastnet`` is a raster wildfire spread baseline that uses a shallow encoder-decoder architecture. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Spread + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Wildfire Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``firecastnet`` is a raster wildfire spread baseline that uses a shallow encoder-decoder architecture. + +The PyHazards implementation is optimized for the shared smoke benchmark rather than the full upstream training stack. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Wildfire Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`WildfireSpreadTS ` + +External References +------------------- + +**Paper:** `FireCastNet: Earth-as-a-Graph for Seasonal Fire Prediction `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``firecastnet`` + +Supported Tasks +--------------- + +- Spread + +Programmatic Use +---------------- + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="firecastnet", task="segmentation", in_channels=12) + logits = model(torch.randn(2, 12, 16, 16)) + print(logits.shape) + +Notes +----- + +- The smoke configuration uses the single-frame wildfire spread raster fixture. diff --git a/docs/_sources/modules/models_floodcast.rst.txt b/docs/_sources/modules/models_floodcast.rst.txt new file mode 100644 index 00000000..42decc16 --- /dev/null +++ b/docs/_sources/modules/models_floodcast.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +FloodCast +========= + +Overview +-------- + +``floodcast`` is the first public inundation model in the staged PyHazards flood roadmap. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Inundation + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Flood Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``floodcast`` is the first public inundation model in the staged PyHazards flood roadmap. + +The adapter uses shared raster tensors so it can be benchmarked through the ``flood.inundation`` evaluator without dataset-specific glue code. 
+ +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Flood Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`FloodCastBench ` + +External References +------------------- + +**Paper:** `Large-scale flood modeling and forecasting with FloodCast `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``floodcast`` + +Supported Tasks +--------------- + +- Inundation + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="floodcast", task="regression", in_channels=3, history=4) + preds = model(torch.randn(2, 4, 3, 16, 16)) + print(preds.shape) + +Notes +----- + +- Outputs are next-horizon inundation depth rasters. diff --git a/docs/_sources/modules/models_forefire.rst.txt b/docs/_sources/modules/models_forefire.rst.txt new file mode 100644 index 00000000..cc0f3b6a --- /dev/null +++ b/docs/_sources/modules/models_forefire.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +ForeFire Adapter +================ + +Overview +-------- + +``forefire`` is a deterministic raster adapter that approximates simulator-style front propagation through fixed diffusion kernels. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Spread + + .. 
grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Wildfire Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``forefire`` is a deterministic raster adapter that approximates simulator-style front propagation through fixed diffusion kernels. + +PyHazards exposes it as a benchmarkable baseline through the standard model registry. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Wildfire Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`WildfireSpreadTS ` + +External References +------------------- + +**Paper:** `ForeFire: A Modular, Scriptable C++ Simulation Engine and Library for Wildland-Fire Spread `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``forefire`` + +Supported Tasks +--------------- + +- Spread + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="forefire", task="segmentation", in_channels=12) + logits = model(torch.randn(2, 12, 16, 16)) + print(logits.shape) + +Notes +----- + +- This adapter is deterministic and does not learn parameters during the smoke test. diff --git a/docs/_sources/modules/models_fourcastnet_tc.rst.txt b/docs/_sources/modules/models_fourcastnet_tc.rst.txt new file mode 100644 index 00000000..3515efc7 --- /dev/null +++ b/docs/_sources/modules/models_fourcastnet_tc.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +FourCastNet TC Adapter +====================== + +Overview +-------- + +``fourcastnet_tc`` completes the first wave of experimental foundation-weather storm adapters in the staged roadmap. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. 
grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Experimental Adapter + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``fourcastnet_tc`` completes the first wave of experimental foundation-weather storm adapters in the staged roadmap. + +The PyHazards version is intentionally lightweight and uses the same trajectory output contract as the other storm baselines. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`IBTrACS ` + +External References +------------------- + +**Paper:** `FourCastNet: A Global Data-driven High-resolution Weather Model using Adaptive Fourier Neural Operators `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``fourcastnet_tc`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="fourcastnet_tc", task="regression", input_dim=8, history=6, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Experimental adapter: intended for shared-evaluator prototyping rather than exact weather-model parity. diff --git a/docs/_sources/modules/models_google_flood_forecasting.rst.txt b/docs/_sources/modules/models_google_flood_forecasting.rst.txt new file mode 100644 index 00000000..ea8999be --- /dev/null +++ b/docs/_sources/modules/models_google_flood_forecasting.rst.txt @@ -0,0 +1,113 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +Google Flood Forecasting +======================== + +Overview +-------- + +``google_flood_forecasting`` is a compact sequence-to-node forecasting baseline for flood streamflow prediction. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Streamflow + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Flood Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``google_flood_forecasting`` is a compact sequence-to-node forecasting baseline for flood streamflow prediction. 
+ +The PyHazards implementation uses a transformer encoder over per-node history windows and returns one forecast value per node. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Flood Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`Caravan ` + +External References +------------------- + +**Paper:** `Global Flood Forecasting at a Fine Catchment Resolution using Machine Learning `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``google_flood_forecasting`` + +Supported Tasks +--------------- + +- Streamflow + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="google_flood_forecasting", + task="regression", + input_dim=2, + out_dim=1, + history=4, + ) + preds = model({"x": torch.randn(2, 4, 6, 2)}) + print(preds.shape) + +Notes +----- + +- The smoke path uses the same streamflow-style graph fixture as the other flood baselines. diff --git a/docs/_sources/modules/models_gpd.rst.txt b/docs/_sources/modules/models_gpd.rst.txt new file mode 100644 index 00000000..12607c77 --- /dev/null +++ b/docs/_sources/modules/models_gpd.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +GPD +=== + +Overview +-------- + +``gpd`` provides a lightweight earthquake picking adapter with the same waveform-to-pick interface used across the PyHazards earthquake benchmarks. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. 
container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Phase Picking + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Earthquake Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``gpd`` provides a lightweight earthquake picking adapter with the same waveform-to-pick interface used across the PyHazards earthquake benchmarks. + +This adapter is intended as a reproducible low-cost baseline rather than an exact port of every original training detail. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Earthquake Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`pick-benchmark ` + +External References +------------------- + +**Paper:** `Generalized Seismic Phase Detection with Deep Learning `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``gpd`` + +Supported Tasks +--------------- + +- Phase Picking + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="gpd", task="regression", in_channels=3) + picks = model(torch.randn(4, 3, 256)) + print(picks.shape) + +Notes +----- + +- The adapter keeps a simple two-output pick interface for shared evaluation. diff --git a/docs/_sources/modules/models_graphcast_tc.rst.txt b/docs/_sources/modules/models_graphcast_tc.rst.txt new file mode 100644 index 00000000..4d267deb --- /dev/null +++ b/docs/_sources/modules/models_graphcast_tc.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. 
+ +GraphCast TC Adapter +==================== + +Overview +-------- + +``graphcast_tc`` is an experimental foundation-weather adapter that keeps the shared storm trajectory interface while remaining lightweight enough for CI. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Experimental Adapter + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``graphcast_tc`` is an experimental foundation-weather adapter that keeps the shared storm trajectory interface while remaining lightweight enough for CI. + +The PyHazards version is intentionally wrapper-style and should be treated as an adapter contract rather than a full reproduction of the original weather model. 
+ +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`IBTrACS ` + +External References +------------------- + +**Paper:** `GraphCast: Learning skillful medium-range global weather forecasting `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``graphcast_tc`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="graphcast_tc", task="regression", input_dim=8, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Experimental adapter: intended for shared-evaluator prototyping rather than exact weather-model parity. diff --git a/docs/_sources/modules/models_hurricast.rst.txt b/docs/_sources/modules/models_hurricast.rst.txt new file mode 100644 index 00000000..8fa01259 --- /dev/null +++ b/docs/_sources/modules/models_hurricast.rst.txt @@ -0,0 +1,114 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +Hurricast +========= + +Overview +-------- + +``hurricast`` is the first basin-specific storm baseline in the staged PyHazards roadmap and operates on storm-history sequences. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. 
container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``hurricast`` is the first basin-specific storm baseline in the staged PyHazards roadmap and operates on storm-history sequences. + +This initial adapter focuses on the shared tropical-cyclone forecasting interface and is intended as a reproducible starting point before broader storm-model breadth. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`IBTrACS ` + +External References +------------------- + +**Paper:** `Hurricane Forecasting: A Novel Multimodal Machine Learning Framework `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``hurricast`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="hurricast", + task="regression", + input_dim=8, + horizon=5, + output_dim=3, + ) + + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Outputs are lead-time sequences of latitude, longitude, and intensity targets. diff --git a/docs/_sources/modules/models_hydrographnet.rst.txt b/docs/_sources/modules/models_hydrographnet.rst.txt new file mode 100644 index 00000000..da02a757 --- /dev/null +++ b/docs/_sources/modules/models_hydrographnet.rst.txt @@ -0,0 +1,119 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. 
+ +HydroGraphNet +============= + +Overview +-------- + +``hydrographnet`` is the PyHazards entrypoint for flood forecasting on irregular meshes with graph-structured hydrologic state updates. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Streamflow + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Flood Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``hydrographnet`` is the PyHazards entrypoint for flood forecasting on irregular meshes with graph-structured hydrologic state updates. + +In PyHazards, this model is typically paired with the ERA5-based hydrograph adapter ``load_hydrograph_data`` for end-to-end smoke validation. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Flood Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`HydroBench ` + +External References +------------------- + +**Paper:** `Interpretable physics-informed graph neural networks for flood forecasting `_ + +Registry Name +------------- + +Primary entrypoint: ``hydrographnet`` + +Supported Tasks +--------------- + +- Streamflow + +Programmatic Use +---------------- + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="hydrographnet", + task="regression", + node_in_dim=2, + edge_in_dim=3, + out_dim=1, + ) + + batch = { + "x": torch.randn(1, 3, 6, 2), + "adj": torch.eye(6).unsqueeze(0), + "coords": torch.randn(6, 2), + } + preds = model(batch) + print(preds.shape) + +Notes +----- + +- The smoke test uses a synthetic graph batch so it stays CPU-safe in CI. diff --git a/docs/_sources/modules/models_neuralhydrology_ealstm.rst.txt b/docs/_sources/modules/models_neuralhydrology_ealstm.rst.txt new file mode 100644 index 00000000..7004c06d --- /dev/null +++ b/docs/_sources/modules/models_neuralhydrology_ealstm.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +EA-LSTM +======= + +Overview +-------- + +``neuralhydrology_ealstm`` complements the plain LSTM adapter with a lightweight static gating path inspired by EA-LSTM style hydrology models. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Streamflow + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Flood Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. 
+ + +Description +----------- + +``neuralhydrology_ealstm`` complements the plain LSTM adapter with a lightweight static gating path inspired by EA-LSTM style hydrology models. + +It keeps the same graph-temporal input contract as the rest of the flood streamflow roadmap. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Flood Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`WaterBench ` + +External References +------------------- + +**Paper:** `Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``neuralhydrology_ealstm`` + +Supported Tasks +--------------- + +- Streamflow + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="neuralhydrology_ealstm", task="regression", input_dim=2, out_dim=1) + preds = model({"x": torch.randn(1, 4, 6, 2)}) + print(preds.shape) + +Notes +----- + +- This adapter focuses on the entity-aware gating contract, not exact repo parity. diff --git a/docs/_sources/modules/models_neuralhydrology_lstm.rst.txt b/docs/_sources/modules/models_neuralhydrology_lstm.rst.txt new file mode 100644 index 00000000..8379c935 --- /dev/null +++ b/docs/_sources/modules/models_neuralhydrology_lstm.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +NeuralHydrology LSTM +==================== + +Overview +-------- + +``neuralhydrology_lstm`` is the first community-style hydrology baseline in the PyHazards flood roadmap. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. 
container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Streamflow + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Flood Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``neuralhydrology_lstm`` is the first community-style hydrology baseline in the PyHazards flood roadmap. + +The adapter consumes the shared graph-temporal streamflow batch format and produces next-step nodewise discharge predictions. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Flood Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`Caravan ` + +External References +------------------- + +**Paper:** `Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``neuralhydrology_lstm`` + +Supported Tasks +--------------- + +- Streamflow + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="neuralhydrology_lstm", task="regression", input_dim=2, out_dim=1) + preds = model({"x": torch.randn(1, 4, 6, 2)}) + print(preds.shape) + +Notes +----- + +- The smoke test uses the shared synthetic streamflow dataset shape. 
diff --git a/docs/_sources/modules/models_pangu_tc.rst.txt b/docs/_sources/modules/models_pangu_tc.rst.txt new file mode 100644 index 00000000..d7ec2240 --- /dev/null +++ b/docs/_sources/modules/models_pangu_tc.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +Pangu TC Adapter +================ + +Overview +-------- + +``pangu_tc`` adds a second foundation-weather reference path behind the shared tropical-cyclone evaluator. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Experimental Adapter + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``pangu_tc`` adds a second foundation-weather reference path behind the shared tropical-cyclone evaluator. + +The implementation is intentionally lightweight and should be interpreted as an adapter contract for forecast-field driven storm evaluation. 
+ +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`IBTrACS ` + +External References +------------------- + +**Paper:** `Accurate medium-range global weather forecasting with 3D neural networks `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``pangu_tc`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="pangu_tc", task="regression", input_dim=8, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Experimental adapter: intended for shared-evaluator prototyping rather than exact weather-model parity. diff --git a/docs/_sources/modules/models_phasenet.rst.txt b/docs/_sources/modules/models_phasenet.rst.txt new file mode 100644 index 00000000..780512f6 --- /dev/null +++ b/docs/_sources/modules/models_phasenet.rst.txt @@ -0,0 +1,112 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +PhaseNet +======== + +Overview +-------- + +``phasenet`` is the first earthquake picking baseline in the staged PyHazards roadmap and is paired with the synthetic waveform dataset for smoke validation. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. 
container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Phase Picking + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Earthquake Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``phasenet`` is the first earthquake picking baseline in the staged PyHazards roadmap and is paired with the synthetic waveform dataset for smoke validation. + +This initial adapter focuses on the shared waveform-to-pick interface and does not claim exact reproduction of the original PhaseNet training stack. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Earthquake Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`SeisBench ` + +External References +------------------- + +**Paper:** `PhaseNet: A Deep-Neural-Network-Based Seismic Arrival Time Picking Method `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``phasenet`` + +Supported Tasks +--------------- + +- Phase Picking + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="phasenet", + task="regression", + in_channels=3, + ) + + picks = model(torch.randn(4, 3, 256)) + print(picks.shape) + +Notes +----- + +- Outputs are P- and S-arrival sample indices in the current smoke-test adapter. diff --git a/docs/_sources/modules/models_saf_net.rst.txt b/docs/_sources/modules/models_saf_net.rst.txt new file mode 100644 index 00000000..4d99da2f --- /dev/null +++ b/docs/_sources/modules/models_saf_net.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +SAF-Net +======= + +Overview +-------- + +``saf_net`` adds an intensity-oriented storm baseline to the shared ``tc.track_intensity`` evaluator. 
+ +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``saf_net`` adds an intensity-oriented storm baseline to the shared ``tc.track_intensity`` evaluator. + +The adapter keeps full trajectory outputs so it can use the same report format as the other PyHazards storm models. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`TCBench Alpha ` + +External References +------------------- + +**Paper:** `SAF-Net: A spatio-temporal deep learning method for typhoon intensity prediction `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``saf_net`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="saf_net", task="regression", input_dim=8, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Track channels are retained so the shared storm evaluator can score all baselines consistently. diff --git a/docs/_sources/modules/models_tcif_fusion.rst.txt b/docs/_sources/modules/models_tcif_fusion.rst.txt new file mode 100644 index 00000000..f9c46589 --- /dev/null +++ b/docs/_sources/modules/models_tcif_fusion.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +TCIF-fusion +=========== + +Overview +-------- + +``tcif_fusion`` combines multiple feature streams behind the shared storm forecasting interface used throughout the PyHazards cyclone roadmap. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``tcif_fusion`` combines multiple feature streams behind the shared storm forecasting interface used throughout the PyHazards cyclone roadmap. 
+ +The adapter focuses on the fusion contract and evaluator compatibility rather than full reproduction of the original training stack. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`TCBench Alpha ` + +External References +------------------- + +**Paper:** `Tropical cyclone intensity forecasting using model knowledge guided deep learning model `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``tcif_fusion`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="tcif_fusion", task="regression", input_dim=8, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Outputs are shared storm forecast trajectories over the configured horizon. diff --git a/docs/_sources/modules/models_tropicalcyclone_mlp.rst.txt b/docs/_sources/modules/models_tropicalcyclone_mlp.rst.txt new file mode 100644 index 00000000..7c0b4c3c --- /dev/null +++ b/docs/_sources/modules/models_tropicalcyclone_mlp.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +Tropical Cyclone MLP +==================== + +Overview +-------- + +``tropicalcyclone_mlp`` complements ``hurricast`` with a lighter-weight hurricane baseline that uses the same storm-history input contract. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. 
container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``tropicalcyclone_mlp`` complements ``hurricast`` with a lighter-weight hurricane baseline that uses the same storm-history input contract. + +The adapter is useful for practical low-cost intensity and trajectory experiments in basin-filtered settings. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`TCBench Alpha ` + +External References +------------------- + +**Paper:** `Deep Learning Experiments for Tropical Cyclone Intensity Forecasts `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``tropicalcyclone_mlp`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="tropicalcyclone_mlp", task="regression", input_dim=8, history=6) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Outputs are lead-time sequences of latitude, longitude, and intensity targets. diff --git a/docs/_sources/modules/models_tropicyclonenet.rst.txt b/docs/_sources/modules/models_tropicyclonenet.rst.txt new file mode 100644 index 00000000..dba1fe62 --- /dev/null +++ b/docs/_sources/modules/models_tropicyclonenet.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. 
+ +TropiCycloneNet +=============== + +Overview +-------- + +``tropicyclonenet`` extends the shared storm benchmark stack beyond the hurricane-only presets. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``tropicyclonenet`` extends the shared storm benchmark stack beyond the hurricane-only presets. + +The PyHazards adapter keeps a single storm-history to forecast-trajectory interface so it can share the same evaluator as ``hurricast``. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`TropiCycloneNet-Dataset ` + +External References +------------------- + +**Paper:** `Benchmark dataset and deep learning method for global tropical cyclone forecasting `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``tropicyclonenet`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="tropicyclonenet", task="regression", input_dim=8, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Outputs are lead-time sequences of latitude, longitude, and intensity targets. diff --git a/docs/_sources/modules/models_urbanfloodcast.rst.txt b/docs/_sources/modules/models_urbanfloodcast.rst.txt new file mode 100644 index 00000000..6d52cdb2 --- /dev/null +++ b/docs/_sources/modules/models_urbanfloodcast.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +UrbanFloodCast +============== + +Overview +-------- + +``urbanfloodcast`` adds an urban-focused raster baseline to the PyHazards inundation benchmark stack. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Inundation + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Flood Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``urbanfloodcast`` adds an urban-focused raster baseline to the PyHazards inundation benchmark stack. + +The implementation keeps the shared spatiotemporal tensor contract used by the synthetic inundation smoke dataset. 
+ +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Flood Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`FloodCastBench ` + +External References +------------------- + +**Paper:** `UrbanFloodCast: WMO Urban Flooding Forecasting Challenge `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``urbanfloodcast`` + +Supported Tasks +--------------- + +- Inundation + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="urbanfloodcast", task="regression", in_channels=3, history=4) + preds = model(torch.randn(2, 4, 3, 16, 16)) + print(preds.shape) + +Notes +----- + +- Outputs are next-horizon inundation depth rasters. diff --git a/docs/_sources/modules/models_wavecastnet.rst.txt b/docs/_sources/modules/models_wavecastnet.rst.txt new file mode 100644 index 00000000..51a41fef --- /dev/null +++ b/docs/_sources/modules/models_wavecastnet.rst.txt @@ -0,0 +1,121 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +WaveCastNet +=========== + +Overview +-------- + +``wavecastnet`` is the PyHazards entrypoint for dense-grid earthquake wavefield forecasting based on the ConvLEM encoder-decoder design described by Lyu et al. (2025). + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. 
container:: catalog-stat-note + + Wavefield Forecasting + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Earthquake Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``wavecastnet`` is the PyHazards entrypoint for dense-grid earthquake wavefield forecasting based on the ConvLEM encoder-decoder design described by Lyu et al. (2025). + +This implementation focuses on the core dense-grid forecasting path and keeps data loading outside the model so users can adapt it to their own simulation or sensor pipelines. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Earthquake Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`AEFA `, :doc:`pyCSEP ` + +External References +------------------- + +**Paper:** `Rapid wavefield forecasting for earthquake early warning via deep sequence to sequence learning `_ + +Registry Name +------------- + +Primary entrypoint: ``wavecastnet`` + +Supported Tasks +--------------- + +- Wavefield Forecasting + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="wavecastnet", + task="regression", + in_channels=3, + height=32, + width=24, + temporal_in=6, + temporal_out=4, + hidden_dim=32, + num_layers=1, + dropout=0.0, + ) + + x = torch.randn(2, 3, 6, 32, 24) + y = model(x) + print(y.shape) + +Notes +----- + +- The PyHazards version currently targets dense-grid forecasting rather than the paper's sparse-sensor variants. +- The smoke test uses reduced spatial and temporal sizes so it stays CPU-safe in CI. 
diff --git a/docs/_sources/modules/models_wildfire_aspp.rst.txt b/docs/_sources/modules/models_wildfire_aspp.rst.txt new file mode 100644 index 00000000..1f411824 --- /dev/null +++ b/docs/_sources/modules/models_wildfire_aspp.rst.txt @@ -0,0 +1,115 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +CNN-ASPP +======== + +Overview +-------- + +``wildfire_aspp`` is the backward-compatible public PyHazards entrypoint for the CNN + ASPP wildfire spread model. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Spread + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Wildfire Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``wildfire_aspp`` is the backward-compatible public PyHazards entrypoint for the CNN + ASPP wildfire spread model. + +PyHazards keeps the alias for compatibility while the implementation delegates to the native ``wildfire_cnn_aspp`` builder under the hood. 
+ +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Wildfire Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`WildfireSpreadTS ` + +External References +------------------- + +**Paper:** `Application of Explainable Artificial Intelligence in Predicting Wildfire Spread `_ + +Registry Name +------------- + +Primary entrypoint: ``wildfire_aspp`` + +Aliases: ``wildfire_cnn_aspp`` + +Supported Tasks +--------------- + +- Spread + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="wildfire_aspp", + task="segmentation", + in_channels=12, + ) + + x = torch.randn(2, 12, 64, 64) + logits = model(x) + print(logits.shape) + +Notes +----- + +- ``wildfire_cnn_aspp`` remains available as an alias for the same public model. diff --git a/docs/_sources/modules/models_wildfire_forecasting.rst.txt b/docs/_sources/modules/models_wildfire_forecasting.rst.txt new file mode 100644 index 00000000..82a6b73a --- /dev/null +++ b/docs/_sources/modules/models_wildfire_forecasting.rst.txt @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +Wildfire Forecasting +==================== + +Overview +-------- + +``wildfire_forecasting`` is a compact GRU-attention forecaster for weekly wildfire activity windows. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. 
container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Forecasting + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Wildfire Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``wildfire_forecasting`` is a compact GRU-attention forecaster for weekly wildfire activity windows. + +The PyHazards implementation targets smoke-testable next-window size-group prediction through the shared wildfire benchmark flow. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Wildfire Benchmark ` + +External References +------------------- + +**Paper:** `Wildfire Danger Prediction and Understanding with Deep Learning `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``wildfire_forecasting`` + +Supported Tasks +--------------- + +- Forecasting + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="wildfire_forecasting", + task="forecasting", + input_dim=7, + output_dim=5, + lookback=12, + ) + preds = model(torch.randn(2, 12, 7)) + print(preds.shape) + +Notes +----- + +- This public adapter is exercised on the weekly wildfire smoke benchmark. diff --git a/docs/_sources/modules/models_wildfire_fpa.rst.txt b/docs/_sources/modules/models_wildfire_fpa.rst.txt new file mode 100644 index 00000000..b5890f79 --- /dev/null +++ b/docs/_sources/modules/models_wildfire_fpa.rst.txt @@ -0,0 +1,117 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +DNN-LSTM-AutoEncoder +==================== + +Overview +-------- + +``wildfire_fpa`` is the paper-facing PyHazards entrypoint for the FPA-FOD wildfire framework described by Shen et al. (2023). + +At a Glance +----------- + +.. 
grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 2 + + .. container:: catalog-stat-note + + Classification, Forecasting + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Wildfire Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``wildfire_fpa`` is the paper-facing PyHazards entrypoint for the FPA-FOD wildfire framework described by Shen et al. (2023). + +PyHazards exposes the combined DNN-LSTM-AutoEncoder workflow through one public registry name while keeping the lower-level components internal. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Wildfire Benchmark ` + +External References +------------------- + +**Paper:** `Developing risk assessment framework for wildfire in the United States `_ + +Registry Name +------------- + +Primary entrypoint: ``wildfire_fpa`` + +Supported Tasks +--------------- + +- Classification +- Forecasting + +Programmatic Use +---------------- + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="wildfire_fpa", + task="classification", + in_dim=8, + out_dim=5, + hidden_dim=64, + depth=2, + ) + + x = torch.randn(4, 8) + logits = model(x) + print(logits.shape) + +Notes +----- + +- This is the only retained public method from Shen et al. (2023) in the PyHazards catalog. +- Use ``task="classification"`` for the DNN stage. +- Use ``task="forecasting"`` or ``task="regression"`` for the sequence stage. diff --git a/docs/_sources/modules/models_wildfire_mamba.rst.txt b/docs/_sources/modules/models_wildfire_mamba.rst.txt new file mode 100644 index 00000000..0912fc54 --- /dev/null +++ b/docs/_sources/modules/models_wildfire_mamba.rst.txt @@ -0,0 +1,115 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +:orphan: + +Wildfire Mamba +============== + +Overview +-------- + +``wildfire_mamba`` models county-day ERA5 sequences by combining selective state-space temporal blocks with a simple spatial graph layer. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Hidden + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Classification + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Unmapped + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. 
+ + +Description +----------- + +``wildfire_mamba`` models county-day ERA5 sequences by combining selective state-space temporal blocks with a simple spatial graph layer. + +The PyHazards implementation targets binary next-day per-county wildfire classification and supports an optional count head for multi-task extensions. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** Not yet mapped. + +External References +------------------- + +**Paper:** `Mamba: Linear-Time Sequence Modeling with Selective State Spaces `_ + +Registry Name +------------- + +Primary entrypoint: ``wildfire_mamba`` + +Supported Tasks +--------------- + +- Classification + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="wildfire_mamba", + task="classification", + in_dim=3, + num_counties=4, + past_days=5, + ) + + x = torch.randn(2, 5, 4, 3) + logits = model(x) + print(logits.shape) + +Notes +----- + +- The CI smoke test validates the default binary-classification path on synthetic data. diff --git a/docs/_sources/modules/models_wildfirespreadts.rst.txt b/docs/_sources/modules/models_wildfirespreadts.rst.txt new file mode 100644 index 00000000..ec85fdfe --- /dev/null +++ b/docs/_sources/modules/models_wildfirespreadts.rst.txt @@ -0,0 +1,112 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +WildfireSpreadTS +================ + +Overview +-------- + +``wildfirespreadts`` models wildfire spread as a sequence-to-mask prediction task. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. 
container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Spread + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Wildfire Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``wildfirespreadts`` models wildfire spread as a sequence-to-mask prediction task. + +The PyHazards adapter uses a compact 3D convolution stack that consumes short raster history windows and predicts the next spread mask. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Wildfire Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`WildfireSpreadTS ` + +External References +------------------- + +**Paper:** `WildfireSpreadTS: A Dataset of Multi-Modal Time Series for Wildfire Spread Prediction `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``wildfirespreadts`` + +Supported Tasks +--------------- + +- Spread + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="wildfirespreadts", + task="segmentation", + history=4, + in_channels=6, + ) + logits = model(torch.randn(2, 4, 6, 16, 16)) + print(logits.shape) + +Notes +----- + +- The smoke dataset uses temporal wildfire spread tensors rather than single-frame rasters. diff --git a/docs/_sources/modules/models_wrf_sfire.rst.txt b/docs/_sources/modules/models_wrf_sfire.rst.txt new file mode 100644 index 00000000..85546f73 --- /dev/null +++ b/docs/_sources/modules/models_wrf_sfire.rst.txt @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. 
Do not edit by hand. + +WRF-SFIRE Adapter +================= + +Overview +-------- + +``wrf_sfire`` approximates simulator-style spread transport with a fixed diffusion and terrain-moisture modulation layer. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Spread + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Wildfire Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``wrf_sfire`` approximates simulator-style spread transport with a fixed diffusion and terrain-moisture modulation layer. + +The PyHazards adapter is designed for consistent smoke benchmarking rather than full physical simulation. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Wildfire Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`WildfireSpreadTS ` + +External References +------------------- + +**Paper:** `Coupled atmosphere-wildland fire modeling with WRF 3.3 and SFIRE 2011 `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``wrf_sfire`` + +Supported Tasks +--------------- + +- Spread + +Programmatic Use +---------------- + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="wrf_sfire", task="segmentation", in_channels=12) + logits = model(torch.randn(2, 12, 16, 16)) + print(logits.shape) + +Notes +----- + +- This smoke-path adapter keeps the simulator slot benchmarkable without external binaries. diff --git a/docs/_sources/pyhazards_benchmarks.rst.txt b/docs/_sources/pyhazards_benchmarks.rst.txt new file mode 100644 index 00000000..82ec8635 --- /dev/null +++ b/docs/_sources/pyhazards_benchmarks.rst.txt @@ -0,0 +1,694 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +Benchmarks +=================== + +Explore shared benchmark families, aligned external ecosystems, supported +tasks, and model compatibility across PyHazards. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Benchmark Families + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 4 + + .. container:: catalog-stat-note + + Shared evaluator families available through the benchmark runner. + + .. grid-item-card:: Ecosystem Mappings + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 12 + + .. container:: catalog-stat-note + + External benchmark or data ecosystems linked from the public docs. + + .. grid-item-card:: Supported Task Families + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 7 + + .. container:: catalog-stat-note + + Hazard tasks covered across the family-level benchmark contracts. + + .. grid-item-card:: Smoke Configurations + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 27 + + .. container:: catalog-stat-note + + Unique smoke configs referenced by the benchmark family cards. 
+ + +Benchmark Families +------------------ + +These four cards summarize the benchmark families exposed through the +shared runner and compress the core tasks, metrics, support level, and +coverage counts into a scan-friendly catalog. + +.. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Wildfire Benchmark + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Shared PyHazards evaluator family for wildfire danger and wildfire spread experiments. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Danger` :bdg-secondary:`Spread` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Tasks:** Danger, Spread + + .. container:: catalog-meta-row + + **Key Metrics:** Accuracy, Macro F1, AUC, PR-AUC, +5 more + + .. container:: catalog-meta-row + + **Coverage:** 8 smoke configs | 8 models | 1 ecosystem + + .. container:: catalog-link-row + + **View Details:** :doc:`Wildfire Benchmark ` + + .. grid-item-card:: Earthquake Benchmark + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Shared PyHazards evaluator family for earthquake phase-picking and wavefield-forecasting runs. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-secondary:`Wavefield Forecasting` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Tasks:** Phase Picking, Wavefield Forecasting + + .. container:: catalog-meta-row + + **Key Metrics:** P-pick MAE, S-pick MAE, Precision, Recall, +3 more + + .. container:: catalog-meta-row + + **Coverage:** 5 smoke configs | 5 models | 4 ecosystems + + .. container:: catalog-link-row + + **View Details:** :doc:`Earthquake Benchmark ` + + .. grid-item-card:: Flood Benchmark + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Shared PyHazards evaluator family for streamflow forecasting and inundation prediction. + + .. 
container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-secondary:`Inundation` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Tasks:** Streamflow, Inundation + + .. container:: catalog-meta-row + + **Key Metrics:** MAE, RMSE, NSE, KGE, +3 more + + .. container:: catalog-meta-row + + **Coverage:** 6 smoke configs | 6 models | 4 ecosystems + + .. container:: catalog-link-row + + **View Details:** :doc:`Flood Benchmark ` + + .. grid-item-card:: Tropical Cyclone Benchmark + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Shared PyHazards evaluator family for tropical cyclone and hurricane track-intensity forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Tasks:** Track + Intensity + + .. container:: catalog-meta-row + + **Key Metrics:** Track Error, Intensity MAE + + .. container:: catalog-meta-row + + **Coverage:** 8 smoke configs | 8 models | 3 ecosystems + + .. container:: catalog-link-row + + **View Details:** :doc:`Tropical Cyclone Benchmark ` + + +Coverage Matrix +--------------- + +Use the matrix below for side-by-side comparison of hazard coverage, +family-level tasks, primary metrics, linked-model counts, and support +status without opening the detail pages first. + +.. 
list-table:: + :widths: 14 22 18 20 14 12 + :header-rows: 1 + :class: catalog-matrix + + * - Hazard + - Benchmark Family + - Tasks + - Primary Metrics + - Linked Models + - Support Status + * - Wildfire + - :doc:`Wildfire Benchmark ` + - Danger, Spread + - Accuracy, Macro F1, AUC, PR-AUC, +5 more + - 8 models + - Synthetic-backed + * - Earthquake + - :doc:`Earthquake Benchmark ` + - Phase Picking, Wavefield Forecasting + - P-pick MAE, S-pick MAE, Precision, Recall, +3 more + - 5 models + - Synthetic-backed + * - Flood + - :doc:`Flood Benchmark ` + - Streamflow, Inundation + - MAE, RMSE, NSE, KGE, +3 more + - 6 models + - Synthetic-backed + * - Tropical Cyclone + - :doc:`Tropical Cyclone Benchmark ` + - Track + Intensity + - Track Error, Intensity MAE + - 8 models + - Synthetic-backed + +Benchmark Ecosystems +-------------------- + +Browse the aligned benchmark ecosystems by hazard family. Each card +links to a detail page with the routed benchmark family, source links, +and the models currently mapped to that ecosystem. + +.. tab-set:: + :class: catalog-tabs + + .. tab-item:: Wildfire + + .. container:: catalog-section-note + + Ecosystem cards describe the external benchmark or data protocol + surfaced on this page and show how it maps back to the shared + PyHazards benchmark family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: WildfireSpreadTS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Temporal wildfire spread benchmark coverage for the shared wildfire spread evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** IoU, F1, Burned-area MAE + + .. container:: catalog-meta-row + + **Coverage:** 5 smoke configs | 5 models + + .. 
container:: catalog-link-row + + **View Details:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `WildfireSpreadTS: A Dataset of Multi-Modal Time Series for Wildfire Spread Prediction `_ | **Repo:** `Repository `__ + + + .. tab-item:: Earthquake + + .. container:: catalog-section-note + + Ecosystem cards describe the external benchmark or data protocol + surfaced on this page and show how it maps back to the shared + PyHazards benchmark family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: AEFA + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + AEFA-style forecasting dataset support for the shared earthquake forecasting path. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Wavefield Forecasting` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** MAE, MSE + + .. container:: catalog-meta-row + + **Coverage:** 1 smoke config | 1 model + + .. container:: catalog-link-row + + **View Details:** :doc:`AEFA ` + + .. container:: catalog-link-row + + **Paper:** `AEFA `_ + + .. grid-item-card:: pick-benchmark + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + pick-benchmark-compatible waveform picking support routed through the shared earthquake evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** P-pick MAE, S-pick MAE, Precision, Recall, +1 more + + .. container:: catalog-meta-row + + **Coverage:** 2 smoke configs | 2 models + + .. container:: catalog-link-row + + **View Details:** :doc:`pick-benchmark ` + + .. 
container:: catalog-link-row + + **Paper:** `pick-benchmark `_ + + .. grid-item-card:: pyCSEP + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + pyCSEP-style forecasting report export for the earthquake forecasting smoke path. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Wavefield Forecasting` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** MAE, MSE + + .. container:: catalog-meta-row + + **Coverage:** 1 smoke config | 1 model + + .. container:: catalog-link-row + + **View Details:** :doc:`pyCSEP ` + + .. container:: catalog-link-row + + **Paper:** `pyCSEP `_ + + .. grid-item-card:: SeisBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + SeisBench-shaped waveform picking support for the shared earthquake benchmark family. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** P-pick MAE, S-pick MAE, Precision, Recall, +1 more + + .. container:: catalog-meta-row + + **Coverage:** 2 smoke configs | 2 models + + .. container:: catalog-link-row + + **View Details:** :doc:`SeisBench ` + + .. container:: catalog-link-row + + **Paper:** `SeisBench - A Toolbox for Machine Learning in Seismology `_ | **Repo:** `Repository `__ + + + .. tab-item:: Flood + + .. container:: catalog-section-note + + Ecosystem cards describe the external benchmark or data protocol + surfaced on this page and show how it maps back to the shared + PyHazards benchmark family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Caravan + :class-card: catalog-entry-card + + .. 
container:: catalog-entry-summary + + Caravan-style streamflow benchmark coverage for the shared flood streamflow evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** MAE, RMSE, NSE, KGE + + .. container:: catalog-meta-row + + **Coverage:** 2 smoke configs | 2 models + + .. container:: catalog-link-row + + **View Details:** :doc:`Caravan ` + + .. container:: catalog-link-row + + **Paper:** `Caravan - A global community dataset for large-sample hydrology `_ | **Repo:** `Repository `__ + + .. grid-item-card:: FloodCastBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + FloodCastBench-style inundation benchmark coverage for the shared flood inundation evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Inundation` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** Pixel MAE, IoU, F1 + + .. container:: catalog-meta-row + + **Coverage:** 2 smoke configs | 2 models + + .. container:: catalog-link-row + + **View Details:** :doc:`FloodCastBench ` + + .. container:: catalog-link-row + + **Paper:** `FloodCastBench `_ + + .. grid-item-card:: HydroBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + HydroBench-style streamflow diagnostics coverage for the shared flood streamflow evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** MAE, RMSE, NSE, KGE + + .. 
container:: catalog-meta-row + + **Coverage:** 1 smoke config | 1 model + + .. container:: catalog-link-row + + **View Details:** :doc:`HydroBench ` + + .. container:: catalog-link-row + + **Paper:** `HydroBench `_ + + .. grid-item-card:: WaterBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + WaterBench-style streamflow benchmark coverage for the shared flood evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** MAE, RMSE, NSE, KGE + + .. container:: catalog-meta-row + + **Coverage:** 1 smoke config | 1 model + + .. container:: catalog-link-row + + **View Details:** :doc:`WaterBench ` + + .. container:: catalog-link-row + + **Paper:** `WaterBench: A Large-scale Benchmark Dataset for Data-driven Streamflow Forecasting `_ | **Repo:** `Repository `__ + + + .. tab-item:: Tropical Cyclone + + .. container:: catalog-section-note + + Ecosystem cards describe the external benchmark or data protocol + surfaced on this page and show how it maps back to the shared + PyHazards benchmark family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: IBTrACS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + IBTrACS-backed storm benchmark coverage for the shared tropical cyclone evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** Track Error, Intensity MAE + + .. container:: catalog-meta-row + + **Coverage:** 4 smoke configs | 4 models + + .. container:: catalog-link-row + + **View Details:** :doc:`IBTrACS ` + + .. 
container:: catalog-link-row + + **Paper:** `IBTrACS `_ + + .. grid-item-card:: TCBench Alpha + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + TCBench Alpha-style storm benchmark coverage for the shared tropical cyclone evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** Track Error, Intensity MAE + + .. container:: catalog-meta-row + + **Coverage:** 3 smoke configs | 3 models + + .. container:: catalog-link-row + + **View Details:** :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Paper:** `TCBench Alpha `_ + + .. grid-item-card:: TropiCycloneNet-Dataset + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + TropiCycloneNet-Dataset-backed storm benchmark coverage for the shared tropical cyclone evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** Track Error, Intensity MAE + + .. container:: catalog-meta-row + + **Coverage:** 1 smoke config | 1 model + + .. container:: catalog-link-row + + **View Details:** :doc:`TropiCycloneNet-Dataset ` + + .. container:: catalog-link-row + + **Paper:** `TropiCycloneNet-Dataset `_ + + + +Programmatic Use +---------------- + +.. 
code-block:: python + + from pyhazards.configs import load_experiment_config + from pyhazards.engine import BenchmarkRunner + + config = load_experiment_config("pyhazards/configs/earthquake/phasenet_smoke.yaml") + summary = BenchmarkRunner().run(config) + print(summary.metrics) + +Use ``python scripts/run_benchmark.py --help`` for the CLI entry point, +then pair this page with :doc:`pyhazards_configs` for experiment YAMLs +and :doc:`pyhazards_reports` for comparable benchmark exports. + +.. toctree:: + :maxdepth: 1 + :hidden: + + benchmarks/aefa + benchmarks/caravan + benchmarks/earthquake_benchmark + benchmarks/flood_benchmark + benchmarks/floodcastbench + benchmarks/hydrobench + benchmarks/ibtracs + benchmarks/pick_benchmark + benchmarks/pycsep + benchmarks/seisbench + benchmarks/tcbench_alpha + benchmarks/tropical_cyclone_benchmark + benchmarks/tropicyclonenet_dataset + benchmarks/waterbench + benchmarks/wildfire_benchmark + benchmarks/wildfirespreadts_ecosystem diff --git a/docs/_sources/pyhazards_configs.rst.txt b/docs/_sources/pyhazards_configs.rst.txt new file mode 100644 index 00000000..0a09d5cc --- /dev/null +++ b/docs/_sources/pyhazards_configs.rst.txt @@ -0,0 +1,40 @@ +Configs +=================== + +Overview +-------- + +Use the configs layer when you want reproducible experiment specifications for +benchmark runs, smoke tests, and hazard-specific model comparisons. + +What This Page Covers +--------------------- + +- ``pyhazards.configs`` dataclasses and YAML loading helpers +- hazard-scoped smoke configs under ``pyhazards/configs//`` +- the shared structure for benchmark, dataset, model, and report settings + +Typical Usage +------------- + +.. 
code-block:: python + + from pyhazards.configs import load_experiment_config + + config = load_experiment_config("pyhazards/configs/flood/hydrographnet_smoke.yaml") + print(config.benchmark.hazard_task) + print(config.model.name) + +Config Layout +------------- + +Each experiment config contains four sections: + +- ``benchmark``: which evaluator to run and which hazard task to score +- ``dataset``: which registered dataset to load and with which parameters +- ``model``: which registered model to build and with which parameters +- ``report``: where to write JSON, Markdown, or CSV outputs + +Next step: pair this page with :doc:`pyhazards_benchmarks` when you want to +match configs to implemented evaluation paths, and with +:doc:`pyhazards_reports` when you want to export benchmark outputs. diff --git a/docs/_sources/pyhazards_datasets.rst.txt b/docs/_sources/pyhazards_datasets.rst.txt new file mode 100644 index 00000000..31b4f17a --- /dev/null +++ b/docs/_sources/pyhazards_datasets.rst.txt @@ -0,0 +1,929 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +Datasets +=================== + +Browse PyHazards datasets across hazard families, compare source roles, +inspection paths, and registry surfaces, and navigate to dataset-specific +detail pages. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Groups + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 5 + + .. container:: catalog-stat-note + + Public dataset tabs grouped by the curated hazard-first taxonomy. + + .. grid-item-card:: Public Datasets + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 20 + + .. container:: catalog-stat-note + + Curated datasets surfaced on the public site. + + .. grid-item-card:: Inspection Entry Points + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 10 + + .. 
container:: catalog-stat-note + + Datasets with an explicit inspection command documented on the site. + + .. grid-item-card:: Registry-loadable Datasets + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 12 + + .. container:: catalog-stat-note + + Datasets with a documented public ``load_dataset(...)`` path. + + +Catalog by Hazard +----------------- + +Use the hazard tabs below to browse the public dataset catalog. Each +card keeps the summary short, then links into the detail page, the +primary source, and the most relevant inspection or registry surface. + +.. tab-set:: + :class: catalog-tabs + + .. tab-item:: Shared Forcing + + .. container:: catalog-section-note + + Cross-hazard meteorology and imagery sources that support multiple PyHazards workflows, inspections, and forcing pipelines. + + .. rubric:: Implemented Datasets + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: ERA5 + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + ECMWF's global reanalysis used as a high-resolution meteorological baseline for hazard experiments. + + .. container:: catalog-chip-row + + :bdg-secondary:`Reanalysis` :bdg-info:`Regular latitude-longitude grid` + + .. container:: catalog-meta-row + + **Coverage:** Global + + .. container:: catalog-meta-row + + **Update Cadence:** Daily ERA5T updates with about 5-day latency, followed by final validated releases after 2-3 months + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10`` + + .. container:: catalog-link-row + + **Details:** :doc:`ERA5 ` + + .. container:: catalog-link-row + + **Primary Source:** `Hersbach et al. (2020). The ERA5 global reanalysis. `_ + + .. grid-item-card:: GOES-R + :class-card: catalog-entry-card + + .. 
container:: catalog-entry-summary + + Rapid-refresh GOES-R satellite imagery used for smoke, fire, and weather monitoring workflows. + + .. container:: catalog-chip-row + + :bdg-secondary:`Geostationary Imagery` :bdg-info:`Raster imagery time series on the ABI fixed grid` + + .. container:: catalog-meta-row + + **Coverage:** Western Hemisphere / Americas geostationary view + + .. container:: catalog-meta-row + + **Update Cadence:** Continuous ingest as new files become available + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.goesr.inspection --path /path/to/goesr_data --max-items 10`` + + .. container:: catalog-link-row + + **Details:** :doc:`GOES-R ` + + .. container:: catalog-link-row + + **Primary Source:** `Schmit et al. (2017). A closer look at the ABI on the GOES-R series. `_ + + .. grid-item-card:: MERRA-2 + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Global atmospheric reanalysis from NASA GMAO used as a shared meteorological backbone for hazard modeling. + + .. container:: catalog-chip-row + + :bdg-secondary:`Reanalysis` :bdg-info:`Regular latitude-longitude grid` + + .. container:: catalog-meta-row + + **Coverage:** Global + + .. container:: catalog-meta-row + + **Update Cadence:** Published monthly with typical 2-3 week latency after month end + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.merra2.inspection 20260101`` + + .. container:: catalog-link-row + + **Details:** :doc:`MERRA-2 ` + + .. container:: catalog-link-row + + **Primary Source:** `Gelaro et al. (2017). The Modern-Era Retrospective Analysis for Research and Applications, Version 2 (MERRA-2). `_ + + + .. tab-item:: Wildfire + + .. container:: catalog-section-note + + Wildfire datasets span authoritative incident records, active-fire detections, fuels, burn severity, and forecast-ready benchmark adapters. + + .. rubric:: Implemented Datasets + + .. 
grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: FIRMS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + NASA's near-real-time active fire detections used for operational wildfire monitoring and event labeling. + + .. container:: catalog-chip-row + + :bdg-secondary:`Active Fire Detections` :bdg-info:`Event-based point detections` + + .. container:: catalog-meta-row + + **Coverage:** Global + + .. container:: catalog-meta-row + + **Update Cadence:** Fire maps refresh about every 5 minutes and downloadable files refresh about hourly + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.firms.inspection --path /path/to/firms_data --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`FIRMS ` + + .. container:: catalog-link-row + + **Primary Source:** `Schroeder et al. (2014). The New VIIRS 375 m active fire detection data product. `_ + + .. grid-item-card:: FPA-FOD Tabular + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Incident-level FPA-FOD features packaged for wildfire cause and size classification. + + .. container:: catalog-chip-row + + :bdg-secondary:`Incident Tabular` :bdg-info:`Tabular feature vectors` + + .. container:: catalog-meta-row + + **Coverage:** User-provided FPA-FOD coverage + + .. container:: catalog-meta-row + + **Update Cadence:** User-managed local inputs or deterministic micro mode + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.fpa_fod_tabular.inspection --task cause --micro`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`FPA-FOD Tabular ` + + .. 
container:: catalog-link-row + + **Primary Source:** `PyHazards FPA-FOD tabular adaptation for the wildfire incident classification path. `_ + + .. grid-item-card:: FPA-FOD Weekly + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Weekly FPA-FOD aggregates packaged for next-week wildfire count forecasting by size group. + + .. container:: catalog-chip-row + + :bdg-secondary:`Weekly Forecasting` :bdg-info:`Temporal tabular sequences` + + .. container:: catalog-meta-row + + **Coverage:** User-provided FPA-FOD coverage + + .. container:: catalog-meta-row + + **Update Cadence:** User-managed local inputs or deterministic micro mode + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.fpa_fod_weekly.inspection --micro --lookback-weeks 12`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`FPA-FOD Weekly ` + + .. container:: catalog-link-row + + **Primary Source:** `PyHazards FPA-FOD weekly adaptation for the wildfire forecasting path. `_ + + .. grid-item-card:: LANDFIRE + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Nationwide fuels, vegetation, and canopy layers used as static wildfire covariates. + + .. container:: catalog-chip-row + + :bdg-secondary:`Fuels and Vegetation` :bdg-info:`Gridded raster layers` + + .. container:: catalog-meta-row + + **Coverage:** United States + + .. container:: catalog-meta-row + + **Update Cadence:** Annual versioned update suites + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.landfire.inspection --path /path/to/landfire_data --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`LANDFIRE ` + + .. container:: catalog-link-row + + **Primary Source:** `Rollins (2009). 
LANDFIRE: A nationally consistent vegetation, wildland fire, and fuel assessment. `_ + + .. grid-item-card:: MTBS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + U.S. burn severity and fire perimeter products used for post-fire analysis and wildfire evaluation. + + .. container:: catalog-chip-row + + :bdg-secondary:`Burn Severity` :bdg-info:`Per-fire rasters with associated vector perimeters` + + .. container:: catalog-meta-row + + **Coverage:** United States + + .. container:: catalog-meta-row + + **Update Cadence:** Continuous mapping with quarterly releases + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.mtbs.inspection --path /path/to/mtbs_data --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`MTBS ` + + .. container:: catalog-link-row + + **Primary Source:** `Eidenshink et al. (2007). A project for monitoring trends in burn severity. `_ + + .. grid-item-card:: WFIGS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Interagency wildfire incident records used as authoritative wildfire ground truth across the United States. + + .. container:: catalog-chip-row + + :bdg-secondary:`Incident Records` :bdg-info:`Incident points and perimeters` + + .. container:: catalog-meta-row + + **Coverage:** United States + + .. container:: catalog-meta-row + + **Update Cadence:** Refreshed from IRWIN roughly every 5 minutes, with perimeter changes often appearing within 15 minutes + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.wfigs.inspection --path /path/to/wfigs_data --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`WFIGS ` + + .. 
container:: catalog-link-row + + **Primary Source:** `National Interagency Fire Center. Wildland Fire Incident Geospatial Services (WFIGS). `_ + + + .. tab-item:: Flood + + .. container:: catalog-section-note + + Flood datasets combine event records with streamflow and inundation benchmark adapters used by the public flood models. + + .. rubric:: Implemented Datasets + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Caravan + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed streamflow benchmark adapter aligned to the Caravan large-sample hydrology ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Streamflow Benchmark` :bdg-info:`Graph-temporal basin or node sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned streamflow forecasting samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('caravan_streamflow', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Flood Benchmark `, :doc:`Caravan ` + + .. container:: catalog-link-row + + **Details:** :doc:`Caravan ` + + .. container:: catalog-link-row + + **Primary Source:** `Caravan - A global community dataset for large-sample hydrology `_ + + .. grid-item-card:: FloodCastBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed inundation benchmark adapter aligned to the FloodCastBench evaluation ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Inundation Benchmark` :bdg-info:`Raster inundation sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned flood inundation samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. 
container:: catalog-meta-row + + **Registry:** ``load_dataset('floodcastbench_inundation', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Flood Benchmark `, :doc:`FloodCastBench ` + + .. container:: catalog-link-row + + **Details:** :doc:`FloodCastBench ` + + .. container:: catalog-link-row + + **Primary Source:** `FloodCastBench `_ + + .. grid-item-card:: HydroBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed streamflow diagnostics adapter aligned to the HydroBench ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Streamflow Benchmark` :bdg-info:`Graph-temporal basin or node sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned streamflow forecasting samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('hydrobench_streamflow', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Flood Benchmark `, :doc:`HydroBench ` + + .. container:: catalog-link-row + + **Details:** :doc:`HydroBench ` + + .. container:: catalog-link-row + + **Primary Source:** `HydroBench `_ + + .. grid-item-card:: NOAA Flood Events + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Historical NOAA storm-event flood records used as event labels and impact targets for flood studies. + + .. container:: catalog-chip-row + + :bdg-secondary:`Event Records` :bdg-info:`Tabular event records with administrative regions and optional point coordinates` + + .. container:: catalog-meta-row + + **Coverage:** United States + + .. container:: catalog-meta-row + + **Update Cadence:** Updated monthly, typically 75-90 days after the end of a data month + + .. 
container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.noaa_flood.inspection --path /path/to/noaa_flood_data --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Flood Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`NOAA Flood Events ` + + .. container:: catalog-link-row + + **Primary Source:** `NOAA National Centers for Environmental Information. Storm Events Database Documentation. `_ + + .. grid-item-card:: WaterBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed streamflow benchmark adapter aligned to the WaterBench ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Streamflow Benchmark` :bdg-info:`Graph-temporal basin or node sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned streamflow forecasting samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('waterbench_streamflow', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Flood Benchmark `, :doc:`WaterBench ` + + .. container:: catalog-link-row + + **Details:** :doc:`WaterBench ` + + .. container:: catalog-link-row + + **Primary Source:** `WaterBench: A Large-scale Benchmark Dataset for Data-driven Streamflow Forecasting `_ + + + .. tab-item:: Earthquake + + .. container:: catalog-section-note + + Earthquake datasets cover waveform-picking and forecasting adapters that align the public models with the shared earthquake benchmark. + + .. rubric:: Implemented Datasets + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: AEFA Forecast + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed dense-grid forecasting adapter aligned to the AEFA earthquake forecasting workflow. + + .. 
container:: catalog-chip-row + + :bdg-secondary:`Forecast Benchmark` :bdg-info:`Dense-grid wavefield tensors` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned earthquake forecasting samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('aefa_forecast', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Earthquake Benchmark `, :doc:`AEFA ` + + .. container:: catalog-link-row + + **Details:** :doc:`AEFA Forecast ` + + .. container:: catalog-link-row + + **Primary Source:** `AEFA `_ + + .. grid-item-card:: pick-benchmark + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed waveform picking adapter aligned to the pick-benchmark evaluation ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Waveform Benchmark` :bdg-info:`Multichannel waveform windows` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned earthquake phase-picking samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('pick_benchmark_waveforms', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Earthquake Benchmark `, :doc:`pick-benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`pick-benchmark ` + + .. container:: catalog-link-row + + **Primary Source:** `pick-benchmark `_ + + .. grid-item-card:: SeisBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed waveform picking adapter aligned to the SeisBench ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Waveform Benchmark` :bdg-info:`Multichannel waveform windows` + + .. 
container:: catalog-meta-row + + **Coverage:** Benchmark-aligned earthquake phase-picking samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('seisbench_waveforms', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Earthquake Benchmark `, :doc:`SeisBench ` + + .. container:: catalog-link-row + + **Details:** :doc:`SeisBench ` + + .. container:: catalog-link-row + + **Primary Source:** `SeisBench - A Toolbox for Machine Learning in Seismology `_ + + + .. tab-item:: Tropical Cyclone + + .. container:: catalog-section-note + + Storm datasets cover best-track archives and benchmark adapters used by the shared tropical cyclone track-intensity workflow. + + .. rubric:: Implemented Datasets + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: IBTrACS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed storm-track adapter aligned to the IBTrACS tropical cyclone archive. + + .. container:: catalog-chip-row + + :bdg-secondary:`Track Archive` :bdg-info:`Storm-track history sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned tropical cyclone track and intensity samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('ibtracs_tracks', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Tropical Cyclone Benchmark `, :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Details:** :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Primary Source:** `IBTrACS `_ + + .. grid-item-card:: TCBench Alpha + :class-card: catalog-entry-card + + .. 
container:: catalog-entry-summary + + Synthetic-backed storm-track benchmark adapter aligned to the TCBench Alpha ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Track Benchmark` :bdg-info:`Storm-track history sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned tropical cyclone track and intensity samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('tcbench_alpha', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Tropical Cyclone Benchmark `, :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Details:** :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Primary Source:** `TCBench Alpha `_ + + .. grid-item-card:: TropiCycloneNet-Dataset + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed storm-track benchmark adapter aligned to the TropiCycloneNet-Dataset ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Track Benchmark` :bdg-info:`Storm-track history sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned tropical cyclone track and intensity samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('tropicyclonenet_dataset', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Tropical Cyclone Benchmark `, :doc:`TropiCycloneNet-Dataset ` + + .. container:: catalog-link-row + + **Details:** :doc:`TropiCycloneNet-Dataset ` + + .. 
container:: catalog-link-row + + **Primary Source:** `TropiCycloneNet-Dataset `_ + + + +Recommended Entry Points +------------------------ + +If you are new to PyHazards, start with one high-signal dataset per +hazard group before branching into the full catalog. + +.. grid:: 1 1 2 4 + :gutter: 2 + :class-container: catalog-recommend-grid + + .. grid-item-card:: Shared Forcing + :class-card: catalog-detail-card + + **Start with:** :doc:`ERA5 ` + + ECMWF's global reanalysis used as a high-resolution meteorological baseline for hazard experiments. + + **Primary Surface:** Inspection: ``python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10`` + + .. grid-item-card:: Wildfire + :class-card: catalog-detail-card + + **Start with:** :doc:`FPA-FOD Weekly ` + + Weekly FPA-FOD aggregates packaged for next-week wildfire count forecasting by size group. + + **Primary Surface:** Inspection: ``python -m pyhazards.datasets.fpa_fod_weekly.inspection --micro --lookback-weeks 12`` + + .. grid-item-card:: Flood + :class-card: catalog-detail-card + + **Start with:** :doc:`Caravan ` + + Synthetic-backed streamflow benchmark adapter aligned to the Caravan large-sample hydrology ecosystem. + + **Primary Surface:** Registry: ``load_dataset('caravan_streamflow', ...)`` + + .. grid-item-card:: Earthquake + :class-card: catalog-detail-card + + **Start with:** :doc:`SeisBench ` + + Synthetic-backed waveform picking adapter aligned to the SeisBench ecosystem. + + **Primary Surface:** Registry: ``load_dataset('seisbench_waveforms', ...)`` + + .. grid-item-card:: Tropical Cyclone + :class-card: catalog-detail-card + + **Start with:** :doc:`IBTrACS ` + + Synthetic-backed storm-track adapter aligned to the IBTrACS tropical cyclone archive. + + **Primary Surface:** Registry: ``load_dataset('ibtracs_tracks', ...)`` + + +Programmatic Use +---------------- + +.. 
code-block:: bash + + python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10 + +.. code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "fpa_fod_weekly", + micro=True, + lookback_weeks=12, + features="counts+time", + ).load() + print(sorted(data.splits.keys())) + +Use :doc:`api/pyhazards.datasets` for the developer dataset workflow +and package-level API lookup. Pair this page with :doc:`pyhazards_models` +and :doc:`pyhazards_benchmarks` when you need to trace datasets into +model and evaluation coverage. + +.. toctree:: + :maxdepth: 1 + :hidden: + + datasets/era5 + datasets/goesr + datasets/merra2 + datasets/firms + datasets/fpa_fod_tabular + datasets/fpa_fod_weekly + datasets/landfire + datasets/mtbs + datasets/wfigs + datasets/caravan_streamflow + datasets/floodcastbench_inundation + datasets/hydrobench_streamflow + datasets/noaa_flood + datasets/waterbench_streamflow + datasets/aefa_forecast + datasets/pick_benchmark_waveforms + datasets/seisbench_waveforms + datasets/ibtracs_tracks + datasets/tcbench_alpha + datasets/tropicyclonenet_dataset diff --git a/docs/_sources/pyhazards_engine.rst.txt b/docs/_sources/pyhazards_engine.rst.txt new file mode 100644 index 00000000..8e5e310a --- /dev/null +++ b/docs/_sources/pyhazards_engine.rst.txt @@ -0,0 +1,47 @@ +Engine +=================== + +Overview +-------- + +Use the engine when you want a shared interface for training, evaluation, and +prediction without rewriting the loop for every hazard task. + +Core modules +------------ + +- ``pyhazards.engine.trainer``: the ``Trainer`` class with ``fit``, + ``evaluate``, and ``predict``. +- ``pyhazards.engine.distributed``: distributed-strategy helpers. +- ``pyhazards.engine.inference``: inference utilities for large grids or + sliding-window style workflows. + +Typical Usage +------------- + +.. 
code-block:: python + + import torch + from pyhazards.engine import Trainer + from pyhazards.metrics import ClassificationMetrics + from pyhazards.models import build_model + + model = build_model(name="mlp", task="classification", in_dim=16, out_dim=2) + trainer = Trainer(model=model, metrics=[ClassificationMetrics()], mixed_precision=True) + + optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) + loss_fn = torch.nn.CrossEntropyLoss() + + trainer.fit(data_bundle, optimizer=optimizer, loss_fn=loss_fn, max_epochs=10) + results = trainer.evaluate(data_bundle, split="test") + preds = trainer.predict(data_bundle, split="test") + +Device and Distributed Notes +---------------------------- + +- ``Trainer(strategy="auto")`` uses DDP when multiple GPUs are available; otherwise runs single-device. +- ``mixed_precision=True`` enables AMP when on CUDA. +- Device selection is handled via ``pyhazards.utils.hardware.auto_device`` by default. + +Next step: pair this page with :doc:`pyhazards_metrics` and +:doc:`pyhazards_utils` when you want to customize evaluation or device behavior. diff --git a/docs/_sources/pyhazards_metrics.rst.txt b/docs/_sources/pyhazards_metrics.rst.txt new file mode 100644 index 00000000..128f5896 --- /dev/null +++ b/docs/_sources/pyhazards_metrics.rst.txt @@ -0,0 +1,29 @@ +Metrics +=================== + +Overview +-------- + +PyHazards includes small, task-oriented metric classes that accumulate +predictions and targets across a full split. + +Core Classes +------------ + +- ``MetricBase``: shared interface with ``update``, ``compute``, and ``reset``. +- ``ClassificationMetrics``: basic classification metrics such as accuracy. +- ``RegressionMetrics``: MAE and RMSE style regression summaries. +- ``SegmentationMetrics``: segmentation-oriented aggregation. + +Usage +----- + +.. 
code-block:: python + + from pyhazards.metrics import ClassificationMetrics + + metrics = [ClassificationMetrics()] + # pass to Trainer or update metrics directly + +Use this page together with :doc:`pyhazards_engine` if you want a consistent +train/evaluate workflow. diff --git a/docs/_sources/pyhazards_models.rst.txt b/docs/_sources/pyhazards_models.rst.txt new file mode 100644 index 00000000..fd46d953 --- /dev/null +++ b/docs/_sources/pyhazards_models.rst.txt @@ -0,0 +1,952 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +Models +=================== + +Browse PyHazards model implementations across hazard families, compare +scope and maturity, and navigate to model-specific detail pages. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Families + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 4 + + .. container:: catalog-stat-note + + Catalog tabs grouped by the normalized public hazard taxonomy. + + .. grid-item-card:: Implemented Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 24 + + .. container:: catalog-stat-note + + Public core baselines plus additional implemented variants. + + .. grid-item-card:: Experimental Adapters + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 3 + + .. container:: catalog-stat-note + + Prototype weather-model integrations kept separate from the stable catalog. + + .. grid-item-card:: Benchmark-linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 27 + + .. container:: catalog-stat-note + + Models with explicit benchmark-family or ecosystem links on this page. + + +Catalog by Hazard +----------------- + +Use the hazard tabs below to browse the public catalog. Each card keeps +the index-page summary short, then links into model-specific detail +pages and compatible benchmark coverage. + +.. 
tab-set:: + :class: catalog-tabs + + .. tab-item:: Wildfire + + .. container:: catalog-section-note + + Wildfire models cover danger forecasting, weekly activity forecasting, and spread prediction under the shared wildfire benchmark family. + + .. rubric:: Implemented Models + + .. container:: catalog-section-note + + This table includes both core baselines and public variants or additional implementations for the hazard family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: ASUFM + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A temporal convolution baseline for weekly wildfire activity forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Forecasting` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`ASUFM ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Paper:** `Wildfire Spread Prediction in North America Using Satellite Imagery and Vision Transformer `_ | **Repo:** `Repository `__ + + .. grid-item-card:: DNN-LSTM-AutoEncoder + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A two-stage wildfire framework with a DNN stage for incident-level cause and size prediction plus an LSTM + autoencoder stage for weekly forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Classification` :bdg-secondary:`Forecasting` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`DNN-LSTM-AutoEncoder ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Paper:** `Developing risk assessment framework for wildfire in the United States `_ + + .. grid-item-card:: FireCastNet + :class-card: catalog-entry-card + + .. 
container:: catalog-entry-summary + + A compact encoder-decoder baseline for wildfire spread mask prediction. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`FireCastNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `FireCastNet: Earth-as-a-Graph for Seasonal Fire Prediction `_ | **Repo:** `Repository `__ + + .. grid-item-card:: ForeFire Adapter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A lightweight simulator-style wildfire spread adapter inspired by front-propagation systems. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`ForeFire Adapter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `ForeFire: A Modular, Scriptable C++ Simulation Engine and Library for Wildland-Fire Spread `_ | **Repo:** `Repository `__ + + .. grid-item-card:: Wildfire Forecasting + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A sequence forecasting baseline for next-window wildfire activity across weekly count features. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Forecasting` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Wildfire Forecasting ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. 
container:: catalog-link-row + + **Paper:** `Wildfire Danger Prediction and Understanding with Deep Learning `_ | **Repo:** `Repository `__ + + .. grid-item-card:: WildfireSpreadTS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A temporal convolution wildfire spread baseline over short raster history windows. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `WildfireSpreadTS: A Dataset of Multi-Modal Time Series for Wildfire Spread Prediction `_ | **Repo:** `Repository `__ + + .. grid-item-card:: WRF-SFIRE Adapter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A lightweight raster wildfire spread adapter inspired by WRF-SFIRE style transport. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`WRF-SFIRE Adapter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `Coupled atmosphere-wildland fire modeling with WRF 3.3 and SFIRE 2011 `_ | **Repo:** `Repository `__ + + .. grid-item-card:: CNN-ASPP + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An explainable CNN segmentation model with an ASPP mechanism for next-day wildfire spread prediction. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. 
container:: catalog-meta-row + + **Details:** :doc:`CNN-ASPP ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `Application of Explainable Artificial Intelligence in Predicting Wildfire Spread `_ + + + .. tab-item:: Earthquake + + .. container:: catalog-section-note + + Earthquake models span phase picking and dense-grid forecasting, with detail pages linked to the shared earthquake benchmark coverage. + + .. rubric:: Implemented Models + + .. container:: catalog-section-note + + This table includes both core baselines and public variants or additional implementations for the hazard family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: EQNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A transformer-style earthquake phase-picking baseline for modern sequence modeling comparisons. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`EQNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`SeisBench ` + + .. container:: catalog-link-row + + **Paper:** `An End-To-End Earthquake Detection Method for Joint Phase Picking and Association Using Deep Learning `_ | **Repo:** `Repository `__ + + .. grid-item-card:: EQTransformer + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A bidirectional sequence encoder for joint earthquake phase picking with attention pooling over waveform windows. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-success:`Implemented` + + .. 
container:: catalog-meta-row + + **Details:** :doc:`EQTransformer ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`pick-benchmark ` + + .. container:: catalog-link-row + + **Paper:** `Earthquake Transformer-An attentive deep-learning model for simultaneous earthquake detection and phase picking `_ | **Repo:** `Repository `__ + + .. grid-item-card:: GPD + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact CNN baseline for generalized phase detection and historical earthquake picking comparisons. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`GPD ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`pick-benchmark ` + + .. container:: catalog-link-row + + **Paper:** `Generalized Seismic Phase Detection with Deep Learning `_ | **Repo:** `Repository `__ + + .. grid-item-card:: PhaseNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A lightweight phase-picking baseline that predicts P- and S-arrival indices from multichannel waveform windows. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`PhaseNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`SeisBench ` + + .. container:: catalog-link-row + + **Paper:** `PhaseNet: A Deep-Neural-Network-Based Seismic Arrival Time Picking Method `_ | **Repo:** `Repository `__ + + .. grid-item-card:: WaveCastNet + :class-card: catalog-entry-card + + .. 
container:: catalog-entry-summary + + A ConvLEM-based sequence-to-sequence model for dense-grid earthquake wavefield forecasting and early-warning style rollout experiments. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Wavefield Forecasting` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`WaveCastNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`AEFA `, :doc:`pyCSEP ` + + .. container:: catalog-link-row + + **Paper:** `Rapid wavefield forecasting for earthquake early warning via deep sequence to sequence learning `_ + + + .. tab-item:: Flood + + .. container:: catalog-section-note + + Flood models cover streamflow and inundation forecasting, ranging from sequence baselines to dense-grid flood-mapping architectures. + + .. rubric:: Implemented Models + + .. container:: catalog-section-note + + This table includes both core baselines and public variants or additional implementations for the hazard family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: EA-LSTM + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An entity-aware hydrology baseline with static-feature gating over streamflow histories. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`EA-LSTM ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WaterBench ` + + .. container:: catalog-link-row + + **Paper:** `Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets `_ | **Repo:** `Repository `__ + + .. 
grid-item-card:: FloodCast + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact spatiotemporal flood-inundation baseline for raster forecast experiments. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Inundation` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`FloodCast ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`FloodCastBench ` + + .. container:: catalog-link-row + + **Paper:** `Large-scale flood modeling and forecasting with FloodCast `_ | **Repo:** `Repository `__ + + .. grid-item-card:: Google Flood Forecasting + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A transformer-style sequence baseline for nodewise streamflow forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Google Flood Forecasting ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`Caravan ` + + .. container:: catalog-link-row + + **Paper:** `Global Flood Forecasting at a Fine Catchment Resolution using Machine Learning `_ | **Repo:** `Repository `__ + + .. grid-item-card:: NeuralHydrology LSTM + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An adapter-style LSTM baseline for nodewise streamflow forecasting on graph-temporal inputs. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`NeuralHydrology LSTM ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. 
container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`Caravan ` + + .. container:: catalog-link-row + + **Paper:** `Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets `_ | **Repo:** `Repository `__ + + .. grid-item-card:: UrbanFloodCast + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A U-Net style urban inundation baseline for dense-grid flood prediction. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Inundation` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`UrbanFloodCast ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`FloodCastBench ` + + .. container:: catalog-link-row + + **Paper:** `UrbanFloodCast: WMO Urban Flooding Forecasting Challenge `_ | **Repo:** `Repository `__ + + .. grid-item-card:: HydroGraphNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A physics-informed graph neural network for flood forecasting with interpretable KAN-style components, residual message passing, and delta-state decoding. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`HydroGraphNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`HydroBench ` + + .. container:: catalog-link-row + + **Paper:** `Interpretable physics-informed graph neural networks for flood forecasting `_ + + + .. tab-item:: Tropical Cyclone + + .. container:: catalog-section-note + + Storm models are organized under one tropical-cyclone family, including basin-specific hurricane baselines and shared all-basin forecasting models. + + .. 
rubric:: Implemented Models + + .. container:: catalog-section-note + + This table includes both core baselines and public variants or additional implementations for the hazard family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hurricast + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact multimodal storm baseline for hurricane track and intensity forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Hurricast ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Paper:** `Hurricane Forecasting: A Novel Multimodal Machine Learning Framework `_ | **Repo:** `Repository `__ + + .. grid-item-card:: SAF-Net + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A spatiotemporal tropical-cyclone baseline with an intensity-focused head and shared trajectory output. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`SAF-Net ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Paper:** `SAF-Net: A spatio-temporal deep learning method for typhoon intensity prediction `_ | **Repo:** `Repository `__ + + .. grid-item-card:: TCIF-fusion + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A knowledge-guided fusion baseline for tropical cyclone track and intensity forecasting. + + .. 
container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`TCIF-fusion ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Paper:** `Tropical cyclone intensity forecasting using model knowledge guided deep learning model `_ | **Repo:** `Repository `__ + + .. grid-item-card:: Tropical Cyclone MLP + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact MLP baseline for hurricane track and intensity forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Tropical Cyclone MLP ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Paper:** `Deep Learning Experiments for Tropical Cyclone Intensity Forecasts `_ | **Repo:** `Repository `__ + + .. grid-item-card:: TropiCycloneNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A GRU plus attention baseline for all-basin tropical cyclone forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`TropiCycloneNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`TropiCycloneNet-Dataset ` + + .. 
container:: catalog-link-row + + **Paper:** `Benchmark dataset and deep learning method for global tropical cyclone forecasting `_ | **Repo:** `Repository `__ + + .. rubric:: Experimental Adapters + + .. container:: catalog-section-note + + These entries remain public as lightweight wrapper or prototype integrations and should not be counted as stable implemented methods. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: FourCastNet TC Adapter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An experimental wrapper-style storm adapter inspired by FourCastNet forecast fields. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-warning:`Experimental Adapter` + + .. container:: catalog-meta-row + + **Details:** :doc:`FourCastNet TC Adapter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Paper:** `FourCastNet: A Global Data-driven High-resolution Weather Model using Adaptive Fourier Neural Operators `_ | **Repo:** `Repository `__ + + .. grid-item-card:: GraphCast TC Adapter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An experimental wrapper-style storm adapter inspired by GraphCast/GenCast forecast fields. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-warning:`Experimental Adapter` + + .. container:: catalog-meta-row + + **Details:** :doc:`GraphCast TC Adapter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`IBTrACS ` + + .. 
container:: catalog-link-row + + **Paper:** `GraphCast: Learning skillful medium-range global weather forecasting `_ | **Repo:** `Repository `__ + + .. grid-item-card:: Pangu TC Adapter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An experimental wrapper-style storm adapter inspired by Pangu-Weather forecast fields. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-warning:`Experimental Adapter` + + .. container:: catalog-meta-row + + **Details:** :doc:`Pangu TC Adapter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Paper:** `Accurate medium-range global weather forecasting with 3D neural networks `_ | **Repo:** `Repository `__ + + + +Recommended Entry Points +------------------------ + +If you are new to PyHazards, these four models provide the clearest +starting point for each hazard family. + +.. grid:: 1 1 2 4 + :gutter: 2 + :class-container: catalog-recommend-grid + + .. grid-item-card:: Wildfire + :class-card: catalog-detail-card + + **Start with:** :doc:`FireCastNet ` + + A compact encoder-decoder baseline for wildfire spread mask prediction. + + **Benchmark:** :doc:`Wildfire Benchmark ` + + .. grid-item-card:: Earthquake + :class-card: catalog-detail-card + + **Start with:** :doc:`PhaseNet ` + + A lightweight phase-picking baseline that predicts P- and S-arrival indices from multichannel waveform windows. + + **Benchmark:** :doc:`Earthquake Benchmark ` + + .. grid-item-card:: Flood + :class-card: catalog-detail-card + + **Start with:** :doc:`FloodCast ` + + A compact spatiotemporal flood-inundation baseline for raster forecast experiments. + + **Benchmark:** :doc:`Flood Benchmark ` + + .. 
grid-item-card:: Tropical Cyclone + :class-card: catalog-detail-card + + **Start with:** :doc:`Hurricast ` + + A compact multimodal storm baseline for hurricane track and intensity forecasting. + + **Benchmark:** :doc:`Tropical Cyclone Benchmark ` + + +Programmatic Use +---------------- + +Use :doc:`api/pyhazards.models` for the developer registry workflow, +builder examples, and package-level API lookup. Use +:doc:`pyhazards_benchmarks` to compare compatible benchmark families +before selecting a model for evaluation. + +.. toctree:: + :maxdepth: 1 + :hidden: + + modules/models_asufm + modules/models_eqnet + modules/models_eqtransformer + modules/models_firecastnet + modules/models_floodcast + modules/models_forefire + modules/models_fourcastnet_tc + modules/models_google_flood_forecasting + modules/models_gpd + modules/models_graphcast_tc + modules/models_hurricast + modules/models_hydrographnet + modules/models_neuralhydrology_ealstm + modules/models_neuralhydrology_lstm + modules/models_pangu_tc + modules/models_phasenet + modules/models_saf_net + modules/models_tcif_fusion + modules/models_tropicalcyclone_mlp + modules/models_tropicyclonenet + modules/models_urbanfloodcast + modules/models_wavecastnet + modules/models_wildfire_aspp + modules/models_wildfire_forecasting + modules/models_wildfire_fpa + modules/models_wildfirespreadts + modules/models_wrf_sfire diff --git a/docs/_sources/pyhazards_reports.rst.txt b/docs/_sources/pyhazards_reports.rst.txt new file mode 100644 index 00000000..c42ede8e --- /dev/null +++ b/docs/_sources/pyhazards_reports.rst.txt @@ -0,0 +1,36 @@ +Reports +=================== + +Overview +-------- + +Use the reports layer when you want benchmark outputs exported in structured +formats that are easy to archive, compare, and publish. 
+ +What This Page Covers +--------------------- + +- ``pyhazards.reports`` exporters for JSON, CSV, and Markdown summaries +- how benchmark metrics and metadata are written to disk +- where report paths appear in ``BenchmarkRunSummary`` + +Typical Usage +------------- + +.. code-block:: python + + from pyhazards.configs import load_experiment_config + from pyhazards.engine import BenchmarkRunner + + config = load_experiment_config("pyhazards/configs/tc/hurricast_smoke.yaml") + summary = BenchmarkRunner().run(config, output_dir="reports/tc_demo") + print(summary.report_paths) + +Why It Matters +-------------- + +The reports layer keeps hazard comparisons reproducible by exporting the same +metric and config snapshot structure across benchmark runs. + +Next step: pair this page with :doc:`pyhazards_benchmarks` when you want to +inspect the evaluator contracts behind those report files. diff --git a/docs/_sources/pyhazards_utils.rst.txt b/docs/_sources/pyhazards_utils.rst.txt new file mode 100644 index 00000000..713beb65 --- /dev/null +++ b/docs/_sources/pyhazards_utils.rst.txt @@ -0,0 +1,22 @@ +Utils +=================== + +Overview +-------- + +Utility helpers keep the rest of the library concise. Use these modules for +device selection, reproducibility, and small shared helpers. + +Submodules +---------- + +- :mod:`pyhazards.utils.hardware`: device helpers and automatic device selection. +- :mod:`pyhazards.utils.common`: reproducibility, logging, and shared utility + functions. + +Typical Uses +------------ + +- choose CPU or GPU behavior explicitly, +- set deterministic seeds for experiments, +- reuse small helpers instead of copying project-specific boilerplate. 
diff --git a/docs/_sources/quick_start.rst.txt b/docs/_sources/quick_start.rst.txt new file mode 100644 index 00000000..0a9c6f5c --- /dev/null +++ b/docs/_sources/quick_start.rst.txt @@ -0,0 +1,111 @@ +Quick Start +=========== + +Use this page after :doc:`installation` to run the first end-to-end PyHazards +workflow: verify the package, inspect example data, build a model, and execute +one short training loop. + +Step 1: Verify the Package +-------------------------- + +Confirm that Python can import the package cleanly: + +.. code-block:: bash + + python -c "import pyhazards; print(pyhazards.__version__)" + +Step 2: Inspect Example Data +---------------------------- + +Use the ERA5 inspection entrypoint to validate the bundled sample data before +training: + +.. code-block:: bash + + python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10 + +Step 3: Build a Model +--------------------- + +Instantiate ``hydrographnet`` through the unified model registry: + +.. code-block:: python + + from pyhazards.models import build_model + + model = build_model( + name="hydrographnet", + task="regression", + node_in_dim=2, + edge_in_dim=3, + out_dim=1, + ) + print(type(model).__name__) + +Step 4: Run a Short Train/Evaluate Loop +--------------------------------------- + +This example pairs the ERA5 subset with ``hydrographnet`` to confirm that the +dataset, model, and training engine work together in one workflow. + +.. 
code-block:: python + + import torch + from pyhazards.data.load_hydrograph_data import load_hydrograph_data + from pyhazards.datasets import graph_collate + from pyhazards.engine import Trainer + from pyhazards.models import build_model + + data = load_hydrograph_data("pyhazards/data/era5_subset", max_nodes=50) + + model = build_model( + name="hydrographnet", + task="regression", + node_in_dim=2, + edge_in_dim=3, + out_dim=1, + ) + + trainer = Trainer(model=model, mixed_precision=False) + optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) + loss_fn = torch.nn.MSELoss() + + trainer.fit( + data, + optimizer=optimizer, + loss_fn=loss_fn, + max_epochs=1, + batch_size=1, + collate_fn=graph_collate, + ) + + metrics = trainer.evaluate( + data, + split="train", + batch_size=1, + collate_fn=graph_collate, + ) + print(metrics) + +Step 5: Next Steps +------------------ + +- Go to :doc:`pyhazards_datasets` to browse supported datasets. +- Go to :doc:`pyhazards_models` to compare built-in models. +- Go to :doc:`implementation` to add your own dataset or model. + +Device Notes +------------ + +PyHazards uses CUDA automatically when available. To force a device: + +.. code-block:: bash + + export PYHAZARDS_DEVICE=cuda:0 + +.. code-block:: python + + from pyhazards.utils import set_device + + set_device("cuda:0") + set_device("cpu") diff --git a/docs/_sources/references.rst.txt b/docs/_sources/references.rst.txt new file mode 100644 index 00000000..71a1176b --- /dev/null +++ b/docs/_sources/references.rst.txt @@ -0,0 +1,76 @@ +References +========== + +This page collects the main dataset and model references cited throughout the +PyHazards docs. It is a project reference list, not an exhaustive bibliography. + +Dataset References +------------------ + +- Gelaro, R., McCarty, W., Suárez, M. J., et al. (2017). *The Modern-Era Retrospective Analysis for Research and Applications, Version 2 (MERRA-2)*. `[link] `__. +- Hersbach, H., Bell, B., Berrisford, P., et al. (2020). 
*The ERA5 global reanalysis*. `[link] `__. +- NOAA National Centers for Environmental Information (NCEI). *Storm Events Database Documentation*. `[link] `__. +- Schroeder, W., Oliva, P., Giglio, L., and Csiszar, I. (2014). *The New VIIRS 375 m active fire detection data product: Algorithm description and initial assessment*. `[link] `__. +- Eidenshink, J., Schwind, B., Brewer, K., Zhu, Z., Quayle, B., and Howard, S. (2007). *A project for monitoring trends in burn severity*. `[link] `__. +- Rollins, M. G. (2009). *LANDFIRE: A nationally consistent vegetation, wildland fire, and fuel assessment*. `[link] `__. +- National Interagency Fire Center (NIFC). *Wildland Fire Incident Geospatial Services (WFIGS)*. `[link] `__. +- Schmit, T. J., Griffith, P., Gunshor, M. M., et al. (2017). *A closer look at the ABI on the GOES-R series*. `[link] `__. + +Model References +---------------- + +Wildfire +~~~~~~~~ + +- *Developing risk assessment framework for wildfire in the United States*. `[paper] `__. +- *Application of Explainable Artificial Intelligence in Predicting Wildfire Spread: An ASPP-Enabled CNN Approach*. `[paper] `__. +- *Wildfire Danger Prediction and Understanding with Deep Learning*. `[paper] `__, `[repo] `__. +- *WildfireSpreadTS: A Dataset of Multi-Modal Time Series for Wildfire Spread Prediction*. `[paper] `__, `[repo] `__. +- *Wildfire Spread Prediction in North America Using Satellite Imagery and Vision Transformer*. `[paper] `__, `[repo] `__. +- *ForeFire: A Modular, Scriptable C++ Simulation Engine and Library for Wildland-Fire Spread*. `[paper] `__, `[repo] `__. +- *Coupled atmosphere-wildland fire modeling with WRF 3.3 and SFIRE 2011*. `[paper] `__, `[repo] `__. +- *FireCastNet: Earth-as-a-Graph for Seasonal Fire Prediction*. `[paper] `__, `[repo] `__. + +Earthquake +~~~~~~~~~~ + +- *Rapid wavefield forecasting for earthquake early warning via deep sequence to sequence learning*. `[paper] `__. 
+- *PhaseNet: A Deep-Neural-Network-Based Seismic Arrival Time Picking Method*. `[paper] `__, `[repo] `__. +- *Earthquake Transformer-An attentive deep-learning model for simultaneous earthquake detection and phase picking*. `[paper] `__, `[repo] `__. +- *Generalized Seismic Phase Detection with Deep Learning*. `[paper] `__, `[repo] `__. +- *An End-To-End Earthquake Detection Method for Joint Phase Picking and Association Using Deep Learning*. `[paper] `__, `[repo] `__. + +Flood +~~~~~ + +- *Interpretable physics-informed graph neural networks for flood forecasting*. `[paper] `__. +- *Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets*. `[paper] `__, `[repo] `__. +- *Large-scale flood modeling and forecasting with FloodCast*. `[paper] `__, `[repo] `__. +- *UrbanFloodCast: WMO Urban Flooding Forecasting Challenge*. `[paper] `__, `[repo] `__. +- *Global Flood Forecasting at a Fine Catchment Resolution using Machine Learning*. `[paper] `__, `[repo] `__. + +Hurricane and Tropical Cyclone +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- *Hurricane Forecasting: A Novel Multimodal Machine Learning Framework*. `[paper] `__, `[repo] `__. +- *Deep Learning Experiments for Tropical Cyclone Intensity Forecasts*. `[paper] `__, `[repo] `__. +- *Benchmark dataset and deep learning method for global tropical cyclone forecasting*. `[paper] `__, `[repo] `__. +- *SAF-Net: A spatio-temporal deep learning method for typhoon intensity prediction*. `[paper] `__, `[repo] `__. +- *Tropical cyclone intensity forecasting using model knowledge guided deep learning model*. `[paper] `__, `[repo] `__. +- *GraphCast: Learning skillful medium-range global weather forecasting*. `[paper] `__, `[repo] `__. +- *Accurate medium-range global weather forecasting with 3D neural networks*. `[paper] `__, `[repo] `__. +- *FourCastNet: A Global Data-driven High-resolution Weather Model using Adaptive Fourier Neural Operators*. 
`[paper] `__, `[repo] `__. + +Benchmark and Data Resources +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- *SeisBench - A Toolbox for Machine Learning in Seismology*. `[paper] `__, `[repo] `__. +- *pick-benchmark*. `[repo] `__. +- *pyCSEP*. `[repo] `__. +- *AEFA*. `[repo] `__. +- *Caravan - A global community dataset for large-sample hydrology*. `[paper] `__, `[repo] `__. +- *WaterBench: A Large-scale Benchmark Dataset for Data-driven Streamflow Forecasting*. `[paper] `__, `[repo] `__. +- *FloodCastBench*. `[repo] `__. +- *HydroBench*. `[repo] `__. +- *TCBench Alpha*. `[repo] `__. +- *IBTrACS*. `[product page] `__. diff --git a/docs/_sources/team.rst.txt b/docs/_sources/team.rst.txt new file mode 100644 index 00000000..0a031d10 --- /dev/null +++ b/docs/_sources/team.rst.txt @@ -0,0 +1,39 @@ +Core Team +========= + +PyHazards is maintained by researchers and engineers working on hazard-focused +machine learning, data systems, and model development. + +Lead Developer +-------------- + +- Xueqi Cheng, Florida State University (xc25@fsu.edu) + +Founder +------- + +- Yushun Dong, Florida State University + +Principal Contributors & Maintainers +------------------------------------ + +- Yangshuang Xu, Florida State University +- Runyang Xu, Florida State University +- Hugh Long, Florida State University + +Core Contributors +----------------- + +- Lex Schneier, Florida State University +- Sharan Kumar Reddy Kodudula, Florida State University +- Cristian Victoria, Florida State University +- Deyang Hsu, University of Southern California +- Dacheng Shen, University of Southern California + +What the Team Maintains +----------------------- + +- technical direction for the library, +- code review and quality checks, +- documentation and examples, +- ongoing maintenance of public releases. 
diff --git a/docs/_sphinx_design_static/design-tabs.js b/docs/_sphinx_design_static/design-tabs.js new file mode 100644 index 00000000..b25bd6a4 --- /dev/null +++ b/docs/_sphinx_design_static/design-tabs.js @@ -0,0 +1,101 @@ +// @ts-check + +// Extra JS capability for selected tabs to be synced +// The selection is stored in local storage so that it persists across page loads. + +/** + * @type {Record} + */ +let sd_id_to_elements = {}; +const storageKeyPrefix = "sphinx-design-tab-id-"; + +/** + * Create a key for a tab element. + * @param {HTMLElement} el - The tab element. + * @returns {[string, string, string] | null} - The key. + * + */ +function create_key(el) { + let syncId = el.getAttribute("data-sync-id"); + let syncGroup = el.getAttribute("data-sync-group"); + if (!syncId || !syncGroup) return null; + return [syncGroup, syncId, syncGroup + "--" + syncId]; +} + +/** + * Initialize the tab selection. + * + */ +function ready() { + // Find all tabs with sync data + + /** @type {string[]} */ + let groups = []; + + document.querySelectorAll(".sd-tab-label").forEach((label) => { + if (label instanceof HTMLElement) { + let data = create_key(label); + if (data) { + let [group, id, key] = data; + + // add click event listener + // @ts-ignore + label.onclick = onSDLabelClick; + + // store map of key to elements + if (!sd_id_to_elements[key]) { + sd_id_to_elements[key] = []; + } + sd_id_to_elements[key].push(label); + + if (groups.indexOf(group) === -1) { + groups.push(group); + // Check if a specific tab has been selected via URL parameter + const tabParam = new URLSearchParams(window.location.search).get( + group + ); + if (tabParam) { + console.log( + "sphinx-design: Selecting tab id for group '" + + group + + "' from URL parameter: " + + tabParam + ); + window.sessionStorage.setItem(storageKeyPrefix + group, tabParam); + } + } + + // Check is a specific tab has been selected previously + let previousId = window.sessionStorage.getItem( + storageKeyPrefix + group + 
); + if (previousId === id) { + // console.log( + // "sphinx-design: Selecting tab from session storage: " + id + // ); + // @ts-ignore + label.previousElementSibling.checked = true; + } + } + } + }); +} + +/** + * Activate other tabs with the same sync id. + * + * @this {HTMLElement} - The element that was clicked. + */ +function onSDLabelClick() { + let data = create_key(this); + if (!data) return; + let [group, id, key] = data; + for (const label of sd_id_to_elements[key]) { + if (label === this) continue; + // @ts-ignore + label.previousElementSibling.checked = true; + } + window.sessionStorage.setItem(storageKeyPrefix + group, id); +} + +document.addEventListener("DOMContentLoaded", ready, false); diff --git a/docs/_sphinx_design_static/sphinx-design.min.css b/docs/_sphinx_design_static/sphinx-design.min.css new file mode 100644 index 00000000..860c36da --- /dev/null +++ b/docs/_sphinx_design_static/sphinx-design.min.css @@ -0,0 +1 @@ +.sd-bg-primary{background-color:var(--sd-color-primary) !important}.sd-bg-text-primary{color:var(--sd-color-primary-text) !important}button.sd-bg-primary:focus,button.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}a.sd-bg-primary:focus,a.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}.sd-bg-secondary{background-color:var(--sd-color-secondary) !important}.sd-bg-text-secondary{color:var(--sd-color-secondary-text) !important}button.sd-bg-secondary:focus,button.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}a.sd-bg-secondary:focus,a.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}.sd-bg-success{background-color:var(--sd-color-success) !important}.sd-bg-text-success{color:var(--sd-color-success-text) !important}button.sd-bg-success:focus,button.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) 
!important}a.sd-bg-success:focus,a.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}.sd-bg-info{background-color:var(--sd-color-info) !important}.sd-bg-text-info{color:var(--sd-color-info-text) !important}button.sd-bg-info:focus,button.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}a.sd-bg-info:focus,a.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}.sd-bg-warning{background-color:var(--sd-color-warning) !important}.sd-bg-text-warning{color:var(--sd-color-warning-text) !important}button.sd-bg-warning:focus,button.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}a.sd-bg-warning:focus,a.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}.sd-bg-danger{background-color:var(--sd-color-danger) !important}.sd-bg-text-danger{color:var(--sd-color-danger-text) !important}button.sd-bg-danger:focus,button.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}a.sd-bg-danger:focus,a.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}.sd-bg-light{background-color:var(--sd-color-light) !important}.sd-bg-text-light{color:var(--sd-color-light-text) !important}button.sd-bg-light:focus,button.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}a.sd-bg-light:focus,a.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}.sd-bg-muted{background-color:var(--sd-color-muted) !important}.sd-bg-text-muted{color:var(--sd-color-muted-text) !important}button.sd-bg-muted:focus,button.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}a.sd-bg-muted:focus,a.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}.sd-bg-dark{background-color:var(--sd-color-dark) !important}.sd-bg-text-dark{color:var(--sd-color-dark-text) 
!important}button.sd-bg-dark:focus,button.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}a.sd-bg-dark:focus,a.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}.sd-bg-black{background-color:var(--sd-color-black) !important}.sd-bg-text-black{color:var(--sd-color-black-text) !important}button.sd-bg-black:focus,button.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}a.sd-bg-black:focus,a.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}.sd-bg-white{background-color:var(--sd-color-white) !important}.sd-bg-text-white{color:var(--sd-color-white-text) !important}button.sd-bg-white:focus,button.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}a.sd-bg-white:focus,a.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}.sd-text-primary,.sd-text-primary>p{color:var(--sd-color-primary) !important}a.sd-text-primary:focus,a.sd-text-primary:hover{color:var(--sd-color-primary-highlight) !important}.sd-text-secondary,.sd-text-secondary>p{color:var(--sd-color-secondary) !important}a.sd-text-secondary:focus,a.sd-text-secondary:hover{color:var(--sd-color-secondary-highlight) !important}.sd-text-success,.sd-text-success>p{color:var(--sd-color-success) !important}a.sd-text-success:focus,a.sd-text-success:hover{color:var(--sd-color-success-highlight) !important}.sd-text-info,.sd-text-info>p{color:var(--sd-color-info) !important}a.sd-text-info:focus,a.sd-text-info:hover{color:var(--sd-color-info-highlight) !important}.sd-text-warning,.sd-text-warning>p{color:var(--sd-color-warning) !important}a.sd-text-warning:focus,a.sd-text-warning:hover{color:var(--sd-color-warning-highlight) !important}.sd-text-danger,.sd-text-danger>p{color:var(--sd-color-danger) !important}a.sd-text-danger:focus,a.sd-text-danger:hover{color:var(--sd-color-danger-highlight) !important}.sd-text-light,.sd-text-light>p{color:var(--sd-color-light) 
!important}a.sd-text-light:focus,a.sd-text-light:hover{color:var(--sd-color-light-highlight) !important}.sd-text-muted,.sd-text-muted>p{color:var(--sd-color-muted) !important}a.sd-text-muted:focus,a.sd-text-muted:hover{color:var(--sd-color-muted-highlight) !important}.sd-text-dark,.sd-text-dark>p{color:var(--sd-color-dark) !important}a.sd-text-dark:focus,a.sd-text-dark:hover{color:var(--sd-color-dark-highlight) !important}.sd-text-black,.sd-text-black>p{color:var(--sd-color-black) !important}a.sd-text-black:focus,a.sd-text-black:hover{color:var(--sd-color-black-highlight) !important}.sd-text-white,.sd-text-white>p{color:var(--sd-color-white) !important}a.sd-text-white:focus,a.sd-text-white:hover{color:var(--sd-color-white-highlight) !important}.sd-outline-primary{border-color:var(--sd-color-primary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-primary:focus,a.sd-outline-primary:hover{border-color:var(--sd-color-primary-highlight) !important}.sd-outline-secondary{border-color:var(--sd-color-secondary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-secondary:focus,a.sd-outline-secondary:hover{border-color:var(--sd-color-secondary-highlight) !important}.sd-outline-success{border-color:var(--sd-color-success) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-success:focus,a.sd-outline-success:hover{border-color:var(--sd-color-success-highlight) !important}.sd-outline-info{border-color:var(--sd-color-info) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-info:focus,a.sd-outline-info:hover{border-color:var(--sd-color-info-highlight) !important}.sd-outline-warning{border-color:var(--sd-color-warning) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-warning:focus,a.sd-outline-warning:hover{border-color:var(--sd-color-warning-highlight) !important}.sd-outline-danger{border-color:var(--sd-color-danger) 
!important;border-style:solid !important;border-width:1px !important}a.sd-outline-danger:focus,a.sd-outline-danger:hover{border-color:var(--sd-color-danger-highlight) !important}.sd-outline-light{border-color:var(--sd-color-light) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-light:focus,a.sd-outline-light:hover{border-color:var(--sd-color-light-highlight) !important}.sd-outline-muted{border-color:var(--sd-color-muted) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-muted:focus,a.sd-outline-muted:hover{border-color:var(--sd-color-muted-highlight) !important}.sd-outline-dark{border-color:var(--sd-color-dark) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-dark:focus,a.sd-outline-dark:hover{border-color:var(--sd-color-dark-highlight) !important}.sd-outline-black{border-color:var(--sd-color-black) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-black:focus,a.sd-outline-black:hover{border-color:var(--sd-color-black-highlight) !important}.sd-outline-white{border-color:var(--sd-color-white) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-white:focus,a.sd-outline-white:hover{border-color:var(--sd-color-white-highlight) !important}.sd-bg-transparent{background-color:transparent !important}.sd-outline-transparent{border-color:transparent !important}.sd-text-transparent{color:transparent !important}.sd-p-0{padding:0 !important}.sd-pt-0,.sd-py-0{padding-top:0 !important}.sd-pr-0,.sd-px-0{padding-right:0 !important}.sd-pb-0,.sd-py-0{padding-bottom:0 !important}.sd-pl-0,.sd-px-0{padding-left:0 !important}.sd-p-1{padding:.25rem !important}.sd-pt-1,.sd-py-1{padding-top:.25rem !important}.sd-pr-1,.sd-px-1{padding-right:.25rem !important}.sd-pb-1,.sd-py-1{padding-bottom:.25rem !important}.sd-pl-1,.sd-px-1{padding-left:.25rem !important}.sd-p-2{padding:.5rem !important}.sd-pt-2,.sd-py-2{padding-top:.5rem 
!important}.sd-pr-2,.sd-px-2{padding-right:.5rem !important}.sd-pb-2,.sd-py-2{padding-bottom:.5rem !important}.sd-pl-2,.sd-px-2{padding-left:.5rem !important}.sd-p-3{padding:1rem !important}.sd-pt-3,.sd-py-3{padding-top:1rem !important}.sd-pr-3,.sd-px-3{padding-right:1rem !important}.sd-pb-3,.sd-py-3{padding-bottom:1rem !important}.sd-pl-3,.sd-px-3{padding-left:1rem !important}.sd-p-4{padding:1.5rem !important}.sd-pt-4,.sd-py-4{padding-top:1.5rem !important}.sd-pr-4,.sd-px-4{padding-right:1.5rem !important}.sd-pb-4,.sd-py-4{padding-bottom:1.5rem !important}.sd-pl-4,.sd-px-4{padding-left:1.5rem !important}.sd-p-5{padding:3rem !important}.sd-pt-5,.sd-py-5{padding-top:3rem !important}.sd-pr-5,.sd-px-5{padding-right:3rem !important}.sd-pb-5,.sd-py-5{padding-bottom:3rem !important}.sd-pl-5,.sd-px-5{padding-left:3rem !important}.sd-m-auto{margin:auto !important}.sd-mt-auto,.sd-my-auto{margin-top:auto !important}.sd-mr-auto,.sd-mx-auto{margin-right:auto !important}.sd-mb-auto,.sd-my-auto{margin-bottom:auto !important}.sd-ml-auto,.sd-mx-auto{margin-left:auto !important}.sd-m-0{margin:0 !important}.sd-mt-0,.sd-my-0{margin-top:0 !important}.sd-mr-0,.sd-mx-0{margin-right:0 !important}.sd-mb-0,.sd-my-0{margin-bottom:0 !important}.sd-ml-0,.sd-mx-0{margin-left:0 !important}.sd-m-1{margin:.25rem !important}.sd-mt-1,.sd-my-1{margin-top:.25rem !important}.sd-mr-1,.sd-mx-1{margin-right:.25rem !important}.sd-mb-1,.sd-my-1{margin-bottom:.25rem !important}.sd-ml-1,.sd-mx-1{margin-left:.25rem !important}.sd-m-2{margin:.5rem !important}.sd-mt-2,.sd-my-2{margin-top:.5rem !important}.sd-mr-2,.sd-mx-2{margin-right:.5rem !important}.sd-mb-2,.sd-my-2{margin-bottom:.5rem !important}.sd-ml-2,.sd-mx-2{margin-left:.5rem !important}.sd-m-3{margin:1rem !important}.sd-mt-3,.sd-my-3{margin-top:1rem !important}.sd-mr-3,.sd-mx-3{margin-right:1rem !important}.sd-mb-3,.sd-my-3{margin-bottom:1rem !important}.sd-ml-3,.sd-mx-3{margin-left:1rem !important}.sd-m-4{margin:1.5rem 
!important}.sd-mt-4,.sd-my-4{margin-top:1.5rem !important}.sd-mr-4,.sd-mx-4{margin-right:1.5rem !important}.sd-mb-4,.sd-my-4{margin-bottom:1.5rem !important}.sd-ml-4,.sd-mx-4{margin-left:1.5rem !important}.sd-m-5{margin:3rem !important}.sd-mt-5,.sd-my-5{margin-top:3rem !important}.sd-mr-5,.sd-mx-5{margin-right:3rem !important}.sd-mb-5,.sd-my-5{margin-bottom:3rem !important}.sd-ml-5,.sd-mx-5{margin-left:3rem !important}.sd-w-25{width:25% !important}.sd-w-50{width:50% !important}.sd-w-75{width:75% !important}.sd-w-100{width:100% !important}.sd-w-auto{width:auto !important}.sd-h-25{height:25% !important}.sd-h-50{height:50% !important}.sd-h-75{height:75% !important}.sd-h-100{height:100% !important}.sd-h-auto{height:auto !important}.sd-d-none{display:none !important}.sd-d-inline{display:inline !important}.sd-d-inline-block{display:inline-block !important}.sd-d-block{display:block !important}.sd-d-grid{display:grid !important}.sd-d-flex-row{display:-ms-flexbox !important;display:flex !important;flex-direction:row !important}.sd-d-flex-column{display:-ms-flexbox !important;display:flex !important;flex-direction:column !important}.sd-d-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}@media(min-width: 576px){.sd-d-sm-none{display:none !important}.sd-d-sm-inline{display:inline !important}.sd-d-sm-inline-block{display:inline-block !important}.sd-d-sm-block{display:block !important}.sd-d-sm-grid{display:grid !important}.sd-d-sm-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-sm-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 768px){.sd-d-md-none{display:none !important}.sd-d-md-inline{display:inline !important}.sd-d-md-inline-block{display:inline-block !important}.sd-d-md-block{display:block !important}.sd-d-md-grid{display:grid !important}.sd-d-md-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-md-inline-flex{display:-ms-inline-flexbox 
!important;display:inline-flex !important}}@media(min-width: 992px){.sd-d-lg-none{display:none !important}.sd-d-lg-inline{display:inline !important}.sd-d-lg-inline-block{display:inline-block !important}.sd-d-lg-block{display:block !important}.sd-d-lg-grid{display:grid !important}.sd-d-lg-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-lg-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 1200px){.sd-d-xl-none{display:none !important}.sd-d-xl-inline{display:inline !important}.sd-d-xl-inline-block{display:inline-block !important}.sd-d-xl-block{display:block !important}.sd-d-xl-grid{display:grid !important}.sd-d-xl-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-xl-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}.sd-align-major-start{justify-content:flex-start !important}.sd-align-major-end{justify-content:flex-end !important}.sd-align-major-center{justify-content:center !important}.sd-align-major-justify{justify-content:space-between !important}.sd-align-major-spaced{justify-content:space-evenly !important}.sd-align-minor-start{align-items:flex-start !important}.sd-align-minor-end{align-items:flex-end !important}.sd-align-minor-center{align-items:center !important}.sd-align-minor-stretch{align-items:stretch !important}.sd-text-justify{text-align:justify !important}.sd-text-left{text-align:left !important}.sd-text-right{text-align:right !important}.sd-text-center{text-align:center !important}.sd-font-weight-light{font-weight:300 !important}.sd-font-weight-lighter{font-weight:lighter !important}.sd-font-weight-normal{font-weight:400 !important}.sd-font-weight-bold{font-weight:700 !important}.sd-font-weight-bolder{font-weight:bolder !important}.sd-font-italic{font-style:italic !important}.sd-text-decoration-none{text-decoration:none !important}.sd-text-lowercase{text-transform:lowercase !important}.sd-text-uppercase{text-transform:uppercase 
!important}.sd-text-capitalize{text-transform:capitalize !important}.sd-text-wrap{white-space:normal !important}.sd-text-nowrap{white-space:nowrap !important}.sd-text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.sd-fs-1,.sd-fs-1>p{font-size:calc(1.375rem + 1.5vw) !important;line-height:unset !important}.sd-fs-2,.sd-fs-2>p{font-size:calc(1.325rem + 0.9vw) !important;line-height:unset !important}.sd-fs-3,.sd-fs-3>p{font-size:calc(1.3rem + 0.6vw) !important;line-height:unset !important}.sd-fs-4,.sd-fs-4>p{font-size:calc(1.275rem + 0.3vw) !important;line-height:unset !important}.sd-fs-5,.sd-fs-5>p{font-size:1.25rem !important;line-height:unset !important}.sd-fs-6,.sd-fs-6>p{font-size:1rem !important;line-height:unset !important}.sd-border-0{border:0 solid !important}.sd-border-top-0{border-top:0 solid !important}.sd-border-bottom-0{border-bottom:0 solid !important}.sd-border-right-0{border-right:0 solid !important}.sd-border-left-0{border-left:0 solid !important}.sd-border-1{border:1px solid !important}.sd-border-top-1{border-top:1px solid !important}.sd-border-bottom-1{border-bottom:1px solid !important}.sd-border-right-1{border-right:1px solid !important}.sd-border-left-1{border-left:1px solid !important}.sd-border-2{border:2px solid !important}.sd-border-top-2{border-top:2px solid !important}.sd-border-bottom-2{border-bottom:2px solid !important}.sd-border-right-2{border-right:2px solid !important}.sd-border-left-2{border-left:2px solid !important}.sd-border-3{border:3px solid !important}.sd-border-top-3{border-top:3px solid !important}.sd-border-bottom-3{border-bottom:3px solid !important}.sd-border-right-3{border-right:3px solid !important}.sd-border-left-3{border-left:3px solid !important}.sd-border-4{border:4px solid !important}.sd-border-top-4{border-top:4px solid !important}.sd-border-bottom-4{border-bottom:4px solid !important}.sd-border-right-4{border-right:4px solid !important}.sd-border-left-4{border-left:4px solid 
!important}.sd-border-5{border:5px solid !important}.sd-border-top-5{border-top:5px solid !important}.sd-border-bottom-5{border-bottom:5px solid !important}.sd-border-right-5{border-right:5px solid !important}.sd-border-left-5{border-left:5px solid !important}.sd-rounded-0{border-radius:0 !important}.sd-rounded-1{border-radius:.2rem !important}.sd-rounded-2{border-radius:.3rem !important}.sd-rounded-3{border-radius:.5rem !important}.sd-rounded-pill{border-radius:50rem !important}.sd-rounded-circle{border-radius:50% !important}.shadow-none{box-shadow:none !important}.sd-shadow-sm{box-shadow:0 .125rem .25rem var(--sd-color-shadow) !important}.sd-shadow-md{box-shadow:0 .5rem 1rem var(--sd-color-shadow) !important}.sd-shadow-lg{box-shadow:0 1rem 3rem var(--sd-color-shadow) !important}@keyframes sd-slide-from-left{0%{transform:translateX(-100%)}100%{transform:translateX(0)}}@keyframes sd-slide-from-right{0%{transform:translateX(200%)}100%{transform:translateX(0)}}@keyframes sd-grow100{0%{transform:scale(0);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50{0%{transform:scale(0.5);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50-rot20{0%{transform:scale(0.5) rotateZ(-20deg);opacity:.5}75%{transform:scale(1) rotateZ(5deg);opacity:1}95%{transform:scale(1) rotateZ(-1deg);opacity:1}100%{transform:scale(1) rotateZ(0);opacity:1}}.sd-animate-slide-from-left{animation:1s ease-out 0s 1 normal none running sd-slide-from-left}.sd-animate-slide-from-right{animation:1s ease-out 0s 1 normal none running sd-slide-from-right}.sd-animate-grow100{animation:1s ease-out 0s 1 normal none running sd-grow100}.sd-animate-grow50{animation:1s ease-out 0s 1 normal none running sd-grow50}.sd-animate-grow50-rot20{animation:1s ease-out 0s 1 normal none running sd-grow50-rot20}.sd-badge{display:inline-block;padding:.35em 
.65em;font-size:.75em;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.sd-badge:empty{display:none}a.sd-badge{text-decoration:none}.sd-btn .sd-badge{position:relative;top:-1px}.sd-btn{background-color:transparent;border:1px solid transparent;border-radius:.25rem;cursor:pointer;display:inline-block;font-weight:400;font-size:1rem;line-height:1.5;padding:.375rem .75rem;text-align:center;text-decoration:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;vertical-align:middle;user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none}.sd-btn:hover{text-decoration:none}@media(prefers-reduced-motion: reduce){.sd-btn{transition:none}}.sd-btn-primary,.sd-btn-outline-primary:hover,.sd-btn-outline-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-primary:hover,.sd-btn-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary-highlight) !important;border-color:var(--sd-color-primary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-primary{color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary,.sd-btn-outline-secondary:hover,.sd-btn-outline-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary:hover,.sd-btn-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary-highlight) 
!important;border-color:var(--sd-color-secondary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-secondary{color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success,.sd-btn-outline-success:hover,.sd-btn-outline-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success:hover,.sd-btn-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success-highlight) !important;border-color:var(--sd-color-success-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-success{color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info,.sd-btn-outline-info:hover,.sd-btn-outline-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info:hover,.sd-btn-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info-highlight) !important;border-color:var(--sd-color-info-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-info{color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning,.sd-btn-outline-warning:hover,.sd-btn-outline-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid 
!important}.sd-btn-warning:hover,.sd-btn-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning-highlight) !important;border-color:var(--sd-color-warning-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-warning{color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger,.sd-btn-outline-danger:hover,.sd-btn-outline-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger:hover,.sd-btn-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger-highlight) !important;border-color:var(--sd-color-danger-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-danger{color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light,.sd-btn-outline-light:hover,.sd-btn-outline-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light:hover,.sd-btn-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light-highlight) !important;border-color:var(--sd-color-light-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-light{color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted,.sd-btn-outline-muted:hover,.sd-btn-outline-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted) 
!important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted:hover,.sd-btn-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted-highlight) !important;border-color:var(--sd-color-muted-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-muted{color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark,.sd-btn-outline-dark:hover,.sd-btn-outline-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark:hover,.sd-btn-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark-highlight) !important;border-color:var(--sd-color-dark-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-dark{color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black,.sd-btn-outline-black:hover,.sd-btn-outline-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black:hover,.sd-btn-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black-highlight) !important;border-color:var(--sd-color-black-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-black{color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid 
!important}.sd-btn-white,.sd-btn-outline-white:hover,.sd-btn-outline-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white:hover,.sd-btn-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white-highlight) !important;border-color:var(--sd-color-white-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-white{color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.sd-hide-link-text{font-size:0}.sd-octicon,.sd-material-icon{display:inline-block;fill:currentColor;vertical-align:middle}.sd-avatar-xs{border-radius:50%;object-fit:cover;object-position:center;width:1rem;height:1rem}.sd-avatar-sm{border-radius:50%;object-fit:cover;object-position:center;width:3rem;height:3rem}.sd-avatar-md{border-radius:50%;object-fit:cover;object-position:center;width:5rem;height:5rem}.sd-avatar-lg{border-radius:50%;object-fit:cover;object-position:center;width:7rem;height:7rem}.sd-avatar-xl{border-radius:50%;object-fit:cover;object-position:center;width:10rem;height:10rem}.sd-avatar-inherit{border-radius:50%;object-fit:cover;object-position:center;width:inherit;height:inherit}.sd-avatar-initial{border-radius:50%;object-fit:cover;object-position:center;width:initial;height:initial}.sd-card{background-clip:border-box;background-color:var(--sd-color-card-background);border:1px solid 
var(--sd-color-card-border);border-radius:.25rem;color:var(--sd-color-card-text);display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;min-width:0;position:relative;word-wrap:break-word}.sd-card>hr{margin-left:0;margin-right:0}.sd-card-hover:hover{border-color:var(--sd-color-card-border-hover);transform:scale(1.01)}.sd-card-body{-ms-flex:1 1 auto;flex:1 1 auto;padding:1rem 1rem}.sd-card-title{margin-bottom:.5rem}.sd-card-subtitle{margin-top:-0.25rem;margin-bottom:0}.sd-card-text:last-child{margin-bottom:0}.sd-card-link:hover{text-decoration:none}.sd-card-link+.card-link{margin-left:1rem}.sd-card-header{padding:.5rem 1rem;margin-bottom:0;background-color:var(--sd-color-card-header);border-bottom:1px solid var(--sd-color-card-border)}.sd-card-header:first-child{border-radius:calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0}.sd-card-footer{padding:.5rem 1rem;background-color:var(--sd-color-card-footer);border-top:1px solid var(--sd-color-card-border)}.sd-card-footer:last-child{border-radius:0 0 calc(0.25rem - 1px) calc(0.25rem - 1px)}.sd-card-header-tabs{margin-right:-0.5rem;margin-bottom:-0.5rem;margin-left:-0.5rem;border-bottom:0}.sd-card-header-pills{margin-right:-0.5rem;margin-left:-0.5rem}.sd-card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1rem;border-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom,.sd-card-img-top{width:100%}.sd-card-img,.sd-card-img-top{border-top-left-radius:calc(0.25rem - 1px);border-top-right-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom{border-bottom-left-radius:calc(0.25rem - 1px);border-bottom-right-radius:calc(0.25rem - 1px)}.sd-cards-carousel{width:100%;display:flex;flex-wrap:nowrap;-ms-flex-direction:row;flex-direction:row;overflow-x:hidden;scroll-snap-type:x 
mandatory}.sd-cards-carousel.sd-show-scrollbar{overflow-x:auto}.sd-cards-carousel:hover,.sd-cards-carousel:focus{overflow-x:auto}.sd-cards-carousel>.sd-card{flex-shrink:0;scroll-snap-align:start}.sd-cards-carousel>.sd-card:not(:last-child){margin-right:3px}.sd-card-cols-1>.sd-card{width:90%}.sd-card-cols-2>.sd-card{width:45%}.sd-card-cols-3>.sd-card{width:30%}.sd-card-cols-4>.sd-card{width:22.5%}.sd-card-cols-5>.sd-card{width:18%}.sd-card-cols-6>.sd-card{width:15%}.sd-card-cols-7>.sd-card{width:12.8571428571%}.sd-card-cols-8>.sd-card{width:11.25%}.sd-card-cols-9>.sd-card{width:10%}.sd-card-cols-10>.sd-card{width:9%}.sd-card-cols-11>.sd-card{width:8.1818181818%}.sd-card-cols-12>.sd-card{width:7.5%}.sd-container,.sd-container-fluid,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container-xl{margin-left:auto;margin-right:auto;padding-left:var(--sd-gutter-x, 0.75rem);padding-right:var(--sd-gutter-x, 0.75rem);width:100%}@media(min-width: 576px){.sd-container-sm,.sd-container{max-width:540px}}@media(min-width: 768px){.sd-container-md,.sd-container-sm,.sd-container{max-width:720px}}@media(min-width: 992px){.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:960px}}@media(min-width: 1200px){.sd-container-xl,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:1140px}}.sd-row{--sd-gutter-x: 1.5rem;--sd-gutter-y: 0;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-top:calc(var(--sd-gutter-y) * -1);margin-right:calc(var(--sd-gutter-x) * -0.5);margin-left:calc(var(--sd-gutter-x) * -0.5)}.sd-row>*{box-sizing:border-box;flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--sd-gutter-x) * 0.5);padding-left:calc(var(--sd-gutter-x) * 0.5);margin-top:var(--sd-gutter-y)}.sd-col{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-auto>*{flex:0 0 auto;width:auto}.sd-row-cols-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-3>*{flex:0 0 
auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}@media(min-width: 576px){.sd-col-sm{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-sm-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-sm-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-sm-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-sm-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-sm-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-sm-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-sm-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-sm-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-sm-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-sm-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-sm-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-sm-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-sm-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 768px){.sd-col-md{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-md-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-md-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-md-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-md-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-md-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-md-5>*{flex:0 0 
auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-md-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-md-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-md-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-md-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-md-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-md-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-md-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 992px){.sd-col-lg{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-lg-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-lg-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-lg-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-lg-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-lg-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-lg-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-lg-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-lg-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-lg-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-lg-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-lg-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-lg-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-lg-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 1200px){.sd-col-xl{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-xl-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-xl-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-xl-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-xl-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-xl-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-xl-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-xl-6>*{flex:0 0 auto;-ms-flex:0 0 
auto;width:16.6666666667%}.sd-row-cols-xl-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-xl-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-xl-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-xl-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-xl-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-xl-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}.sd-col-auto{flex:0 0 auto;-ms-flex:0 0 auto;width:auto}.sd-col-1{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}.sd-col-2{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-col-3{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-col-4{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-col-5{flex:0 0 auto;-ms-flex:0 0 auto;width:41.6666666667%}.sd-col-6{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-col-7{flex:0 0 auto;-ms-flex:0 0 auto;width:58.3333333333%}.sd-col-8{flex:0 0 auto;-ms-flex:0 0 auto;width:66.6666666667%}.sd-col-9{flex:0 0 auto;-ms-flex:0 0 auto;width:75%}.sd-col-10{flex:0 0 auto;-ms-flex:0 0 auto;width:83.3333333333%}.sd-col-11{flex:0 0 auto;-ms-flex:0 0 auto;width:91.6666666667%}.sd-col-12{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-g-0,.sd-gy-0{--sd-gutter-y: 0}.sd-g-0,.sd-gx-0{--sd-gutter-x: 0}.sd-g-1,.sd-gy-1{--sd-gutter-y: 0.25rem}.sd-g-1,.sd-gx-1{--sd-gutter-x: 0.25rem}.sd-g-2,.sd-gy-2{--sd-gutter-y: 0.5rem}.sd-g-2,.sd-gx-2{--sd-gutter-x: 0.5rem}.sd-g-3,.sd-gy-3{--sd-gutter-y: 1rem}.sd-g-3,.sd-gx-3{--sd-gutter-x: 1rem}.sd-g-4,.sd-gy-4{--sd-gutter-y: 1.5rem}.sd-g-4,.sd-gx-4{--sd-gutter-x: 1.5rem}.sd-g-5,.sd-gy-5{--sd-gutter-y: 3rem}.sd-g-5,.sd-gx-5{--sd-gutter-x: 3rem}@media(min-width: 576px){.sd-col-sm-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-sm-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-sm-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-sm-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-sm-4{-ms-flex:0 0 auto;flex:0 0 
auto;width:33.3333333333%}.sd-col-sm-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-sm-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-sm-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-sm-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-sm-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-sm-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-sm-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-sm-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-sm-0,.sd-gy-sm-0{--sd-gutter-y: 0}.sd-g-sm-0,.sd-gx-sm-0{--sd-gutter-x: 0}.sd-g-sm-1,.sd-gy-sm-1{--sd-gutter-y: 0.25rem}.sd-g-sm-1,.sd-gx-sm-1{--sd-gutter-x: 0.25rem}.sd-g-sm-2,.sd-gy-sm-2{--sd-gutter-y: 0.5rem}.sd-g-sm-2,.sd-gx-sm-2{--sd-gutter-x: 0.5rem}.sd-g-sm-3,.sd-gy-sm-3{--sd-gutter-y: 1rem}.sd-g-sm-3,.sd-gx-sm-3{--sd-gutter-x: 1rem}.sd-g-sm-4,.sd-gy-sm-4{--sd-gutter-y: 1.5rem}.sd-g-sm-4,.sd-gx-sm-4{--sd-gutter-x: 1.5rem}.sd-g-sm-5,.sd-gy-sm-5{--sd-gutter-y: 3rem}.sd-g-sm-5,.sd-gx-sm-5{--sd-gutter-x: 3rem}}@media(min-width: 768px){.sd-col-md-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-md-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-md-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-md-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-md-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-md-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-md-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-md-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-md-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-md-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-md-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-md-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-md-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-md-0,.sd-gy-md-0{--sd-gutter-y: 0}.sd-g-md-0,.sd-gx-md-0{--sd-gutter-x: 
0}.sd-g-md-1,.sd-gy-md-1{--sd-gutter-y: 0.25rem}.sd-g-md-1,.sd-gx-md-1{--sd-gutter-x: 0.25rem}.sd-g-md-2,.sd-gy-md-2{--sd-gutter-y: 0.5rem}.sd-g-md-2,.sd-gx-md-2{--sd-gutter-x: 0.5rem}.sd-g-md-3,.sd-gy-md-3{--sd-gutter-y: 1rem}.sd-g-md-3,.sd-gx-md-3{--sd-gutter-x: 1rem}.sd-g-md-4,.sd-gy-md-4{--sd-gutter-y: 1.5rem}.sd-g-md-4,.sd-gx-md-4{--sd-gutter-x: 1.5rem}.sd-g-md-5,.sd-gy-md-5{--sd-gutter-y: 3rem}.sd-g-md-5,.sd-gx-md-5{--sd-gutter-x: 3rem}}@media(min-width: 992px){.sd-col-lg-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-lg-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-lg-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-lg-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-lg-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-lg-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-lg-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-lg-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-lg-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-lg-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-lg-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-lg-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-lg-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-lg-0,.sd-gy-lg-0{--sd-gutter-y: 0}.sd-g-lg-0,.sd-gx-lg-0{--sd-gutter-x: 0}.sd-g-lg-1,.sd-gy-lg-1{--sd-gutter-y: 0.25rem}.sd-g-lg-1,.sd-gx-lg-1{--sd-gutter-x: 0.25rem}.sd-g-lg-2,.sd-gy-lg-2{--sd-gutter-y: 0.5rem}.sd-g-lg-2,.sd-gx-lg-2{--sd-gutter-x: 0.5rem}.sd-g-lg-3,.sd-gy-lg-3{--sd-gutter-y: 1rem}.sd-g-lg-3,.sd-gx-lg-3{--sd-gutter-x: 1rem}.sd-g-lg-4,.sd-gy-lg-4{--sd-gutter-y: 1.5rem}.sd-g-lg-4,.sd-gx-lg-4{--sd-gutter-x: 1.5rem}.sd-g-lg-5,.sd-gy-lg-5{--sd-gutter-y: 3rem}.sd-g-lg-5,.sd-gx-lg-5{--sd-gutter-x: 3rem}}@media(min-width: 1200px){.sd-col-xl-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-xl-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-xl-2{-ms-flex:0 0 auto;flex:0 0 
auto;width:16.6666666667%}.sd-col-xl-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-xl-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-xl-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-xl-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-xl-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-xl-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-xl-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-xl-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-xl-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-xl-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-xl-0,.sd-gy-xl-0{--sd-gutter-y: 0}.sd-g-xl-0,.sd-gx-xl-0{--sd-gutter-x: 0}.sd-g-xl-1,.sd-gy-xl-1{--sd-gutter-y: 0.25rem}.sd-g-xl-1,.sd-gx-xl-1{--sd-gutter-x: 0.25rem}.sd-g-xl-2,.sd-gy-xl-2{--sd-gutter-y: 0.5rem}.sd-g-xl-2,.sd-gx-xl-2{--sd-gutter-x: 0.5rem}.sd-g-xl-3,.sd-gy-xl-3{--sd-gutter-y: 1rem}.sd-g-xl-3,.sd-gx-xl-3{--sd-gutter-x: 1rem}.sd-g-xl-4,.sd-gy-xl-4{--sd-gutter-y: 1.5rem}.sd-g-xl-4,.sd-gx-xl-4{--sd-gutter-x: 1.5rem}.sd-g-xl-5,.sd-gy-xl-5{--sd-gutter-y: 3rem}.sd-g-xl-5,.sd-gx-xl-5{--sd-gutter-x: 3rem}}.sd-flex-row-reverse{flex-direction:row-reverse !important}details.sd-dropdown{position:relative;font-size:var(--sd-fontsize-dropdown)}details.sd-dropdown:hover{cursor:pointer}details.sd-dropdown .sd-summary-content{cursor:default}details.sd-dropdown summary.sd-summary-title{padding:.5em .6em .5em 1em;font-size:var(--sd-fontsize-dropdown-title);font-weight:var(--sd-fontweight-dropdown-title);user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none;list-style:none;display:inline-flex;justify-content:space-between}details.sd-dropdown summary.sd-summary-title::-webkit-details-marker{display:none}details.sd-dropdown summary.sd-summary-title:focus{outline:none}details.sd-dropdown summary.sd-summary-title 
.sd-summary-icon{margin-right:.6em;display:inline-flex;align-items:center}details.sd-dropdown summary.sd-summary-title .sd-summary-icon svg{opacity:.8}details.sd-dropdown summary.sd-summary-title .sd-summary-text{flex-grow:1;line-height:1.5;padding-right:.5rem}details.sd-dropdown summary.sd-summary-title .sd-summary-state-marker{pointer-events:none;display:inline-flex;align-items:center}details.sd-dropdown summary.sd-summary-title .sd-summary-state-marker svg{opacity:.6}details.sd-dropdown summary.sd-summary-title:hover .sd-summary-state-marker svg{opacity:1;transform:scale(1.1)}details.sd-dropdown[open] summary .sd-octicon.no-title{visibility:hidden}details.sd-dropdown .sd-summary-chevron-right{transition:.25s}details.sd-dropdown[open]>.sd-summary-title .sd-summary-chevron-right{transform:rotate(90deg)}details.sd-dropdown[open]>.sd-summary-title .sd-summary-chevron-down{transform:rotate(180deg)}details.sd-dropdown:not([open]).sd-card{border:none}details.sd-dropdown:not([open])>.sd-card-header{border:1px solid var(--sd-color-card-border);border-radius:.25rem}details.sd-dropdown.sd-fade-in[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out;animation:sd-fade-in .5s ease-in-out}details.sd-dropdown.sd-fade-in-slide-down[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out}.sd-col>.sd-dropdown{width:100%}.sd-summary-content>.sd-tab-set:first-child{margin-top:0}@keyframes sd-fade-in{0%{opacity:0}100%{opacity:1}}@keyframes sd-slide-down{0%{transform:translate(0, -10px)}100%{transform:translate(0, 0)}}.sd-tab-set{border-radius:.125rem;display:flex;flex-wrap:wrap;margin:1em 
0;position:relative}.sd-tab-set>input{opacity:0;position:absolute}.sd-tab-set>input:checked+label{border-color:var(--sd-color-tabs-underline-active);color:var(--sd-color-tabs-label-active)}.sd-tab-set>input:checked+label+.sd-tab-content{display:block}.sd-tab-set>input:not(:checked)+label:hover{color:var(--sd-color-tabs-label-hover);border-color:var(--sd-color-tabs-underline-hover)}.sd-tab-set>input:focus+label{outline-style:auto}.sd-tab-set>input:not(.focus-visible)+label{outline:none;-webkit-tap-highlight-color:transparent}.sd-tab-set>label{border-bottom:.125rem solid transparent;margin-bottom:0;color:var(--sd-color-tabs-label-inactive);border-color:var(--sd-color-tabs-underline-inactive);cursor:pointer;font-size:var(--sd-fontsize-tabs-label);font-weight:700;padding:1em 1.25em .5em;transition:color 250ms;width:auto;z-index:1}html .sd-tab-set>label:hover{color:var(--sd-color-tabs-label-active)}.sd-col>.sd-tab-set{width:100%}.sd-tab-content{box-shadow:0 -0.0625rem var(--sd-color-tabs-overline),0 .0625rem var(--sd-color-tabs-underline);display:none;order:99;padding-bottom:.75rem;padding-top:.75rem;width:100%}.sd-tab-content>:first-child{margin-top:0 !important}.sd-tab-content>:last-child{margin-bottom:0 !important}.sd-tab-content>.sd-tab-set{margin:0}.sd-sphinx-override,.sd-sphinx-override *{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box}.sd-sphinx-override p{margin-top:0}:root{--sd-color-primary: #0071bc;--sd-color-secondary: #6c757d;--sd-color-success: #28a745;--sd-color-info: #17a2b8;--sd-color-warning: #f0b37e;--sd-color-danger: #dc3545;--sd-color-light: #f8f9fa;--sd-color-muted: #6c757d;--sd-color-dark: #212529;--sd-color-black: black;--sd-color-white: white;--sd-color-primary-highlight: #0060a0;--sd-color-secondary-highlight: #5c636a;--sd-color-success-highlight: #228e3b;--sd-color-info-highlight: #148a9c;--sd-color-warning-highlight: #cc986b;--sd-color-danger-highlight: #bb2d3b;--sd-color-light-highlight: 
#d3d4d5;--sd-color-muted-highlight: #5c636a;--sd-color-dark-highlight: #1c1f23;--sd-color-black-highlight: black;--sd-color-white-highlight: #d9d9d9;--sd-color-primary-bg: rgba(0, 113, 188, 0.2);--sd-color-secondary-bg: rgba(108, 117, 125, 0.2);--sd-color-success-bg: rgba(40, 167, 69, 0.2);--sd-color-info-bg: rgba(23, 162, 184, 0.2);--sd-color-warning-bg: rgba(240, 179, 126, 0.2);--sd-color-danger-bg: rgba(220, 53, 69, 0.2);--sd-color-light-bg: rgba(248, 249, 250, 0.2);--sd-color-muted-bg: rgba(108, 117, 125, 0.2);--sd-color-dark-bg: rgba(33, 37, 41, 0.2);--sd-color-black-bg: rgba(0, 0, 0, 0.2);--sd-color-white-bg: rgba(255, 255, 255, 0.2);--sd-color-primary-text: #fff;--sd-color-secondary-text: #fff;--sd-color-success-text: #fff;--sd-color-info-text: #fff;--sd-color-warning-text: #212529;--sd-color-danger-text: #fff;--sd-color-light-text: #212529;--sd-color-muted-text: #fff;--sd-color-dark-text: #fff;--sd-color-black-text: #fff;--sd-color-white-text: #212529;--sd-color-shadow: rgba(0, 0, 0, 0.15);--sd-color-card-border: rgba(0, 0, 0, 0.125);--sd-color-card-border-hover: hsla(231, 99%, 66%, 1);--sd-color-card-background: transparent;--sd-color-card-text: inherit;--sd-color-card-header: transparent;--sd-color-card-footer: transparent;--sd-color-tabs-label-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-hover: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-inactive: hsl(0, 0%, 66%);--sd-color-tabs-underline-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-underline-hover: rgba(178, 206, 245, 0.62);--sd-color-tabs-underline-inactive: transparent;--sd-color-tabs-overline: rgb(222, 222, 222);--sd-color-tabs-underline: rgb(222, 222, 222);--sd-fontsize-tabs-label: 1rem;--sd-fontsize-dropdown: inherit;--sd-fontsize-dropdown-title: 1rem;--sd-fontweight-dropdown-title: 700} diff --git a/docs/_static/basic.css b/docs/_static/basic.css new file mode 100644 index 00000000..4738b2ed --- /dev/null +++ b/docs/_static/basic.css @@ -0,0 +1,906 @@ +/* + * Sphinx stylesheet -- basic 
theme. + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin-top: 10px; +} + +ul.search li { + padding: 5px 0; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: 
bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a:visited { + color: #551A8B; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > 
a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 
25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} 
+ +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + 
+aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: 
dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { 
+ margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/docs/_static/custom.css b/docs/_static/custom.css new file mode 100644 index 00000000..6f211f4d --- /dev/null +++ b/docs/_static/custom.css @@ -0,0 +1,279 @@ +.dataset-list.table-wrapper, +.catalog-matrix.table-wrapper { + font-size: 0.95rem; + letter-spacing: 0.01em; +} + +.dataset-list.table-wrapper table, +.catalog-matrix.table-wrapper table { + width: 100%; +} + +.dataset-list.table-wrapper td, +.catalog-matrix.table-wrapper td, +.catalog-matrix.table-wrapper th { + padding: 0.8rem 1rem; + vertical-align: top; +} + +.dataset-list.table-wrapper td:first-child { + font-family: var(--font-stack--monospace); + font-weight: 600; + font-size: 1rem; + color: var(--color-brand-primary); + white-space: nowrap; +} + +.dataset-list.table-wrapper td:last-child, +.catalog-matrix.table-wrapper td { + font-family: var(--font-stack); + font-size: 0.95rem; + line-height: 1.6; + color: var(--color-foreground-secondary); +} + +.catalog-matrix.table-wrapper th { + font-size: 0.78rem; + letter-spacing: 0.04em; + text-transform: uppercase; +} + +.catalog-stat-card.sd-card, +.catalog-entry-card.sd-card, +.catalog-detail-card.sd-card { + border: 1px solid var(--color-background-border); + border-radius: 0.9rem; + box-shadow: none; +} + +.catalog-stat-card.sd-card { + background: 
linear-gradient(180deg, var(--color-card-background), color-mix(in srgb, var(--color-card-background) 92%, var(--color-brand-primary) 8%)); +} + +.catalog-stat-card .sd-card-header, +.catalog-entry-card .sd-card-header, +.catalog-detail-card .sd-card-header { + border-bottom: none; + padding-bottom: 0; +} + +.catalog-stat-card .sd-card-title, +.catalog-entry-card .sd-card-title, +.catalog-detail-card .sd-card-title { + font-size: 0.82rem; + letter-spacing: 0.04em; + text-transform: uppercase; +} + +.catalog-stat-value { + margin: 0.35rem 0 0.2rem; + font-size: 2rem; + font-weight: 700; + line-height: 1; + color: var(--color-brand-primary); +} + +.catalog-stat-note { + font-size: 0.92rem; + line-height: 1.55; + color: var(--color-foreground-secondary); +} + +.catalog-section-note { + margin: 0.15rem 0 1.15rem; + font-size: 0.98rem; + line-height: 1.65; + color: var(--color-foreground-secondary); +} + +.catalog-entry-card .sd-card-body { + display: flex; + flex-direction: column; + gap: 0.7rem; +} + +.catalog-entry-name { + margin: 0; + font-size: 1.08rem; + font-weight: 700; + line-height: 1.3; +} + +.catalog-entry-summary { + margin: 0; + font-size: 0.97rem; + line-height: 1.65; + color: var(--color-foreground-secondary); +} + +.catalog-chip-row, +.catalog-meta-row, +.catalog-link-row { + margin: 0; + line-height: 1.65; +} + +.catalog-chip-row { + display: flex; + flex-wrap: wrap; + gap: 0.35rem; +} + +.catalog-chip-row .sd-badge { + border-radius: 999px; + font-size: 0.72rem; + letter-spacing: 0.02em; +} + +.catalog-meta-row, +.catalog-link-row { + font-size: 0.9rem; + color: var(--color-foreground-secondary); +} + +.catalog-link-row a { + font-weight: 600; +} + +.catalog-grid { + margin-bottom: 1.3rem; +} + +.catalog-tabs { + margin-top: 1rem; +} + +.catalog-tabs .sd-tab-set > label { + font-weight: 600; +} + +.catalog-tabs .sd-tab-content { + padding-top: 1rem; +} + +.catalog-dropdown { + margin: 0.85rem 0; +} + +.catalog-dropdown .sd-summary-title { + 
font-weight: 600; +} + +.catalog-dropdown .sd-summary-content { + color: var(--color-foreground-secondary); +} + +.catalog-dropdown ul { + margin: 0.35rem 0 0.35rem 1.2rem; +} + +.catalog-detail-links { + font-size: 0.94rem; + line-height: 1.7; +} + +.catalog-detail-links strong { + color: var(--color-foreground-primary); +} + +.catalog-recommend-grid .sd-card-title { + font-size: 0.95rem; + text-transform: none; + letter-spacing: normal; +} + +.catalog-recommend-grid .sd-card-body { + font-size: 0.94rem; + color: var(--color-foreground-secondary); +} + +.landing-hero-logo { + margin-bottom: 0.35rem; +} + +.landing-hero-title { + margin: 0.15rem 0 0.35rem; + text-align: center; + font-size: clamp(2rem, 3.8vw, 2.95rem); + line-height: 1.12; + letter-spacing: -0.04em; +} + +.home-cta-row { + display: flex; + flex-wrap: wrap; + justify-content: center; + gap: 0.85rem; + margin: 0 0 1.15rem; +} + +.home-cta-button { + display: inline-flex; + align-items: center; + justify-content: center; + min-width: 10.5rem; + padding: 0.78rem 1.2rem; + border-radius: 999px; + border: 1px solid var(--color-background-border); + text-decoration: none; + font-weight: 650; + letter-spacing: 0.01em; + transition: transform 0.12s ease, border-color 0.12s ease, background-color 0.12s ease, color 0.12s ease; +} + +.home-cta-button:hover { + transform: translateY(-1px); + text-decoration: none; +} + +.home-cta-primary { + background: color-mix(in srgb, var(--color-card-background) 84%, var(--color-brand-primary) 16%); + border-color: color-mix(in srgb, var(--color-background-border) 68%, var(--color-brand-primary) 32%); + color: var(--color-brand-primary); +} + +.home-cta-primary:hover { + background: color-mix(in srgb, var(--color-card-background) 76%, var(--color-brand-primary) 24%); + border-color: color-mix(in srgb, var(--color-background-border) 55%, var(--color-brand-primary) 45%); + color: var(--color-brand-primary); +} + +.home-cta-secondary { + background: color-mix(in srgb, 
var(--color-card-background) 92%, var(--color-brand-primary) 8%); + border-color: color-mix(in srgb, var(--color-background-border) 82%, var(--color-brand-primary) 18%); + color: var(--color-foreground-primary); +} + +.home-cta-secondary:hover { + background: color-mix(in srgb, var(--color-card-background) 86%, var(--color-brand-primary) 14%); + border-color: color-mix(in srgb, var(--color-background-border) 68%, var(--color-brand-primary) 32%); + color: var(--color-brand-primary); +} + +.home-hero-stats { + margin-top: 0.15rem; +} + +.home-section-note { + margin: 0.1rem 0 1.05rem; + font-size: 0.98rem; + line-height: 1.65; + color: var(--color-foreground-secondary); +} + +.home-link-grid .sd-card-title, +.home-pillar-grid .sd-card-title, +.home-hazard-grid .sd-card-title, +.home-kicker-grid .sd-card-title { + font-size: 0.95rem; + text-transform: none; + letter-spacing: normal; +} + +.home-link-grid .sd-card-body, +.home-pillar-grid .sd-card-body, +.home-hazard-grid .sd-card-body { + font-size: 0.95rem; + line-height: 1.65; + color: var(--color-foreground-secondary); +} diff --git a/docs/_static/debug.css b/docs/_static/debug.css new file mode 100644 index 00000000..74d4aec3 --- /dev/null +++ b/docs/_static/debug.css @@ -0,0 +1,69 @@ +/* + This CSS file should be overridden by the theme authors. It's + meant for debugging and developing the skeleton that this theme provides. 
+*/ +body { + font-family: -apple-system, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, + "Apple Color Emoji", "Segoe UI Emoji"; + background: lavender; +} +.sb-announcement { + background: rgb(131, 131, 131); +} +.sb-announcement__inner { + background: black; + color: white; +} +.sb-header { + background: lightskyblue; +} +.sb-header__inner { + background: royalblue; + color: white; +} +.sb-header-secondary { + background: lightcyan; +} +.sb-header-secondary__inner { + background: cornflowerblue; + color: white; +} +.sb-sidebar-primary { + background: lightgreen; +} +.sb-main { + background: blanchedalmond; +} +.sb-main__inner { + background: antiquewhite; +} +.sb-header-article { + background: lightsteelblue; +} +.sb-article-container { + background: snow; +} +.sb-article-main { + background: white; +} +.sb-footer-article { + background: lightpink; +} +.sb-sidebar-secondary { + background: lightgoldenrodyellow; +} +.sb-footer-content { + background: plum; +} +.sb-footer-content__inner { + background: palevioletred; +} +.sb-footer { + background: pink; +} +.sb-footer__inner { + background: salmon; +} +.sb-article { + background: white; +} diff --git a/docs/_static/design-tabs.js b/docs/_static/design-tabs.js new file mode 100644 index 00000000..b25bd6a4 --- /dev/null +++ b/docs/_static/design-tabs.js @@ -0,0 +1,101 @@ +// @ts-check + +// Extra JS capability for selected tabs to be synced +// The selection is stored in local storage so that it persists across page loads. + +/** + * @type {Record} + */ +let sd_id_to_elements = {}; +const storageKeyPrefix = "sphinx-design-tab-id-"; + +/** + * Create a key for a tab element. + * @param {HTMLElement} el - The tab element. + * @returns {[string, string, string] | null} - The key. 
+ * + */ +function create_key(el) { + let syncId = el.getAttribute("data-sync-id"); + let syncGroup = el.getAttribute("data-sync-group"); + if (!syncId || !syncGroup) return null; + return [syncGroup, syncId, syncGroup + "--" + syncId]; +} + +/** + * Initialize the tab selection. + * + */ +function ready() { + // Find all tabs with sync data + + /** @type {string[]} */ + let groups = []; + + document.querySelectorAll(".sd-tab-label").forEach((label) => { + if (label instanceof HTMLElement) { + let data = create_key(label); + if (data) { + let [group, id, key] = data; + + // add click event listener + // @ts-ignore + label.onclick = onSDLabelClick; + + // store map of key to elements + if (!sd_id_to_elements[key]) { + sd_id_to_elements[key] = []; + } + sd_id_to_elements[key].push(label); + + if (groups.indexOf(group) === -1) { + groups.push(group); + // Check if a specific tab has been selected via URL parameter + const tabParam = new URLSearchParams(window.location.search).get( + group + ); + if (tabParam) { + console.log( + "sphinx-design: Selecting tab id for group '" + + group + + "' from URL parameter: " + + tabParam + ); + window.sessionStorage.setItem(storageKeyPrefix + group, tabParam); + } + } + + // Check is a specific tab has been selected previously + let previousId = window.sessionStorage.getItem( + storageKeyPrefix + group + ); + if (previousId === id) { + // console.log( + // "sphinx-design: Selecting tab from session storage: " + id + // ); + // @ts-ignore + label.previousElementSibling.checked = true; + } + } + } + }); +} + +/** + * Activate other tabs with the same sync id. + * + * @this {HTMLElement} - The element that was clicked. 
+ */ +function onSDLabelClick() { + let data = create_key(this); + if (!data) return; + let [group, id, key] = data; + for (const label of sd_id_to_elements[key]) { + if (label === this) continue; + // @ts-ignore + label.previousElementSibling.checked = true; + } + window.sessionStorage.setItem(storageKeyPrefix + group, id); +} + +document.addEventListener("DOMContentLoaded", ready, false); diff --git a/docs/_static/doctools.js b/docs/_static/doctools.js new file mode 100644 index 00000000..0398ebb9 --- /dev/null +++ b/docs/_static/doctools.js @@ -0,0 +1,149 @@ +/* + * Base JavaScript utilities for all Sphinx HTML documentation. + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? 
singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + 
event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/docs/_static/documentation_options.js b/docs/_static/documentation_options.js new file mode 100644 index 00000000..841b958a --- /dev/null +++ b/docs/_static/documentation_options.js @@ -0,0 +1,13 @@ +const DOCUMENTATION_OPTIONS = { + VERSION: '1.0.5', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: true, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/docs/_static/file.png b/docs/_static/file.png new file mode 100644 index 00000000..a858a410 Binary files /dev/null and b/docs/_static/file.png differ diff --git a/docs/_static/github.svg b/docs/_static/github.svg new file mode 100644 index 00000000..013e0253 --- /dev/null +++ b/docs/_static/github.svg @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/docs/_static/language_data.js b/docs/_static/language_data.js new file mode 100644 index 00000000..c7fe6c6f --- /dev/null +++ b/docs/_static/language_data.js @@ -0,0 +1,192 @@ +/* + * This script contains the language-specific data used by searchtools.js, + * namely the list of stopwords, stemmer, scorer and splitter. 
+ */ + +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; + + +/* Non-minified version is copied as a separate JS file, if available */ + +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if 
(re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/docs/_static/logo.png b/docs/_static/logo.png new file mode 100644 index 00000000..3a7451eb Binary files /dev/null and b/docs/_static/logo.png differ diff --git a/docs/_static/minus.png b/docs/_static/minus.png new file mode 100644 index 00000000..d96755fd Binary files /dev/null and b/docs/_static/minus.png differ diff --git a/docs/_static/plus.png b/docs/_static/plus.png new file mode 100644 index 00000000..7107cec9 Binary files /dev/null and b/docs/_static/plus.png differ diff --git a/docs/_static/pygments.css b/docs/_static/pygments.css new file mode 100644 index 00000000..9d1083bf --- /dev/null +++ b/docs/_static/pygments.css @@ -0,0 +1,250 @@ +.highlight pre { line-height: 125%; } +.highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +.highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +.highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight .hll { background-color: #fdf2e2 } +.highlight { background: #f2f2f2; color: #1E1E1E } +.highlight .c { color: #515151 } /* 
Comment */ +.highlight .err { color: #D71835 } /* Error */ +.highlight .k { color: #8045E5 } /* Keyword */ +.highlight .l { color: #7F4707 } /* Literal */ +.highlight .n { color: #1E1E1E } /* Name */ +.highlight .o { color: #163 } /* Operator */ +.highlight .p { color: #1E1E1E } /* Punctuation */ +.highlight .ch { color: #515151 } /* Comment.Hashbang */ +.highlight .cm { color: #515151 } /* Comment.Multiline */ +.highlight .cp { color: #515151 } /* Comment.Preproc */ +.highlight .cpf { color: #515151 } /* Comment.PreprocFile */ +.highlight .c1 { color: #515151 } /* Comment.Single */ +.highlight .cs { color: #515151 } /* Comment.Special */ +.highlight .gd { color: #00749C } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .gh { color: #00749C } /* Generic.Heading */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #00749C } /* Generic.Subheading */ +.highlight .kc { color: #8045E5 } /* Keyword.Constant */ +.highlight .kd { color: #8045E5 } /* Keyword.Declaration */ +.highlight .kn { color: #8045E5 } /* Keyword.Namespace */ +.highlight .kp { color: #8045E5 } /* Keyword.Pseudo */ +.highlight .kr { color: #8045E5 } /* Keyword.Reserved */ +.highlight .kt { color: #7F4707 } /* Keyword.Type */ +.highlight .ld { color: #7F4707 } /* Literal.Date */ +.highlight .m { color: #7F4707 } /* Literal.Number */ +.highlight .s { color: #163 } /* Literal.String */ +.highlight .na { color: #7F4707 } /* Name.Attribute */ +.highlight .nb { color: #7F4707 } /* Name.Builtin */ +.highlight .nc { color: #00749C } /* Name.Class */ +.highlight .no { color: #00749C } /* Name.Constant */ +.highlight .nd { color: #7F4707 } /* Name.Decorator */ +.highlight .ni { color: #163 } /* Name.Entity */ +.highlight .ne { color: #8045E5 } /* Name.Exception */ +.highlight .nf { color: #00749C } /* Name.Function */ +.highlight .nl { color: #7F4707 } /* Name.Label */ +.highlight .nn { color: #1E1E1E } /* Name.Namespace */ +.highlight 
.nx { color: #1E1E1E } /* Name.Other */ +.highlight .py { color: #00749C } /* Name.Property */ +.highlight .nt { color: #00749C } /* Name.Tag */ +.highlight .nv { color: #D71835 } /* Name.Variable */ +.highlight .ow { color: #8045E5 } /* Operator.Word */ +.highlight .pm { color: #1E1E1E } /* Punctuation.Marker */ +.highlight .w { color: #1E1E1E } /* Text.Whitespace */ +.highlight .mb { color: #7F4707 } /* Literal.Number.Bin */ +.highlight .mf { color: #7F4707 } /* Literal.Number.Float */ +.highlight .mh { color: #7F4707 } /* Literal.Number.Hex */ +.highlight .mi { color: #7F4707 } /* Literal.Number.Integer */ +.highlight .mo { color: #7F4707 } /* Literal.Number.Oct */ +.highlight .sa { color: #163 } /* Literal.String.Affix */ +.highlight .sb { color: #163 } /* Literal.String.Backtick */ +.highlight .sc { color: #163 } /* Literal.String.Char */ +.highlight .dl { color: #163 } /* Literal.String.Delimiter */ +.highlight .sd { color: #163 } /* Literal.String.Doc */ +.highlight .s2 { color: #163 } /* Literal.String.Double */ +.highlight .se { color: #163 } /* Literal.String.Escape */ +.highlight .sh { color: #163 } /* Literal.String.Heredoc */ +.highlight .si { color: #163 } /* Literal.String.Interpol */ +.highlight .sx { color: #163 } /* Literal.String.Other */ +.highlight .sr { color: #D71835 } /* Literal.String.Regex */ +.highlight .s1 { color: #163 } /* Literal.String.Single */ +.highlight .ss { color: #00749C } /* Literal.String.Symbol */ +.highlight .bp { color: #7F4707 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #00749C } /* Name.Function.Magic */ +.highlight .vc { color: #D71835 } /* Name.Variable.Class */ +.highlight .vg { color: #D71835 } /* Name.Variable.Global */ +.highlight .vi { color: #D71835 } /* Name.Variable.Instance */ +.highlight .vm { color: #7F4707 } /* Name.Variable.Magic */ +.highlight .il { color: #7F4707 } /* Literal.Number.Integer.Long */ +@media not print { +body[data-theme="dark"] .highlight pre { line-height: 125%; } 
+body[data-theme="dark"] .highlight td.linenos .normal { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight span.linenos { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight .hll { background-color: #404040 } +body[data-theme="dark"] .highlight { background: #202020; color: #D0D0D0 } +body[data-theme="dark"] .highlight .c { color: #ABABAB; font-style: italic } /* Comment */ +body[data-theme="dark"] .highlight .err { color: #A61717; background-color: #E3D2D2 } /* Error */ +body[data-theme="dark"] .highlight .esc { color: #D0D0D0 } /* Escape */ +body[data-theme="dark"] .highlight .g { color: #D0D0D0 } /* Generic */ +body[data-theme="dark"] .highlight .k { color: #6EBF26; font-weight: bold } /* Keyword */ +body[data-theme="dark"] .highlight .l { color: #D0D0D0 } /* Literal */ +body[data-theme="dark"] .highlight .n { color: #D0D0D0 } /* Name */ +body[data-theme="dark"] .highlight .o { color: #D0D0D0 } /* Operator */ +body[data-theme="dark"] .highlight .x { color: #D0D0D0 } /* Other */ +body[data-theme="dark"] .highlight .p { color: #D0D0D0 } /* Punctuation */ +body[data-theme="dark"] .highlight .ch { color: #ABABAB; font-style: italic } /* Comment.Hashbang */ +body[data-theme="dark"] .highlight .cm { color: #ABABAB; font-style: italic } /* Comment.Multiline */ +body[data-theme="dark"] .highlight .cp { color: #FF3A3A; font-weight: bold } /* Comment.Preproc */ +body[data-theme="dark"] .highlight .cpf { color: #ABABAB; font-style: italic } /* Comment.PreprocFile */ +body[data-theme="dark"] .highlight .c1 { color: #ABABAB; font-style: italic } /* 
Comment.Single */ +body[data-theme="dark"] .highlight .cs { color: #E50808; font-weight: bold; background-color: #520000 } /* Comment.Special */ +body[data-theme="dark"] .highlight .gd { color: #FF3A3A } /* Generic.Deleted */ +body[data-theme="dark"] .highlight .ge { color: #D0D0D0; font-style: italic } /* Generic.Emph */ +body[data-theme="dark"] .highlight .ges { color: #D0D0D0; font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +body[data-theme="dark"] .highlight .gr { color: #FF3A3A } /* Generic.Error */ +body[data-theme="dark"] .highlight .gh { color: #FFF; font-weight: bold } /* Generic.Heading */ +body[data-theme="dark"] .highlight .gi { color: #589819 } /* Generic.Inserted */ +body[data-theme="dark"] .highlight .go { color: #CCC } /* Generic.Output */ +body[data-theme="dark"] .highlight .gp { color: #AAA } /* Generic.Prompt */ +body[data-theme="dark"] .highlight .gs { color: #D0D0D0; font-weight: bold } /* Generic.Strong */ +body[data-theme="dark"] .highlight .gu { color: #FFF; text-decoration: underline } /* Generic.Subheading */ +body[data-theme="dark"] .highlight .gt { color: #FF3A3A } /* Generic.Traceback */ +body[data-theme="dark"] .highlight .kc { color: #6EBF26; font-weight: bold } /* Keyword.Constant */ +body[data-theme="dark"] .highlight .kd { color: #6EBF26; font-weight: bold } /* Keyword.Declaration */ +body[data-theme="dark"] .highlight .kn { color: #6EBF26; font-weight: bold } /* Keyword.Namespace */ +body[data-theme="dark"] .highlight .kp { color: #6EBF26 } /* Keyword.Pseudo */ +body[data-theme="dark"] .highlight .kr { color: #6EBF26; font-weight: bold } /* Keyword.Reserved */ +body[data-theme="dark"] .highlight .kt { color: #6EBF26; font-weight: bold } /* Keyword.Type */ +body[data-theme="dark"] .highlight .ld { color: #D0D0D0 } /* Literal.Date */ +body[data-theme="dark"] .highlight .m { color: #51B2FD } /* Literal.Number */ +body[data-theme="dark"] .highlight .s { color: #ED9D13 } /* Literal.String */ +body[data-theme="dark"] 
.highlight .na { color: #BBB } /* Name.Attribute */ +body[data-theme="dark"] .highlight .nb { color: #2FBCCD } /* Name.Builtin */ +body[data-theme="dark"] .highlight .nc { color: #71ADFF; text-decoration: underline } /* Name.Class */ +body[data-theme="dark"] .highlight .no { color: #40FFFF } /* Name.Constant */ +body[data-theme="dark"] .highlight .nd { color: #FFA500 } /* Name.Decorator */ +body[data-theme="dark"] .highlight .ni { color: #D0D0D0 } /* Name.Entity */ +body[data-theme="dark"] .highlight .ne { color: #BBB } /* Name.Exception */ +body[data-theme="dark"] .highlight .nf { color: #71ADFF } /* Name.Function */ +body[data-theme="dark"] .highlight .nl { color: #D0D0D0 } /* Name.Label */ +body[data-theme="dark"] .highlight .nn { color: #71ADFF; text-decoration: underline } /* Name.Namespace */ +body[data-theme="dark"] .highlight .nx { color: #D0D0D0 } /* Name.Other */ +body[data-theme="dark"] .highlight .py { color: #D0D0D0 } /* Name.Property */ +body[data-theme="dark"] .highlight .nt { color: #6EBF26; font-weight: bold } /* Name.Tag */ +body[data-theme="dark"] .highlight .nv { color: #40FFFF } /* Name.Variable */ +body[data-theme="dark"] .highlight .ow { color: #6EBF26; font-weight: bold } /* Operator.Word */ +body[data-theme="dark"] .highlight .pm { color: #D0D0D0 } /* Punctuation.Marker */ +body[data-theme="dark"] .highlight .w { color: #666 } /* Text.Whitespace */ +body[data-theme="dark"] .highlight .mb { color: #51B2FD } /* Literal.Number.Bin */ +body[data-theme="dark"] .highlight .mf { color: #51B2FD } /* Literal.Number.Float */ +body[data-theme="dark"] .highlight .mh { color: #51B2FD } /* Literal.Number.Hex */ +body[data-theme="dark"] .highlight .mi { color: #51B2FD } /* Literal.Number.Integer */ +body[data-theme="dark"] .highlight .mo { color: #51B2FD } /* Literal.Number.Oct */ +body[data-theme="dark"] .highlight .sa { color: #ED9D13 } /* Literal.String.Affix */ +body[data-theme="dark"] .highlight .sb { color: #ED9D13 } /* Literal.String.Backtick */ 
+body[data-theme="dark"] .highlight .sc { color: #ED9D13 } /* Literal.String.Char */ +body[data-theme="dark"] .highlight .dl { color: #ED9D13 } /* Literal.String.Delimiter */ +body[data-theme="dark"] .highlight .sd { color: #ED9D13 } /* Literal.String.Doc */ +body[data-theme="dark"] .highlight .s2 { color: #ED9D13 } /* Literal.String.Double */ +body[data-theme="dark"] .highlight .se { color: #ED9D13 } /* Literal.String.Escape */ +body[data-theme="dark"] .highlight .sh { color: #ED9D13 } /* Literal.String.Heredoc */ +body[data-theme="dark"] .highlight .si { color: #ED9D13 } /* Literal.String.Interpol */ +body[data-theme="dark"] .highlight .sx { color: #FFA500 } /* Literal.String.Other */ +body[data-theme="dark"] .highlight .sr { color: #ED9D13 } /* Literal.String.Regex */ +body[data-theme="dark"] .highlight .s1 { color: #ED9D13 } /* Literal.String.Single */ +body[data-theme="dark"] .highlight .ss { color: #ED9D13 } /* Literal.String.Symbol */ +body[data-theme="dark"] .highlight .bp { color: #2FBCCD } /* Name.Builtin.Pseudo */ +body[data-theme="dark"] .highlight .fm { color: #71ADFF } /* Name.Function.Magic */ +body[data-theme="dark"] .highlight .vc { color: #40FFFF } /* Name.Variable.Class */ +body[data-theme="dark"] .highlight .vg { color: #40FFFF } /* Name.Variable.Global */ +body[data-theme="dark"] .highlight .vi { color: #40FFFF } /* Name.Variable.Instance */ +body[data-theme="dark"] .highlight .vm { color: #40FFFF } /* Name.Variable.Magic */ +body[data-theme="dark"] .highlight .il { color: #51B2FD } /* Literal.Number.Integer.Long */ +@media (prefers-color-scheme: dark) { +body:not([data-theme="light"]) .highlight pre { line-height: 125%; } +body:not([data-theme="light"]) .highlight td.linenos .normal { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight span.linenos { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } 
+body:not([data-theme="light"]) .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight .hll { background-color: #404040 } +body:not([data-theme="light"]) .highlight { background: #202020; color: #D0D0D0 } +body:not([data-theme="light"]) .highlight .c { color: #ABABAB; font-style: italic } /* Comment */ +body:not([data-theme="light"]) .highlight .err { color: #A61717; background-color: #E3D2D2 } /* Error */ +body:not([data-theme="light"]) .highlight .esc { color: #D0D0D0 } /* Escape */ +body:not([data-theme="light"]) .highlight .g { color: #D0D0D0 } /* Generic */ +body:not([data-theme="light"]) .highlight .k { color: #6EBF26; font-weight: bold } /* Keyword */ +body:not([data-theme="light"]) .highlight .l { color: #D0D0D0 } /* Literal */ +body:not([data-theme="light"]) .highlight .n { color: #D0D0D0 } /* Name */ +body:not([data-theme="light"]) .highlight .o { color: #D0D0D0 } /* Operator */ +body:not([data-theme="light"]) .highlight .x { color: #D0D0D0 } /* Other */ +body:not([data-theme="light"]) .highlight .p { color: #D0D0D0 } /* Punctuation */ +body:not([data-theme="light"]) .highlight .ch { color: #ABABAB; font-style: italic } /* Comment.Hashbang */ +body:not([data-theme="light"]) .highlight .cm { color: #ABABAB; font-style: italic } /* Comment.Multiline */ +body:not([data-theme="light"]) .highlight .cp { color: #FF3A3A; font-weight: bold } /* Comment.Preproc */ +body:not([data-theme="light"]) .highlight .cpf { color: #ABABAB; font-style: italic } /* Comment.PreprocFile */ +body:not([data-theme="light"]) .highlight .c1 { color: #ABABAB; font-style: italic } /* Comment.Single */ +body:not([data-theme="light"]) .highlight .cs { color: #E50808; font-weight: bold; background-color: #520000 } /* Comment.Special */ 
+body:not([data-theme="light"]) .highlight .gd { color: #FF3A3A } /* Generic.Deleted */ +body:not([data-theme="light"]) .highlight .ge { color: #D0D0D0; font-style: italic } /* Generic.Emph */ +body:not([data-theme="light"]) .highlight .ges { color: #D0D0D0; font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +body:not([data-theme="light"]) .highlight .gr { color: #FF3A3A } /* Generic.Error */ +body:not([data-theme="light"]) .highlight .gh { color: #FFF; font-weight: bold } /* Generic.Heading */ +body:not([data-theme="light"]) .highlight .gi { color: #589819 } /* Generic.Inserted */ +body:not([data-theme="light"]) .highlight .go { color: #CCC } /* Generic.Output */ +body:not([data-theme="light"]) .highlight .gp { color: #AAA } /* Generic.Prompt */ +body:not([data-theme="light"]) .highlight .gs { color: #D0D0D0; font-weight: bold } /* Generic.Strong */ +body:not([data-theme="light"]) .highlight .gu { color: #FFF; text-decoration: underline } /* Generic.Subheading */ +body:not([data-theme="light"]) .highlight .gt { color: #FF3A3A } /* Generic.Traceback */ +body:not([data-theme="light"]) .highlight .kc { color: #6EBF26; font-weight: bold } /* Keyword.Constant */ +body:not([data-theme="light"]) .highlight .kd { color: #6EBF26; font-weight: bold } /* Keyword.Declaration */ +body:not([data-theme="light"]) .highlight .kn { color: #6EBF26; font-weight: bold } /* Keyword.Namespace */ +body:not([data-theme="light"]) .highlight .kp { color: #6EBF26 } /* Keyword.Pseudo */ +body:not([data-theme="light"]) .highlight .kr { color: #6EBF26; font-weight: bold } /* Keyword.Reserved */ +body:not([data-theme="light"]) .highlight .kt { color: #6EBF26; font-weight: bold } /* Keyword.Type */ +body:not([data-theme="light"]) .highlight .ld { color: #D0D0D0 } /* Literal.Date */ +body:not([data-theme="light"]) .highlight .m { color: #51B2FD } /* Literal.Number */ +body:not([data-theme="light"]) .highlight .s { color: #ED9D13 } /* Literal.String */ +body:not([data-theme="light"]) 
.highlight .na { color: #BBB } /* Name.Attribute */ +body:not([data-theme="light"]) .highlight .nb { color: #2FBCCD } /* Name.Builtin */ +body:not([data-theme="light"]) .highlight .nc { color: #71ADFF; text-decoration: underline } /* Name.Class */ +body:not([data-theme="light"]) .highlight .no { color: #40FFFF } /* Name.Constant */ +body:not([data-theme="light"]) .highlight .nd { color: #FFA500 } /* Name.Decorator */ +body:not([data-theme="light"]) .highlight .ni { color: #D0D0D0 } /* Name.Entity */ +body:not([data-theme="light"]) .highlight .ne { color: #BBB } /* Name.Exception */ +body:not([data-theme="light"]) .highlight .nf { color: #71ADFF } /* Name.Function */ +body:not([data-theme="light"]) .highlight .nl { color: #D0D0D0 } /* Name.Label */ +body:not([data-theme="light"]) .highlight .nn { color: #71ADFF; text-decoration: underline } /* Name.Namespace */ +body:not([data-theme="light"]) .highlight .nx { color: #D0D0D0 } /* Name.Other */ +body:not([data-theme="light"]) .highlight .py { color: #D0D0D0 } /* Name.Property */ +body:not([data-theme="light"]) .highlight .nt { color: #6EBF26; font-weight: bold } /* Name.Tag */ +body:not([data-theme="light"]) .highlight .nv { color: #40FFFF } /* Name.Variable */ +body:not([data-theme="light"]) .highlight .ow { color: #6EBF26; font-weight: bold } /* Operator.Word */ +body:not([data-theme="light"]) .highlight .pm { color: #D0D0D0 } /* Punctuation.Marker */ +body:not([data-theme="light"]) .highlight .w { color: #666 } /* Text.Whitespace */ +body:not([data-theme="light"]) .highlight .mb { color: #51B2FD } /* Literal.Number.Bin */ +body:not([data-theme="light"]) .highlight .mf { color: #51B2FD } /* Literal.Number.Float */ +body:not([data-theme="light"]) .highlight .mh { color: #51B2FD } /* Literal.Number.Hex */ +body:not([data-theme="light"]) .highlight .mi { color: #51B2FD } /* Literal.Number.Integer */ +body:not([data-theme="light"]) .highlight .mo { color: #51B2FD } /* Literal.Number.Oct */ 
+body:not([data-theme="light"]) .highlight .sa { color: #ED9D13 } /* Literal.String.Affix */ +body:not([data-theme="light"]) .highlight .sb { color: #ED9D13 } /* Literal.String.Backtick */ +body:not([data-theme="light"]) .highlight .sc { color: #ED9D13 } /* Literal.String.Char */ +body:not([data-theme="light"]) .highlight .dl { color: #ED9D13 } /* Literal.String.Delimiter */ +body:not([data-theme="light"]) .highlight .sd { color: #ED9D13 } /* Literal.String.Doc */ +body:not([data-theme="light"]) .highlight .s2 { color: #ED9D13 } /* Literal.String.Double */ +body:not([data-theme="light"]) .highlight .se { color: #ED9D13 } /* Literal.String.Escape */ +body:not([data-theme="light"]) .highlight .sh { color: #ED9D13 } /* Literal.String.Heredoc */ +body:not([data-theme="light"]) .highlight .si { color: #ED9D13 } /* Literal.String.Interpol */ +body:not([data-theme="light"]) .highlight .sx { color: #FFA500 } /* Literal.String.Other */ +body:not([data-theme="light"]) .highlight .sr { color: #ED9D13 } /* Literal.String.Regex */ +body:not([data-theme="light"]) .highlight .s1 { color: #ED9D13 } /* Literal.String.Single */ +body:not([data-theme="light"]) .highlight .ss { color: #ED9D13 } /* Literal.String.Symbol */ +body:not([data-theme="light"]) .highlight .bp { color: #2FBCCD } /* Name.Builtin.Pseudo */ +body:not([data-theme="light"]) .highlight .fm { color: #71ADFF } /* Name.Function.Magic */ +body:not([data-theme="light"]) .highlight .vc { color: #40FFFF } /* Name.Variable.Class */ +body:not([data-theme="light"]) .highlight .vg { color: #40FFFF } /* Name.Variable.Global */ +body:not([data-theme="light"]) .highlight .vi { color: #40FFFF } /* Name.Variable.Instance */ +body:not([data-theme="light"]) .highlight .vm { color: #40FFFF } /* Name.Variable.Magic */ +body:not([data-theme="light"]) .highlight .il { color: #51B2FD } /* Literal.Number.Integer.Long */ +} +} \ No newline at end of file diff --git a/docs/_static/result/inspection1.png b/docs/_static/result/inspection1.png 
new file mode 100644 index 00000000..d697ad84 Binary files /dev/null and b/docs/_static/result/inspection1.png differ diff --git a/examples/attack/__init__.py b/docs/_static/scripts/furo-extensions.js similarity index 100% rename from examples/attack/__init__.py rename to docs/_static/scripts/furo-extensions.js diff --git a/docs/_static/scripts/furo.js b/docs/_static/scripts/furo.js new file mode 100644 index 00000000..87e1767f --- /dev/null +++ b/docs/_static/scripts/furo.js @@ -0,0 +1,3 @@ +/*! For license information please see furo.js.LICENSE.txt */ +(()=>{var t={856:function(t,e,n){var o,r;r=void 0!==n.g?n.g:"undefined"!=typeof window?window:this,o=function(){return function(t){"use strict";var e={navClass:"active",contentClass:"active",nested:!1,nestedClass:"active",offset:0,reflow:!1,events:!0},n=function(t,e,n){if(n.settings.events){var o=new CustomEvent(t,{bubbles:!0,cancelable:!0,detail:n});e.dispatchEvent(o)}},o=function(t){var e=0;if(t.offsetParent)for(;t;)e+=t.offsetTop,t=t.offsetParent;return e>=0?e:0},r=function(t){t&&t.sort(function(t,e){return o(t.content)=Math.max(document.body.scrollHeight,document.documentElement.scrollHeight,document.body.offsetHeight,document.documentElement.offsetHeight,document.body.clientHeight,document.documentElement.clientHeight)},l=function(t,e){var n=t[t.length-1];if(function(t,e){return!(!s()||!c(t.content,e,!0))}(n,e))return n;for(var o=t.length-1;o>=0;o--)if(c(t[o].content,e))return t[o]},a=function(t,e){if(e.nested&&t.parentNode){var n=t.parentNode.closest("li");n&&(n.classList.remove(e.nestedClass),a(n,e))}},i=function(t,e){if(t){var o=t.nav.closest("li");o&&(o.classList.remove(e.navClass),t.content.classList.remove(e.contentClass),a(o,e),n("gumshoeDeactivate",o,{link:t.nav,content:t.content,settings:e}))}},u=function(t,e){if(e.nested){var n=t.parentNode.closest("li");n&&(n.classList.add(e.nestedClass),u(n,e))}};return function(o,c){var 
s,a,d,f,m,v={setup:function(){s=document.querySelectorAll(o),a=[],Array.prototype.forEach.call(s,function(t){var e=document.getElementById(decodeURIComponent(t.hash.substr(1)));e&&a.push({nav:t,content:e})}),r(a)},detect:function(){var t=l(a,m);t?d&&t.content===d.content||(i(d,m),function(t,e){if(t){var o=t.nav.closest("li");o&&(o.classList.add(e.navClass),t.content.classList.add(e.contentClass),u(o,e),n("gumshoeActivate",o,{link:t.nav,content:t.content,settings:e}))}}(t,m),d=t):d&&(i(d,m),d=null)}},h=function(e){f&&t.cancelAnimationFrame(f),f=t.requestAnimationFrame(v.detect)},g=function(e){f&&t.cancelAnimationFrame(f),f=t.requestAnimationFrame(function(){r(a),v.detect()})};return v.destroy=function(){d&&i(d,m),t.removeEventListener("scroll",h,!1),m.reflow&&t.removeEventListener("resize",g,!1),a=null,s=null,d=null,f=null,m=null},m=function(){var t={};return Array.prototype.forEach.call(arguments,function(e){for(var n in e){if(!e.hasOwnProperty(n))return;t[n]=e[n]}}),t}(e,c||{}),v.setup(),v.detect(),t.addEventListener("scroll",h,!1),m.reflow&&t.addEventListener("resize",g,!1),v}}(r)}.apply(e,[]),void 0===o||(t.exports=o)}},e={};function n(o){var r=e[o];if(void 0!==r)return r.exports;var c=e[o]={exports:{}};return t[o].call(c.exports,c,c.exports,n),c.exports}n.n=t=>{var e=t&&t.__esModule?()=>t.default:()=>t;return n.d(e,{a:e}),e},n.d=(t,e)=>{for(var o in e)n.o(e,o)&&!n.o(t,o)&&Object.defineProperty(t,o,{enumerable:!0,get:e[o]})},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(t){if("object"==typeof window)return window}}(),n.o=(t,e)=>Object.prototype.hasOwnProperty.call(t,e),(()=>{"use strict";var t=n(856),e=n.n(t),o=null,r=null,c=document.documentElement.scrollTop;function s(){const t=localStorage.getItem("theme")||"auto";var e;"light"!==(e=window.matchMedia("(prefers-color-scheme: 
dark)").matches?"auto"===t?"light":"light"==t?"dark":"auto":"auto"===t?"dark":"dark"==t?"light":"auto")&&"dark"!==e&&"auto"!==e&&(console.error(`Got invalid theme mode: ${e}. Resetting to auto.`),e="auto"),document.body.dataset.theme=e,localStorage.setItem("theme",e),console.log(`Changed to ${e} mode.`)}function l(){!function(){const t=document.getElementsByClassName("theme-toggle");Array.from(t).forEach(t=>{t.addEventListener("click",s)})}(),function(){let t=0,e=!1;window.addEventListener("scroll",function(n){t=window.scrollY,e||(window.requestAnimationFrame(function(){var n;(function(t){t>0?r.classList.add("scrolled"):r.classList.remove("scrolled")})(n=t),function(t){t<64?document.documentElement.classList.remove("show-back-to-top"):tc&&document.documentElement.classList.remove("show-back-to-top"),c=t}(n),function(t){null!==o&&(0==t?o.scrollTo(0,0):Math.ceil(t)>=Math.floor(document.documentElement.scrollHeight-window.innerHeight)?o.scrollTo(0,o.scrollHeight):document.querySelector(".scroll-current"))}(n),e=!1}),e=!0)}),window.scroll()}(),null!==o&&new(e())(".toc-tree a",{reflow:!0,recursive:!0,navClass:"scroll-current",offset:()=>{let t=parseFloat(getComputedStyle(document.documentElement).fontSize);const e=r.getBoundingClientRect();return e.top+e.height+2.5*t+1}})}document.addEventListener("DOMContentLoaded",function(){document.body.parentNode.classList.remove("no-js"),r=document.querySelector("header"),o=document.querySelector(".toc-scroll"),l()})})()})(); +//# sourceMappingURL=furo.js.map \ No newline at end of file diff --git a/docs/_static/scripts/furo.js.LICENSE.txt b/docs/_static/scripts/furo.js.LICENSE.txt new file mode 100644 index 00000000..1632189c --- /dev/null +++ b/docs/_static/scripts/furo.js.LICENSE.txt @@ -0,0 +1,7 @@ +/*! + * gumshoejs v5.1.2 (patched by @pradyunsg) + * A simple, framework-agnostic scrollspy script. 
+ * (c) 2019 Chris Ferdinandi + * MIT License + * http://github.com/cferdinandi/gumshoe + */ diff --git a/docs/_static/scripts/furo.js.map b/docs/_static/scripts/furo.js.map new file mode 100644 index 00000000..3b316f3a --- /dev/null +++ b/docs/_static/scripts/furo.js.map @@ -0,0 +1 @@ +{"version":3,"file":"scripts/furo.js","mappings":";iCAAA,MAQWA,SAWS,IAAX,EAAAC,EACH,EAAAA,EACkB,oBAAXC,OACLA,OACAC,KAbO,EAAF,WACP,OAaJ,SAAUD,GACR,aAMA,IAAIE,EAAW,CAEbC,SAAU,SACVC,aAAc,SAGdC,QAAQ,EACRC,YAAa,SAGbC,OAAQ,EACRC,QAAQ,EAGRC,QAAQ,GA6BNC,EAAY,SAAUC,EAAMC,EAAMC,GAEpC,GAAKA,EAAOC,SAASL,OAArB,CAGA,IAAIM,EAAQ,IAAIC,YAAYL,EAAM,CAChCM,SAAS,EACTC,YAAY,EACZL,OAAQA,IAIVD,EAAKO,cAAcJ,EAVgB,CAWrC,EAOIK,EAAe,SAAUR,GAC3B,IAAIS,EAAW,EACf,GAAIT,EAAKU,aACP,KAAOV,GACLS,GAAYT,EAAKW,UACjBX,EAAOA,EAAKU,aAGhB,OAAOD,GAAY,EAAIA,EAAW,CACpC,EAMIG,EAAe,SAAUC,GACvBA,GACFA,EAASC,KAAK,SAAUC,EAAOC,GAG7B,OAFcR,EAAaO,EAAME,SACnBT,EAAaQ,EAAMC,UACF,EACxB,CACT,EAEJ,EAwCIC,EAAW,SAAUlB,EAAME,EAAUiB,GACvC,IAAIC,EAASpB,EAAKqB,wBACd1B,EAnCU,SAAUO,GAExB,MAA+B,mBAApBA,EAASP,OACX2B,WAAWpB,EAASP,UAItB2B,WAAWpB,EAASP,OAC7B,CA2Be4B,CAAUrB,GACvB,OAAIiB,EAEAK,SAASJ,EAAOD,OAAQ,KACvB/B,EAAOqC,aAAeC,SAASC,gBAAgBC,cAG7CJ,SAASJ,EAAOS,IAAK,KAAOlC,CACrC,EAMImC,EAAa,WACf,OACEC,KAAKC,KAAK5C,EAAOqC,YAAcrC,EAAO6C,cAnCjCF,KAAKG,IACVR,SAASS,KAAKC,aACdV,SAASC,gBAAgBS,aACzBV,SAASS,KAAKE,aACdX,SAASC,gBAAgBU,aACzBX,SAASS,KAAKP,aACdF,SAASC,gBAAgBC,aAkC7B,EAmBIU,EAAY,SAAUzB,EAAUX,GAClC,IAAIqC,EAAO1B,EAASA,EAAS2B,OAAS,GACtC,GAbgB,SAAUC,EAAMvC,GAChC,SAAI4B,MAAgBZ,EAASuB,EAAKxB,QAASf,GAAU,GAEvD,CAUMwC,CAAYH,EAAMrC,GAAW,OAAOqC,EACxC,IAAK,IAAII,EAAI9B,EAAS2B,OAAS,EAAGG,GAAK,EAAGA,IACxC,GAAIzB,EAASL,EAAS8B,GAAG1B,QAASf,GAAW,OAAOW,EAAS8B,EAEjE,EAOIC,EAAmB,SAAUC,EAAK3C,GAEpC,GAAKA,EAAST,QAAWoD,EAAIC,WAA7B,CAGA,IAAIC,EAAKF,EAAIC,WAAWE,QAAQ,MAC3BD,IAGLA,EAAGE,UAAUC,OAAOhD,EAASR,aAG7BkD,EAAiBG,EAAI7C,GAV0B,CAWjD,EAOIiD,EAAa,SAAUC,EAAOlD,GAEhC,GAAKkD,EAAL,CAGA,IAAIL,EAAKK,EAAMP,IAAIG,QAAQ,MACtBD,IAGLA,EAAGE,UAAUC,OAAOhD,EAASX,UAC7B6D,EAAMnC,QAAQgC,UAAUC
,OAAOhD,EAASV,cAGxCoD,EAAiBG,EAAI7C,GAGrBJ,EAAU,oBAAqBiD,EAAI,CACjCM,KAAMD,EAAMP,IACZ5B,QAASmC,EAAMnC,QACff,SAAUA,IAjBM,CAmBpB,EAOIoD,EAAiB,SAAUT,EAAK3C,GAElC,GAAKA,EAAST,OAAd,CAGA,IAAIsD,EAAKF,EAAIC,WAAWE,QAAQ,MAC3BD,IAGLA,EAAGE,UAAUM,IAAIrD,EAASR,aAG1B4D,EAAeP,EAAI7C,GAVS,CAW9B,EA6LA,OA1JkB,SAAUsD,EAAUC,GAKpC,IACIC,EAAU7C,EAAU8C,EAASC,EAAS1D,EADtC2D,EAAa,CAUjBA,MAAmB,WAEjBH,EAAWhC,SAASoC,iBAAiBN,GAGrC3C,EAAW,GAGXkD,MAAMC,UAAUC,QAAQC,KAAKR,EAAU,SAAUjB,GAE/C,IAAIxB,EAAUS,SAASyC,eACrBC,mBAAmB3B,EAAK4B,KAAKC,OAAO,KAEjCrD,GAGLJ,EAAS0D,KAAK,CACZ1B,IAAKJ,EACLxB,QAASA,GAEb,GAGAL,EAAaC,EACf,EAKAgD,OAAoB,WAElB,IAAIW,EAASlC,EAAUzB,EAAUX,GAG5BsE,EASDb,GAAWa,EAAOvD,UAAY0C,EAAQ1C,UAG1CkC,EAAWQ,EAASzD,GAzFT,SAAUkD,EAAOlD,GAE9B,GAAKkD,EAAL,CAGA,IAAIL,EAAKK,EAAMP,IAAIG,QAAQ,MACtBD,IAGLA,EAAGE,UAAUM,IAAIrD,EAASX,UAC1B6D,EAAMnC,QAAQgC,UAAUM,IAAIrD,EAASV,cAGrC8D,EAAeP,EAAI7C,GAGnBJ,EAAU,kBAAmBiD,EAAI,CAC/BM,KAAMD,EAAMP,IACZ5B,QAASmC,EAAMnC,QACff,SAAUA,IAjBM,CAmBpB,CAqEIuE,CAASD,EAAQtE,GAGjByD,EAAUa,GAfJb,IACFR,EAAWQ,EAASzD,GACpByD,EAAU,KAchB,GAMIe,EAAgB,SAAUvE,GAExByD,GACFxE,EAAOuF,qBAAqBf,GAI9BA,EAAUxE,EAAOwF,sBAAsBf,EAAWgB,OACpD,EAMIC,EAAgB,SAAU3E,GAExByD,GACFxE,EAAOuF,qBAAqBf,GAI9BA,EAAUxE,EAAOwF,sBAAsB,WACrChE,EAAaC,GACbgD,EAAWgB,QACb,EACF,EAkDA,OA7CAhB,EAAWkB,QAAU,WAEfpB,GACFR,EAAWQ,EAASzD,GAItBd,EAAO4F,oBAAoB,SAAUN,GAAe,GAChDxE,EAASN,QACXR,EAAO4F,oBAAoB,SAAUF,GAAe,GAItDjE,EAAW,KACX6C,EAAW,KACXC,EAAU,KACVC,EAAU,KACV1D,EAAW,IACb,EAOEA,EA3XS,WACX,IAAI+E,EAAS,CAAC,EAOd,OANAlB,MAAMC,UAAUC,QAAQC,KAAKgB,UAAW,SAAUC,GAChD,IAAK,IAAIC,KAAOD,EAAK,CACnB,IAAKA,EAAIE,eAAeD,GAAM,OAC9BH,EAAOG,GAAOD,EAAIC,EACpB,CACF,GACOH,CACT,CAkXeK,CAAOhG,EAAUmE,GAAW,CAAC,GAGxCI,EAAW0B,QAGX1B,EAAWgB,SAGXzF,EAAOoG,iBAAiB,SAAUd,GAAe,GAC7CxE,EAASN,QACXR,EAAOoG,iBAAiB,SAAUV,GAAe,GAS9CjB,CACT,CAOF,CArcW4B,CAAQvG,EAChB,UAFM,SAEN,oB,GCXDwG,EAA2B,CAAC,EAGhC,SAASC,EAAoBC,GAE5B,IAAIC,EAAeH,EAAyBE,GAC5C,QAAqBE,IAAjBD,EACH,OAAOA,EAAaE,QAGrB,IAAIC,EAASN,EAAyBE,GAAY,CAGjDG,QAAS,CAAC,GAOX,OAHAE,EAAoBL,GAAU1B,KAAK8B,EAAOD,QAA
SC,EAAQA,EAAOD,QAASJ,GAGpEK,EAAOD,OACf,CCrBAJ,EAAoBO,EAAKF,IACxB,IAAIG,EAASH,GAAUA,EAAOI,WAC7B,IAAOJ,EAAiB,QACxB,IAAM,EAEP,OADAL,EAAoBU,EAAEF,EAAQ,CAAEG,EAAGH,IAC5BA,GCLRR,EAAoBU,EAAI,CAACN,EAASQ,KACjC,IAAI,IAAInB,KAAOmB,EACXZ,EAAoBa,EAAED,EAAYnB,KAASO,EAAoBa,EAAET,EAASX,IAC5EqB,OAAOC,eAAeX,EAASX,EAAK,CAAEuB,YAAY,EAAMC,IAAKL,EAAWnB,MCJ3EO,EAAoBxG,EAAI,WACvB,GAA0B,iBAAf0H,WAAyB,OAAOA,WAC3C,IACC,OAAOxH,MAAQ,IAAIyH,SAAS,cAAb,EAChB,CAAE,MAAOC,GACR,GAAsB,iBAAX3H,OAAqB,OAAOA,MACxC,CACA,CAPuB,GCAxBuG,EAAoBa,EAAI,CAACrB,EAAK6B,IAAUP,OAAOzC,UAAUqB,eAAenB,KAAKiB,EAAK6B,G,yCCK9EC,EAAY,KACZC,EAAS,KACTC,EAAgBzF,SAASC,gBAAgByF,UA4E7C,SAASC,IACP,MAAMC,EAAeC,aAAaC,QAAQ,UAAY,OAZxD,IAAkBC,EACH,WADGA,EAaIrI,OAAOsI,WAAW,gCAAgCC,QAI/C,SAAjBL,EACO,QACgB,SAAhBA,EACA,OAEA,OAIU,SAAjBA,EACO,OACgB,QAAhBA,EACA,QAEA,SA9BoB,SAATG,GAA4B,SAATA,IACzCG,QAAQC,MAAM,2BAA2BJ,yBACzCA,EAAO,QAGT/F,SAASS,KAAK2F,QAAQC,MAAQN,EAC9BF,aAAaS,QAAQ,QAASP,GAC9BG,QAAQK,IAAI,cAAcR,UA0B5B,CAmDA,SAASlC,KART,WAEE,MAAM2C,EAAUxG,SAASyG,uBAAuB,gBAChDpE,MAAMqE,KAAKF,GAASjE,QAASoE,IAC3BA,EAAI7C,iBAAiB,QAAS6B,IAElC,CAGEiB,GA/CF,WAEE,IAAIC,EAA6B,EAC7BC,GAAU,EAEdpJ,OAAOoG,iBAAiB,SAAU,SAAUuB,GAC1CwB,EAA6BnJ,OAAOqJ,QAE/BD,IACHpJ,OAAOwF,sBAAsB,WAzDnC,IAAuB8D,GArDvB,SAAgCA,GAC1BA,EAAY,EACdxB,EAAOjE,UAAUM,IAAI,YAErB2D,EAAOjE,UAAUC,OAAO,WAE5B,EAgDEyF,CADqBD,EA0DDH,GAvGtB,SAAmCG,GAC7BA,EAXmB,GAYrBhH,SAASC,gBAAgBsB,UAAUC,OAAO,oBAEtCwF,EAAYvB,EACdzF,SAASC,gBAAgBsB,UAAUM,IAAI,oBAC9BmF,EAAYvB,GACrBzF,SAASC,gBAAgBsB,UAAUC,OAAO,oBAG9CiE,EAAgBuB,CAClB,CAoCEE,CAA0BF,GAlC5B,SAA6BA,GACT,OAAdzB,IAKa,GAAbyB,EACFzB,EAAU4B,SAAS,EAAG,GAGtB9G,KAAKC,KAAK0G,IACV3G,KAAK+G,MAAMpH,SAASC,gBAAgBS,aAAehD,OAAOqC,aAE1DwF,EAAU4B,SAAS,EAAG5B,EAAU7E,cAGhBV,SAASqH,cAAc,mBAc3C,CAKEC,CAAoBN,GAwDdF,GAAU,CACZ,GAEAA,GAAU,EAEd,GACApJ,OAAO6J,QACT,CA8BEC,GA3BkB,OAAdjC,GAKJ,IAAI,IAAJ,CAAY,cAAe,CACzBrH,QAAQ,EACRuJ,WAAW,EACX5J,SAAU,iBACVI,OAAQ,KACN,IAAIyJ,EAAM9H,WAAW+H,iBAAiB3H,SAASC,iBAAiB2H,UAChE,MAAMC,EAAarC,EAAO7F,wBAC1B,OAAOkI,EAAW1H,IAAM0H,EAAWC,OAAS,IAAMJ,EAAM,IAiB9D,C
AcA1H,SAAS8D,iBAAiB,mBAT1B,WACE9D,SAASS,KAAKW,WAAWG,UAAUC,OAAO,SAE1CgE,EAASxF,SAASqH,cAAc,UAChC9B,EAAYvF,SAASqH,cAAc,eAEnCxD,GACF,E","sources":["webpack:///./src/furo/assets/scripts/gumshoe-patched.js","webpack:///webpack/bootstrap","webpack:///webpack/runtime/compat get default export","webpack:///webpack/runtime/define property getters","webpack:///webpack/runtime/global","webpack:///webpack/runtime/hasOwnProperty shorthand","webpack:///./src/furo/assets/scripts/furo.js"],"sourcesContent":["/*!\n * gumshoejs v5.1.2 (patched by @pradyunsg)\n * A simple, framework-agnostic scrollspy script.\n * (c) 2019 Chris Ferdinandi\n * MIT License\n * http://github.com/cferdinandi/gumshoe\n */\n\n(function (root, factory) {\n if (typeof define === \"function\" && define.amd) {\n define([], function () {\n return factory(root);\n });\n } else if (typeof exports === \"object\") {\n module.exports = factory(root);\n } else {\n root.Gumshoe = factory(root);\n }\n})(\n typeof global !== \"undefined\"\n ? global\n : typeof window !== \"undefined\"\n ? 
window\n : this,\n function (window) {\n \"use strict\";\n\n //\n // Defaults\n //\n\n var defaults = {\n // Active classes\n navClass: \"active\",\n contentClass: \"active\",\n\n // Nested navigation\n nested: false,\n nestedClass: \"active\",\n\n // Offset & reflow\n offset: 0,\n reflow: false,\n\n // Event support\n events: true,\n };\n\n //\n // Methods\n //\n\n /**\n * Merge two or more objects together.\n * @param {Object} objects The objects to merge together\n * @returns {Object} Merged values of defaults and options\n */\n var extend = function () {\n var merged = {};\n Array.prototype.forEach.call(arguments, function (obj) {\n for (var key in obj) {\n if (!obj.hasOwnProperty(key)) return;\n merged[key] = obj[key];\n }\n });\n return merged;\n };\n\n /**\n * Emit a custom event\n * @param {String} type The event type\n * @param {Node} elem The element to attach the event to\n * @param {Object} detail Any details to pass along with the event\n */\n var emitEvent = function (type, elem, detail) {\n // Make sure events are enabled\n if (!detail.settings.events) return;\n\n // Create a new event\n var event = new CustomEvent(type, {\n bubbles: true,\n cancelable: true,\n detail: detail,\n });\n\n // Dispatch the event\n elem.dispatchEvent(event);\n };\n\n /**\n * Get an element's distance from the top of the Document.\n * @param {Node} elem The element\n * @return {Number} Distance from the top in pixels\n */\n var getOffsetTop = function (elem) {\n var location = 0;\n if (elem.offsetParent) {\n while (elem) {\n location += elem.offsetTop;\n elem = elem.offsetParent;\n }\n }\n return location >= 0 ? 
location : 0;\n };\n\n /**\n * Sort content from first to last in the DOM\n * @param {Array} contents The content areas\n */\n var sortContents = function (contents) {\n if (contents) {\n contents.sort(function (item1, item2) {\n var offset1 = getOffsetTop(item1.content);\n var offset2 = getOffsetTop(item2.content);\n if (offset1 < offset2) return -1;\n return 1;\n });\n }\n };\n\n /**\n * Get the offset to use for calculating position\n * @param {Object} settings The settings for this instantiation\n * @return {Float} The number of pixels to offset the calculations\n */\n var getOffset = function (settings) {\n // if the offset is a function run it\n if (typeof settings.offset === \"function\") {\n return parseFloat(settings.offset());\n }\n\n // Otherwise, return it as-is\n return parseFloat(settings.offset);\n };\n\n /**\n * Get the document element's height\n * @private\n * @returns {Number}\n */\n var getDocumentHeight = function () {\n return Math.max(\n document.body.scrollHeight,\n document.documentElement.scrollHeight,\n document.body.offsetHeight,\n document.documentElement.offsetHeight,\n document.body.clientHeight,\n document.documentElement.clientHeight,\n );\n };\n\n /**\n * Determine if an element is in view\n * @param {Node} elem The element\n * @param {Object} settings The settings for this instantiation\n * @param {Boolean} bottom If true, check if element is above bottom of viewport instead\n * @return {Boolean} Returns true if element is in the viewport\n */\n var isInView = function (elem, settings, bottom) {\n var bounds = elem.getBoundingClientRect();\n var offset = getOffset(settings);\n if (bottom) {\n return (\n parseInt(bounds.bottom, 10) <\n (window.innerHeight || document.documentElement.clientHeight)\n );\n }\n return parseInt(bounds.top, 10) <= offset;\n };\n\n /**\n * Check if at the bottom of the viewport\n * @return {Boolean} If true, page is at the bottom of the viewport\n */\n var isAtBottom = function () {\n if (\n 
Math.ceil(window.innerHeight + window.pageYOffset) >=\n getDocumentHeight()\n )\n return true;\n return false;\n };\n\n /**\n * Check if the last item should be used (even if not at the top of the page)\n * @param {Object} item The last item\n * @param {Object} settings The settings for this instantiation\n * @return {Boolean} If true, use the last item\n */\n var useLastItem = function (item, settings) {\n if (isAtBottom() && isInView(item.content, settings, true)) return true;\n return false;\n };\n\n /**\n * Get the active content\n * @param {Array} contents The content areas\n * @param {Object} settings The settings for this instantiation\n * @return {Object} The content area and matching navigation link\n */\n var getActive = function (contents, settings) {\n var last = contents[contents.length - 1];\n if (useLastItem(last, settings)) return last;\n for (var i = contents.length - 1; i >= 0; i--) {\n if (isInView(contents[i].content, settings)) return contents[i];\n }\n };\n\n /**\n * Deactivate parent navs in a nested navigation\n * @param {Node} nav The starting navigation element\n * @param {Object} settings The settings for this instantiation\n */\n var deactivateNested = function (nav, settings) {\n // If nesting isn't activated, bail\n if (!settings.nested || !nav.parentNode) return;\n\n // Get the parent navigation\n var li = nav.parentNode.closest(\"li\");\n if (!li) return;\n\n // Remove the active class\n li.classList.remove(settings.nestedClass);\n\n // Apply recursively to any parent navigation elements\n deactivateNested(li, settings);\n };\n\n /**\n * Deactivate a nav and content area\n * @param {Object} items The nav item and content to deactivate\n * @param {Object} settings The settings for this instantiation\n */\n var deactivate = function (items, settings) {\n // Make sure there are items to deactivate\n if (!items) return;\n\n // Get the parent list item\n var li = items.nav.closest(\"li\");\n if (!li) return;\n\n // Remove the active class 
from the nav and content\n li.classList.remove(settings.navClass);\n items.content.classList.remove(settings.contentClass);\n\n // Deactivate any parent navs in a nested navigation\n deactivateNested(li, settings);\n\n // Emit a custom event\n emitEvent(\"gumshoeDeactivate\", li, {\n link: items.nav,\n content: items.content,\n settings: settings,\n });\n };\n\n /**\n * Activate parent navs in a nested navigation\n * @param {Node} nav The starting navigation element\n * @param {Object} settings The settings for this instantiation\n */\n var activateNested = function (nav, settings) {\n // If nesting isn't activated, bail\n if (!settings.nested) return;\n\n // Get the parent navigation\n var li = nav.parentNode.closest(\"li\");\n if (!li) return;\n\n // Add the active class\n li.classList.add(settings.nestedClass);\n\n // Apply recursively to any parent navigation elements\n activateNested(li, settings);\n };\n\n /**\n * Activate a nav and content area\n * @param {Object} items The nav item and content to activate\n * @param {Object} settings The settings for this instantiation\n */\n var activate = function (items, settings) {\n // Make sure there are items to activate\n if (!items) return;\n\n // Get the parent list item\n var li = items.nav.closest(\"li\");\n if (!li) return;\n\n // Add the active class to the nav and content\n li.classList.add(settings.navClass);\n items.content.classList.add(settings.contentClass);\n\n // Activate any parent navs in a nested navigation\n activateNested(li, settings);\n\n // Emit a custom event\n emitEvent(\"gumshoeActivate\", li, {\n link: items.nav,\n content: items.content,\n settings: settings,\n });\n };\n\n /**\n * Create the Constructor object\n * @param {String} selector The selector to use for navigation items\n * @param {Object} options User options and settings\n */\n var Constructor = function (selector, options) {\n //\n // Variables\n //\n\n var publicAPIs = {};\n var navItems, contents, current, timeout, 
settings;\n\n //\n // Methods\n //\n\n /**\n * Set variables from DOM elements\n */\n publicAPIs.setup = function () {\n // Get all nav items\n navItems = document.querySelectorAll(selector);\n\n // Create contents array\n contents = [];\n\n // Loop through each item, get it's matching content, and push to the array\n Array.prototype.forEach.call(navItems, function (item) {\n // Get the content for the nav item\n var content = document.getElementById(\n decodeURIComponent(item.hash.substr(1)),\n );\n if (!content) return;\n\n // Push to the contents array\n contents.push({\n nav: item,\n content: content,\n });\n });\n\n // Sort contents by the order they appear in the DOM\n sortContents(contents);\n };\n\n /**\n * Detect which content is currently active\n */\n publicAPIs.detect = function () {\n // Get the active content\n var active = getActive(contents, settings);\n\n // if there's no active content, deactivate and bail\n if (!active) {\n if (current) {\n deactivate(current, settings);\n current = null;\n }\n return;\n }\n\n // If the active content is the one currently active, do nothing\n if (current && active.content === current.content) return;\n\n // Deactivate the current content and activate the new content\n deactivate(current, settings);\n activate(active, settings);\n\n // Update the currently active content\n current = active;\n };\n\n /**\n * Detect the active content on scroll\n * Debounced for performance\n */\n var scrollHandler = function (event) {\n // If there's a timer, cancel it\n if (timeout) {\n window.cancelAnimationFrame(timeout);\n }\n\n // Setup debounce callback\n timeout = window.requestAnimationFrame(publicAPIs.detect);\n };\n\n /**\n * Update content sorting on resize\n * Debounced for performance\n */\n var resizeHandler = function (event) {\n // If there's a timer, cancel it\n if (timeout) {\n window.cancelAnimationFrame(timeout);\n }\n\n // Setup debounce callback\n timeout = window.requestAnimationFrame(function () {\n 
sortContents(contents);\n publicAPIs.detect();\n });\n };\n\n /**\n * Destroy the current instantiation\n */\n publicAPIs.destroy = function () {\n // Undo DOM changes\n if (current) {\n deactivate(current, settings);\n }\n\n // Remove event listeners\n window.removeEventListener(\"scroll\", scrollHandler, false);\n if (settings.reflow) {\n window.removeEventListener(\"resize\", resizeHandler, false);\n }\n\n // Reset variables\n contents = null;\n navItems = null;\n current = null;\n timeout = null;\n settings = null;\n };\n\n /**\n * Initialize the current instantiation\n */\n var init = function () {\n // Merge user options into defaults\n settings = extend(defaults, options || {});\n\n // Setup variables based on the current DOM\n publicAPIs.setup();\n\n // Find the currently active content\n publicAPIs.detect();\n\n // Setup event listeners\n window.addEventListener(\"scroll\", scrollHandler, false);\n if (settings.reflow) {\n window.addEventListener(\"resize\", resizeHandler, false);\n }\n };\n\n //\n // Initialize and return the public APIs\n //\n\n init();\n return publicAPIs;\n };\n\n //\n // Return the Constructor\n //\n\n return Constructor;\n },\n);\n","// The module cache\nvar __webpack_module_cache__ = {};\n\n// The require function\nfunction __webpack_require__(moduleId) {\n\t// Check if module is in cache\n\tvar cachedModule = __webpack_module_cache__[moduleId];\n\tif (cachedModule !== undefined) {\n\t\treturn cachedModule.exports;\n\t}\n\t// Create a new module (and put it into the cache)\n\tvar module = __webpack_module_cache__[moduleId] = {\n\t\t// no module.id needed\n\t\t// no module.loaded needed\n\t\texports: {}\n\t};\n\n\t// Execute the module function\n\t__webpack_modules__[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n\n\t// Return the exports of the module\n\treturn module.exports;\n}\n\n","// getDefaultExport function for compatibility with non-harmony modules\n__webpack_require__.n = (module) => {\n\tvar 
getter = module && module.__esModule ?\n\t\t() => (module['default']) :\n\t\t() => (module);\n\t__webpack_require__.d(getter, { a: getter });\n\treturn getter;\n};","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.g = (function() {\n\tif (typeof globalThis === 'object') return globalThis;\n\ttry {\n\t\treturn this || new Function('return this')();\n\t} catch (e) {\n\t\tif (typeof window === 'object') return window;\n\t}\n})();","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","import Gumshoe from \"./gumshoe-patched.js\";\n\n////////////////////////////////////////////////////////////////////////////////\n// Scroll Handling\n////////////////////////////////////////////////////////////////////////////////\nvar tocScroll = null;\nvar header = null;\nvar lastScrollTop = document.documentElement.scrollTop;\nconst GO_TO_TOP_OFFSET = 64;\n\nfunction scrollHandlerForHeader(positionY) {\n if (positionY > 0) {\n header.classList.add(\"scrolled\");\n } else {\n header.classList.remove(\"scrolled\");\n }\n}\n\nfunction scrollHandlerForBackToTop(positionY) {\n if (positionY < GO_TO_TOP_OFFSET) {\n document.documentElement.classList.remove(\"show-back-to-top\");\n } else {\n if (positionY < lastScrollTop) {\n document.documentElement.classList.add(\"show-back-to-top\");\n } else if (positionY > lastScrollTop) {\n document.documentElement.classList.remove(\"show-back-to-top\");\n }\n }\n lastScrollTop = positionY;\n}\n\nfunction scrollHandlerForTOC(positionY) {\n if (tocScroll === null) {\n return;\n }\n\n // top of page.\n if (positionY == 0) {\n tocScroll.scrollTo(0, 0);\n } else if (\n // bottom of page.\n Math.ceil(positionY) >=\n 
Math.floor(document.documentElement.scrollHeight - window.innerHeight)\n ) {\n tocScroll.scrollTo(0, tocScroll.scrollHeight);\n } else {\n // somewhere in the middle.\n const current = document.querySelector(\".scroll-current\");\n if (current == null) {\n return;\n }\n\n // https://github.com/pypa/pip/issues/9159 This breaks scroll behaviours.\n // // scroll the currently \"active\" heading in toc, into view.\n // const rect = current.getBoundingClientRect();\n // if (0 > rect.top) {\n // current.scrollIntoView(true); // the argument is \"alignTop\"\n // } else if (rect.bottom > window.innerHeight) {\n // current.scrollIntoView(false);\n // }\n }\n}\n\nfunction scrollHandler(positionY) {\n scrollHandlerForHeader(positionY);\n scrollHandlerForBackToTop(positionY);\n scrollHandlerForTOC(positionY);\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Theme Toggle\n////////////////////////////////////////////////////////////////////////////////\nfunction setTheme(mode) {\n if (mode !== \"light\" && mode !== \"dark\" && mode !== \"auto\") {\n console.error(`Got invalid theme mode: ${mode}. 
Resetting to auto.`);\n mode = \"auto\";\n }\n\n document.body.dataset.theme = mode;\n localStorage.setItem(\"theme\", mode);\n console.log(`Changed to ${mode} mode.`);\n}\n\nfunction cycleThemeOnce() {\n const currentTheme = localStorage.getItem(\"theme\") || \"auto\";\n const prefersDark = window.matchMedia(\"(prefers-color-scheme: dark)\").matches;\n\n if (prefersDark) {\n // Auto (dark) -> Light -> Dark\n if (currentTheme === \"auto\") {\n setTheme(\"light\");\n } else if (currentTheme == \"light\") {\n setTheme(\"dark\");\n } else {\n setTheme(\"auto\");\n }\n } else {\n // Auto (light) -> Dark -> Light\n if (currentTheme === \"auto\") {\n setTheme(\"dark\");\n } else if (currentTheme == \"dark\") {\n setTheme(\"light\");\n } else {\n setTheme(\"auto\");\n }\n }\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////////////////////////\nfunction setupScrollHandler() {\n // Taken from https://developer.mozilla.org/en-US/docs/Web/API/Document/scroll_event\n let last_known_scroll_position = 0;\n let ticking = false;\n\n window.addEventListener(\"scroll\", function (e) {\n last_known_scroll_position = window.scrollY;\n\n if (!ticking) {\n window.requestAnimationFrame(function () {\n scrollHandler(last_known_scroll_position);\n ticking = false;\n });\n\n ticking = true;\n }\n });\n window.scroll();\n}\n\nfunction setupScrollSpy() {\n if (tocScroll === null) {\n return;\n }\n\n // Scrollspy -- highlight table on contents, based on scroll\n new Gumshoe(\".toc-tree a\", {\n reflow: true,\n recursive: true,\n navClass: \"scroll-current\",\n offset: () => {\n let rem = parseFloat(getComputedStyle(document.documentElement).fontSize);\n const headerRect = header.getBoundingClientRect();\n return headerRect.top + headerRect.height + 2.5 * rem + 1;\n },\n });\n}\n\nfunction setupTheme() {\n // Attach event handlers for toggling themes\n const buttons = 
document.getElementsByClassName(\"theme-toggle\");\n Array.from(buttons).forEach((btn) => {\n btn.addEventListener(\"click\", cycleThemeOnce);\n });\n}\n\nfunction setup() {\n setupTheme();\n setupScrollHandler();\n setupScrollSpy();\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Main entrypoint\n////////////////////////////////////////////////////////////////////////////////\nfunction main() {\n document.body.parentNode.classList.remove(\"no-js\");\n\n header = document.querySelector(\"header\");\n tocScroll = document.querySelector(\".toc-scroll\");\n\n setup();\n}\n\ndocument.addEventListener(\"DOMContentLoaded\", main);\n"],"names":["root","g","window","this","defaults","navClass","contentClass","nested","nestedClass","offset","reflow","events","emitEvent","type","elem","detail","settings","event","CustomEvent","bubbles","cancelable","dispatchEvent","getOffsetTop","location","offsetParent","offsetTop","sortContents","contents","sort","item1","item2","content","isInView","bottom","bounds","getBoundingClientRect","parseFloat","getOffset","parseInt","innerHeight","document","documentElement","clientHeight","top","isAtBottom","Math","ceil","pageYOffset","max","body","scrollHeight","offsetHeight","getActive","last","length","item","useLastItem","i","deactivateNested","nav","parentNode","li","closest","classList","remove","deactivate","items","link","activateNested","add","selector","options","navItems","current","timeout","publicAPIs","querySelectorAll","Array","prototype","forEach","call","getElementById","decodeURIComponent","hash","substr","push","active","activate","scrollHandler","cancelAnimationFrame","requestAnimationFrame","detect","resizeHandler","destroy","removeEventListener","merged","arguments","obj","key","hasOwnProperty","extend","setup","addEventListener","factory","__webpack_module_cache__","__webpack_require__","moduleId","cachedModule","undefined","exports","module","__webpack_modules__","n","getter","__e
sModule","d","a","definition","o","Object","defineProperty","enumerable","get","globalThis","Function","e","prop","tocScroll","header","lastScrollTop","scrollTop","cycleThemeOnce","currentTheme","localStorage","getItem","mode","matchMedia","matches","console","error","dataset","theme","setItem","log","buttons","getElementsByClassName","from","btn","setupTheme","last_known_scroll_position","ticking","scrollY","positionY","scrollHandlerForHeader","scrollHandlerForBackToTop","scrollTo","floor","querySelector","scrollHandlerForTOC","scroll","setupScrollHandler","recursive","rem","getComputedStyle","fontSize","headerRect","height"],"sourceRoot":""} \ No newline at end of file diff --git a/docs/_static/searchtools.js b/docs/_static/searchtools.js new file mode 100644 index 00000000..91f4be57 --- /dev/null +++ b/docs/_static/searchtools.js @@ -0,0 +1,635 @@ +/* + * Sphinx JavaScript utilities for the full-text search. + */ +"use strict"; + +/** + * Simple result scoring code. + */ +if (typeof Scorer === "undefined") { + var Scorer = { + // Implement the following function to further tweak the score for each result + // The function takes a result array [docname, title, anchor, descr, score, filename] + // and returns the new score. + /* + score: result => { + const [docname, title, anchor, descr, score, filename, kind] = result + return score + }, + */ + + // query matches the full name of an object + objNameMatch: 11, + // or matches in the last dotted part of the object name + objPartialMatch: 6, + // Additive scores depending on the priority of the object + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, + // Used when the priority is not in the mapping. + objPrioDefault: 0, + + // query found in title + title: 15, + partialTitle: 7, + // query found in terms + term: 5, + partialTerm: 2, + }; +} + +// Global search result kind enum, used by themes to style search results. 
+class SearchResultKind { + static get index() { return "index"; } + static get object() { return "object"; } + static get text() { return "text"; } + static get title() { return "title"; } +} + +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms, highlightTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + const contentRoot = document.documentElement.dataset.content_root; + + const [docName, title, anchor, descr, score, _filename, kind] = item; + + let listItem = document.createElement("li"); + // Add a class representing the item's type: + // can be used by a theme's CSS selector for styling + // See SearchResultKind for the class names. 
+ listItem.classList.add(`kind-${kind}`); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = contentRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = contentRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) { + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + // highlight search terms in the description + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + } + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms, anchor) + ); + // highlight search terms in the summary + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." 
+ ); + else + Search.status.innerText = Documentation.ngettext( + "Search finished, found one page matching the search query.", + "Search finished, found ${resultCount} pages matching the search query.", + resultCount, + ).replace('${resultCount}', resultCount); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms, + highlightTerms, +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms, highlightTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms, highlightTerms), + 5 + ); + } + // search finished, update title and status message + else _finishSearch(resultCount); +}; +// Helper function used by query() to order search results. +// Each input is an array of [docname, title, anchor, descr, score, filename, kind]. +// Order the results by score (in opposite order of appearance, since the +// `_displayNextItem` function uses pop() to retrieve items) and then alphabetically. +const _orderResultsByScoreThenName = (a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 1 : -1; +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. + * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. + * This is the same as ``\W+`` in Python, preserving the surrogate pair area. 
+ */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings +} + +/** + * Search Module + */ +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString, anchor) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + for (const removalQuery of [".headerlink", "script", "style"]) { + htmlElement.querySelectorAll(removalQuery).forEach((el) => { el.remove() }); + } + if (anchor) { + const anchorContent = htmlElement.querySelector(`[role="main"] ${anchor}`); + if (anchorContent) return anchorContent.textContent; + + console.warn( + `Anchored content block not found. Sphinx search tries to obtain it via DOM query '[role=main] ${anchor}'. Check your theme or template.` + ); + } + + // if anchor not specified or not found, fall back to main content + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent) return docContent.textContent; + + console.warn( + "Content block not found. Sphinx search tries to obtain it via DOM query '[role=main]'. Check your theme or template." 
+ ); + return ""; + }, + + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); + }, + + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), + + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); + } + }, + + hasIndex: () => Search._index !== null, + + deferQuery: (query) => (Search._queued_query = query), + + stopPulse: () => (Search._pulse_status = -1), + + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { + Search._pulse_status = (Search._pulse_status + 1) % 4; + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; + pulse(); + }, + + /** + * perform a search for something (or wait until index is loaded) + */ + performSearch: (query) => { + // create the required interface elements + const searchText = document.createElement("h2"); + searchText.textContent = _("Searching"); + const searchSummary = document.createElement("p"); + searchSummary.classList.add("search-summary"); + searchSummary.innerText = ""; + const searchList = document.createElement("ul"); + searchList.setAttribute("role", "list"); + searchList.classList.add("search"); + + const out = document.getElementById("search-results"); + Search.title = out.appendChild(searchText); + Search.dots = Search.title.appendChild(document.createElement("span")); + Search.status = out.appendChild(searchSummary); + Search.output = out.appendChild(searchList); + + const searchProgress = document.getElementById("search-progress"); + // Some themes don't use the search progress node + if (searchProgress) { + searchProgress.innerText = _("Preparing search..."); + } + 
Search.startPulse(); + + // index already loaded, the browser was quick! + if (Search.hasIndex()) Search.query(query); + else Search.deferQuery(query); + }, + + _parseQuery: (query) => { + // stem the search terms and add them to the correct list + const stemmer = new Stemmer(); + const searchTerms = new Set(); + const excludedTerms = new Set(); + const highlightTerms = new Set(); + const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); + splitQuery(query.trim()).forEach((queryTerm) => { + const queryTermLower = queryTerm.toLowerCase(); + + // maybe skip this "word" + // stopwords array is from language_data.js + if ( + stopwords.indexOf(queryTermLower) !== -1 || + queryTerm.match(/^\d+$/) + ) + return; + + // stem the word + let word = stemmer.stemWord(queryTermLower); + // select the correct list + if (word[0] === "-") excludedTerms.add(word.substr(1)); + else { + searchTerms.add(word); + highlightTerms.add(queryTermLower); + } + }); + + if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js + localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) + } + + // console.debug("SEARCH: searching for:"); + // console.info("required: ", [...searchTerms]); + // console.info("excluded: ", [...excludedTerms]); + + return [query, searchTerms, excludedTerms, highlightTerms, objectTerms]; + }, + + /** + * execute search (requires search index to be loaded) + */ + _performSearch: (query, searchTerms, excludedTerms, highlightTerms, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // Collect multiple result groups to be sorted separately and then ordered. + // Each is an array of [docname, title, anchor, descr, score, filename, kind]. 
+ const normalResults = []; + const nonMainIndexResults = []; + + _removeChildren(document.getElementById("search-progress")); + + const queryLower = query.toLowerCase().trim(); + for (const [title, foundTitles] of Object.entries(allTitles)) { + if (title.toLowerCase().trim().includes(queryLower) && (queryLower.length >= title.length/2)) { + for (const [file, id] of foundTitles) { + const score = Math.round(Scorer.title * queryLower.length / title.length); + const boost = titles[file] === title ? 1 : 0; // add a boost for document titles + normalResults.push([ + docNames[file], + titles[file] !== title ? `${titles[file]} > ${title}` : title, + id !== null ? "#" + id : "", + null, + score + boost, + filenames[file], + SearchResultKind.title, + ]); + } + } + } + + // search for explicit entries in index directives + for (const [entry, foundEntries] of Object.entries(indexEntries)) { + if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { + for (const [file, id, isMain] of foundEntries) { + const score = Math.round(100 * queryLower.length / entry.length); + const result = [ + docNames[file], + titles[file], + id ? "#" + id : "", + null, + score, + filenames[file], + SearchResultKind.index, + ]; + if (isMain) { + normalResults.push(result); + } else { + nonMainIndexResults.push(result); + } + } + } + } + + // lookup as object + objectTerms.forEach((term) => + normalResults.push(...Search.performObjectSearch(term, objectTerms)) + ); + + // lookup as search terms in fulltext + normalResults.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + + // let the scorer override scores with a custom scoring function + if (Scorer.score) { + normalResults.forEach((item) => (item[4] = Scorer.score(item))); + nonMainIndexResults.forEach((item) => (item[4] = Scorer.score(item))); + } + + // Sort each group of results by score and then alphabetically by name. 
+ normalResults.sort(_orderResultsByScoreThenName); + nonMainIndexResults.sort(_orderResultsByScoreThenName); + + // Combine the result groups in (reverse) order. + // Non-main index entries are typically arbitrary cross-references, + // so display them after other results. + let results = [...nonMainIndexResults, ...normalResults]; + + // remove duplicate search results + // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept + let seen = new Set(); + results = results.reverse().reduce((acc, result) => { + let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); + if (!seen.has(resultStr)) { + acc.push(result); + seen.add(resultStr); + } + return acc; + }, []); + + return results.reverse(); + }, + + query: (query) => { + const [searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms] = Search._parseQuery(query); + const results = Search._performSearch(searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms); + + // for debugging + //Search.lastresults = results.slice(); // a copy + // console.info("search results:", Search.lastresults); + + // print the results + _displayNextItem(results, results.length, searchTerms, highlightTerms); + }, + + /** + * search for object names + */ + performObjectSearch: (object, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const objects = Search._index.objects; + const objNames = Search._index.objnames; + const titles = Search._index.titles; + + const results = []; + + const objectSearchCallback = (prefix, match) => { + const name = match[4] + const fullname = (prefix ? prefix + "." : "") + name; + const fullnameLower = fullname.toLowerCase(); + if (fullnameLower.indexOf(object) < 0) return; + + let score = 0; + const parts = fullnameLower.split("."); + + // check for different match types: exact matches of full name or + // "last name" (i.e. 
last dotted part) + if (fullnameLower === object || parts.slice(-1)[0] === object) + score += Scorer.objNameMatch; + else if (parts.slice(-1)[0].indexOf(object) > -1) + score += Scorer.objPartialMatch; // matches in last name + + const objName = objNames[match[1]][2]; + const title = titles[match[0]]; + + // If more than one term searched for, we require other words to be + // found in the name/title/description + const otherTerms = new Set(objectTerms); + otherTerms.delete(object); + if (otherTerms.size > 0) { + const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); + if ( + [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) + ) + return; + } + + let anchor = match[3]; + if (anchor === "") anchor = fullname; + else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; + + const descr = objName + _(", in ") + title; + + // add custom score for some objects according to scorer + if (Scorer.objPrio.hasOwnProperty(match[2])) + score += Scorer.objPrio[match[2]]; + else score += Scorer.objPrioDefault; + + results.push([ + docNames[match[0]], + fullname, + "#" + anchor, + descr, + score, + filenames[match[0]], + SearchResultKind.object, + ]); + }; + Object.keys(objects).forEach((prefix) => + objects[prefix].forEach((array) => + objectSearchCallback(prefix, array) + ) + ); + return results; + }, + + /** + * search for full-text terms in the index + */ + performTermsSearch: (searchTerms, excludedTerms) => { + // prepare search + const terms = Search._index.terms; + const titleTerms = Search._index.titleterms; + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + + const scoreMap = new Map(); + const fileMap = new Map(); + + // perform the search on the required terms + searchTerms.forEach((word) => { + const files = []; + // find documents, if any, containing the query word in their text/title term indices + // use Object.hasOwnProperty to avoid 
mismatching against prototype properties + const arr = [ + { files: terms.hasOwnProperty(word) ? terms[word] : undefined, score: Scorer.term }, + { files: titleTerms.hasOwnProperty(word) ? titleTerms[word] : undefined, score: Scorer.title }, + ]; + // add support for partial matches + if (word.length > 2) { + const escapedWord = _escapeRegExp(word); + if (!terms.hasOwnProperty(word)) { + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord)) + arr.push({ files: terms[term], score: Scorer.partialTerm }); + }); + } + if (!titleTerms.hasOwnProperty(word)) { + Object.keys(titleTerms).forEach((term) => { + if (term.match(escapedWord)) + arr.push({ files: titleTerms[term], score: Scorer.partialTitle }); + }); + } + } + + // no match but word was a required one + if (arr.every((record) => record.files === undefined)) return; + + // found search word in contents + arr.forEach((record) => { + if (record.files === undefined) return; + + let recordFiles = record.files; + if (recordFiles.length === undefined) recordFiles = [recordFiles]; + files.push(...recordFiles); + + // set score for the word in each file + recordFiles.forEach((file) => { + if (!scoreMap.has(file)) scoreMap.set(file, new Map()); + const fileScores = scoreMap.get(file); + fileScores.set(word, record.score); + }); + }); + + // create the mapping + files.forEach((file) => { + if (!fileMap.has(file)) fileMap.set(file, [word]); + else if (fileMap.get(file).indexOf(word) === -1) fileMap.get(file).push(word); + }); + }); + + // now check if the files don't contain excluded terms + const results = []; + for (const [file, wordList] of fileMap) { + // check if all requirements are matched + + // as search terms with length < 3 are discarded + const filteredTermCount = [...searchTerms].filter( + (term) => term.length > 2 + ).length; + if ( + wordList.length !== searchTerms.size && + wordList.length !== filteredTermCount + ) + continue; + + // ensure that none of the excluded terms is in the search 
result + if ( + [...excludedTerms].some( + (term) => + terms[term] === file || + titleTerms[term] === file || + (terms[term] || []).includes(file) || + (titleTerms[term] || []).includes(file) + ) + ) + break; + + // select one (max) score for the file. + const score = Math.max(...wordList.map((w) => scoreMap.get(file).get(w))); + // add result to the result list + results.push([ + docNames[file], + titles[file], + "", + null, + score, + filenames[file], + SearchResultKind.text, + ]); + } + return results; + }, + + /** + * helper function to return a node containing the + * search summary for a given text. keywords is a list + * of stemmed words. + */ + makeSearchSummary: (htmlText, keywords, anchor) => { + const text = Search.htmlToText(htmlText, anchor); + if (text === "") return null; + + const textLower = text.toLowerCase(); + const actualStartPosition = [...keywords] + .map((k) => textLower.indexOf(k.toLowerCase())) + .filter((i) => i > -1) + .slice(-1)[0]; + const startWithContext = Math.max(actualStartPosition - 120, 0); + + const top = startWithContext === 0 ? "" : "..."; + const tail = startWithContext + 240 < text.length ? "..." : ""; + + let summary = document.createElement("p"); + summary.classList.add("context"); + summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; + + return summary; + }, +}; + +_ready(Search.init); diff --git a/docs/_static/skeleton.css b/docs/_static/skeleton.css new file mode 100644 index 00000000..467c878c --- /dev/null +++ b/docs/_static/skeleton.css @@ -0,0 +1,296 @@ +/* Some sane resets. */ +html { + height: 100%; +} + +body { + margin: 0; + min-height: 100%; +} + +/* All the flexbox magic! 
*/ +body, +.sb-announcement, +.sb-content, +.sb-main, +.sb-container, +.sb-container__inner, +.sb-article-container, +.sb-footer-content, +.sb-header, +.sb-header-secondary, +.sb-footer { + display: flex; +} + +/* These order things vertically */ +body, +.sb-main, +.sb-article-container { + flex-direction: column; +} + +/* Put elements in the center */ +.sb-header, +.sb-header-secondary, +.sb-container, +.sb-content, +.sb-footer, +.sb-footer-content { + justify-content: center; +} +/* Put elements at the ends */ +.sb-article-container { + justify-content: space-between; +} + +/* These elements grow. */ +.sb-main, +.sb-content, +.sb-container, +article { + flex-grow: 1; +} + +/* Because padding making this wider is not fun */ +article { + box-sizing: border-box; +} + +/* The announcements element should never be wider than the page. */ +.sb-announcement { + max-width: 100%; +} + +.sb-sidebar-primary, +.sb-sidebar-secondary { + flex-shrink: 0; + width: 17rem; +} + +.sb-announcement__inner { + justify-content: center; + + box-sizing: border-box; + height: 3rem; + + overflow-x: auto; + white-space: nowrap; +} + +/* Sidebars, with checkbox-based toggle */ +.sb-sidebar-primary, +.sb-sidebar-secondary { + position: fixed; + height: 100%; + top: 0; +} + +.sb-sidebar-primary { + left: -17rem; + transition: left 250ms ease-in-out; +} +.sb-sidebar-secondary { + right: -17rem; + transition: right 250ms ease-in-out; +} + +.sb-sidebar-toggle { + display: none; +} +.sb-sidebar-overlay { + position: fixed; + top: 0; + width: 0; + height: 0; + + transition: width 0ms ease 250ms, height 0ms ease 250ms, opacity 250ms ease; + + opacity: 0; + background-color: rgba(0, 0, 0, 0.54); +} + +#sb-sidebar-toggle--primary:checked + ~ .sb-sidebar-overlay[for="sb-sidebar-toggle--primary"], +#sb-sidebar-toggle--secondary:checked + ~ .sb-sidebar-overlay[for="sb-sidebar-toggle--secondary"] { + width: 100%; + height: 100%; + opacity: 1; + transition: width 0ms ease, height 0ms ease, opacity 250ms 
ease; +} + +#sb-sidebar-toggle--primary:checked ~ .sb-container .sb-sidebar-primary { + left: 0; +} +#sb-sidebar-toggle--secondary:checked ~ .sb-container .sb-sidebar-secondary { + right: 0; +} + +/* Full-width mode */ +.drop-secondary-sidebar-for-full-width-content + .hide-when-secondary-sidebar-shown { + display: none !important; +} +.drop-secondary-sidebar-for-full-width-content .sb-sidebar-secondary { + display: none !important; +} + +/* Mobile views */ +.sb-page-width { + width: 100%; +} + +.sb-article-container, +.sb-footer-content__inner, +.drop-secondary-sidebar-for-full-width-content .sb-article, +.drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 100vw; +} + +.sb-article, +.match-content-width { + padding: 0 1rem; + box-sizing: border-box; +} + +@media (min-width: 32rem) { + .sb-article, + .match-content-width { + padding: 0 2rem; + } +} + +/* Tablet views */ +@media (min-width: 42rem) { + .sb-article-container { + width: auto; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 42rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} +@media (min-width: 46rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 46rem; + } + .sb-article, + .match-content-width { + width: 46rem; + } +} +@media (min-width: 50rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 50rem; + } + .sb-article, + .match-content-width { + width: 50rem; + } +} + +/* Tablet views */ +@media (min-width: 59rem) { + .sb-sidebar-secondary { + position: static; + } + .hide-when-secondary-sidebar-shown { + display: none !important; + } + .sb-footer-content__inner, + 
.drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 59rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} +@media (min-width: 63rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 63rem; + } + .sb-article, + .match-content-width { + width: 46rem; + } +} +@media (min-width: 67rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } + .sb-article, + .match-content-width { + width: 50rem; + } +} + +/* Desktop views */ +@media (min-width: 76rem) { + .sb-sidebar-primary { + position: static; + } + .hide-when-primary-sidebar-shown { + display: none !important; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 59rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} + +/* Full desktop views */ +@media (min-width: 80rem) { + .sb-article, + .match-content-width { + width: 46rem; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 63rem; + } +} + +@media (min-width: 84rem) { + .sb-article, + .match-content-width { + width: 50rem; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } +} + +@media (min-width: 88rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } + .sb-page-width { + 
width: 88rem; + } +} diff --git a/docs/_static/sphinx-design.min.css b/docs/_static/sphinx-design.min.css new file mode 100644 index 00000000..860c36da --- /dev/null +++ b/docs/_static/sphinx-design.min.css @@ -0,0 +1 @@ +.sd-bg-primary{background-color:var(--sd-color-primary) !important}.sd-bg-text-primary{color:var(--sd-color-primary-text) !important}button.sd-bg-primary:focus,button.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}a.sd-bg-primary:focus,a.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}.sd-bg-secondary{background-color:var(--sd-color-secondary) !important}.sd-bg-text-secondary{color:var(--sd-color-secondary-text) !important}button.sd-bg-secondary:focus,button.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}a.sd-bg-secondary:focus,a.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}.sd-bg-success{background-color:var(--sd-color-success) !important}.sd-bg-text-success{color:var(--sd-color-success-text) !important}button.sd-bg-success:focus,button.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}a.sd-bg-success:focus,a.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}.sd-bg-info{background-color:var(--sd-color-info) !important}.sd-bg-text-info{color:var(--sd-color-info-text) !important}button.sd-bg-info:focus,button.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}a.sd-bg-info:focus,a.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}.sd-bg-warning{background-color:var(--sd-color-warning) !important}.sd-bg-text-warning{color:var(--sd-color-warning-text) !important}button.sd-bg-warning:focus,button.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}a.sd-bg-warning:focus,a.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) 
!important}.sd-bg-danger{background-color:var(--sd-color-danger) !important}.sd-bg-text-danger{color:var(--sd-color-danger-text) !important}button.sd-bg-danger:focus,button.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}a.sd-bg-danger:focus,a.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}.sd-bg-light{background-color:var(--sd-color-light) !important}.sd-bg-text-light{color:var(--sd-color-light-text) !important}button.sd-bg-light:focus,button.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}a.sd-bg-light:focus,a.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}.sd-bg-muted{background-color:var(--sd-color-muted) !important}.sd-bg-text-muted{color:var(--sd-color-muted-text) !important}button.sd-bg-muted:focus,button.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}a.sd-bg-muted:focus,a.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}.sd-bg-dark{background-color:var(--sd-color-dark) !important}.sd-bg-text-dark{color:var(--sd-color-dark-text) !important}button.sd-bg-dark:focus,button.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}a.sd-bg-dark:focus,a.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}.sd-bg-black{background-color:var(--sd-color-black) !important}.sd-bg-text-black{color:var(--sd-color-black-text) !important}button.sd-bg-black:focus,button.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}a.sd-bg-black:focus,a.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}.sd-bg-white{background-color:var(--sd-color-white) !important}.sd-bg-text-white{color:var(--sd-color-white-text) !important}button.sd-bg-white:focus,button.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) 
!important}a.sd-bg-white:focus,a.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}.sd-text-primary,.sd-text-primary>p{color:var(--sd-color-primary) !important}a.sd-text-primary:focus,a.sd-text-primary:hover{color:var(--sd-color-primary-highlight) !important}.sd-text-secondary,.sd-text-secondary>p{color:var(--sd-color-secondary) !important}a.sd-text-secondary:focus,a.sd-text-secondary:hover{color:var(--sd-color-secondary-highlight) !important}.sd-text-success,.sd-text-success>p{color:var(--sd-color-success) !important}a.sd-text-success:focus,a.sd-text-success:hover{color:var(--sd-color-success-highlight) !important}.sd-text-info,.sd-text-info>p{color:var(--sd-color-info) !important}a.sd-text-info:focus,a.sd-text-info:hover{color:var(--sd-color-info-highlight) !important}.sd-text-warning,.sd-text-warning>p{color:var(--sd-color-warning) !important}a.sd-text-warning:focus,a.sd-text-warning:hover{color:var(--sd-color-warning-highlight) !important}.sd-text-danger,.sd-text-danger>p{color:var(--sd-color-danger) !important}a.sd-text-danger:focus,a.sd-text-danger:hover{color:var(--sd-color-danger-highlight) !important}.sd-text-light,.sd-text-light>p{color:var(--sd-color-light) !important}a.sd-text-light:focus,a.sd-text-light:hover{color:var(--sd-color-light-highlight) !important}.sd-text-muted,.sd-text-muted>p{color:var(--sd-color-muted) !important}a.sd-text-muted:focus,a.sd-text-muted:hover{color:var(--sd-color-muted-highlight) !important}.sd-text-dark,.sd-text-dark>p{color:var(--sd-color-dark) !important}a.sd-text-dark:focus,a.sd-text-dark:hover{color:var(--sd-color-dark-highlight) !important}.sd-text-black,.sd-text-black>p{color:var(--sd-color-black) !important}a.sd-text-black:focus,a.sd-text-black:hover{color:var(--sd-color-black-highlight) !important}.sd-text-white,.sd-text-white>p{color:var(--sd-color-white) !important}a.sd-text-white:focus,a.sd-text-white:hover{color:var(--sd-color-white-highlight) 
!important}.sd-outline-primary{border-color:var(--sd-color-primary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-primary:focus,a.sd-outline-primary:hover{border-color:var(--sd-color-primary-highlight) !important}.sd-outline-secondary{border-color:var(--sd-color-secondary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-secondary:focus,a.sd-outline-secondary:hover{border-color:var(--sd-color-secondary-highlight) !important}.sd-outline-success{border-color:var(--sd-color-success) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-success:focus,a.sd-outline-success:hover{border-color:var(--sd-color-success-highlight) !important}.sd-outline-info{border-color:var(--sd-color-info) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-info:focus,a.sd-outline-info:hover{border-color:var(--sd-color-info-highlight) !important}.sd-outline-warning{border-color:var(--sd-color-warning) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-warning:focus,a.sd-outline-warning:hover{border-color:var(--sd-color-warning-highlight) !important}.sd-outline-danger{border-color:var(--sd-color-danger) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-danger:focus,a.sd-outline-danger:hover{border-color:var(--sd-color-danger-highlight) !important}.sd-outline-light{border-color:var(--sd-color-light) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-light:focus,a.sd-outline-light:hover{border-color:var(--sd-color-light-highlight) !important}.sd-outline-muted{border-color:var(--sd-color-muted) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-muted:focus,a.sd-outline-muted:hover{border-color:var(--sd-color-muted-highlight) !important}.sd-outline-dark{border-color:var(--sd-color-dark) !important;border-style:solid !important;border-width:1px 
!important}a.sd-outline-dark:focus,a.sd-outline-dark:hover{border-color:var(--sd-color-dark-highlight) !important}.sd-outline-black{border-color:var(--sd-color-black) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-black:focus,a.sd-outline-black:hover{border-color:var(--sd-color-black-highlight) !important}.sd-outline-white{border-color:var(--sd-color-white) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-white:focus,a.sd-outline-white:hover{border-color:var(--sd-color-white-highlight) !important}.sd-bg-transparent{background-color:transparent !important}.sd-outline-transparent{border-color:transparent !important}.sd-text-transparent{color:transparent !important}.sd-p-0{padding:0 !important}.sd-pt-0,.sd-py-0{padding-top:0 !important}.sd-pr-0,.sd-px-0{padding-right:0 !important}.sd-pb-0,.sd-py-0{padding-bottom:0 !important}.sd-pl-0,.sd-px-0{padding-left:0 !important}.sd-p-1{padding:.25rem !important}.sd-pt-1,.sd-py-1{padding-top:.25rem !important}.sd-pr-1,.sd-px-1{padding-right:.25rem !important}.sd-pb-1,.sd-py-1{padding-bottom:.25rem !important}.sd-pl-1,.sd-px-1{padding-left:.25rem !important}.sd-p-2{padding:.5rem !important}.sd-pt-2,.sd-py-2{padding-top:.5rem !important}.sd-pr-2,.sd-px-2{padding-right:.5rem !important}.sd-pb-2,.sd-py-2{padding-bottom:.5rem !important}.sd-pl-2,.sd-px-2{padding-left:.5rem !important}.sd-p-3{padding:1rem !important}.sd-pt-3,.sd-py-3{padding-top:1rem !important}.sd-pr-3,.sd-px-3{padding-right:1rem !important}.sd-pb-3,.sd-py-3{padding-bottom:1rem !important}.sd-pl-3,.sd-px-3{padding-left:1rem !important}.sd-p-4{padding:1.5rem !important}.sd-pt-4,.sd-py-4{padding-top:1.5rem !important}.sd-pr-4,.sd-px-4{padding-right:1.5rem !important}.sd-pb-4,.sd-py-4{padding-bottom:1.5rem !important}.sd-pl-4,.sd-px-4{padding-left:1.5rem !important}.sd-p-5{padding:3rem !important}.sd-pt-5,.sd-py-5{padding-top:3rem !important}.sd-pr-5,.sd-px-5{padding-right:3rem 
!important}.sd-pb-5,.sd-py-5{padding-bottom:3rem !important}.sd-pl-5,.sd-px-5{padding-left:3rem !important}.sd-m-auto{margin:auto !important}.sd-mt-auto,.sd-my-auto{margin-top:auto !important}.sd-mr-auto,.sd-mx-auto{margin-right:auto !important}.sd-mb-auto,.sd-my-auto{margin-bottom:auto !important}.sd-ml-auto,.sd-mx-auto{margin-left:auto !important}.sd-m-0{margin:0 !important}.sd-mt-0,.sd-my-0{margin-top:0 !important}.sd-mr-0,.sd-mx-0{margin-right:0 !important}.sd-mb-0,.sd-my-0{margin-bottom:0 !important}.sd-ml-0,.sd-mx-0{margin-left:0 !important}.sd-m-1{margin:.25rem !important}.sd-mt-1,.sd-my-1{margin-top:.25rem !important}.sd-mr-1,.sd-mx-1{margin-right:.25rem !important}.sd-mb-1,.sd-my-1{margin-bottom:.25rem !important}.sd-ml-1,.sd-mx-1{margin-left:.25rem !important}.sd-m-2{margin:.5rem !important}.sd-mt-2,.sd-my-2{margin-top:.5rem !important}.sd-mr-2,.sd-mx-2{margin-right:.5rem !important}.sd-mb-2,.sd-my-2{margin-bottom:.5rem !important}.sd-ml-2,.sd-mx-2{margin-left:.5rem !important}.sd-m-3{margin:1rem !important}.sd-mt-3,.sd-my-3{margin-top:1rem !important}.sd-mr-3,.sd-mx-3{margin-right:1rem !important}.sd-mb-3,.sd-my-3{margin-bottom:1rem !important}.sd-ml-3,.sd-mx-3{margin-left:1rem !important}.sd-m-4{margin:1.5rem !important}.sd-mt-4,.sd-my-4{margin-top:1.5rem !important}.sd-mr-4,.sd-mx-4{margin-right:1.5rem !important}.sd-mb-4,.sd-my-4{margin-bottom:1.5rem !important}.sd-ml-4,.sd-mx-4{margin-left:1.5rem !important}.sd-m-5{margin:3rem !important}.sd-mt-5,.sd-my-5{margin-top:3rem !important}.sd-mr-5,.sd-mx-5{margin-right:3rem !important}.sd-mb-5,.sd-my-5{margin-bottom:3rem !important}.sd-ml-5,.sd-mx-5{margin-left:3rem !important}.sd-w-25{width:25% !important}.sd-w-50{width:50% !important}.sd-w-75{width:75% !important}.sd-w-100{width:100% !important}.sd-w-auto{width:auto !important}.sd-h-25{height:25% !important}.sd-h-50{height:50% !important}.sd-h-75{height:75% !important}.sd-h-100{height:100% !important}.sd-h-auto{height:auto 
!important}.sd-d-none{display:none !important}.sd-d-inline{display:inline !important}.sd-d-inline-block{display:inline-block !important}.sd-d-block{display:block !important}.sd-d-grid{display:grid !important}.sd-d-flex-row{display:-ms-flexbox !important;display:flex !important;flex-direction:row !important}.sd-d-flex-column{display:-ms-flexbox !important;display:flex !important;flex-direction:column !important}.sd-d-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}@media(min-width: 576px){.sd-d-sm-none{display:none !important}.sd-d-sm-inline{display:inline !important}.sd-d-sm-inline-block{display:inline-block !important}.sd-d-sm-block{display:block !important}.sd-d-sm-grid{display:grid !important}.sd-d-sm-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-sm-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 768px){.sd-d-md-none{display:none !important}.sd-d-md-inline{display:inline !important}.sd-d-md-inline-block{display:inline-block !important}.sd-d-md-block{display:block !important}.sd-d-md-grid{display:grid !important}.sd-d-md-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-md-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 992px){.sd-d-lg-none{display:none !important}.sd-d-lg-inline{display:inline !important}.sd-d-lg-inline-block{display:inline-block !important}.sd-d-lg-block{display:block !important}.sd-d-lg-grid{display:grid !important}.sd-d-lg-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-lg-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 1200px){.sd-d-xl-none{display:none !important}.sd-d-xl-inline{display:inline !important}.sd-d-xl-inline-block{display:inline-block !important}.sd-d-xl-block{display:block !important}.sd-d-xl-grid{display:grid !important}.sd-d-xl-flex{display:-ms-flexbox !important;display:flex 
!important}.sd-d-xl-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}.sd-align-major-start{justify-content:flex-start !important}.sd-align-major-end{justify-content:flex-end !important}.sd-align-major-center{justify-content:center !important}.sd-align-major-justify{justify-content:space-between !important}.sd-align-major-spaced{justify-content:space-evenly !important}.sd-align-minor-start{align-items:flex-start !important}.sd-align-minor-end{align-items:flex-end !important}.sd-align-minor-center{align-items:center !important}.sd-align-minor-stretch{align-items:stretch !important}.sd-text-justify{text-align:justify !important}.sd-text-left{text-align:left !important}.sd-text-right{text-align:right !important}.sd-text-center{text-align:center !important}.sd-font-weight-light{font-weight:300 !important}.sd-font-weight-lighter{font-weight:lighter !important}.sd-font-weight-normal{font-weight:400 !important}.sd-font-weight-bold{font-weight:700 !important}.sd-font-weight-bolder{font-weight:bolder !important}.sd-font-italic{font-style:italic !important}.sd-text-decoration-none{text-decoration:none !important}.sd-text-lowercase{text-transform:lowercase !important}.sd-text-uppercase{text-transform:uppercase !important}.sd-text-capitalize{text-transform:capitalize !important}.sd-text-wrap{white-space:normal !important}.sd-text-nowrap{white-space:nowrap !important}.sd-text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.sd-fs-1,.sd-fs-1>p{font-size:calc(1.375rem + 1.5vw) !important;line-height:unset !important}.sd-fs-2,.sd-fs-2>p{font-size:calc(1.325rem + 0.9vw) !important;line-height:unset !important}.sd-fs-3,.sd-fs-3>p{font-size:calc(1.3rem + 0.6vw) !important;line-height:unset !important}.sd-fs-4,.sd-fs-4>p{font-size:calc(1.275rem + 0.3vw) !important;line-height:unset !important}.sd-fs-5,.sd-fs-5>p{font-size:1.25rem !important;line-height:unset !important}.sd-fs-6,.sd-fs-6>p{font-size:1rem !important;line-height:unset 
!important}.sd-border-0{border:0 solid !important}.sd-border-top-0{border-top:0 solid !important}.sd-border-bottom-0{border-bottom:0 solid !important}.sd-border-right-0{border-right:0 solid !important}.sd-border-left-0{border-left:0 solid !important}.sd-border-1{border:1px solid !important}.sd-border-top-1{border-top:1px solid !important}.sd-border-bottom-1{border-bottom:1px solid !important}.sd-border-right-1{border-right:1px solid !important}.sd-border-left-1{border-left:1px solid !important}.sd-border-2{border:2px solid !important}.sd-border-top-2{border-top:2px solid !important}.sd-border-bottom-2{border-bottom:2px solid !important}.sd-border-right-2{border-right:2px solid !important}.sd-border-left-2{border-left:2px solid !important}.sd-border-3{border:3px solid !important}.sd-border-top-3{border-top:3px solid !important}.sd-border-bottom-3{border-bottom:3px solid !important}.sd-border-right-3{border-right:3px solid !important}.sd-border-left-3{border-left:3px solid !important}.sd-border-4{border:4px solid !important}.sd-border-top-4{border-top:4px solid !important}.sd-border-bottom-4{border-bottom:4px solid !important}.sd-border-right-4{border-right:4px solid !important}.sd-border-left-4{border-left:4px solid !important}.sd-border-5{border:5px solid !important}.sd-border-top-5{border-top:5px solid !important}.sd-border-bottom-5{border-bottom:5px solid !important}.sd-border-right-5{border-right:5px solid !important}.sd-border-left-5{border-left:5px solid !important}.sd-rounded-0{border-radius:0 !important}.sd-rounded-1{border-radius:.2rem !important}.sd-rounded-2{border-radius:.3rem !important}.sd-rounded-3{border-radius:.5rem !important}.sd-rounded-pill{border-radius:50rem !important}.sd-rounded-circle{border-radius:50% !important}.shadow-none{box-shadow:none !important}.sd-shadow-sm{box-shadow:0 .125rem .25rem var(--sd-color-shadow) !important}.sd-shadow-md{box-shadow:0 .5rem 1rem var(--sd-color-shadow) !important}.sd-shadow-lg{box-shadow:0 1rem 3rem 
var(--sd-color-shadow) !important}@keyframes sd-slide-from-left{0%{transform:translateX(-100%)}100%{transform:translateX(0)}}@keyframes sd-slide-from-right{0%{transform:translateX(200%)}100%{transform:translateX(0)}}@keyframes sd-grow100{0%{transform:scale(0);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50{0%{transform:scale(0.5);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50-rot20{0%{transform:scale(0.5) rotateZ(-20deg);opacity:.5}75%{transform:scale(1) rotateZ(5deg);opacity:1}95%{transform:scale(1) rotateZ(-1deg);opacity:1}100%{transform:scale(1) rotateZ(0);opacity:1}}.sd-animate-slide-from-left{animation:1s ease-out 0s 1 normal none running sd-slide-from-left}.sd-animate-slide-from-right{animation:1s ease-out 0s 1 normal none running sd-slide-from-right}.sd-animate-grow100{animation:1s ease-out 0s 1 normal none running sd-grow100}.sd-animate-grow50{animation:1s ease-out 0s 1 normal none running sd-grow50}.sd-animate-grow50-rot20{animation:1s ease-out 0s 1 normal none running sd-grow50-rot20}.sd-badge{display:inline-block;padding:.35em .65em;font-size:.75em;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.sd-badge:empty{display:none}a.sd-badge{text-decoration:none}.sd-btn .sd-badge{position:relative;top:-1px}.sd-btn{background-color:transparent;border:1px solid transparent;border-radius:.25rem;cursor:pointer;display:inline-block;font-weight:400;font-size:1rem;line-height:1.5;padding:.375rem .75rem;text-align:center;text-decoration:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;vertical-align:middle;user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none}.sd-btn:hover{text-decoration:none}@media(prefers-reduced-motion: 
reduce){.sd-btn{transition:none}}.sd-btn-primary,.sd-btn-outline-primary:hover,.sd-btn-outline-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-primary:hover,.sd-btn-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary-highlight) !important;border-color:var(--sd-color-primary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-primary{color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary,.sd-btn-outline-secondary:hover,.sd-btn-outline-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary:hover,.sd-btn-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary-highlight) !important;border-color:var(--sd-color-secondary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-secondary{color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success,.sd-btn-outline-success:hover,.sd-btn-outline-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success:hover,.sd-btn-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success-highlight) !important;border-color:var(--sd-color-success-highlight) 
!important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-success{color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info,.sd-btn-outline-info:hover,.sd-btn-outline-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info:hover,.sd-btn-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info-highlight) !important;border-color:var(--sd-color-info-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-info{color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning,.sd-btn-outline-warning:hover,.sd-btn-outline-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning:hover,.sd-btn-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning-highlight) !important;border-color:var(--sd-color-warning-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-warning{color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger,.sd-btn-outline-danger:hover,.sd-btn-outline-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger:hover,.sd-btn-danger:focus{color:var(--sd-color-danger-text) 
!important;background-color:var(--sd-color-danger-highlight) !important;border-color:var(--sd-color-danger-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-danger{color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light,.sd-btn-outline-light:hover,.sd-btn-outline-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light:hover,.sd-btn-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light-highlight) !important;border-color:var(--sd-color-light-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-light{color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted,.sd-btn-outline-muted:hover,.sd-btn-outline-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted:hover,.sd-btn-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted-highlight) !important;border-color:var(--sd-color-muted-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-muted{color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark,.sd-btn-outline-dark:hover,.sd-btn-outline-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid 
!important}.sd-btn-dark:hover,.sd-btn-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark-highlight) !important;border-color:var(--sd-color-dark-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-dark{color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black,.sd-btn-outline-black:hover,.sd-btn-outline-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black:hover,.sd-btn-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black-highlight) !important;border-color:var(--sd-color-black-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-black{color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white,.sd-btn-outline-white:hover,.sd-btn-outline-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white:hover,.sd-btn-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white-highlight) !important;border-color:var(--sd-color-white-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-white{color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid 
!important}.sd-stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.sd-hide-link-text{font-size:0}.sd-octicon,.sd-material-icon{display:inline-block;fill:currentColor;vertical-align:middle}.sd-avatar-xs{border-radius:50%;object-fit:cover;object-position:center;width:1rem;height:1rem}.sd-avatar-sm{border-radius:50%;object-fit:cover;object-position:center;width:3rem;height:3rem}.sd-avatar-md{border-radius:50%;object-fit:cover;object-position:center;width:5rem;height:5rem}.sd-avatar-lg{border-radius:50%;object-fit:cover;object-position:center;width:7rem;height:7rem}.sd-avatar-xl{border-radius:50%;object-fit:cover;object-position:center;width:10rem;height:10rem}.sd-avatar-inherit{border-radius:50%;object-fit:cover;object-position:center;width:inherit;height:inherit}.sd-avatar-initial{border-radius:50%;object-fit:cover;object-position:center;width:initial;height:initial}.sd-card{background-clip:border-box;background-color:var(--sd-color-card-background);border:1px solid var(--sd-color-card-border);border-radius:.25rem;color:var(--sd-color-card-text);display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;min-width:0;position:relative;word-wrap:break-word}.sd-card>hr{margin-left:0;margin-right:0}.sd-card-hover:hover{border-color:var(--sd-color-card-border-hover);transform:scale(1.01)}.sd-card-body{-ms-flex:1 1 auto;flex:1 1 auto;padding:1rem 1rem}.sd-card-title{margin-bottom:.5rem}.sd-card-subtitle{margin-top:-0.25rem;margin-bottom:0}.sd-card-text:last-child{margin-bottom:0}.sd-card-link:hover{text-decoration:none}.sd-card-link+.card-link{margin-left:1rem}.sd-card-header{padding:.5rem 1rem;margin-bottom:0;background-color:var(--sd-color-card-header);border-bottom:1px solid var(--sd-color-card-border)}.sd-card-header:first-child{border-radius:calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0}.sd-card-footer{padding:.5rem 1rem;background-color:var(--sd-color-card-footer);border-top:1px solid 
var(--sd-color-card-border)}.sd-card-footer:last-child{border-radius:0 0 calc(0.25rem - 1px) calc(0.25rem - 1px)}.sd-card-header-tabs{margin-right:-0.5rem;margin-bottom:-0.5rem;margin-left:-0.5rem;border-bottom:0}.sd-card-header-pills{margin-right:-0.5rem;margin-left:-0.5rem}.sd-card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1rem;border-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom,.sd-card-img-top{width:100%}.sd-card-img,.sd-card-img-top{border-top-left-radius:calc(0.25rem - 1px);border-top-right-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom{border-bottom-left-radius:calc(0.25rem - 1px);border-bottom-right-radius:calc(0.25rem - 1px)}.sd-cards-carousel{width:100%;display:flex;flex-wrap:nowrap;-ms-flex-direction:row;flex-direction:row;overflow-x:hidden;scroll-snap-type:x mandatory}.sd-cards-carousel.sd-show-scrollbar{overflow-x:auto}.sd-cards-carousel:hover,.sd-cards-carousel:focus{overflow-x:auto}.sd-cards-carousel>.sd-card{flex-shrink:0;scroll-snap-align:start}.sd-cards-carousel>.sd-card:not(:last-child){margin-right:3px}.sd-card-cols-1>.sd-card{width:90%}.sd-card-cols-2>.sd-card{width:45%}.sd-card-cols-3>.sd-card{width:30%}.sd-card-cols-4>.sd-card{width:22.5%}.sd-card-cols-5>.sd-card{width:18%}.sd-card-cols-6>.sd-card{width:15%}.sd-card-cols-7>.sd-card{width:12.8571428571%}.sd-card-cols-8>.sd-card{width:11.25%}.sd-card-cols-9>.sd-card{width:10%}.sd-card-cols-10>.sd-card{width:9%}.sd-card-cols-11>.sd-card{width:8.1818181818%}.sd-card-cols-12>.sd-card{width:7.5%}.sd-container,.sd-container-fluid,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container-xl{margin-left:auto;margin-right:auto;padding-left:var(--sd-gutter-x, 0.75rem);padding-right:var(--sd-gutter-x, 0.75rem);width:100%}@media(min-width: 576px){.sd-container-sm,.sd-container{max-width:540px}}@media(min-width: 768px){.sd-container-md,.sd-container-sm,.sd-container{max-width:720px}}@media(min-width: 
992px){.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:960px}}@media(min-width: 1200px){.sd-container-xl,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:1140px}}.sd-row{--sd-gutter-x: 1.5rem;--sd-gutter-y: 0;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-top:calc(var(--sd-gutter-y) * -1);margin-right:calc(var(--sd-gutter-x) * -0.5);margin-left:calc(var(--sd-gutter-x) * -0.5)}.sd-row>*{box-sizing:border-box;flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--sd-gutter-x) * 0.5);padding-left:calc(var(--sd-gutter-x) * 0.5);margin-top:var(--sd-gutter-y)}.sd-col{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-auto>*{flex:0 0 auto;width:auto}.sd-row-cols-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}@media(min-width: 576px){.sd-col-sm{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-sm-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-sm-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-sm-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-sm-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-sm-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-sm-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-sm-6>*{flex:0 0 
auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-sm-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-sm-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-sm-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-sm-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-sm-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-sm-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 768px){.sd-col-md{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-md-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-md-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-md-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-md-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-md-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-md-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-md-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-md-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-md-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-md-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-md-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-md-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-md-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 992px){.sd-col-lg{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-lg-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-lg-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-lg-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-lg-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-lg-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-lg-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-lg-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-lg-7>*{flex:0 0 auto;-ms-flex:0 0 
auto;width:14.2857142857%}.sd-row-cols-lg-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-lg-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-lg-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-lg-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-lg-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 1200px){.sd-col-xl{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-xl-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-xl-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-xl-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-xl-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-xl-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-xl-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-xl-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-xl-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-xl-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-xl-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-xl-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-xl-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-xl-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}.sd-col-auto{flex:0 0 auto;-ms-flex:0 0 auto;width:auto}.sd-col-1{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}.sd-col-2{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-col-3{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-col-4{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-col-5{flex:0 0 auto;-ms-flex:0 0 auto;width:41.6666666667%}.sd-col-6{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-col-7{flex:0 0 auto;-ms-flex:0 0 auto;width:58.3333333333%}.sd-col-8{flex:0 0 auto;-ms-flex:0 0 auto;width:66.6666666667%}.sd-col-9{flex:0 0 auto;-ms-flex:0 0 auto;width:75%}.sd-col-10{flex:0 0 auto;-ms-flex:0 0 auto;width:83.3333333333%}.sd-col-11{flex:0 0 
auto;-ms-flex:0 0 auto;width:91.6666666667%}.sd-col-12{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-g-0,.sd-gy-0{--sd-gutter-y: 0}.sd-g-0,.sd-gx-0{--sd-gutter-x: 0}.sd-g-1,.sd-gy-1{--sd-gutter-y: 0.25rem}.sd-g-1,.sd-gx-1{--sd-gutter-x: 0.25rem}.sd-g-2,.sd-gy-2{--sd-gutter-y: 0.5rem}.sd-g-2,.sd-gx-2{--sd-gutter-x: 0.5rem}.sd-g-3,.sd-gy-3{--sd-gutter-y: 1rem}.sd-g-3,.sd-gx-3{--sd-gutter-x: 1rem}.sd-g-4,.sd-gy-4{--sd-gutter-y: 1.5rem}.sd-g-4,.sd-gx-4{--sd-gutter-x: 1.5rem}.sd-g-5,.sd-gy-5{--sd-gutter-y: 3rem}.sd-g-5,.sd-gx-5{--sd-gutter-x: 3rem}@media(min-width: 576px){.sd-col-sm-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-sm-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-sm-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-sm-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-sm-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-sm-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-sm-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-sm-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-sm-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-sm-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-sm-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-sm-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-sm-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-sm-0,.sd-gy-sm-0{--sd-gutter-y: 0}.sd-g-sm-0,.sd-gx-sm-0{--sd-gutter-x: 0}.sd-g-sm-1,.sd-gy-sm-1{--sd-gutter-y: 0.25rem}.sd-g-sm-1,.sd-gx-sm-1{--sd-gutter-x: 0.25rem}.sd-g-sm-2,.sd-gy-sm-2{--sd-gutter-y: 0.5rem}.sd-g-sm-2,.sd-gx-sm-2{--sd-gutter-x: 0.5rem}.sd-g-sm-3,.sd-gy-sm-3{--sd-gutter-y: 1rem}.sd-g-sm-3,.sd-gx-sm-3{--sd-gutter-x: 1rem}.sd-g-sm-4,.sd-gy-sm-4{--sd-gutter-y: 1.5rem}.sd-g-sm-4,.sd-gx-sm-4{--sd-gutter-x: 1.5rem}.sd-g-sm-5,.sd-gy-sm-5{--sd-gutter-y: 3rem}.sd-g-sm-5,.sd-gx-sm-5{--sd-gutter-x: 3rem}}@media(min-width: 768px){.sd-col-md-auto{-ms-flex:0 0 auto;flex:0 0 
auto;width:auto}.sd-col-md-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-md-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-md-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-md-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-md-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-md-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-md-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-md-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-md-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-md-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-md-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-md-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-md-0,.sd-gy-md-0{--sd-gutter-y: 0}.sd-g-md-0,.sd-gx-md-0{--sd-gutter-x: 0}.sd-g-md-1,.sd-gy-md-1{--sd-gutter-y: 0.25rem}.sd-g-md-1,.sd-gx-md-1{--sd-gutter-x: 0.25rem}.sd-g-md-2,.sd-gy-md-2{--sd-gutter-y: 0.5rem}.sd-g-md-2,.sd-gx-md-2{--sd-gutter-x: 0.5rem}.sd-g-md-3,.sd-gy-md-3{--sd-gutter-y: 1rem}.sd-g-md-3,.sd-gx-md-3{--sd-gutter-x: 1rem}.sd-g-md-4,.sd-gy-md-4{--sd-gutter-y: 1.5rem}.sd-g-md-4,.sd-gx-md-4{--sd-gutter-x: 1.5rem}.sd-g-md-5,.sd-gy-md-5{--sd-gutter-y: 3rem}.sd-g-md-5,.sd-gx-md-5{--sd-gutter-x: 3rem}}@media(min-width: 992px){.sd-col-lg-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-lg-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-lg-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-lg-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-lg-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-lg-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-lg-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-lg-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-lg-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-lg-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-lg-10{-ms-flex:0 0 auto;flex:0 0 
auto;width:83.3333333333%}.sd-col-lg-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-lg-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-lg-0,.sd-gy-lg-0{--sd-gutter-y: 0}.sd-g-lg-0,.sd-gx-lg-0{--sd-gutter-x: 0}.sd-g-lg-1,.sd-gy-lg-1{--sd-gutter-y: 0.25rem}.sd-g-lg-1,.sd-gx-lg-1{--sd-gutter-x: 0.25rem}.sd-g-lg-2,.sd-gy-lg-2{--sd-gutter-y: 0.5rem}.sd-g-lg-2,.sd-gx-lg-2{--sd-gutter-x: 0.5rem}.sd-g-lg-3,.sd-gy-lg-3{--sd-gutter-y: 1rem}.sd-g-lg-3,.sd-gx-lg-3{--sd-gutter-x: 1rem}.sd-g-lg-4,.sd-gy-lg-4{--sd-gutter-y: 1.5rem}.sd-g-lg-4,.sd-gx-lg-4{--sd-gutter-x: 1.5rem}.sd-g-lg-5,.sd-gy-lg-5{--sd-gutter-y: 3rem}.sd-g-lg-5,.sd-gx-lg-5{--sd-gutter-x: 3rem}}@media(min-width: 1200px){.sd-col-xl-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-xl-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-xl-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-xl-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-xl-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-xl-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-xl-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-xl-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-xl-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-xl-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-xl-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-xl-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-xl-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-xl-0,.sd-gy-xl-0{--sd-gutter-y: 0}.sd-g-xl-0,.sd-gx-xl-0{--sd-gutter-x: 0}.sd-g-xl-1,.sd-gy-xl-1{--sd-gutter-y: 0.25rem}.sd-g-xl-1,.sd-gx-xl-1{--sd-gutter-x: 0.25rem}.sd-g-xl-2,.sd-gy-xl-2{--sd-gutter-y: 0.5rem}.sd-g-xl-2,.sd-gx-xl-2{--sd-gutter-x: 0.5rem}.sd-g-xl-3,.sd-gy-xl-3{--sd-gutter-y: 1rem}.sd-g-xl-3,.sd-gx-xl-3{--sd-gutter-x: 1rem}.sd-g-xl-4,.sd-gy-xl-4{--sd-gutter-y: 1.5rem}.sd-g-xl-4,.sd-gx-xl-4{--sd-gutter-x: 1.5rem}.sd-g-xl-5,.sd-gy-xl-5{--sd-gutter-y: 
3rem}.sd-g-xl-5,.sd-gx-xl-5{--sd-gutter-x: 3rem}}.sd-flex-row-reverse{flex-direction:row-reverse !important}details.sd-dropdown{position:relative;font-size:var(--sd-fontsize-dropdown)}details.sd-dropdown:hover{cursor:pointer}details.sd-dropdown .sd-summary-content{cursor:default}details.sd-dropdown summary.sd-summary-title{padding:.5em .6em .5em 1em;font-size:var(--sd-fontsize-dropdown-title);font-weight:var(--sd-fontweight-dropdown-title);user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none;list-style:none;display:inline-flex;justify-content:space-between}details.sd-dropdown summary.sd-summary-title::-webkit-details-marker{display:none}details.sd-dropdown summary.sd-summary-title:focus{outline:none}details.sd-dropdown summary.sd-summary-title .sd-summary-icon{margin-right:.6em;display:inline-flex;align-items:center}details.sd-dropdown summary.sd-summary-title .sd-summary-icon svg{opacity:.8}details.sd-dropdown summary.sd-summary-title .sd-summary-text{flex-grow:1;line-height:1.5;padding-right:.5rem}details.sd-dropdown summary.sd-summary-title .sd-summary-state-marker{pointer-events:none;display:inline-flex;align-items:center}details.sd-dropdown summary.sd-summary-title .sd-summary-state-marker svg{opacity:.6}details.sd-dropdown summary.sd-summary-title:hover .sd-summary-state-marker svg{opacity:1;transform:scale(1.1)}details.sd-dropdown[open] summary .sd-octicon.no-title{visibility:hidden}details.sd-dropdown .sd-summary-chevron-right{transition:.25s}details.sd-dropdown[open]>.sd-summary-title .sd-summary-chevron-right{transform:rotate(90deg)}details.sd-dropdown[open]>.sd-summary-title .sd-summary-chevron-down{transform:rotate(180deg)}details.sd-dropdown:not([open]).sd-card{border:none}details.sd-dropdown:not([open])>.sd-card-header{border:1px solid var(--sd-color-card-border);border-radius:.25rem}details.sd-dropdown.sd-fade-in[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out;-webkit-animation:sd-fade-in .5s 
ease-in-out;animation:sd-fade-in .5s ease-in-out}details.sd-dropdown.sd-fade-in-slide-down[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out}.sd-col>.sd-dropdown{width:100%}.sd-summary-content>.sd-tab-set:first-child{margin-top:0}@keyframes sd-fade-in{0%{opacity:0}100%{opacity:1}}@keyframes sd-slide-down{0%{transform:translate(0, -10px)}100%{transform:translate(0, 0)}}.sd-tab-set{border-radius:.125rem;display:flex;flex-wrap:wrap;margin:1em 0;position:relative}.sd-tab-set>input{opacity:0;position:absolute}.sd-tab-set>input:checked+label{border-color:var(--sd-color-tabs-underline-active);color:var(--sd-color-tabs-label-active)}.sd-tab-set>input:checked+label+.sd-tab-content{display:block}.sd-tab-set>input:not(:checked)+label:hover{color:var(--sd-color-tabs-label-hover);border-color:var(--sd-color-tabs-underline-hover)}.sd-tab-set>input:focus+label{outline-style:auto}.sd-tab-set>input:not(.focus-visible)+label{outline:none;-webkit-tap-highlight-color:transparent}.sd-tab-set>label{border-bottom:.125rem solid transparent;margin-bottom:0;color:var(--sd-color-tabs-label-inactive);border-color:var(--sd-color-tabs-underline-inactive);cursor:pointer;font-size:var(--sd-fontsize-tabs-label);font-weight:700;padding:1em 1.25em .5em;transition:color 250ms;width:auto;z-index:1}html .sd-tab-set>label:hover{color:var(--sd-color-tabs-label-active)}.sd-col>.sd-tab-set{width:100%}.sd-tab-content{box-shadow:0 -0.0625rem var(--sd-color-tabs-overline),0 .0625rem var(--sd-color-tabs-underline);display:none;order:99;padding-bottom:.75rem;padding-top:.75rem;width:100%}.sd-tab-content>:first-child{margin-top:0 !important}.sd-tab-content>:last-child{margin-bottom:0 !important}.sd-tab-content>.sd-tab-set{margin:0}.sd-sphinx-override,.sd-sphinx-override 
*{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box}.sd-sphinx-override p{margin-top:0}:root{--sd-color-primary: #0071bc;--sd-color-secondary: #6c757d;--sd-color-success: #28a745;--sd-color-info: #17a2b8;--sd-color-warning: #f0b37e;--sd-color-danger: #dc3545;--sd-color-light: #f8f9fa;--sd-color-muted: #6c757d;--sd-color-dark: #212529;--sd-color-black: black;--sd-color-white: white;--sd-color-primary-highlight: #0060a0;--sd-color-secondary-highlight: #5c636a;--sd-color-success-highlight: #228e3b;--sd-color-info-highlight: #148a9c;--sd-color-warning-highlight: #cc986b;--sd-color-danger-highlight: #bb2d3b;--sd-color-light-highlight: #d3d4d5;--sd-color-muted-highlight: #5c636a;--sd-color-dark-highlight: #1c1f23;--sd-color-black-highlight: black;--sd-color-white-highlight: #d9d9d9;--sd-color-primary-bg: rgba(0, 113, 188, 0.2);--sd-color-secondary-bg: rgba(108, 117, 125, 0.2);--sd-color-success-bg: rgba(40, 167, 69, 0.2);--sd-color-info-bg: rgba(23, 162, 184, 0.2);--sd-color-warning-bg: rgba(240, 179, 126, 0.2);--sd-color-danger-bg: rgba(220, 53, 69, 0.2);--sd-color-light-bg: rgba(248, 249, 250, 0.2);--sd-color-muted-bg: rgba(108, 117, 125, 0.2);--sd-color-dark-bg: rgba(33, 37, 41, 0.2);--sd-color-black-bg: rgba(0, 0, 0, 0.2);--sd-color-white-bg: rgba(255, 255, 255, 0.2);--sd-color-primary-text: #fff;--sd-color-secondary-text: #fff;--sd-color-success-text: #fff;--sd-color-info-text: #fff;--sd-color-warning-text: #212529;--sd-color-danger-text: #fff;--sd-color-light-text: #212529;--sd-color-muted-text: #fff;--sd-color-dark-text: #fff;--sd-color-black-text: #fff;--sd-color-white-text: #212529;--sd-color-shadow: rgba(0, 0, 0, 0.15);--sd-color-card-border: rgba(0, 0, 0, 0.125);--sd-color-card-border-hover: hsla(231, 99%, 66%, 1);--sd-color-card-background: transparent;--sd-color-card-text: inherit;--sd-color-card-header: transparent;--sd-color-card-footer: transparent;--sd-color-tabs-label-active: hsla(231, 99%, 66%, 
1);--sd-color-tabs-label-hover: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-inactive: hsl(0, 0%, 66%);--sd-color-tabs-underline-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-underline-hover: rgba(178, 206, 245, 0.62);--sd-color-tabs-underline-inactive: transparent;--sd-color-tabs-overline: rgb(222, 222, 222);--sd-color-tabs-underline: rgb(222, 222, 222);--sd-fontsize-tabs-label: 1rem;--sd-fontsize-dropdown: inherit;--sd-fontsize-dropdown-title: 1rem;--sd-fontweight-dropdown-title: 700} diff --git a/docs/_static/sphinx_highlight.js b/docs/_static/sphinx_highlight.js new file mode 100644 index 00000000..8a96c69a --- /dev/null +++ b/docs/_static/sphinx_highlight.js @@ -0,0 +1,154 @@ +/* Highlighting utilities for Sphinx HTML documentation. */ +"use strict"; + +const SPHINX_HIGHLIGHT_ENABLED = true + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. + */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + const rest = document.createTextNode(val.substr(pos + text.length)); + parent.insertBefore( + span, + parent.insertBefore( + rest, + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + /* There may be more occurrences of search term in this node. So call this + * function recursively on the remaining fragment. 
+ */ + _highlight(rest, addItems, text, className); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const SphinxHighlight = { + + /** + * highlight the search words provided in localstorage in the text + */ + highlightSearchWords: () => { + if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight + + // get and clear terms from localstorage + const url = new URL(window.location); + const highlight = + localStorage.getItem("sphinx_highlight_terms") + || url.searchParams.get("highlight") + || ""; + localStorage.removeItem("sphinx_highlight_terms") + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + + // get individual terms from highlight string + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? 
divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + localStorage.removeItem("sphinx_highlight_terms") + }, + + initEscapeListener: () => { + // only install a listener if it is really needed + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; + if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { + SphinxHighlight.hideSearchWords(); + event.preventDefault(); + } + }); + }, +}; + +_ready(() => { + /* Do not call highlightSearchWords() when we are on the search page. + * It will highlight words from the *previous* search query. 
+ */ + if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords(); + SphinxHighlight.initEscapeListener(); +}); diff --git a/docs/_static/styles/furo-extensions.css b/docs/_static/styles/furo-extensions.css new file mode 100644 index 00000000..2d74267f --- /dev/null +++ b/docs/_static/styles/furo-extensions.css @@ -0,0 +1,2 @@ +#furo-sidebar-ad-placement{padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)}#furo-sidebar-ad-placement .ethical-sidebar{background:var(--color-background-secondary);border:none;box-shadow:none}#furo-sidebar-ad-placement .ethical-sidebar:hover{background:var(--color-background-hover)}#furo-sidebar-ad-placement .ethical-sidebar a{color:var(--color-foreground-primary)}#furo-sidebar-ad-placement .ethical-callout a{color:var(--color-foreground-secondary)!important}#furo-readthedocs-versions{background:transparent;display:block;position:static;width:100%}#furo-readthedocs-versions .rst-versions{background:#1a1c1e}#furo-readthedocs-versions .rst-current-version{background:var(--color-sidebar-item-background);cursor:unset}#furo-readthedocs-versions .rst-current-version:hover{background:var(--color-sidebar-item-background)}#furo-readthedocs-versions .rst-current-version .fa-book{color:var(--color-foreground-primary)}#furo-readthedocs-versions>.rst-other-versions{padding:0}#furo-readthedocs-versions>.rst-other-versions small{opacity:1}#furo-readthedocs-versions .injected .rst-versions{position:unset}#furo-readthedocs-versions:focus-within,#furo-readthedocs-versions:hover{box-shadow:0 0 0 1px var(--color-sidebar-background-border)}#furo-readthedocs-versions:focus-within .rst-current-version,#furo-readthedocs-versions:hover .rst-current-version{background:#1a1c1e;font-size:inherit;height:auto;line-height:inherit;padding:12px;text-align:right}#furo-readthedocs-versions:focus-within .rst-current-version .fa-book,#furo-readthedocs-versions:hover .rst-current-version 
.fa-book{color:#fff;float:left}#furo-readthedocs-versions:focus-within .fa-caret-down,#furo-readthedocs-versions:hover .fa-caret-down{display:none}#furo-readthedocs-versions:focus-within .injected,#furo-readthedocs-versions:focus-within .rst-current-version,#furo-readthedocs-versions:focus-within .rst-other-versions,#furo-readthedocs-versions:hover .injected,#furo-readthedocs-versions:hover .rst-current-version,#furo-readthedocs-versions:hover .rst-other-versions{display:block}#furo-readthedocs-versions:focus-within>.rst-current-version,#furo-readthedocs-versions:hover>.rst-current-version{display:none}.highlight:hover button.copybtn{color:var(--color-code-foreground)}.highlight button.copybtn{align-items:center;background-color:var(--color-code-background);border:none;color:var(--color-background-item);cursor:pointer;height:1.25em;right:.5rem;top:.625rem;transition:color .3s,opacity .3s;width:1.25em}.highlight button.copybtn:hover{background-color:var(--color-code-background);color:var(--color-brand-content)}.highlight button.copybtn:after{background-color:transparent;color:var(--color-code-foreground);display:none}.highlight button.copybtn.success{color:#22863a;transition:color 0s}.highlight button.copybtn.success:after{display:block}.highlight button.copybtn 
svg{padding:0}body{--sd-color-primary:var(--color-brand-primary);--sd-color-primary-highlight:var(--color-brand-content);--sd-color-primary-text:var(--color-background-primary);--sd-color-shadow:rgba(0,0,0,.05);--sd-color-card-border:var(--color-card-border);--sd-color-card-border-hover:var(--color-brand-content);--sd-color-card-background:var(--color-card-background);--sd-color-card-text:var(--color-foreground-primary);--sd-color-card-header:var(--color-card-marginals-background);--sd-color-card-footer:var(--color-card-marginals-background);--sd-color-tabs-label-active:var(--color-brand-content);--sd-color-tabs-label-hover:var(--color-foreground-muted);--sd-color-tabs-label-inactive:var(--color-foreground-muted);--sd-color-tabs-underline-active:var(--color-brand-content);--sd-color-tabs-underline-hover:var(--color-foreground-border);--sd-color-tabs-underline-inactive:var(--color-background-border);--sd-color-tabs-overline:var(--color-background-border);--sd-color-tabs-underline:var(--color-background-border)}.sd-tab-content{box-shadow:0 -2px var(--sd-color-tabs-overline),0 1px var(--sd-color-tabs-underline)}.sd-card{box-shadow:0 .1rem .25rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)}.sd-shadow-sm{box-shadow:0 .1rem .25rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-shadow-md{box-shadow:0 .3rem .75rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-shadow-lg{box-shadow:0 .6rem 1.5rem var(--sd-color-shadow),0 0 .0625rem 
rgba(0,0,0,.1)!important}.sd-card-hover:hover{transform:none}.sd-cards-carousel{gap:.25rem;padding:.25rem}body{--tabs--label-text:var(--color-foreground-muted);--tabs--label-text--hover:var(--color-foreground-muted);--tabs--label-text--active:var(--color-brand-content);--tabs--label-text--active--hover:var(--color-brand-content);--tabs--label-background:transparent;--tabs--label-background--hover:transparent;--tabs--label-background--active:transparent;--tabs--label-background--active--hover:transparent;--tabs--padding-x:0.25em;--tabs--margin-x:1em;--tabs--border:var(--color-background-border);--tabs--label-border:transparent;--tabs--label-border--hover:var(--color-foreground-muted);--tabs--label-border--active:var(--color-brand-content);--tabs--label-border--active--hover:var(--color-brand-content)}[role=main] .container{max-width:none;padding-left:0;padding-right:0}.shadow.docutils{border:none;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1)!important}.sphinx-bs .card{background-color:var(--color-background-secondary);color:var(--color-foreground)} +/*# sourceMappingURL=furo-extensions.css.map*/ \ No newline at end of file diff --git a/docs/_static/styles/furo-extensions.css.map b/docs/_static/styles/furo-extensions.css.map new file mode 100644 index 00000000..68fb7fd0 --- /dev/null +++ b/docs/_static/styles/furo-extensions.css.map @@ -0,0 +1 @@ 
+{"version":3,"file":"styles/furo-extensions.css","mappings":"AAGA,2BACE,oFACA,4CAKE,6CAHA,YACA,eAEA,CACA,kDACE,yCAEF,8CACE,sCAEJ,8CACE,kDAEJ,2BAGE,uBACA,cAHA,gBACA,UAEA,CAGA,yCACE,mBAEF,gDAEE,gDADA,YACA,CACA,sDACE,gDACF,yDACE,sCAEJ,+CACE,UACA,qDACE,UAGF,mDACE,eAEJ,yEAEE,4DAEA,mHASE,mBAPA,kBAEA,YADA,oBAGA,aADA,gBAIA,CAEA,qIAEE,WADA,UACA,CAEJ,uGACE,aAEF,iUAGE,cAEF,mHACE,aC1EJ,gCACE,mCAEF,0BAEE,mBAUA,8CACA,YAFA,mCAKA,eAZA,cAIA,YADA,YAYA,iCAdA,YAcA,CAEA,gCAEE,8CADA,gCACA,CAEF,gCAGE,6BADA,mCADA,YAEA,CAEF,kCAEE,cADA,mBACA,CACA,wCACE,cAEJ,8BACE,UCzCN,KAEE,6CAA8C,CAC9C,uDAAwD,CACxD,uDAAwD,CAGxD,iCAAsC,CAGtC,+CAAgD,CAChD,uDAAwD,CACxD,uDAAwD,CACxD,oDAAqD,CACrD,6DAA8D,CAC9D,6DAA8D,CAG9D,uDAAwD,CACxD,yDAA0D,CAC1D,4DAA6D,CAC7D,2DAA4D,CAC5D,8DAA+D,CAC/D,iEAAkE,CAClE,uDAAwD,CACxD,wDAAyD,CAG3D,gBACE,qFAGF,SACE,6EAEF,cACE,uFAEF,cACE,uFAEF,cACE,uFAGF,qBACE,eAEF,mBACE,WACA,eChDF,KACE,gDAAiD,CACjD,uDAAwD,CACxD,qDAAsD,CACtD,4DAA6D,CAC7D,oCAAqC,CACrC,2CAA4C,CAC5C,4CAA6C,CAC7C,mDAAoD,CACpD,wBAAyB,CACzB,oBAAqB,CACrB,6CAA8C,CAC9C,gCAAiC,CACjC,yDAA0D,CAC1D,uDAAwD,CACxD,8DAA+D,CCbjE,uBACE,eACA,eACA,gBAGF,iBACE,YACA,+EAGF,iBACE,mDACA","sources":["webpack:///./src/furo/assets/styles/extensions/_readthedocs.sass","webpack:///./src/furo/assets/styles/extensions/_copybutton.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-design.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-inline-tabs.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-panels.sass"],"sourcesContent":["// This file contains the styles used for tweaking how ReadTheDoc's embedded\n// contents would show up inside the theme.\n\n#furo-sidebar-ad-placement\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n .ethical-sidebar\n // Remove the border and box-shadow.\n border: none\n box-shadow: none\n // Manage the background colors.\n background: var(--color-background-secondary)\n &:hover\n background: var(--color-background-hover)\n // Ensure the text is 
legible.\n a\n color: var(--color-foreground-primary)\n\n .ethical-callout a\n color: var(--color-foreground-secondary) !important\n\n#furo-readthedocs-versions\n position: static\n width: 100%\n background: transparent\n display: block\n\n // Make the background color fit with the theme's aesthetic.\n .rst-versions\n background: rgb(26, 28, 30)\n\n .rst-current-version\n cursor: unset\n background: var(--color-sidebar-item-background)\n &:hover\n background: var(--color-sidebar-item-background)\n .fa-book\n color: var(--color-foreground-primary)\n\n > .rst-other-versions\n padding: 0\n small\n opacity: 1\n\n .injected\n .rst-versions\n position: unset\n\n &:hover,\n &:focus-within\n box-shadow: 0 0 0 1px var(--color-sidebar-background-border)\n\n .rst-current-version\n // Undo the tweaks done in RTD's CSS\n font-size: inherit\n line-height: inherit\n height: auto\n text-align: right\n padding: 12px\n\n // Match the rest of the body\n background: #1a1c1e\n\n .fa-book\n float: left\n color: white\n\n .fa-caret-down\n display: none\n\n .rst-current-version,\n .rst-other-versions,\n .injected\n display: block\n\n > .rst-current-version\n display: none\n",".highlight\n &:hover button.copybtn\n color: var(--color-code-foreground)\n\n button.copybtn\n // Align things correctly\n align-items: center\n\n height: 1.25em\n width: 1.25em\n\n top: 0.625rem // $code-spacing-vertical\n right: 0.5rem\n\n // Make it look better\n color: var(--color-background-item)\n background-color: var(--color-code-background)\n border: none\n\n // Change to cursor to make it obvious that you can click on it\n cursor: pointer\n\n // Transition smoothly, for aesthetics\n transition: color 300ms, opacity 300ms\n\n &:hover\n color: var(--color-brand-content)\n background-color: var(--color-code-background)\n\n &::after\n display: none\n color: var(--color-code-foreground)\n background-color: transparent\n\n &.success\n transition: color 0ms\n color: #22863a\n &::after\n display: block\n\n svg\n 
padding: 0\n","body\n // Colors\n --sd-color-primary: var(--color-brand-primary)\n --sd-color-primary-highlight: var(--color-brand-content)\n --sd-color-primary-text: var(--color-background-primary)\n\n // Shadows\n --sd-color-shadow: rgba(0, 0, 0, 0.05)\n\n // Cards\n --sd-color-card-border: var(--color-card-border)\n --sd-color-card-border-hover: var(--color-brand-content)\n --sd-color-card-background: var(--color-card-background)\n --sd-color-card-text: var(--color-foreground-primary)\n --sd-color-card-header: var(--color-card-marginals-background)\n --sd-color-card-footer: var(--color-card-marginals-background)\n\n // Tabs\n --sd-color-tabs-label-active: var(--color-brand-content)\n --sd-color-tabs-label-hover: var(--color-foreground-muted)\n --sd-color-tabs-label-inactive: var(--color-foreground-muted)\n --sd-color-tabs-underline-active: var(--color-brand-content)\n --sd-color-tabs-underline-hover: var(--color-foreground-border)\n --sd-color-tabs-underline-inactive: var(--color-background-border)\n --sd-color-tabs-overline: var(--color-background-border)\n --sd-color-tabs-underline: var(--color-background-border)\n\n// Tabs\n.sd-tab-content\n box-shadow: 0 -2px var(--sd-color-tabs-overline), 0 1px var(--sd-color-tabs-underline)\n\n// Shadows\n.sd-card // Have a shadow by default\n box-shadow: 0 0.1rem 0.25rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n.sd-shadow-sm\n box-shadow: 0 0.1rem 0.25rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n.sd-shadow-md\n box-shadow: 0 0.3rem 0.75rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n.sd-shadow-lg\n box-shadow: 0 0.6rem 1.5rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n// Cards\n.sd-card-hover:hover // Don't change scale on hover\n transform: none\n\n.sd-cards-carousel // Have a bit of gap in the carousel by default\n gap: 0.25rem\n padding: 0.25rem\n","// This file contains styles to tweak sphinx-inline-tabs to work 
well with Furo.\n\nbody\n --tabs--label-text: var(--color-foreground-muted)\n --tabs--label-text--hover: var(--color-foreground-muted)\n --tabs--label-text--active: var(--color-brand-content)\n --tabs--label-text--active--hover: var(--color-brand-content)\n --tabs--label-background: transparent\n --tabs--label-background--hover: transparent\n --tabs--label-background--active: transparent\n --tabs--label-background--active--hover: transparent\n --tabs--padding-x: 0.25em\n --tabs--margin-x: 1em\n --tabs--border: var(--color-background-border)\n --tabs--label-border: transparent\n --tabs--label-border--hover: var(--color-foreground-muted)\n --tabs--label-border--active: var(--color-brand-content)\n --tabs--label-border--active--hover: var(--color-brand-content)\n","// This file contains styles to tweak sphinx-panels to work well with Furo.\n\n// sphinx-panels includes Bootstrap 4, which uses .container which can conflict\n// with docutils' `.. container::` directive.\n[role=\"main\"] .container\n max-width: initial\n padding-left: initial\n padding-right: initial\n\n// Make the panels look nicer!\n.shadow.docutils\n border: none\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n// Make panel colors respond to dark mode\n.sphinx-bs .card\n background-color: var(--color-background-secondary)\n color: var(--color-foreground)\n"],"names":[],"sourceRoot":""} \ No newline at end of file diff --git a/docs/_static/styles/furo.css b/docs/_static/styles/furo.css new file mode 100644 index 00000000..592d5bff --- /dev/null +++ b/docs/_static/styles/furo.css @@ -0,0 +1,2 @@ +/*! 
normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */html{line-height:1.15;-webkit-text-size-adjust:100%}body{margin:0}main{display:block}h1{font-size:2em;margin:.67em 0}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em .625em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details{display:block}summary{display:list-item}[hidden],template{display:none}@media print{.content-icon-container,.headerlink,.mobile-header,.related-pages{display:none!important}.highlight{border:.1pt solid 
var(--color-foreground-border)}a,blockquote,dl,ol,p,pre,table,ul{page-break-inside:avoid}caption,figure,h1,h2,h3,h4,h5,h6,img{page-break-after:avoid;page-break-inside:avoid}dl,ol,ul{page-break-before:avoid}}.visually-hidden{height:1px!important;margin:-1px!important;overflow:hidden!important;padding:0!important;position:absolute!important;width:1px!important;clip:rect(0,0,0,0)!important;background:var(--color-background-primary);border:0!important;color:var(--color-foreground-primary);white-space:nowrap!important}:-moz-focusring{outline:auto}body{--font-stack:-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif,Apple Color Emoji,Segoe UI Emoji;--font-stack--monospace:"SFMono-Regular",Menlo,Consolas,Monaco,Liberation Mono,Lucida Console,monospace;--font-stack--headings:var(--font-stack);--font-size--normal:100%;--font-size--small:87.5%;--font-size--small--2:81.25%;--font-size--small--3:75%;--font-size--small--4:62.5%;--sidebar-caption-font-size:var(--font-size--small--2);--sidebar-item-font-size:var(--font-size--small);--sidebar-search-input-font-size:var(--font-size--small);--toc-font-size:var(--font-size--small--3);--toc-font-size--mobile:var(--font-size--normal);--toc-title-font-size:var(--font-size--small--4);--admonition-font-size:0.8125rem;--admonition-title-font-size:0.8125rem;--code-font-size:var(--font-size--small--2);--api-font-size:var(--font-size--small);--header-height:calc(var(--sidebar-item-line-height) + var(--sidebar-item-spacing-vertical)*4);--header-padding:0.5rem;--sidebar-tree-space-above:1.5rem;--sidebar-caption-space-above:1rem;--sidebar-item-line-height:1rem;--sidebar-item-spacing-vertical:0.5rem;--sidebar-item-spacing-horizontal:1rem;--sidebar-item-height:calc(var(--sidebar-item-line-height) + 
var(--sidebar-item-spacing-vertical)*2);--sidebar-expander-width:var(--sidebar-item-height);--sidebar-search-space-above:0.5rem;--sidebar-search-input-spacing-vertical:0.5rem;--sidebar-search-input-spacing-horizontal:0.5rem;--sidebar-search-input-height:1rem;--sidebar-search-icon-size:var(--sidebar-search-input-height);--toc-title-padding:0.25rem 0;--toc-spacing-vertical:1.5rem;--toc-spacing-horizontal:1.5rem;--toc-item-spacing-vertical:0.4rem;--toc-item-spacing-horizontal:1rem;--icon-search:url('data:image/svg+xml;charset=utf-8,');--icon-pencil:url('data:image/svg+xml;charset=utf-8,');--icon-abstract:url('data:image/svg+xml;charset=utf-8,');--icon-info:url('data:image/svg+xml;charset=utf-8,');--icon-flame:url('data:image/svg+xml;charset=utf-8,');--icon-question:url('data:image/svg+xml;charset=utf-8,');--icon-warning:url('data:image/svg+xml;charset=utf-8,');--icon-failure:url('data:image/svg+xml;charset=utf-8,');--icon-spark:url('data:image/svg+xml;charset=utf-8,');--color-admonition-title--caution:#ff9100;--color-admonition-title-background--caution:rgba(255,145,0,.2);--color-admonition-title--warning:#ff9100;--color-admonition-title-background--warning:rgba(255,145,0,.2);--color-admonition-title--danger:#ff5252;--color-admonition-title-background--danger:rgba(255,82,82,.2);--color-admonition-title--attention:#ff5252;--color-admonition-title-background--attention:rgba(255,82,82,.2);--color-admonition-title--error:#ff5252;--color-admonition-title-background--error:rgba(255,82,82,.2);--color-admonition-title--hint:#00c852;--color-admonition-title-background--hint:rgba(0,200,82,.2);--color-admonition-title--tip:#00c852;--color-admonition-title-background--tip:rgba(0,200,82,.2);--color-admonition-title--important:#00bfa5;--color-admonition-title-background--important:rgba(0,191,165,.2);--color-admonition-title--note:#00b0ff;--color-admonition-title-background--note:rgba(0,176,255,.2);--color-admonition-title--seealso:#448aff;--color-admonition-title-background--seealso
:rgba(68,138,255,.2);--color-admonition-title--admonition-todo:grey;--color-admonition-title-background--admonition-todo:hsla(0,0%,50%,.2);--color-admonition-title:#651fff;--color-admonition-title-background:rgba(101,31,255,.2);--icon-admonition-default:var(--icon-abstract);--color-topic-title:#14b8a6;--color-topic-title-background:rgba(20,184,166,.2);--icon-topic-default:var(--icon-pencil);--color-problematic:#b30000;--color-foreground-primary:#000;--color-foreground-secondary:#5a5c63;--color-foreground-muted:#6b6f76;--color-foreground-border:#878787;--color-background-primary:#fff;--color-background-secondary:#f8f9fb;--color-background-hover:#efeff4;--color-background-hover--transparent:#efeff400;--color-background-border:#eeebee;--color-background-item:#ccc;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#0a4bff;--color-brand-content:#2757dd;--color-brand-visited:#872ee0;--color-api-background:var(--color-background-hover--transparent);--color-api-background-hover:var(--color-background-hover);--color-api-overall:var(--color-foreground-secondary);--color-api-name:var(--color-problematic);--color-api-pre-name:var(--color-problematic);--color-api-paren:var(--color-foreground-secondary);--color-api-keyword:var(--color-foreground-primary);--color-api-added:#21632c;--color-api-added-border:#38a84d;--color-api-changed:#046172;--color-api-changed-border:#06a1bc;--color-api-deprecated:#605706;--color-api-deprecated-border:#f0d90f;--color-api-removed:#b30000;--color-api-removed-border:#ff5c5c;--color-highlight-on-target:#ffc;--color-inline-code-background:var(--color-background-secondary);--color-highlighted-background:#def;--color-highlighted-text:var(--color-foreground-primary);--color-guilabel-background:#ddeeff80;--color-guilabel-border:#bedaf580;--color-guilabel-text:var(--color-foreground-primary);--color-admonition-background:transparent;--color-table-header-background:var(--color-background-secondary);--color-table
-border:var(--color-background-border);--color-card-border:var(--color-background-secondary);--color-card-background:transparent;--color-card-marginals-background:var(--color-background-secondary);--color-header-background:var(--color-background-primary);--color-header-border:var(--color-background-border);--color-header-text:var(--color-foreground-primary);--color-sidebar-background:var(--color-background-secondary);--color-sidebar-background-border:var(--color-background-border);--color-sidebar-brand-text:var(--color-foreground-primary);--color-sidebar-caption-text:var(--color-foreground-muted);--color-sidebar-link-text:var(--color-foreground-secondary);--color-sidebar-link-text--top-level:var(--color-brand-primary);--color-sidebar-item-background:var(--color-sidebar-background);--color-sidebar-item-background--current:var( --color-sidebar-item-background );--color-sidebar-item-background--hover:linear-gradient(90deg,var(--color-background-hover--transparent) 0%,var(--color-background-hover) var(--sidebar-item-spacing-horizontal),var(--color-background-hover) 100%);--color-sidebar-item-expander-background:transparent;--color-sidebar-item-expander-background--hover:var( --color-background-hover 
);--color-sidebar-search-text:var(--color-foreground-primary);--color-sidebar-search-background:var(--color-background-secondary);--color-sidebar-search-background--focus:var(--color-background-primary);--color-sidebar-search-border:var(--color-background-border);--color-sidebar-search-icon:var(--color-foreground-muted);--color-toc-background:var(--color-background-primary);--color-toc-title-text:var(--color-foreground-muted);--color-toc-item-text:var(--color-foreground-secondary);--color-toc-item-text--hover:var(--color-foreground-primary);--color-toc-item-text--active:var(--color-brand-primary);--color-content-foreground:var(--color-foreground-primary);--color-content-background:transparent;--color-link:var(--color-brand-content);--color-link-underline:var(--color-background-border);--color-link--hover:var(--color-brand-content);--color-link-underline--hover:var(--color-foreground-border);--color-link--visited:var(--color-brand-visited);--color-link-underline--visited:var(--color-background-border);--color-link--visited--hover:var(--color-brand-visited);--color-link-underline--visited--hover:var(--color-foreground-border)}.only-light{display:block!important}html body .only-dark{display:none!important}@media not 
print{body[data-theme=dark]{--color-problematic:#ee5151;--color-foreground-primary:#cfd0d0;--color-foreground-secondary:#9ca0a5;--color-foreground-muted:#81868d;--color-foreground-border:#666;--color-background-primary:#131416;--color-background-secondary:#1a1c1e;--color-background-hover:#1e2124;--color-background-hover--transparent:#1e212400;--color-background-border:#303335;--color-background-item:#444;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#3d94ff;--color-brand-content:#5ca5ff;--color-brand-visited:#b27aeb;--color-highlighted-background:#083563;--color-guilabel-background:#08356380;--color-guilabel-border:#13395f80;--color-api-keyword:var(--color-foreground-secondary);--color-highlight-on-target:#330;--color-api-added:#3db854;--color-api-added-border:#267334;--color-api-changed:#09b0ce;--color-api-changed-border:#056d80;--color-api-deprecated:#b1a10b;--color-api-deprecated-border:#6e6407;--color-api-removed:#ff7575;--color-api-removed-border:#b03b3b;--color-admonition-background:#18181a;--color-card-border:var(--color-background-secondary);--color-card-background:#18181a;--color-card-marginals-background:var(--color-background-hover)}html body[data-theme=dark] .only-light{display:none!important}body[data-theme=dark] 
.only-dark{display:block!important}@media(prefers-color-scheme:dark){body:not([data-theme=light]){--color-problematic:#ee5151;--color-foreground-primary:#cfd0d0;--color-foreground-secondary:#9ca0a5;--color-foreground-muted:#81868d;--color-foreground-border:#666;--color-background-primary:#131416;--color-background-secondary:#1a1c1e;--color-background-hover:#1e2124;--color-background-hover--transparent:#1e212400;--color-background-border:#303335;--color-background-item:#444;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#3d94ff;--color-brand-content:#5ca5ff;--color-brand-visited:#b27aeb;--color-highlighted-background:#083563;--color-guilabel-background:#08356380;--color-guilabel-border:#13395f80;--color-api-keyword:var(--color-foreground-secondary);--color-highlight-on-target:#330;--color-api-added:#3db854;--color-api-added-border:#267334;--color-api-changed:#09b0ce;--color-api-changed-border:#056d80;--color-api-deprecated:#b1a10b;--color-api-deprecated-border:#6e6407;--color-api-removed:#ff7575;--color-api-removed-border:#b03b3b;--color-admonition-background:#18181a;--color-card-border:var(--color-background-secondary);--color-card-background:#18181a;--color-card-marginals-background:var(--color-background-hover)}html body:not([data-theme=light]) .only-light{display:none!important}body:not([data-theme=light]) .only-dark{display:block!important}}}body[data-theme=auto] .theme-toggle svg.theme-icon-when-auto-light{display:block}@media(prefers-color-scheme:dark){body[data-theme=auto] .theme-toggle svg.theme-icon-when-auto-dark{display:block}body[data-theme=auto] .theme-toggle svg.theme-icon-when-auto-light{display:none}}body[data-theme=dark] .theme-toggle svg.theme-icon-when-dark,body[data-theme=light] .theme-toggle 
svg.theme-icon-when-light{display:block}body{font-family:var(--font-stack)}code,kbd,pre,samp{font-family:var(--font-stack--monospace)}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}article{line-height:1.5}h1,h2,h3,h4,h5,h6{border-radius:.5rem;font-family:var(--font-stack--headings);font-weight:700;line-height:1.25;margin:.5rem -.5rem;padding-left:.5rem;padding-right:.5rem}h1+p,h2+p,h3+p,h4+p,h5+p,h6+p{margin-top:0}h1{font-size:2.5em;margin-bottom:1rem}h1,h2{margin-top:1.75rem}h2{font-size:2em}h3{font-size:1.5em}h4{font-size:1.25em}h5{font-size:1.125em}h6{font-size:1em}small{font-size:80%;opacity:75%}p{margin-bottom:.75rem;margin-top:.5rem}hr.docutils{background-color:var(--color-background-border);border:0;height:1px;margin:2rem 0;padding:0}.centered{text-align:center}a{color:var(--color-link);text-decoration:underline;text-decoration-color:var(--color-link-underline)}a:visited{color:var(--color-link--visited);text-decoration-color:var(--color-link-underline--visited)}a:visited:hover{color:var(--color-link--visited--hover);text-decoration-color:var(--color-link-underline--visited--hover)}a:hover{color:var(--color-link--hover);text-decoration-color:var(--color-link-underline--hover)}a.muted-link{color:inherit}a.muted-link:hover{color:var(--color-link--hover);text-decoration-color:var(--color-link-underline--hover)}a.muted-link:hover:visited{color:var(--color-link--visited--hover);text-decoration-color:var(--color-link-underline--visited--hover)}html{overflow-x:hidden;overflow-y:scroll;scroll-behavior:smooth}.sidebar-scroll,.toc-scroll,article[role=main] *{scrollbar-color:var(--color-foreground-border) transparent;scrollbar-width:thin}body,html{height:100%}.skip-to-content,body,html{background:var(--color-background-primary);color:var(--color-foreground-primary)}.skip-to-content{border-radius:1rem;left:.25rem;padding:1rem;position:fixed;top:.25rem;transform:translateY(-200%);transition:transform .3s 
ease-in-out;z-index:40}.skip-to-content:focus-within{transform:translateY(0)}article{background:var(--color-content-background);color:var(--color-content-foreground);overflow-wrap:break-word}.page{display:flex;min-height:100%}.mobile-header{background-color:var(--color-header-background);border-bottom:1px solid var(--color-header-border);color:var(--color-header-text);display:none;height:var(--header-height);width:100%;z-index:10}.mobile-header.scrolled{border-bottom:none;box-shadow:0 0 .2rem rgba(0,0,0,.1),0 .2rem .4rem rgba(0,0,0,.2)}.mobile-header .header-center a{color:var(--color-header-text);text-decoration:none}.main{display:flex;flex:1}.sidebar-drawer{background:var(--color-sidebar-background);border-right:1px solid var(--color-sidebar-background-border);box-sizing:border-box;display:flex;justify-content:flex-end;min-width:15em;width:calc(50% - 26em)}.sidebar-container,.toc-drawer{box-sizing:border-box;width:15em}.toc-drawer{background:var(--color-toc-background);padding-right:1rem}.sidebar-sticky,.toc-sticky{display:flex;flex-direction:column;height:min(100%,100vh);height:100vh;position:sticky;top:0}.sidebar-scroll,.toc-scroll{flex-grow:1;flex-shrink:1;overflow:auto;scroll-behavior:smooth}.content{display:flex;flex-direction:column;justify-content:space-between;padding:0 3em;width:46em}.icon{display:inline-block;height:1rem;width:1rem}.icon svg{height:100%;width:100%}.announcement{align-items:center;background-color:var(--color-announcement-background);color:var(--color-announcement-text);display:flex;height:var(--header-height);overflow-x:auto}.announcement+.page{min-height:calc(100% - var(--header-height))}.announcement-content{box-sizing:border-box;min-width:100%;padding:.5rem;text-align:center;white-space:nowrap}.announcement-content a{color:var(--color-announcement-text);text-decoration-color:var(--color-announcement-text)}.announcement-content a:hover{color:var(--color-announcement-text);text-decoration-color:var(--color-link--hover)}.no-js 
.theme-toggle-container{display:none}.theme-toggle-container{display:flex}.theme-toggle{background:transparent;border:none;cursor:pointer;display:flex;padding:0}.theme-toggle svg{color:var(--color-foreground-primary);display:none;height:1.25rem;width:1.25rem}.theme-toggle-header{align-items:center;display:flex;justify-content:center}.nav-overlay-icon,.toc-overlay-icon{cursor:pointer;display:none}.nav-overlay-icon .icon,.toc-overlay-icon .icon{color:var(--color-foreground-secondary);height:1.5rem;width:1.5rem}.nav-overlay-icon,.toc-header-icon{align-items:center;justify-content:center}.toc-content-icon{height:1.5rem;width:1.5rem}.content-icon-container{display:flex;float:right;gap:.5rem;margin-bottom:1rem;margin-left:1rem;margin-top:1.5rem}.content-icon-container .edit-this-page svg,.content-icon-container .view-this-page svg{color:inherit;height:1.25rem;width:1.25rem}.sidebar-toggle{display:none;position:absolute}.sidebar-toggle[name=__toc]{left:20px}.sidebar-toggle:checked{left:40px}.overlay{background-color:rgba(0,0,0,.54);height:0;opacity:0;position:fixed;top:0;transition:width 0s,height 0s,opacity .25s ease-out;width:0}.sidebar-overlay{z-index:20}.toc-overlay{z-index:40}.sidebar-drawer{transition:left .25s ease-in-out;z-index:30}.toc-drawer{transition:right .25s ease-in-out;z-index:50}#__navigation:checked~.sidebar-overlay{height:100%;opacity:1;width:100%}#__navigation:checked~.page .sidebar-drawer{left:0;top:0}#__toc:checked~.toc-overlay{height:100%;opacity:1;width:100%}#__toc:checked~.page .toc-drawer{right:0;top:0}.back-to-top{background:var(--color-background-primary);border-radius:1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 1px 0 hsla(220,9%,46%,.502);display:none;font-size:.8125rem;left:0;margin-left:50%;padding:.5rem .75rem .5rem .5rem;position:fixed;text-decoration:none;top:1rem;transform:translateX(-50%);z-index:10}.back-to-top svg{height:1rem;width:1rem;fill:currentColor;display:inline-block}.back-to-top span{margin-left:.25rem}.show-back-to-top 
.back-to-top{align-items:center;display:flex}@media(min-width:97em){html{font-size:110%}}@media(max-width:82em){.toc-content-icon{display:flex}.toc-drawer{border-left:1px solid var(--color-background-muted);height:100vh;position:fixed;right:-15em;top:0}.toc-tree{border-left:none;font-size:var(--toc-font-size--mobile)}.sidebar-drawer{width:calc(50% - 18.5em)}}@media(max-width:67em){.content{margin-left:auto;margin-right:auto;padding:0 1em}}@media(max-width:63em){.nav-overlay-icon{display:flex}.sidebar-drawer{height:100vh;left:-15em;position:fixed;top:0;width:15em}.theme-toggle-header,.toc-header-icon{display:flex}.theme-toggle-content,.toc-content-icon{display:none}.mobile-header{align-items:center;display:flex;justify-content:space-between;position:sticky;top:0}.mobile-header .header-left,.mobile-header .header-right{display:flex;height:var(--header-height);padding:0 var(--header-padding)}.mobile-header .header-left label,.mobile-header .header-right label{height:100%;-webkit-user-select:none;-moz-user-select:none;user-select:none;width:100%}.nav-overlay-icon .icon,.theme-toggle svg{height:1.5rem;width:1.5rem}:target{scroll-margin-top:calc(var(--header-height) + 2.5rem)}.back-to-top{top:calc(var(--header-height) + .5rem)}.page{flex-direction:column;justify-content:center}}@media(max-width:48em){.content{overflow-x:auto;width:100%}}@media(max-width:46em){article[role=main] aside.sidebar{float:none;margin:1rem 0;width:100%}}.admonition,.topic{background:var(--color-admonition-background);border-radius:.2rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1);font-size:var(--admonition-font-size);margin:1rem auto;overflow:hidden;padding:0 .5rem .5rem;page-break-inside:avoid}.admonition>:nth-child(2),.topic>:nth-child(2){margin-top:0}.admonition>:last-child,.topic>:last-child{margin-bottom:0}.admonition p.admonition-title,p.topic-title{font-size:var(--admonition-title-font-size);font-weight:500;line-height:1.3;margin:0 -.5rem .5rem;padding:.4rem .5rem 
.4rem 2rem;position:relative}.admonition p.admonition-title:before,p.topic-title:before{content:"";height:1rem;left:.5rem;position:absolute;width:1rem}p.admonition-title{background-color:var(--color-admonition-title-background)}p.admonition-title:before{background-color:var(--color-admonition-title);-webkit-mask-image:var(--icon-admonition-default);mask-image:var(--icon-admonition-default);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat}p.topic-title{background-color:var(--color-topic-title-background)}p.topic-title:before{background-color:var(--color-topic-title);-webkit-mask-image:var(--icon-topic-default);mask-image:var(--icon-topic-default);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat}.admonition{border-left:.2rem solid var(--color-admonition-title)}.admonition.caution{border-left-color:var(--color-admonition-title--caution)}.admonition.caution>.admonition-title{background-color:var(--color-admonition-title-background--caution)}.admonition.caution>.admonition-title:before{background-color:var(--color-admonition-title--caution);-webkit-mask-image:var(--icon-spark);mask-image:var(--icon-spark)}.admonition.warning{border-left-color:var(--color-admonition-title--warning)}.admonition.warning>.admonition-title{background-color:var(--color-admonition-title-background--warning)}.admonition.warning>.admonition-title:before{background-color:var(--color-admonition-title--warning);-webkit-mask-image:var(--icon-warning);mask-image:var(--icon-warning)}.admonition.danger{border-left-color:var(--color-admonition-title--danger)}.admonition.danger>.admonition-title{background-color:var(--color-admonition-title-background--danger)}.admonition.danger>.admonition-title:before{background-color:var(--color-admonition-title--danger);-webkit-mask-image:var(--icon-spark);mask-image:var(--icon-spark)}.admonition.attention{border-left-color:var(--color-admonition-title--attention)}.admonition.attention>.admonition-title{background-color:var(--color-admonition-title-background--a
ttention)}.admonition.attention>.admonition-title:before{background-color:var(--color-admonition-title--attention);-webkit-mask-image:var(--icon-warning);mask-image:var(--icon-warning)}.admonition.error{border-left-color:var(--color-admonition-title--error)}.admonition.error>.admonition-title{background-color:var(--color-admonition-title-background--error)}.admonition.error>.admonition-title:before{background-color:var(--color-admonition-title--error);-webkit-mask-image:var(--icon-failure);mask-image:var(--icon-failure)}.admonition.hint{border-left-color:var(--color-admonition-title--hint)}.admonition.hint>.admonition-title{background-color:var(--color-admonition-title-background--hint)}.admonition.hint>.admonition-title:before{background-color:var(--color-admonition-title--hint);-webkit-mask-image:var(--icon-question);mask-image:var(--icon-question)}.admonition.tip{border-left-color:var(--color-admonition-title--tip)}.admonition.tip>.admonition-title{background-color:var(--color-admonition-title-background--tip)}.admonition.tip>.admonition-title:before{background-color:var(--color-admonition-title--tip);-webkit-mask-image:var(--icon-info);mask-image:var(--icon-info)}.admonition.important{border-left-color:var(--color-admonition-title--important)}.admonition.important>.admonition-title{background-color:var(--color-admonition-title-background--important)}.admonition.important>.admonition-title:before{background-color:var(--color-admonition-title--important);-webkit-mask-image:var(--icon-flame);mask-image:var(--icon-flame)}.admonition.note{border-left-color:var(--color-admonition-title--note)}.admonition.note>.admonition-title{background-color:var(--color-admonition-title-background--note)}.admonition.note>.admonition-title:before{background-color:var(--color-admonition-title--note);-webkit-mask-image:var(--icon-pencil);mask-image:var(--icon-pencil)}.admonition.seealso{border-left-color:var(--color-admonition-title--seealso)}.admonition.seealso>.admonition-title{backg
round-color:var(--color-admonition-title-background--seealso)}.admonition.seealso>.admonition-title:before{background-color:var(--color-admonition-title--seealso);-webkit-mask-image:var(--icon-info);mask-image:var(--icon-info)}.admonition.admonition-todo{border-left-color:var(--color-admonition-title--admonition-todo)}.admonition.admonition-todo>.admonition-title{background-color:var(--color-admonition-title-background--admonition-todo)}.admonition.admonition-todo>.admonition-title:before{background-color:var(--color-admonition-title--admonition-todo);-webkit-mask-image:var(--icon-pencil);mask-image:var(--icon-pencil)}.admonition-todo>.admonition-title{text-transform:uppercase}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd{margin-left:2rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd>:first-child{margin-top:.125rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list,dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd>:last-child{margin-bottom:.75rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list>dt{font-size:var(--font-size--small);text-transform:uppercase}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd:empty{margin-bottom:.5rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul{margin-left:-1.2rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul>li>p:nth-child(2){margin-top:0}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list 
dd>ul>li>p+p:last-child:empty{margin-bottom:0;margin-top:0}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt{color:var(--color-api-overall)}.sig:not(.sig-inline){background:var(--color-api-background);border-radius:.25rem;font-family:var(--font-stack--monospace);font-size:var(--api-font-size);font-weight:700;margin-left:-.25rem;margin-right:-.25rem;padding:.25rem .5rem .25rem 3em;text-indent:-2.5em;transition:background .1s ease-out}.sig:not(.sig-inline):hover{background:var(--color-api-background-hover)}.sig:not(.sig-inline) a.reference .viewcode-link{font-weight:400;width:4.25rem}em.property{font-style:normal}em.property:first-child{color:var(--color-api-keyword)}.sig-name{color:var(--color-api-name)}.sig-prename{color:var(--color-api-pre-name);font-weight:400}.sig-paren{color:var(--color-api-paren)}.sig-param{font-style:normal}div.deprecated,div.versionadded,div.versionchanged,div.versionremoved{border-left:.1875rem solid;border-radius:.125rem;padding-left:.75rem}div.deprecated p,div.versionadded p,div.versionchanged p,div.versionremoved p{margin-bottom:.125rem;margin-top:.125rem}div.versionadded{border-color:var(--color-api-added-border)}div.versionadded .versionmodified{color:var(--color-api-added)}div.versionchanged{border-color:var(--color-api-changed-border)}div.versionchanged .versionmodified{color:var(--color-api-changed)}div.deprecated{border-color:var(--color-api-deprecated-border)}div.deprecated .versionmodified{color:var(--color-api-deprecated)}div.versionremoved{border-color:var(--color-api-removed-border)}div.versionremoved .versionmodified{color:var(--color-api-removed)}.viewcode-back,.viewcode-link{float:right;text-align:right}.line-block{margin-bottom:.75rem;margin-top:.5rem}.line-block .line-block{margin-bottom:0;margin-top:0;padding-left:1rem}.code-block-caption,article p.caption,table>caption{font-size:var(--font-size--small);text-align:center}.toctree-wrapper.compound .caption,.toctree-wrapper.compound 
:not(.caption)>.caption-text{font-size:var(--font-size--small);margin-bottom:0;text-align:initial;text-transform:uppercase}.toctree-wrapper.compound>ul{margin-bottom:0;margin-top:0}.sig-inline,code.literal{background:var(--color-inline-code-background);border-radius:.2em;font-size:var(--font-size--small--2);padding:.1em .2em}pre.literal-block .sig-inline,pre.literal-block code.literal{font-size:inherit;padding:0}p .sig-inline,p code.literal{border:1px solid var(--color-background-border)}.sig-inline{font-family:var(--font-stack--monospace)}div[class*=" highlight-"],div[class^=highlight-]{display:flex;margin:1em 0}div[class*=" highlight-"] .table-wrapper,div[class^=highlight-] .table-wrapper,pre{margin:0;padding:0}pre{overflow:auto}article[role=main] .highlight pre{line-height:1.5}.highlight pre,pre.literal-block{font-size:var(--code-font-size);padding:.625rem .875rem}pre.literal-block{background-color:var(--color-code-background);border-radius:.2rem;color:var(--color-code-foreground);margin-bottom:1rem;margin-top:1rem}.highlight{border-radius:.2rem;width:100%}.highlight .gp,.highlight span.linenos{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none}.highlight .hll{display:block;margin-left:-.875rem;margin-right:-.875rem;padding-left:.875rem;padding-right:.875rem}.code-block-caption{background-color:var(--color-code-background);border-bottom:1px solid;border-radius:.25rem;border-bottom-left-radius:0;border-bottom-right-radius:0;border-color:var(--color-background-border);color:var(--color-code-foreground);display:flex;font-weight:300;padding:.625rem .875rem}.code-block-caption+div[class]{margin-top:0}.code-block-caption+div[class]>.highlight{border-top-left-radius:0;border-top-right-radius:0}.highlighttable{display:block;width:100%}.highlighttable tbody{display:block}.highlighttable tr{display:flex}.highlighttable 
td.linenos{background-color:var(--color-code-background);border-bottom-left-radius:.2rem;border-top-left-radius:.2rem;color:var(--color-code-foreground);padding:.625rem 0 .625rem .875rem}.highlighttable .linenodiv{box-shadow:-.0625rem 0 var(--color-foreground-border) inset;font-size:var(--code-font-size);padding-right:.875rem}.highlighttable td.code{display:block;flex:1;overflow:hidden;padding:0}.highlighttable td.code .highlight{border-bottom-left-radius:0;border-top-left-radius:0}.highlight span.linenos{box-shadow:-.0625rem 0 var(--color-foreground-border) inset;display:inline-block;margin-right:.875rem;padding-left:0;padding-right:.875rem}.footnote-reference{font-size:var(--font-size--small--4);vertical-align:super}dl.footnote.brackets{color:var(--color-foreground-secondary);display:grid;font-size:var(--font-size--small);grid-template-columns:max-content auto}dl.footnote.brackets dt{margin:0}dl.footnote.brackets dt>.fn-backref{margin-left:.25rem}dl.footnote.brackets dt:after{content:":"}dl.footnote.brackets dt .brackets:before{content:"["}dl.footnote.brackets dt .brackets:after{content:"]"}dl.footnote.brackets dd{margin:0;padding:0 1rem}aside.footnote{color:var(--color-foreground-secondary);font-size:var(--font-size--small)}aside.footnote>span,div.citation>span{float:left;font-weight:500;padding-right:.25rem}aside.footnote>:not(span),div.citation>p{margin-left:2rem}img{box-sizing:border-box;height:auto;max-width:100%}article .figure,article figure{border-radius:.2rem;margin:0}article .figure :last-child,article figure :last-child{margin-bottom:0}article .align-left{clear:left;float:left;margin:0 1rem 1rem}article .align-right{clear:right;float:right;margin:0 1rem 1rem}article .align-center,article .align-default{display:block;margin-left:auto;margin-right:auto;text-align:center}article table.align-default{display:table;text-align:initial}.domainindex-jumpbox,.genindex-jumpbox{border-bottom:1px solid var(--color-background-border);border-top:1px solid 
var(--color-background-border);padding:.25rem}.domainindex-section h2,.genindex-section h2{margin-bottom:.5rem;margin-top:.75rem}.domainindex-section ul,.genindex-section ul{margin-bottom:0;margin-top:0}ol,ul{margin-bottom:1rem;margin-top:1rem;padding-left:1.2rem}ol li>p:first-child,ul li>p:first-child{margin-bottom:.25rem;margin-top:.25rem}ol li>p:last-child,ul li>p:last-child{margin-top:.25rem}ol li>ol,ol li>ul,ul li>ol,ul li>ul{margin-bottom:.5rem;margin-top:.5rem}ol.arabic{list-style:decimal}ol.loweralpha{list-style:lower-alpha}ol.upperalpha{list-style:upper-alpha}ol.lowerroman{list-style:lower-roman}ol.upperroman{list-style:upper-roman}.simple li>ol,.simple li>ul,.toctree-wrapper li>ol,.toctree-wrapper li>ul{margin-bottom:0;margin-top:0}.field-list dt,.option-list dt,dl.footnote dt,dl.glossary dt,dl.simple dt,dl:not([class]) dt{font-weight:500;margin-top:.25rem}.field-list dt+dt,.option-list dt+dt,dl.footnote dt+dt,dl.glossary dt+dt,dl.simple dt+dt,dl:not([class]) dt+dt{margin-top:0}.field-list dt .classifier:before,.option-list dt .classifier:before,dl.footnote dt .classifier:before,dl.glossary dt .classifier:before,dl.simple dt .classifier:before,dl:not([class]) dt .classifier:before{content:":";margin-left:.2rem;margin-right:.2rem}.field-list dd ul,.field-list dd>p:first-child,.option-list dd ul,.option-list dd>p:first-child,dl.footnote dd ul,dl.footnote dd>p:first-child,dl.glossary dd ul,dl.glossary dd>p:first-child,dl.simple dd ul,dl.simple dd>p:first-child,dl:not([class]) dd ul,dl:not([class]) dd>p:first-child{margin-top:.125rem}.field-list dd ul,.option-list dd ul,dl.footnote dd ul,dl.glossary dd ul,dl.simple dd ul,dl:not([class]) dd ul{margin-bottom:.125rem}.math-wrapper{overflow-x:auto;width:100%}div.math{position:relative;text-align:center}div.math .headerlink,div.math:focus .headerlink{display:none}div.math:hover .headerlink{display:inline-block}div.math 
span.eqno{position:absolute;right:.5rem;top:50%;transform:translateY(-50%);z-index:1}abbr[title]{cursor:help}.problematic{color:var(--color-problematic)}kbd:not(.compound){background-color:var(--color-background-secondary);border:1px solid var(--color-foreground-border);border-radius:.2rem;box-shadow:0 .0625rem 0 rgba(0,0,0,.2),inset 0 0 0 .125rem var(--color-background-primary);color:var(--color-foreground-primary);display:inline-block;font-size:var(--font-size--small--3);margin:0 .2rem;padding:0 .2rem;vertical-align:text-bottom}blockquote{background:var(--color-background-secondary);border-left:4px solid var(--color-background-border);margin-left:0;margin-right:0;padding:.5rem 1rem}blockquote .attribution{font-weight:600;text-align:right}blockquote.highlights,blockquote.pull-quote{font-size:1.25em}blockquote.epigraph,blockquote.pull-quote{border-left-width:0;border-radius:.5rem}blockquote.highlights{background:transparent;border-left-width:0}p .reference img{vertical-align:middle}p.rubric{font-size:1.125em;font-weight:700;line-height:1.25}dd p.rubric{font-size:var(--font-size--small);font-weight:inherit;line-height:inherit;text-transform:uppercase}article .sidebar{background-color:var(--color-background-secondary);border:1px solid var(--color-background-border);border-radius:.2rem;clear:right;float:right;margin-left:1rem;margin-right:0;width:30%}article .sidebar>*{padding-left:1rem;padding-right:1rem}article .sidebar>ol,article .sidebar>ul{padding-left:2.2rem}article .sidebar .sidebar-title{border-bottom:1px solid var(--color-background-border);font-weight:500;margin:0;padding:.5rem 1rem}[role=main] .table-wrapper.container{margin-bottom:.5rem;margin-top:1rem;overflow-x:auto;padding:.2rem .2rem .75rem;width:100%}table.docutils{border-collapse:collapse;border-radius:.2rem;border-spacing:0;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1)}table.docutils th{background:var(--color-table-header-background)}table.docutils td,table.docutils 
th{border-bottom:1px solid var(--color-table-border);border-left:1px solid var(--color-table-border);border-right:1px solid var(--color-table-border);padding:0 .25rem}table.docutils td p,table.docutils th p{margin:.25rem}table.docutils td:first-child,table.docutils th:first-child{border-left:none}table.docutils td:last-child,table.docutils th:last-child{border-right:none}table.docutils td.text-left,table.docutils th.text-left{text-align:left}table.docutils td.text-right,table.docutils th.text-right{text-align:right}table.docutils td.text-center,table.docutils th.text-center{text-align:center}:target{scroll-margin-top:2.5rem}@media(max-width:67em){:target{scroll-margin-top:calc(2.5rem + var(--header-height))}section>span:target{scroll-margin-top:calc(2.8rem + var(--header-height))}}.headerlink{font-weight:100;-webkit-user-select:none;-moz-user-select:none;user-select:none}.code-block-caption>.headerlink,dl dt>.headerlink,figcaption p>.headerlink,h1>.headerlink,h2>.headerlink,h3>.headerlink,h4>.headerlink,h5>.headerlink,h6>.headerlink,p.caption>.headerlink,table>caption>.headerlink{margin-left:.5rem;visibility:hidden}.code-block-caption:hover>.headerlink,dl dt:hover>.headerlink,figcaption p:hover>.headerlink,h1:hover>.headerlink,h2:hover>.headerlink,h3:hover>.headerlink,h4:hover>.headerlink,h5:hover>.headerlink,h6:hover>.headerlink,p.caption:hover>.headerlink,table>caption:hover>.headerlink{visibility:visible}.code-block-caption>.toc-backref,dl dt>.toc-backref,figcaption 
p>.toc-backref,h1>.toc-backref,h2>.toc-backref,h3>.toc-backref,h4>.toc-backref,h5>.toc-backref,h6>.toc-backref,p.caption>.toc-backref,table>caption>.toc-backref{color:inherit;text-decoration-line:none}figure:hover>figcaption>p>.headerlink,table:hover>caption>.headerlink{visibility:visible}:target>h1:first-of-type,:target>h2:first-of-type,:target>h3:first-of-type,:target>h4:first-of-type,:target>h5:first-of-type,:target>h6:first-of-type,span:target~h1:first-of-type,span:target~h2:first-of-type,span:target~h3:first-of-type,span:target~h4:first-of-type,span:target~h5:first-of-type,span:target~h6:first-of-type{background-color:var(--color-highlight-on-target)}:target>h1:first-of-type code.literal,:target>h2:first-of-type code.literal,:target>h3:first-of-type code.literal,:target>h4:first-of-type code.literal,:target>h5:first-of-type code.literal,:target>h6:first-of-type code.literal,span:target~h1:first-of-type code.literal,span:target~h2:first-of-type code.literal,span:target~h3:first-of-type code.literal,span:target~h4:first-of-type code.literal,span:target~h5:first-of-type code.literal,span:target~h6:first-of-type code.literal{background-color:transparent}.literal-block-wrapper:target .code-block-caption,.this-will-duplicate-information-and-it-is-still-useful-here li :target,figure:target,table:target>caption{background-color:var(--color-highlight-on-target)}dt:target{background-color:var(--color-highlight-on-target)!important}.footnote-reference:target,.footnote>dt:target+dd{background-color:var(--color-highlight-on-target)}.guilabel{background-color:var(--color-guilabel-background);border:1px solid var(--color-guilabel-border);border-radius:.5em;color:var(--color-guilabel-text);font-size:.9em;padding:0 .3em}footer{display:flex;flex-direction:column;font-size:var(--font-size--small);margin-top:2rem}.bottom-of-page{align-items:center;border-top:1px solid 
var(--color-background-border);color:var(--color-foreground-secondary);display:flex;justify-content:space-between;line-height:1.5;margin-top:1rem;padding-bottom:1rem;padding-top:1rem}@media(max-width:46em){.bottom-of-page{flex-direction:column-reverse;gap:.25rem;text-align:center}}.bottom-of-page .left-details{font-size:var(--font-size--small)}.bottom-of-page .right-details{display:flex;flex-direction:column;gap:.25rem;text-align:right}.bottom-of-page .icons{display:flex;font-size:1rem;gap:.25rem;justify-content:flex-end}.bottom-of-page .icons a{text-decoration:none}.bottom-of-page .icons img,.bottom-of-page .icons svg{font-size:1.125rem;height:1em;width:1em}.related-pages a{align-items:center;display:flex;text-decoration:none}.related-pages a:hover .page-info .title{color:var(--color-link);text-decoration:underline;text-decoration-color:var(--color-link-underline)}.related-pages a svg.furo-related-icon,.related-pages a svg.furo-related-icon>use{color:var(--color-foreground-border);flex-shrink:0;height:.75rem;margin:0 .5rem;width:.75rem}.related-pages a.next-page{clear:right;float:right;max-width:50%;text-align:right}.related-pages a.prev-page{clear:left;float:left;max-width:50%}.related-pages a.prev-page svg{transform:rotate(180deg)}.page-info{display:flex;flex-direction:column;overflow-wrap:anywhere}.next-page .page-info{align-items:flex-end}.page-info .context{align-items:center;color:var(--color-foreground-muted);display:flex;font-size:var(--font-size--small);padding-bottom:.1rem;text-decoration:none}ul.search{list-style:none;padding-left:0}ul.search li{border-bottom:1px solid var(--color-background-border);padding:1rem 0}[role=main] .highlighted{background-color:var(--color-highlighted-background);color:var(--color-highlighted-text)}.sidebar-brand{display:flex;flex-direction:column;flex-shrink:0;padding:var(--sidebar-item-spacing-vertical) 
var(--sidebar-item-spacing-horizontal);text-decoration:none}.sidebar-brand-text{color:var(--color-sidebar-brand-text);font-size:1.5rem;overflow-wrap:break-word}.sidebar-brand-text,.sidebar-logo-container{margin:var(--sidebar-item-spacing-vertical) 0}.sidebar-logo{display:block;margin:0 auto;max-width:100%}.sidebar-search-container{align-items:center;background:var(--color-sidebar-search-background);display:flex;margin-top:var(--sidebar-search-space-above);position:relative}.sidebar-search-container:focus-within,.sidebar-search-container:hover{background:var(--color-sidebar-search-background--focus)}.sidebar-search-container:before{background-color:var(--color-sidebar-search-icon);content:"";height:var(--sidebar-search-icon-size);left:var(--sidebar-item-spacing-horizontal);-webkit-mask-image:var(--icon-search);mask-image:var(--icon-search);position:absolute;width:var(--sidebar-search-icon-size)}.sidebar-search{background:transparent;border:none;border-bottom:1px solid var(--color-sidebar-search-border);border-top:1px solid var(--color-sidebar-search-border);box-sizing:border-box;color:var(--color-sidebar-search-foreground);padding:var(--sidebar-search-input-spacing-vertical) var(--sidebar-search-input-spacing-horizontal) var(--sidebar-search-input-spacing-vertical) calc(var(--sidebar-item-spacing-horizontal) + var(--sidebar-search-input-spacing-horizontal) + var(--sidebar-search-icon-size));width:100%;z-index:10}.sidebar-search:focus{outline:none}.sidebar-search::-moz-placeholder{font-size:var(--sidebar-search-input-font-size)}.sidebar-search::placeholder{font-size:var(--sidebar-search-input-font-size)}#searchbox .highlight-link{margin:0;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal) 0;text-align:center}#searchbox .highlight-link 
a{color:var(--color-sidebar-search-icon);font-size:var(--font-size--small--2)}.sidebar-tree{font-size:var(--sidebar-item-font-size);margin-bottom:var(--sidebar-item-spacing-vertical);margin-top:var(--sidebar-tree-space-above)}.sidebar-tree ul{display:flex;flex-direction:column;list-style:none;margin-bottom:0;margin-top:0;padding:0}.sidebar-tree li{margin:0;position:relative}.sidebar-tree li>ul{margin-left:var(--sidebar-item-spacing-horizontal)}.sidebar-tree .icon,.sidebar-tree .reference{color:var(--color-sidebar-link-text)}.sidebar-tree .reference{box-sizing:border-box;display:inline-block;height:100%;line-height:var(--sidebar-item-line-height);overflow-wrap:anywhere;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal);text-decoration:none;width:100%}.sidebar-tree .reference:hover{background:var(--color-sidebar-item-background--hover);color:var(--color-sidebar-link-text)}.sidebar-tree .reference.external:after{color:var(--color-sidebar-link-text);content:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' fill='none' stroke='%23607d8b' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' viewBox='0 0 24 24'%3E%3Cpath stroke='none' d='M0 0h24v24H0z'/%3E%3Cpath d='M11 7H6a2 2 0 0 0-2 2v9a2 2 0 0 0 2 2h9a2 2 0 0 0 2-2v-5M10 14 20 4M15 4h5v5'/%3E%3C/svg%3E");margin:0 .25rem;vertical-align:middle}.sidebar-tree .current-page>.reference{font-weight:700}.sidebar-tree label{align-items:center;cursor:pointer;display:flex;height:var(--sidebar-item-height);justify-content:center;position:absolute;right:0;top:0;-webkit-user-select:none;-moz-user-select:none;user-select:none;width:var(--sidebar-expander-width)}.sidebar-tree .caption,.sidebar-tree :not(.caption)>.caption-text{color:var(--color-sidebar-caption-text);font-size:var(--sidebar-caption-font-size);font-weight:700;margin:var(--sidebar-caption-space-above) 0 0 0;padding:var(--sidebar-item-spacing-vertical) 
var(--sidebar-item-spacing-horizontal);text-transform:uppercase}.sidebar-tree li.has-children>.reference{padding-right:var(--sidebar-expander-width)}.sidebar-tree .toctree-l1>.reference,.sidebar-tree .toctree-l1>label .icon{color:var(--color-sidebar-link-text--top-level)}.sidebar-tree label{background:var(--color-sidebar-item-expander-background)}.sidebar-tree label:hover{background:var(--color-sidebar-item-expander-background--hover)}.sidebar-tree .current>.reference{background:var(--color-sidebar-item-background--current)}.sidebar-tree .current>.reference:hover{background:var(--color-sidebar-item-background--hover)}.toctree-checkbox{display:none;position:absolute}.toctree-checkbox~ul{display:none}.toctree-checkbox~label .icon svg{transform:rotate(90deg)}.toctree-checkbox:checked~ul{display:block}.toctree-checkbox:checked~label .icon svg{transform:rotate(-90deg)}.toc-title-container{padding:var(--toc-title-padding);padding-top:var(--toc-spacing-vertical)}.toc-title{color:var(--color-toc-title-text);font-size:var(--toc-title-font-size);padding-left:var(--toc-spacing-horizontal);text-transform:uppercase}.no-toc{display:none}.toc-tree-container{padding-bottom:var(--toc-spacing-vertical)}.toc-tree{border-left:1px solid var(--color-background-border);font-size:var(--toc-font-size);line-height:1.3;padding-left:calc(var(--toc-spacing-horizontal) - var(--toc-item-spacing-horizontal))}.toc-tree>ul>li:first-child{padding-top:0}.toc-tree>ul>li:first-child>ul{padding-left:0}.toc-tree>ul>li:first-child>a{display:none}.toc-tree ul{list-style-type:none;margin-bottom:0;margin-top:0;padding-left:var(--toc-item-spacing-horizontal)}.toc-tree li{padding-top:var(--toc-item-spacing-vertical)}.toc-tree li.scroll-current>.reference{color:var(--color-toc-item-text--active);font-weight:700}.toc-tree 
a.reference{color:var(--color-toc-item-text);overflow-wrap:anywhere;text-decoration:none}.toc-scroll{max-height:100vh;overflow-y:scroll}.contents:not(.this-will-duplicate-information-and-it-is-still-useful-here){background:rgba(255,0,0,.25);color:var(--color-problematic)}.contents:not(.this-will-duplicate-information-and-it-is-still-useful-here):before{content:"ERROR: Adding a table of contents in Furo-based documentation is unnecessary, and does not work well with existing styling. Add a 'this-will-duplicate-information-and-it-is-still-useful-here' class, if you want an escape hatch."}.text-align\:left>p{text-align:left}.text-align\:center>p{text-align:center}.text-align\:right>p{text-align:right} +/*# sourceMappingURL=furo.css.map*/ \ No newline at end of file diff --git a/docs/_static/styles/furo.css.map b/docs/_static/styles/furo.css.map new file mode 100644 index 00000000..280b3fef --- /dev/null +++ b/docs/_static/styles/furo.css.map @@ -0,0 +1 @@ +{"version":3,"file":"styles/furo.css","mappings":"AAAA,2EAA2E,CAU3E,KACE,gBAAiB,CACjB,6BACF,CASA,KACE,QACF,CAMA,KACE,aACF,CAOA,GACE,aAAc,CACd,cACF,CAUA,GACE,sBAAuB,CACvB,QAAS,CACT,gBACF,CAOA,IACE,+BAAiC,CACjC,aACF,CASA,EACE,4BACF,CAOA,YACE,kBAAmB,CACnB,yBAA0B,CAC1B,gCACF,CAMA,SAEE,kBACF,CAOA,cAGE,+BAAiC,CACjC,aACF,CAeA,QAEE,aAAc,CACd,aAAc,CACd,iBAAkB,CAClB,uBACF,CAEA,IACE,aACF,CAEA,IACE,SACF,CASA,IACE,iBACF,CAUA,sCAKE,mBAAoB,CACpB,cAAe,CACf,gBAAiB,CACjB,QACF,CAOA,aAEE,gBACF,CAOA,cAEE,mBACF,CAMA,gDAIE,yBACF,CAMA,wHAIE,iBAAkB,CAClB,SACF,CAMA,4GAIE,6BACF,CAMA,SACE,0BACF,CASA,OACE,qBAAsB,CACtB,aAAc,CACd,aAAc,CACd,cAAe,CACf,SAAU,CACV,kBACF,CAMA,SACE,uBACF,CAMA,SACE,aACF,CAOA,6BAEE,qBAAsB,CACtB,SACF,CAMA,kFAEE,WACF,CAOA,cACE,4BAA6B,CAC7B,mBACF,CAMA,yCACE,uBACF,CAOA,6BACE,yBAA0B,CAC1B,YACF,CASA,QACE,aACF,CAMA,QACE,iBACF,CAiBA,kBACE,YACF,CCvVA,aAcE,kEACE,uBAOF,WACE,iDAMF,kCACE,wBAEF,qCAEE,uBADA,uBACA,CAEF,SACE,wBAtBA,CCpBJ,iBAGE,qBAEA,sBACA,0BAFA,oBAHA,4BACA,oBAKA,6BAIA,2CAFA,mBACA,sCAFA,4BAGA,CAEF,gBACE,aCPF,KCCE,mHAGA,wGAG
A,wCAAyC,CAEzC,wBAAyB,CACzB,wBAAyB,CACzB,4BAA6B,CAC7B,yBAA0B,CAC1B,2BAA4B,CAG5B,sDAAuD,CACvD,gDAAiD,CACjD,wDAAyD,CAGzD,0CAA2C,CAC3C,gDAAiD,CACjD,gDAAiD,CAKjD,gCAAiC,CACjC,sCAAuC,CAGvC,2CAA4C,CAG5C,uCAAwC,CCnCxC,+FAIA,uBAAwB,CAGxB,iCAAkC,CAClC,kCAAmC,CAEnC,+BAAgC,CAChC,sCAAuC,CACvC,sCAAuC,CACvC,qGAIA,mDAAoD,CAEpD,mCAAoC,CACpC,8CAA+C,CAC/C,gDAAiD,CACjD,kCAAmC,CACnC,6DAA8D,CAG9D,6BAA8B,CAC9B,6BAA8B,CAC9B,+BAAgC,CAChC,kCAAmC,CACnC,kCAAmC,CCRjC,+jBCaA,iqCAZF,iaCXA,8KAOA,4SAWA,4SAUA,0CACA,gEAGA,0CAGA,gEAGA,yCACA,+DAIA,4CACA,kEAGA,wCAUA,8DACA,uCAGA,4DACA,sCACA,2DAGA,4CACA,kEACA,uCAGA,6DACA,2GAGA,sHAEA,yFAEA,+CACA,+EAGA,4MAOA,gCACA,sHAIA,kCACA,uEACA,gEACA,4DACA,kEAGA,2DACA,sDACA,0CACA,8CACA,wGAGA,0BACA,iCAGA,+DACA,+BACA,sCACA,+DAEA,kGACA,oCACA,yDACA,sCL3HF,kCAEA,sDAIA,0CKyHE,kEAIA,oDACA,sDAGA,oCACA,oEAEA,0DACA,qDAIA,oDACA,6DAIA,iEAIA,2DAIA,2DAGA,4DACA,gEAIA,gEAEA,gFAEA,oNASA,qDLtKE,gFAGE,4DAIF,oEKgHF,yEAEA,6DAGA,0DAEA,uDACA,qDACA,wDAIA,6DAIA,yDACA,2DAIA,uCAGA,wCACA,sDAGA,+CAGA,6DAEA,iDACA,+DAEA,wDAEA,sEAMA,0DACA,sBACA,mEL5JI,wEAEA,iCACE,+BAMN,wEAGA,iCACE,kFAEA,uEAIF,gEACE,8BAGF,qEMzDA,sCAKA,wFAKA,iCAIA,0BAWA,iCACA,4BACA,mCAGA,+BAEA,sCACA,4BAEA,mCAEA,sCAKA,sDAIA,gCAEA,gEAQF,wCAME,sBACA,kCAKA,uBAEA,gEAIA,2BAIA,mCAEA,qCACA,iCAGE,+BACA,wEAEE,iCACA,kFAGF,6BACA,0CACF,kCAEE,8BACE,8BACA,qEAEE,sCACA,wFClFN,iCAGF,2DACE,4BACA,oCAKF,8BAGE,sCACA,+DAIA,sCAEA,sDAGA,gCACA,gEAGA,+CAEA,sBACE,yCAGF,uBACA,sEAIA,aAEA,mCAIA,kEACA,aACA,oEACA,YAIA,EAQE,4HAGA,gDACE,mBACA,wCAON,wCAGE,0DACA,mBAKA,mBACA,CANA,uCAKA,iBALA,iBAWA,mBAGF,mBACE,mDAIF,+BAEE,CAEA,yBAFA,kBAMA,CAJA,GACA,aAGA,mBAEF,wBAEE,iBACA,iBAEA,OACA,aAGF,CAHE,WAGF,GAEE,oBAEA,CAJF,gBAIE,aAEA,+CAKA,UANA,WACA,cADA,SAMA,WACA,iBAEE,GAMF,wBANE,yBAMF,kDACA,WAEA,gCACA,2DAGA,iBACE,uCAEJ,kEAIE,uCAGA,yDACE,cACA,+DAEA,yDAEE,mEAMJ,kEAMA,uBACA,kBAEA,uBACA,kDAKA,0DAIA,CALA,oBAKA,WACA,WAQA,4BAFF,0CAEE,CARA,qCAsBA,CAdA,iBAEA,kBACE,aADF,4BACE,WAMF,2BAGF,qCAEE,CAXE,UAWF,+BAGA,uBAEA,SAEA,0CAIE,CANF,qCAEA,CAIE,2DACE,gBAIN,+CAIA,CAEA,kDAKE,CAPF,8BAEA,CAOE,YACA,CA
jBI,2BAGN,CAHM,WAcJ,UAGA,CAEA,2GAIF,iCAGE,8BAIA,qBACA,oBACF,uBAOI,0CAIA,CATF,6DAKE,CALF,sBASE,qCAKF,CACE,cACA,CAFF,sBAEE,CACA,+BAEA,qBAEE,WAKN,aACE,sCAGA,mBAEA,6BAMA,kCACA,CAJA,sBACA,aAEA,CAJA,eACA,MAIA,2FAEA,UAGA,YACA,sBACE,8BAEA,CALF,aACA,WAIE,OACA,oBAEF,uBACE,WAEF,YAFE,UAEF,eAgBA,kBACE,CAhBA,qDAQF,qCAGF,CAGI,YACF,CAJF,2BAGI,CAEA,eACA,qBAGA,mEAEA,qBACA,8BAIA,kBADF,kBACE,yBAEJ,oCAGI,qDAIJ,+BAGI,oCAEA,+CAQF,4CACE,yBACF,2BAOE,sBACA,CAHA,WACA,CAFF,cACE,CAJA,YAGF,CAEE,SAEA,mBAGA,kDAEE,CAJF,cAEA,cAEE,sBAEA,mBADA,YACA,uBACA,mDACE,CADF,YACE,iDAEA,uCAEN,+DAOE,mBADF,sBACE,mBAGF,aACE,sCAIA,aADF,WACE,CAKF,SACE,CAHJ,kBAEE,CAJE,gBAEJ,CAHI,iBAMA,yFAKA,aACA,eACA,cCxaJ,iBAEE,aADA,iBACA,6BAEA,kCAEA,SACA,UAIA,gCACA,CALA,SAEA,SAEA,CAJA,wEAEA,CAFA,OAKA,CAGA,mDACE,iBAGF,gCACE,CADF,UACE,aAEJ,iCAEE,CAFF,UAEE,wCAEA,WACA,WADA,UACA,CACA,4CAGA,MACA,CADA,KACA,wCACA,UAGA,CAJA,UAIA,6DAUA,0CACE,CAFF,mBAEE,wEACA,CAVA,YACA,CAMF,mBAJE,OAOA,gBAJJ,gCACE,CANE,cACA,CAHA,oBACA,CAGA,QAGJ,CAII,0BACA,CADA,UACA,wCAEJ,kBACE,0DACA,gCACE,kBACA,CADA,YACA,oEACA,2CAMF,mDAII,CALN,YACE,CANE,cAKJ,CACE,iBAII,kEACA,yCACE,kDACA,yDACE,+CACA,uBANN,CAMM,+BANN,uCACE,qDACA,4BAEE,mBADA,0CACA,CADA,qBACA,0DACE,wCACA,sGALJ,oCACA,sBACE,kBAFF,UAEE,2CACA,wFACE,cACA,kEANN,uBACE,iDACA,CADA,UACA,0DACE,wDAEE,iEACA,qEANN,sCACE,CAGE,iBAHF,gBAGE,qBACE,CAJJ,uBACA,gDACE,wDACA,6DAHF,2CACA,CADA,gBACA,eACE,CAGE,sBANN,8BACE,CAII,iBAFF,4DACA,WACE,YADF,uCACE,6EACA,2BANN,8CACE,kDACA,0CACE,8BACA,yFACE,sBACA,sFALJ,mEACA,sBACE,kEACA,6EACE,uCACA,kEALJ,qGAEE,kEACA,6EACE,uCACA,kEALJ,8CACA,uDACE,sEACA,2EACE,sCACA,iEALJ,mGACA,qCACE,oDACA,0DACE,6GACA,gDAGR,yDCvEA,sEACE,CACA,6GACE,gEACF,iGAIF,wFACE,qDAGA,mGAEE,2CAEF,4FACE,gCACF,wGACE,8DAEE,6FAIA,iJAKN,6GACE,gDAKF,yDACA,qCAGA,6BACA,kBACA,qDAKA,oCAEA,+DAGA,2CAGE,oDAIA,oEAEE,qBAGJ,wDAEE,uCAEF,kEAGA,8CAEA,uDAIF,gEAIE,6BACA,gEAIA,+CACE,0EAIF,sDAEE,+DAGF,sCACA,8BACE,oCAEJ,wBACE,4FAEE,gBAEJ,yGAGI,kBAGJ,CCnHE,2MCFF,oBAGE,wGAKA,iCACE,CADF,wBACE,8GAQA,mBCjBJ,2GAIE,mBACA,6HAMA,YACE,mIAYF,eACA,CAHF,YAGE,4FAGE,8BAKF,uB
AkBE,sCACA,CADA,qBAbA,wCAIA,CALF,8BACE,CADF,gBAKE,wCACA,CAOA,kDACA,CACA,kCAKF,6BAGA,4CACE,kDACA,eAGF,cACE,aACA,iBACA,yBACA,8BACA,WAGJ,2BACE,cAGA,+BACA,CAHA,eAGA,wCACA,YACA,iBACA,uEAGA,0BACA,2CAEA,8EAGI,qBACA,CAFF,kBAEE,4DAMJ,mCACE,4BAGA,oBAGF,4CACE,qCACA,8BACA,gBACA,+CAEA,iCAEF,iCACE,oBACA,4CACA,qCAGF,8BAEE,+BAEA,WAEA,8BACE,oBACA,CADA,gBACA,yBAKF,gBADF,YACE,CACA,iBACA,qDAEA,mDCvIJ,2FAMA,iCACE,CACA,eAEA,CAFA,mBADA,wBAIA,8BACA,gBADA,YACA,0BAEE,8CAGA,wDAIE,gFAGE,iBAEN,wCAKF,+CACE,CACA,oDAEF,kDAIE,YAEF,CAHE,YAGF,CCpCE,mFAFA,QACA,UAIA,CAHA,IAGA,gDAGE,eACA,iEAGF,wBAEE,mBAMA,6CAEF,CAJE,mBACA,CAGF,kCAGE,CARF,kBACE,CAHA,eAUA,YACA,mBACA,CAFA,UAEA,wCC/BJ,mBACE,CDkCE,wBACA,sBCpCJ,iBACE,mDACA,2CACA,sBAGA,qBCDA,6CAIE,CATJ,uBAKE,CDGE,oBACF,yDAEE,CCDE,2CAGF,CAJA,kCACE,CDJJ,aAKE,eCXJ,CDME,uBCOE,gCACE,YAEF,2CAEE,wBACA,0BAIF,iBAEA,cADF,UACE,uBAEA,iCAEA,wCAEA,6CAMA,CAYF,gCATI,4BASJ,CAZE,mCAEE,iCAUJ,4BAGE,4DADA,+BACA,CAHF,qBAGE,sCACE,OAEF,iBAHA,SAGA,iHACE,2DAKF,CANA,8EAMA,uSAEE,kBAEF,+FACE,yCCjEJ,WACA,yBAGA,uBACA,gBAEA,uCAIA,CAJA,iCAIA,uCAGA,UACE,gBACA,qBAEA,0CClBJ,gBACE,KAGF,qBACE,YAGF,CAHE,cAGF,gCAEE,mBACA,iEAEA,oCACA,wCAEA,sBACA,WAEA,CAFA,YAEA,8EAEA,mCAFA,iBAEA,6BAIA,wEAKA,sDAIE,CARF,mDAIA,CAIE,cAEF,8CAIA,oBAFE,iBAEF,8CAGE,eAEF,CAFE,YAEF,OAEE,kBAGJ,CAJI,eACA,CAFF,mBAKF,yCCjDE,oBACA,CAFA,iBAEA,uCAKE,iBACA,qCAGA,mBCZJ,CDWI,gBCXJ,6BAEE,eACA,sBAGA,eAEA,sBACA,oDACA,iGAMA,gBAFE,YAEF,8FAME,iJCnBF,YACA,gNAWE,gDAEF,iSAaE,kBACE,gHAKF,oCACE,eACF,CADE,UACF,8CACE,gDACF,wCACE,oBCtCJ,oBAEF,6BACE,QACE,kDAGF,yBACE,kDAmBA,kDAEF,CAhBA,+CAaA,CAbA,oBAaA,0FACE,CADF,gGAfF,cACE,gBACA,CAaA,0BAGA,mQACE,gBAGF,oMACE,iBACA,CAFF,eACE,CADF,gBAEE,aAGJ,iCAEE,CAFF,wCAEE,wBAUE,+VAIE,uEAHA,2BAGA,wXAKJ,iDAGF,CARM,+CACE,iDAIN,CALI,gBAQN,mHACE,gBAGF,2DACE,0EAOA,0EAGF,gBAEE,6DCjFA,kDACA,gCACA,qDAGA,qBACA,qDCDA,cACA,eAEA,yBAGF,sBAEE,iBACA,sNAWA,iBACE,kBACA,wRAgBA,kBAEA,iOAgBA,uCACE,uEAEA,kBAEF,qUAuBE,iDAIJ,CACA,geCzFF,4BAEE,CAQA,6JACA,iDAIA,sEAGA,mDAOF,iDAGE,4DAIA,8CACA,qDAEE,eAFF,cAEE,oBAEF,uBAFE,kCAGA,eACA,iBACA,mBAIA,mDA
CA,CAHA,uCAEA,CAJA,0CACA,CAIA,gBAJA,gBACA,oBADA,gBAIA,wBAEJ,gBAGE,6BACA,YAHA,iBAGA,gCACA,iEAEA,6CACA,sDACA,0BADA,wBACA,0BACA,oIAIA,mBAFA,YAEA,qBACA,0CAIE,uBAEF,CAHA,yBACE,CAEF,iDACE,mFAKJ,oCACE,CANE,aAKJ,CACE,qEAIA,YAFA,WAEA,CAHA,aACA,CAEA,gBACE,4BACA,sBADA,aACA,gCAMF,oCACA,yDACA,2CAEA,qBAGE,kBAEA,CACA,mCAIF,CARE,YACA,CAOF,iCAEE,CAPA,oBACA,CAQA,oBACE,uDAEJ,sDAGA,CAHA,cAGA,0BACE,oDAIA,oCACA,4BACA,sBAGA,cAEA,oFAGA,sBAEA,yDACE,CAIF,iBAJE,wBAIF,6CAHE,6CAKA,eACA,aACA,CADA,cACA,yCAGJ,kBACE,CAKA,iDAEA,CARF,aACE,4CAGA,kBAIA,wEAGA,wDAGA,kCAOA,iDAGA,CAPF,WAEE,sCAEA,CAJF,2CACE,CAMA,qCACA,+BARF,kBACE,qCAOA,iBAsBA,sBACE,CAvBF,WAKA,CACE,0DAIF,CALA,uDACE,CANF,sBAqBA,4CACA,CALA,gRAIA,YAEE,6CAEN,mCAEE,+CASA,6EAIA,4BChNA,SDmNA,qFCnNA,gDACA,sCAGA,qCACA,sDACA,CAKA,kDAGA,CARA,0CAQA,kBAGA,YACA,sBACA,iBAFA,gBADF,YACE,CAHA,SAKA,kBAEA,SAFA,iBAEA,uEAGA,CAEE,6CAFF,oCAgBI,CAdF,yBACE,qBACF,CAGF,oBACE,CAIF,WACE,CALA,2CAGA,uBACF,CACE,mFAGE,CALF,qBAEA,UAGE,gCAIF,sDAEA,CALE,oCAKF,yCC7CJ,oCACE,CD+CA,yXAQE,sCCrDJ,wCAGA,oCACE","sources":["webpack:///./node_modules/normalize.css/normalize.css","webpack:///./src/furo/assets/styles/base/_print.sass","webpack:///./src/furo/assets/styles/base/_screen-readers.sass","webpack:///./src/furo/assets/styles/base/_theme.sass","webpack:///./src/furo/assets/styles/variables/_fonts.scss","webpack:///./src/furo/assets/styles/variables/_spacing.scss","webpack:///./src/furo/assets/styles/variables/_icons.scss","webpack:///./src/furo/assets/styles/variables/_admonitions.scss","webpack:///./src/furo/assets/styles/variables/_colors.scss","webpack:///./src/furo/assets/styles/base/_typography.sass","webpack:///./src/furo/assets/styles/_scaffold.sass","webpack:///./src/furo/assets/styles/content/_admonitions.sass","webpack:///./src/furo/assets/styles/content/_api.sass","webpack:///./src/furo/assets/styles/content/_blocks.sass","webpack:///./src/furo/assets/styles/content/_captions.sass","webpack:///./src/furo/assets/styles/content/_code.sass","webpack:///./src/furo/assets/sty
les/content/_footnotes.sass","webpack:///./src/furo/assets/styles/content/_images.sass","webpack:///./src/furo/assets/styles/content/_indexes.sass","webpack:///./src/furo/assets/styles/content/_lists.sass","webpack:///./src/furo/assets/styles/content/_math.sass","webpack:///./src/furo/assets/styles/content/_misc.sass","webpack:///./src/furo/assets/styles/content/_rubrics.sass","webpack:///./src/furo/assets/styles/content/_sidebar.sass","webpack:///./src/furo/assets/styles/content/_tables.sass","webpack:///./src/furo/assets/styles/content/_target.sass","webpack:///./src/furo/assets/styles/content/_gui-labels.sass","webpack:///./src/furo/assets/styles/components/_footer.sass","webpack:///./src/furo/assets/styles/components/_sidebar.sass","webpack:///./src/furo/assets/styles/components/_table_of_contents.sass","webpack:///./src/furo/assets/styles/_shame.sass"],"sourcesContent":["/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */\n\n/* Document\n ========================================================================== */\n\n/**\n * 1. Correct the line height in all browsers.\n * 2. Prevent adjustments of font size after orientation changes in iOS.\n */\n\nhtml {\n line-height: 1.15; /* 1 */\n -webkit-text-size-adjust: 100%; /* 2 */\n}\n\n/* Sections\n ========================================================================== */\n\n/**\n * Remove the margin in all browsers.\n */\n\nbody {\n margin: 0;\n}\n\n/**\n * Render the `main` element consistently in IE.\n */\n\nmain {\n display: block;\n}\n\n/**\n * Correct the font size and margin on `h1` elements within `section` and\n * `article` contexts in Chrome, Firefox, and Safari.\n */\n\nh1 {\n font-size: 2em;\n margin: 0.67em 0;\n}\n\n/* Grouping content\n ========================================================================== */\n\n/**\n * 1. Add the correct box sizing in Firefox.\n * 2. 
Show the overflow in Edge and IE.\n */\n\nhr {\n box-sizing: content-box; /* 1 */\n height: 0; /* 1 */\n overflow: visible; /* 2 */\n}\n\n/**\n * 1. Correct the inheritance and scaling of font size in all browsers.\n * 2. Correct the odd `em` font sizing in all browsers.\n */\n\npre {\n font-family: monospace, monospace; /* 1 */\n font-size: 1em; /* 2 */\n}\n\n/* Text-level semantics\n ========================================================================== */\n\n/**\n * Remove the gray background on active links in IE 10.\n */\n\na {\n background-color: transparent;\n}\n\n/**\n * 1. Remove the bottom border in Chrome 57-\n * 2. Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari.\n */\n\nabbr[title] {\n border-bottom: none; /* 1 */\n text-decoration: underline; /* 2 */\n text-decoration: underline dotted; /* 2 */\n}\n\n/**\n * Add the correct font weight in Chrome, Edge, and Safari.\n */\n\nb,\nstrong {\n font-weight: bolder;\n}\n\n/**\n * 1. Correct the inheritance and scaling of font size in all browsers.\n * 2. Correct the odd `em` font sizing in all browsers.\n */\n\ncode,\nkbd,\nsamp {\n font-family: monospace, monospace; /* 1 */\n font-size: 1em; /* 2 */\n}\n\n/**\n * Add the correct font size in all browsers.\n */\n\nsmall {\n font-size: 80%;\n}\n\n/**\n * Prevent `sub` and `sup` elements from affecting the line height in\n * all browsers.\n */\n\nsub,\nsup {\n font-size: 75%;\n line-height: 0;\n position: relative;\n vertical-align: baseline;\n}\n\nsub {\n bottom: -0.25em;\n}\n\nsup {\n top: -0.5em;\n}\n\n/* Embedded content\n ========================================================================== */\n\n/**\n * Remove the border on images inside links in IE 10.\n */\n\nimg {\n border-style: none;\n}\n\n/* Forms\n ========================================================================== */\n\n/**\n * 1. Change the font styles in all browsers.\n * 2. 
Remove the margin in Firefox and Safari.\n */\n\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n font-family: inherit; /* 1 */\n font-size: 100%; /* 1 */\n line-height: 1.15; /* 1 */\n margin: 0; /* 2 */\n}\n\n/**\n * Show the overflow in IE.\n * 1. Show the overflow in Edge.\n */\n\nbutton,\ninput { /* 1 */\n overflow: visible;\n}\n\n/**\n * Remove the inheritance of text transform in Edge, Firefox, and IE.\n * 1. Remove the inheritance of text transform in Firefox.\n */\n\nbutton,\nselect { /* 1 */\n text-transform: none;\n}\n\n/**\n * Correct the inability to style clickable types in iOS and Safari.\n */\n\nbutton,\n[type=\"button\"],\n[type=\"reset\"],\n[type=\"submit\"] {\n -webkit-appearance: button;\n}\n\n/**\n * Remove the inner border and padding in Firefox.\n */\n\nbutton::-moz-focus-inner,\n[type=\"button\"]::-moz-focus-inner,\n[type=\"reset\"]::-moz-focus-inner,\n[type=\"submit\"]::-moz-focus-inner {\n border-style: none;\n padding: 0;\n}\n\n/**\n * Restore the focus styles unset by the previous rule.\n */\n\nbutton:-moz-focusring,\n[type=\"button\"]:-moz-focusring,\n[type=\"reset\"]:-moz-focusring,\n[type=\"submit\"]:-moz-focusring {\n outline: 1px dotted ButtonText;\n}\n\n/**\n * Correct the padding in Firefox.\n */\n\nfieldset {\n padding: 0.35em 0.75em 0.625em;\n}\n\n/**\n * 1. Correct the text wrapping in Edge and IE.\n * 2. Correct the color inheritance from `fieldset` elements in IE.\n * 3. Remove the padding so developers are not caught out when they zero out\n * `fieldset` elements in all browsers.\n */\n\nlegend {\n box-sizing: border-box; /* 1 */\n color: inherit; /* 2 */\n display: table; /* 1 */\n max-width: 100%; /* 1 */\n padding: 0; /* 3 */\n white-space: normal; /* 1 */\n}\n\n/**\n * Add the correct vertical alignment in Chrome, Firefox, and Opera.\n */\n\nprogress {\n vertical-align: baseline;\n}\n\n/**\n * Remove the default vertical scrollbar in IE 10+.\n */\n\ntextarea {\n overflow: auto;\n}\n\n/**\n * 1. 
Add the correct box sizing in IE 10.\n * 2. Remove the padding in IE 10.\n */\n\n[type=\"checkbox\"],\n[type=\"radio\"] {\n box-sizing: border-box; /* 1 */\n padding: 0; /* 2 */\n}\n\n/**\n * Correct the cursor style of increment and decrement buttons in Chrome.\n */\n\n[type=\"number\"]::-webkit-inner-spin-button,\n[type=\"number\"]::-webkit-outer-spin-button {\n height: auto;\n}\n\n/**\n * 1. Correct the odd appearance in Chrome and Safari.\n * 2. Correct the outline style in Safari.\n */\n\n[type=\"search\"] {\n -webkit-appearance: textfield; /* 1 */\n outline-offset: -2px; /* 2 */\n}\n\n/**\n * Remove the inner padding in Chrome and Safari on macOS.\n */\n\n[type=\"search\"]::-webkit-search-decoration {\n -webkit-appearance: none;\n}\n\n/**\n * 1. Correct the inability to style clickable types in iOS and Safari.\n * 2. Change font properties to `inherit` in Safari.\n */\n\n::-webkit-file-upload-button {\n -webkit-appearance: button; /* 1 */\n font: inherit; /* 2 */\n}\n\n/* Interactive\n ========================================================================== */\n\n/*\n * Add the correct display in Edge, IE 10+, and Firefox.\n */\n\ndetails {\n display: block;\n}\n\n/*\n * Add the correct display in all browsers.\n */\n\nsummary {\n display: list-item;\n}\n\n/* Misc\n ========================================================================== */\n\n/**\n * Add the correct display in IE 10+.\n */\n\ntemplate {\n display: none;\n}\n\n/**\n * Add the correct display in IE 10.\n */\n\n[hidden] {\n display: none;\n}\n","// This file contains styles for managing print media.\n\n////////////////////////////////////////////////////////////////////////////////\n// Hide elements not relevant to print media.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n // Hide icon container.\n .content-icon-container\n display: none !important\n\n // Hide showing header links if hovering over when printing.\n .headerlink\n display: 
none !important\n\n // Hide mobile header.\n .mobile-header\n display: none !important\n\n // Hide navigation links.\n .related-pages\n display: none !important\n\n////////////////////////////////////////////////////////////////////////////////\n// Tweaks related to decolorization.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n // Apply a border around code which no longer have a color background.\n .highlight\n border: 0.1pt solid var(--color-foreground-border)\n\n////////////////////////////////////////////////////////////////////////////////\n// Avoid page break in some relevant cases.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n ul, ol, dl, a, table, pre, blockquote, p\n page-break-inside: avoid\n\n h1, h2, h3, h4, h5, h6, img, figure, caption\n page-break-inside: avoid\n page-break-after: avoid\n\n ul, ol, dl\n page-break-before: avoid\n",".visually-hidden\n position: absolute !important\n width: 1px !important\n height: 1px !important\n padding: 0 !important\n margin: -1px !important\n overflow: hidden !important\n clip: rect(0,0,0,0) !important\n white-space: nowrap !important\n border: 0 !important\n color: var(--color-foreground-primary)\n background: var(--color-background-primary)\n\n:-moz-focusring\n outline: auto\n","// This file serves as the \"skeleton\" of the theming logic.\n//\n// This contains the bulk of the logic for handling dark mode, color scheme\n// toggling and the handling of color-scheme-specific hiding of elements.\n\n@use \"../variables\" as *\n\nbody\n @include fonts\n @include spacing\n @include icons\n @include admonitions\n @include default-admonition(#651fff, \"abstract\")\n @include default-topic(#14B8A6, \"pencil\")\n\n @include colors\n\n.only-light\n display: block !important\nhtml body .only-dark\n display: none !important\n\n// Ignore dark-mode hints if print media.\n@media not print\n // Enable dark-mode, if requested.\n 
body[data-theme=\"dark\"]\n @include colors-dark\n\n html & .only-light\n display: none !important\n .only-dark\n display: block !important\n\n // Enable dark mode, unless explicitly told to avoid.\n @media (prefers-color-scheme: dark)\n body:not([data-theme=\"light\"])\n @include colors-dark\n\n html & .only-light\n display: none !important\n .only-dark\n display: block !important\n\n//\n// Theme toggle presentation\n//\nbody[data-theme=\"auto\"]\n .theme-toggle svg.theme-icon-when-auto-light\n display: block\n\n @media (prefers-color-scheme: dark)\n .theme-toggle svg.theme-icon-when-auto-dark\n display: block\n .theme-toggle svg.theme-icon-when-auto-light\n display: none\n\nbody[data-theme=\"dark\"]\n .theme-toggle svg.theme-icon-when-dark\n display: block\n\nbody[data-theme=\"light\"]\n .theme-toggle svg.theme-icon-when-light\n display: block\n","// Fonts used by this theme.\n//\n// There are basically two things here -- using the system font stack and\n// defining sizes for various elements in %ages. 
We could have also used `em`\n// but %age is easier to reason about for me.\n\n@mixin fonts {\n // These are adapted from https://systemfontstack.com/\n --font-stack:\n -apple-system, BlinkMacSystemFont, Segoe UI, Helvetica, Arial, sans-serif,\n Apple Color Emoji, Segoe UI Emoji;\n --font-stack--monospace:\n \"SFMono-Regular\", Menlo, Consolas, Monaco, Liberation Mono, Lucida Console,\n monospace;\n --font-stack--headings: var(--font-stack);\n\n --font-size--normal: 100%;\n --font-size--small: 87.5%;\n --font-size--small--2: 81.25%;\n --font-size--small--3: 75%;\n --font-size--small--4: 62.5%;\n\n // Sidebar\n --sidebar-caption-font-size: var(--font-size--small--2);\n --sidebar-item-font-size: var(--font-size--small);\n --sidebar-search-input-font-size: var(--font-size--small);\n\n // Table of Contents\n --toc-font-size: var(--font-size--small--3);\n --toc-font-size--mobile: var(--font-size--normal);\n --toc-title-font-size: var(--font-size--small--4);\n\n // Admonitions\n //\n // These aren't defined in terms of %ages, since nesting these is permitted.\n --admonition-font-size: 0.8125rem;\n --admonition-title-font-size: 0.8125rem;\n\n // Code\n --code-font-size: var(--font-size--small--2);\n\n // API\n --api-font-size: var(--font-size--small);\n}\n","// Spacing for various elements on the page\n//\n// If the user wants to tweak things in a certain way, they are permitted to.\n// They also have to deal with the consequences though!\n\n@mixin spacing {\n // Header!\n --header-height: calc(\n var(--sidebar-item-line-height) + 4 *\n #{var(--sidebar-item-spacing-vertical)}\n );\n --header-padding: 0.5rem;\n\n // Sidebar\n --sidebar-tree-space-above: 1.5rem;\n --sidebar-caption-space-above: 1rem;\n\n --sidebar-item-line-height: 1rem;\n --sidebar-item-spacing-vertical: 0.5rem;\n --sidebar-item-spacing-horizontal: 1rem;\n --sidebar-item-height: calc(\n var(--sidebar-item-line-height) + 2 *#{var(--sidebar-item-spacing-vertical)}\n );\n\n --sidebar-expander-width: 
var(--sidebar-item-height); // be square\n\n --sidebar-search-space-above: 0.5rem;\n --sidebar-search-input-spacing-vertical: 0.5rem;\n --sidebar-search-input-spacing-horizontal: 0.5rem;\n --sidebar-search-input-height: 1rem;\n --sidebar-search-icon-size: var(--sidebar-search-input-height);\n\n // Table of Contents\n --toc-title-padding: 0.25rem 0;\n --toc-spacing-vertical: 1.5rem;\n --toc-spacing-horizontal: 1.5rem;\n --toc-item-spacing-vertical: 0.4rem;\n --toc-item-spacing-horizontal: 1rem;\n}\n","// Expose theme icons as CSS variables.\n\n$icons: (\n // Adapted from tabler-icons\n // url: https://tablericons.com/\n \"search\":\n url('data:image/svg+xml;charset=utf-8,'),\n // Factored out from mkdocs-material on 24-Aug-2020.\n // url: https://squidfunk.github.io/mkdocs-material/reference/admonitions/\n \"pencil\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"abstract\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"info\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"flame\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"question\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"warning\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"failure\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"spark\":\n url('data:image/svg+xml;charset=utf-8,')\n);\n\n@mixin icons {\n @each $name, $glyph in $icons {\n --icon-#{$name}: #{$glyph};\n }\n}\n","@use \"sass:list\";\n// Admonitions\n\n// Structure of these is:\n// admonition-class: color \"icon-name\";\n//\n// The colors are translated into CSS variables below. 
The icons are\n// used directly in the main declarations to set the `mask-image` in\n// the title.\n\n// prettier-ignore\n$admonitions: (\n // Each of these has an reST directives for it.\n \"caution\": #ff9100 \"spark\",\n \"warning\": #ff9100 \"warning\",\n \"danger\": #ff5252 \"spark\",\n \"attention\": #ff5252 \"warning\",\n \"error\": #ff5252 \"failure\",\n \"hint\": #00c852 \"question\",\n \"tip\": #00c852 \"info\",\n \"important\": #00bfa5 \"flame\",\n \"note\": #00b0ff \"pencil\",\n \"seealso\": #448aff \"info\",\n \"admonition-todo\": #808080 \"pencil\"\n);\n\n@mixin default-admonition($color, $icon-name) {\n --color-admonition-title: #{$color};\n --color-admonition-title-background: #{rgba($color, 0.2)};\n\n --icon-admonition-default: var(--icon-#{$icon-name});\n}\n\n@mixin default-topic($color, $icon-name) {\n --color-topic-title: #{$color};\n --color-topic-title-background: #{rgba($color, 0.2)};\n\n --icon-topic-default: var(--icon-#{$icon-name});\n}\n\n@mixin admonitions {\n @each $name, $values in $admonitions {\n --color-admonition-title--#{$name}: #{list.nth($values, 1)};\n --color-admonition-title-background--#{$name}: #{rgba(\n list.nth($values, 1),\n 0.2\n )};\n }\n}\n","// Colors used throughout this theme.\n//\n// The aim is to give the user more control. 
Thus, instead of hard-coding colors\n// in various parts of the stylesheet, the approach taken is to define all\n// colors as CSS variables and reusing them in all the places.\n//\n// `colors-dark` depends on `colors` being included at a lower specificity.\n\n@mixin colors {\n --color-problematic: #b30000;\n\n // Base Colors\n --color-foreground-primary: black; // for main text and headings\n --color-foreground-secondary: #5a5c63; // for secondary text\n --color-foreground-muted: #6b6f76; // for muted text\n --color-foreground-border: #878787; // for content borders\n\n --color-background-primary: white; // for content\n --color-background-secondary: #f8f9fb; // for navigation + ToC\n --color-background-hover: #efeff4ff; // for navigation-item hover\n --color-background-hover--transparent: #efeff400;\n --color-background-border: #eeebee; // for UI borders\n --color-background-item: #ccc; // for \"background\" items (eg: copybutton)\n\n // Announcements\n --color-announcement-background: #000000dd;\n --color-announcement-text: #eeebee;\n\n // Brand colors\n --color-brand-primary: #0a4bff;\n --color-brand-content: #2757dd;\n --color-brand-visited: #872ee0;\n\n // API documentation\n --color-api-background: var(--color-background-hover--transparent);\n --color-api-background-hover: var(--color-background-hover);\n --color-api-overall: var(--color-foreground-secondary);\n --color-api-name: var(--color-problematic);\n --color-api-pre-name: var(--color-problematic);\n --color-api-paren: var(--color-foreground-secondary);\n --color-api-keyword: var(--color-foreground-primary);\n\n --color-api-added: #21632c;\n --color-api-added-border: #38a84d;\n --color-api-changed: #046172;\n --color-api-changed-border: #06a1bc;\n --color-api-deprecated: #605706;\n --color-api-deprecated-border: #f0d90f;\n --color-api-removed: #b30000;\n --color-api-removed-border: #ff5c5c;\n\n --color-highlight-on-target: #ffffcc;\n\n // Inline code background\n --color-inline-code-background: 
var(--color-background-secondary);\n\n // Highlighted text (search)\n --color-highlighted-background: #ddeeff;\n --color-highlighted-text: var(--color-foreground-primary);\n\n // GUI Labels\n --color-guilabel-background: #ddeeff80;\n --color-guilabel-border: #bedaf580;\n --color-guilabel-text: var(--color-foreground-primary);\n\n // Admonitions!\n --color-admonition-background: transparent;\n\n //////////////////////////////////////////////////////////////////////////////\n // Everything below this should be one of:\n // - var(...)\n // - *-gradient(...)\n // - special literal values (eg: transparent, none)\n //////////////////////////////////////////////////////////////////////////////\n\n // Tables\n --color-table-header-background: var(--color-background-secondary);\n --color-table-border: var(--color-background-border);\n\n // Cards\n --color-card-border: var(--color-background-secondary);\n --color-card-background: transparent;\n --color-card-marginals-background: var(--color-background-secondary);\n\n // Header\n --color-header-background: var(--color-background-primary);\n --color-header-border: var(--color-background-border);\n --color-header-text: var(--color-foreground-primary);\n\n // Sidebar (left)\n --color-sidebar-background: var(--color-background-secondary);\n --color-sidebar-background-border: var(--color-background-border);\n\n --color-sidebar-brand-text: var(--color-foreground-primary);\n --color-sidebar-caption-text: var(--color-foreground-muted);\n --color-sidebar-link-text: var(--color-foreground-secondary);\n --color-sidebar-link-text--top-level: var(--color-brand-primary);\n\n --color-sidebar-item-background: var(--color-sidebar-background);\n --color-sidebar-item-background--current: var(\n --color-sidebar-item-background\n );\n --color-sidebar-item-background--hover: linear-gradient(\n 90deg,\n var(--color-background-hover--transparent) 0%,\n var(--color-background-hover) var(--sidebar-item-spacing-horizontal),\n 
var(--color-background-hover) 100%\n );\n\n --color-sidebar-item-expander-background: transparent;\n --color-sidebar-item-expander-background--hover: var(\n --color-background-hover\n );\n\n --color-sidebar-search-text: var(--color-foreground-primary);\n --color-sidebar-search-background: var(--color-background-secondary);\n --color-sidebar-search-background--focus: var(--color-background-primary);\n --color-sidebar-search-border: var(--color-background-border);\n --color-sidebar-search-icon: var(--color-foreground-muted);\n\n // Table of Contents (right)\n --color-toc-background: var(--color-background-primary);\n --color-toc-title-text: var(--color-foreground-muted);\n --color-toc-item-text: var(--color-foreground-secondary);\n --color-toc-item-text--hover: var(--color-foreground-primary);\n --color-toc-item-text--active: var(--color-brand-primary);\n\n // Actual page contents\n --color-content-foreground: var(--color-foreground-primary);\n --color-content-background: transparent;\n\n // Links\n --color-link: var(--color-brand-content);\n --color-link-underline: var(--color-background-border);\n --color-link--hover: var(--color-brand-content);\n --color-link-underline--hover: var(--color-foreground-border);\n\n --color-link--visited: var(--color-brand-visited);\n --color-link-underline--visited: var(--color-background-border);\n --color-link--visited--hover: var(--color-brand-visited);\n --color-link-underline--visited--hover: var(--color-foreground-border);\n}\n\n@mixin colors-dark {\n --color-problematic: #ee5151;\n\n // Base Colors\n --color-foreground-primary: #cfd0d0; // for main text and headings\n --color-foreground-secondary: #9ca0a5; // for secondary text\n --color-foreground-muted: #81868d; // for muted text\n --color-foreground-border: #666666; // for content borders\n\n --color-background-primary: #131416; // for content\n --color-background-secondary: #1a1c1e; // for navigation + ToC\n --color-background-hover: #1e2124ff; // for navigation-item 
hover\n --color-background-hover--transparent: #1e212400;\n --color-background-border: #303335; // for UI borders\n --color-background-item: #444; // for \"background\" items (eg: copybutton)\n\n // Announcements\n --color-announcement-background: #000000dd;\n --color-announcement-text: #eeebee;\n\n // Brand colors\n --color-brand-primary: #3d94ff;\n --color-brand-content: #5ca5ff;\n --color-brand-visited: #b27aeb;\n\n // Highlighted text (search)\n --color-highlighted-background: #083563;\n\n // GUI Labels\n --color-guilabel-background: #08356380;\n --color-guilabel-border: #13395f80;\n\n // API documentation\n --color-api-keyword: var(--color-foreground-secondary);\n --color-highlight-on-target: #333300;\n\n --color-api-added: #3db854;\n --color-api-added-border: #267334;\n --color-api-changed: #09b0ce;\n --color-api-changed-border: #056d80;\n --color-api-deprecated: #b1a10b;\n --color-api-deprecated-border: #6e6407;\n --color-api-removed: #ff7575;\n --color-api-removed-border: #b03b3b;\n\n // Admonitions\n --color-admonition-background: #18181a;\n\n // Cards\n --color-card-border: var(--color-background-secondary);\n --color-card-background: #18181a;\n --color-card-marginals-background: var(--color-background-hover);\n}\n","// This file contains the styling for making the content throughout the page,\n// including fonts, paragraphs, headings and spacing among these elements.\n\nbody\n font-family: var(--font-stack)\npre,\ncode,\nkbd,\nsamp\n font-family: var(--font-stack--monospace)\n\n// Make fonts look slightly nicer.\nbody\n -webkit-font-smoothing: antialiased\n -moz-osx-font-smoothing: grayscale\n\n// Line height from Bootstrap 4.1\narticle\n line-height: 1.5\n\n//\n// Headings\n//\nh1,\nh2,\nh3,\nh4,\nh5,\nh6\n line-height: 1.25\n font-family: var(--font-stack--headings)\n font-weight: bold\n\n border-radius: 0.5rem\n margin-top: 0.5rem\n margin-bottom: 0.5rem\n margin-left: -0.5rem\n margin-right: -0.5rem\n padding-left: 0.5rem\n padding-right: 0.5rem\n\n 
+ p\n margin-top: 0\n\nh1\n font-size: 2.5em\n margin-top: 1.75rem\n margin-bottom: 1rem\nh2\n font-size: 2em\n margin-top: 1.75rem\nh3\n font-size: 1.5em\nh4\n font-size: 1.25em\nh5\n font-size: 1.125em\nh6\n font-size: 1em\n\nsmall\n opacity: 75%\n font-size: 80%\n\n// Paragraph\np\n margin-top: 0.5rem\n margin-bottom: 0.75rem\n\n// Horizontal rules\nhr.docutils\n height: 1px\n padding: 0\n margin: 2rem 0\n background-color: var(--color-background-border)\n border: 0\n\n.centered\n text-align: center\n\n// Links\na\n text-decoration: underline\n\n color: var(--color-link)\n text-decoration-color: var(--color-link-underline)\n\n &:visited\n color: var(--color-link--visited)\n text-decoration-color: var(--color-link-underline--visited)\n &:hover\n color: var(--color-link--visited--hover)\n text-decoration-color: var(--color-link-underline--visited--hover)\n\n &:hover\n color: var(--color-link--hover)\n text-decoration-color: var(--color-link-underline--hover)\n &.muted-link\n color: inherit\n &:hover\n color: var(--color-link--hover)\n text-decoration-color: var(--color-link-underline--hover)\n &:visited\n color: var(--color-link--visited--hover)\n text-decoration-color: var(--color-link-underline--visited--hover)\n","// This file contains the styles for the overall layouting of the documentation\n// skeleton, including the responsive changes as well as sidebar toggles.\n//\n// This is implemented as a mobile-last design, which isn't ideal, but it is\n// reasonably good-enough and I got pretty tired by the time I'd finished this\n// to move the rules around to fix this. Shouldn't take more than 3-4 hours,\n// if you know what you're doing tho.\n\n// HACK: Not all browsers account for the scrollbar width in media queries.\n// This results in horizontal scrollbars in the breakpoint where we go\n// from displaying everything to hiding the ToC. 
We accomodate for this by\n// adding a bit of padding to the TOC drawer, disabling the horizontal\n// scrollbar and allowing the scrollbars to cover the padding.\n// https://www.456bereastreet.com/archive/201301/media_query_width_and_vertical_scrollbars/\n\n// HACK: Always having the scrollbar visible, prevents certain browsers from\n// causing the content to stutter horizontally between taller-than-viewport and\n// not-taller-than-viewport pages.\n@use \"variables\" as *\n\nhtml\n overflow-x: hidden\n overflow-y: scroll\n scroll-behavior: smooth\n\n.sidebar-scroll, .toc-scroll, article[role=main] *\n scrollbar-width: thin\n scrollbar-color: var(--color-foreground-border) transparent\n\n//\n// Overalls\n//\nhtml,\nbody\n height: 100%\n color: var(--color-foreground-primary)\n background: var(--color-background-primary)\n\n.skip-to-content\n position: fixed\n padding: 1rem\n border-radius: 1rem\n left: 0.25rem\n top: 0.25rem\n z-index: 40\n background: var(--color-background-primary)\n color: var(--color-foreground-primary)\n\n transform: translateY(-200%)\n transition: transform 300ms ease-in-out\n\n &:focus-within\n transform: translateY(0%)\n\narticle\n color: var(--color-content-foreground)\n background: var(--color-content-background)\n overflow-wrap: break-word\n\n.page\n display: flex\n // fill the viewport for pages with little content.\n min-height: 100%\n\n.mobile-header\n width: 100%\n height: var(--header-height)\n background-color: var(--color-header-background)\n color: var(--color-header-text)\n border-bottom: 1px solid var(--color-header-border)\n\n // Looks like sub-script/super-script have this, and we need this to\n // be \"on top\" of those.\n z-index: 10\n\n // We don't show the header on large screens.\n display: none\n\n // Add shadow when scrolled\n &.scrolled\n border-bottom: none\n box-shadow: 0 0 0.2rem rgba(0, 0, 0, 0.1), 0 0.2rem 0.4rem rgba(0, 0, 0, 0.2)\n\n .header-center\n a\n color: var(--color-header-text)\n text-decoration: 
none\n\n.main\n display: flex\n flex: 1\n\n// Sidebar (left) also covers the entire left portion of screen.\n.sidebar-drawer\n box-sizing: border-box\n\n border-right: 1px solid var(--color-sidebar-background-border)\n background: var(--color-sidebar-background)\n\n display: flex\n justify-content: flex-end\n // These next two lines took me two days to figure out.\n width: calc((100% - #{$full-width}) / 2 + #{$sidebar-width})\n min-width: $sidebar-width\n\n// Scroll-along sidebars\n.sidebar-container,\n.toc-drawer\n box-sizing: border-box\n width: $sidebar-width\n\n.toc-drawer\n background: var(--color-toc-background)\n // See HACK described on top of this document\n padding-right: 1rem\n\n.sidebar-sticky,\n.toc-sticky\n position: sticky\n top: 0\n height: min(100%, 100vh)\n height: 100vh\n\n display: flex\n flex-direction: column\n\n.sidebar-scroll,\n.toc-scroll\n flex-grow: 1\n flex-shrink: 1\n\n overflow: auto\n scroll-behavior: smooth\n\n// Central items.\n.content\n padding: 0 $content-padding\n width: $content-width\n\n display: flex\n flex-direction: column\n justify-content: space-between\n\n.icon\n display: inline-block\n height: 1rem\n width: 1rem\n svg\n width: 100%\n height: 100%\n\n//\n// Accommodate announcement banner\n//\n.announcement\n background-color: var(--color-announcement-background)\n color: var(--color-announcement-text)\n\n height: var(--header-height)\n display: flex\n align-items: center\n overflow-x: auto\n & + .page\n min-height: calc(100% - var(--header-height))\n\n.announcement-content\n box-sizing: border-box\n padding: 0.5rem\n min-width: 100%\n white-space: nowrap\n text-align: center\n\n a\n color: var(--color-announcement-text)\n text-decoration-color: var(--color-announcement-text)\n\n &:hover\n color: var(--color-announcement-text)\n text-decoration-color: var(--color-link--hover)\n\n////////////////////////////////////////////////////////////////////////////////\n// Toggles for 
theme\n////////////////////////////////////////////////////////////////////////////////\n.no-js .theme-toggle-container // don't show theme toggle if there's no JS\n display: none\n\n.theme-toggle-container\n display: flex\n\n.theme-toggle\n display: flex\n cursor: pointer\n border: none\n padding: 0\n background: transparent\n\n.theme-toggle svg\n height: 1.25rem\n width: 1.25rem\n color: var(--color-foreground-primary)\n display: none\n\n.theme-toggle-header\n display: flex\n align-items: center\n justify-content: center\n\n////////////////////////////////////////////////////////////////////////////////\n// Toggles for elements\n////////////////////////////////////////////////////////////////////////////////\n.toc-overlay-icon, .nav-overlay-icon\n display: none\n cursor: pointer\n\n .icon\n color: var(--color-foreground-secondary)\n height: 1.5rem\n width: 1.5rem\n\n.toc-header-icon, .nav-overlay-icon\n // for when we set display: flex\n justify-content: center\n align-items: center\n\n.toc-content-icon\n height: 1.5rem\n width: 1.5rem\n\n.content-icon-container\n float: right\n display: flex\n margin-top: 1.5rem\n margin-left: 1rem\n margin-bottom: 1rem\n gap: 0.5rem\n\n .edit-this-page, .view-this-page\n svg\n color: inherit\n height: 1.25rem\n width: 1.25rem\n\n.sidebar-toggle\n position: absolute\n display: none\n// \n.sidebar-toggle[name=\"__toc\"]\n left: 20px\n.sidebar-toggle:checked\n left: 40px\n// \n\n.overlay\n position: fixed\n top: 0\n width: 0\n height: 0\n\n transition: width 0ms, height 0ms, opacity 250ms ease-out\n\n opacity: 0\n background-color: rgba(0, 0, 0, 0.54)\n.sidebar-overlay\n z-index: 20\n.toc-overlay\n z-index: 40\n\n// Keep things on top and smooth.\n.sidebar-drawer\n z-index: 30\n transition: left 250ms ease-in-out\n.toc-drawer\n z-index: 50\n transition: right 250ms ease-in-out\n\n// Show the Sidebar\n#__navigation:checked\n & ~ .sidebar-overlay\n width: 100%\n height: 100%\n opacity: 1\n & ~ .page\n .sidebar-drawer\n top: 0\n 
left: 0\n // Show the toc sidebar\n#__toc:checked\n & ~ .toc-overlay\n width: 100%\n height: 100%\n opacity: 1\n & ~ .page\n .toc-drawer\n top: 0\n right: 0\n\n////////////////////////////////////////////////////////////////////////////////\n// Back to top\n////////////////////////////////////////////////////////////////////////////////\n.back-to-top\n text-decoration: none\n\n display: none\n position: fixed\n left: 0\n top: 1rem\n padding: 0.5rem\n padding-right: 0.75rem\n border-radius: 1rem\n font-size: 0.8125rem\n\n background: var(--color-background-primary)\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), #6b728080 0px 0px 1px 0px\n\n z-index: 10\n\n margin-left: 50%\n transform: translateX(-50%)\n svg\n height: 1rem\n width: 1rem\n fill: currentColor\n display: inline-block\n\n span\n margin-left: 0.25rem\n\n .show-back-to-top &\n display: flex\n align-items: center\n\n////////////////////////////////////////////////////////////////////////////////\n// Responsive layouting\n////////////////////////////////////////////////////////////////////////////////\n// Make things a bit bigger on bigger screens.\n@media (min-width: $full-width + $sidebar-width)\n html\n font-size: 110%\n\n@media (max-width: $full-width)\n // Collapse \"toc\" into the icon.\n .toc-content-icon\n display: flex\n .toc-drawer\n position: fixed\n height: 100vh\n top: 0\n right: -$sidebar-width\n border-left: 1px solid var(--color-background-muted)\n .toc-tree\n border-left: none\n font-size: var(--toc-font-size--mobile)\n\n // Accomodate for a changed content width.\n .sidebar-drawer\n width: calc((100% - #{$full-width - $sidebar-width}) / 2 + #{$sidebar-width})\n\n@media (max-width: $content-padded-width + $sidebar-width)\n // Center the page\n .content\n margin-left: auto\n margin-right: auto\n padding: 0 $content-padding--small\n\n@media (max-width: $content-padded-width--small + $sidebar-width)\n // Collapse \"navigation\".\n .nav-overlay-icon\n display: flex\n .sidebar-drawer\n 
position: fixed\n height: 100vh\n width: $sidebar-width\n\n top: 0\n left: -$sidebar-width\n\n // Swap which icon is visible.\n .toc-header-icon, .theme-toggle-header\n display: flex\n .toc-content-icon, .theme-toggle-content\n display: none\n\n // Show the header.\n .mobile-header\n position: sticky\n top: 0\n display: flex\n justify-content: space-between\n align-items: center\n\n .header-left,\n .header-right\n display: flex\n height: var(--header-height)\n padding: 0 var(--header-padding)\n label\n height: 100%\n width: 100%\n user-select: none\n\n .nav-overlay-icon .icon,\n .theme-toggle svg\n height: 1.5rem\n width: 1.5rem\n\n // Add a scroll margin for the content\n :target\n scroll-margin-top: calc(var(--header-height) + 2.5rem)\n\n // Show back-to-top below the header\n .back-to-top\n top: calc(var(--header-height) + 0.5rem)\n\n // Accommodate for the header.\n .page\n flex-direction: column\n justify-content: center\n\n@media (max-width: $content-width + 2* $content-padding--small)\n // Content should respect window limits.\n .content\n width: 100%\n overflow-x: auto\n\n@media (max-width: $content-width)\n article[role=main] aside.sidebar\n float: none\n width: 100%\n margin: 1rem 0\n","@use \"sass:list\"\n@use \"../variables\" as *\n\n// The design here is strongly inspired by mkdocs-material.\n.admonition, .topic\n margin: 1rem auto\n padding: 0 0.5rem 0.5rem 0.5rem\n\n background: var(--color-admonition-background)\n\n border-radius: 0.2rem\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n font-size: var(--admonition-font-size)\n\n overflow: hidden\n page-break-inside: avoid\n\n // First element should have no margin, since the title has it.\n > :nth-child(2)\n margin-top: 0\n\n // Last item should have no margin, since we'll control that w/ padding\n > :last-child\n margin-bottom: 0\n\n.admonition p.admonition-title,\np.topic-title\n position: relative\n margin: 0 -0.5rem 0.5rem\n padding-left: 2rem\n 
padding-right: .5rem\n padding-top: .4rem\n padding-bottom: .4rem\n\n font-weight: 500\n font-size: var(--admonition-title-font-size)\n line-height: 1.3\n\n // Our fancy icon\n &::before\n content: \"\"\n position: absolute\n left: 0.5rem\n width: 1rem\n height: 1rem\n\n// Default styles\np.admonition-title\n background-color: var(--color-admonition-title-background)\n &::before\n background-color: var(--color-admonition-title)\n mask-image: var(--icon-admonition-default)\n mask-repeat: no-repeat\n\np.topic-title\n background-color: var(--color-topic-title-background)\n &::before\n background-color: var(--color-topic-title)\n mask-image: var(--icon-topic-default)\n mask-repeat: no-repeat\n\n//\n// Variants\n//\n.admonition\n border-left: 0.2rem solid var(--color-admonition-title)\n\n @each $type, $value in $admonitions\n &.#{$type}\n border-left-color: var(--color-admonition-title--#{$type})\n > .admonition-title\n background-color: var(--color-admonition-title-background--#{$type})\n &::before\n background-color: var(--color-admonition-title--#{$type})\n mask-image: var(--icon-#{list.nth($value, 2)})\n\n.admonition-todo > .admonition-title\n text-transform: uppercase\n","// This file stylizes the API documentation (stuff generated by autodoc). 
It's\n// deeply nested due to how autodoc structures the HTML without enough classes\n// to select the relevant items.\n\n// API docs!\ndl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)\n // Tweak the spacing of all the things!\n dd\n margin-left: 2rem\n > :first-child\n margin-top: 0.125rem\n > :last-child\n margin-bottom: 0.75rem\n\n // This is used for the arguments\n .field-list\n margin-bottom: 0.75rem\n\n // \"Headings\" (like \"Parameters\" and \"Return\")\n > dt\n text-transform: uppercase\n font-size: var(--font-size--small)\n\n dd:empty\n margin-bottom: 0.5rem\n dd > ul\n margin-left: -1.2rem\n > li\n > p:nth-child(2)\n margin-top: 0\n // When the last-empty-paragraph follows a paragraph, it doesn't need\n // to augument the existing spacing.\n > p + p:last-child:empty\n margin-top: 0\n margin-bottom: 0\n\n // Colorize the elements\n > dt\n color: var(--color-api-overall)\n\n.sig:not(.sig-inline)\n font-weight: bold\n\n font-size: var(--api-font-size)\n font-family: var(--font-stack--monospace)\n\n margin-left: -0.25rem\n margin-right: -0.25rem\n padding-top: 0.25rem\n padding-bottom: 0.25rem\n padding-right: 0.5rem\n\n // These are intentionally em, to properly match the font size.\n padding-left: 3em\n text-indent: -2.5em\n\n border-radius: 0.25rem\n\n background: var(--color-api-background)\n transition: background 100ms ease-out\n\n &:hover\n background: var(--color-api-background-hover)\n\n // adjust the size of the [source] link on the right.\n a.reference\n .viewcode-link\n font-weight: normal\n width: 4.25rem\n\nem.property\n font-style: normal\n &:first-child\n color: var(--color-api-keyword)\n.sig-name\n color: var(--color-api-name)\n.sig-prename\n font-weight: normal\n color: var(--color-api-pre-name)\n.sig-paren\n color: var(--color-api-paren)\n.sig-param\n font-style: normal\n\ndiv.versionadded,\ndiv.versionchanged,\ndiv.deprecated,\ndiv.versionremoved\n border-left: 0.1875rem solid\n border-radius: 
0.125rem\n\n padding-left: 0.75rem\n\n p\n margin-top: 0.125rem\n margin-bottom: 0.125rem\n\ndiv.versionadded\n border-color: var(--color-api-added-border)\n .versionmodified\n color: var(--color-api-added)\n\ndiv.versionchanged\n border-color: var(--color-api-changed-border)\n .versionmodified\n color: var(--color-api-changed)\n\ndiv.deprecated\n border-color: var(--color-api-deprecated-border)\n .versionmodified\n color: var(--color-api-deprecated)\n\ndiv.versionremoved\n border-color: var(--color-api-removed-border)\n .versionmodified\n color: var(--color-api-removed)\n\n// Align the [docs] and [source] to the right.\n.viewcode-link, .viewcode-back\n float: right\n text-align: right\n",".line-block\n margin-top: 0.5rem\n margin-bottom: 0.75rem\n .line-block\n margin-top: 0rem\n margin-bottom: 0rem\n padding-left: 1rem\n","// Captions\narticle p.caption,\ntable > caption,\n.code-block-caption\n font-size: var(--font-size--small)\n text-align: center\n\n// Caption above a TOCTree\n.toctree-wrapper.compound\n .caption, :not(.caption) > .caption-text\n font-size: var(--font-size--small)\n text-transform: uppercase\n\n text-align: initial\n margin-bottom: 0\n\n > ul\n margin-top: 0\n margin-bottom: 0\n","// Inline code\ncode.literal, .sig-inline\n background: var(--color-inline-code-background)\n border-radius: 0.2em\n // Make the font smaller, and use padding to recover.\n font-size: var(--font-size--small--2)\n padding: 0.1em 0.2em\n\n pre.literal-block &\n font-size: inherit\n padding: 0\n\n p &\n border: 1px solid var(--color-background-border)\n\n.sig-inline\n font-family: var(--font-stack--monospace)\n\n// Code and Literal Blocks\n$code-spacing-vertical: 0.625rem\n$code-spacing-horizontal: 0.875rem\n\n// Wraps every literal block + line numbers.\ndiv[class*=\" highlight-\"],\ndiv[class^=\"highlight-\"]\n margin: 1em 0\n display: flex\n\n .table-wrapper\n margin: 0\n padding: 0\n\npre\n margin: 0\n padding: 0\n overflow: auto\n\n // Needed to have more 
specificity than pygments' \"pre\" selector. :(\n article[role=\"main\"] .highlight &\n line-height: 1.5\n\n &.literal-block,\n .highlight &\n font-size: var(--code-font-size)\n padding: $code-spacing-vertical $code-spacing-horizontal\n\n // Make it look like all the other blocks.\n &.literal-block\n margin-top: 1rem\n margin-bottom: 1rem\n\n border-radius: 0.2rem\n background-color: var(--color-code-background)\n color: var(--color-code-foreground)\n\n// All code is always contained in this.\n.highlight\n width: 100%\n border-radius: 0.2rem\n\n // Make line numbers and prompts un-selectable.\n .gp, span.linenos\n user-select: none\n pointer-events: none\n\n // Expand the line-highlighting.\n .hll\n display: block\n margin-left: -$code-spacing-horizontal\n margin-right: -$code-spacing-horizontal\n padding-left: $code-spacing-horizontal\n padding-right: $code-spacing-horizontal\n\n/* Make code block captions be nicely integrated */\n.code-block-caption\n display: flex\n padding: $code-spacing-vertical $code-spacing-horizontal\n\n border-radius: 0.25rem\n border-bottom-left-radius: 0\n border-bottom-right-radius: 0\n font-weight: 300\n border-bottom: 1px solid\n\n background-color: var(--color-code-background)\n color: var(--color-code-foreground)\n border-color: var(--color-background-border)\n\n + div[class]\n margin-top: 0\n > .highlight\n border-top-left-radius: 0\n border-top-right-radius: 0\n\n// When `html_codeblock_linenos_style` is table.\n.highlighttable\n width: 100%\n display: block\n tbody\n display: block\n\n tr\n display: flex\n\n // Line numbers\n td.linenos\n background-color: var(--color-code-background)\n color: var(--color-code-foreground)\n padding: $code-spacing-vertical $code-spacing-horizontal\n padding-right: 0\n border-top-left-radius: 0.2rem\n border-bottom-left-radius: 0.2rem\n\n .linenodiv\n padding-right: $code-spacing-horizontal\n font-size: var(--code-font-size)\n box-shadow: -0.0625rem 0 var(--color-foreground-border) inset\n\n // 
Actual code\n td.code\n padding: 0\n display: block\n flex: 1\n overflow: hidden\n\n .highlight\n border-top-left-radius: 0\n border-bottom-left-radius: 0\n\n// When `html_codeblock_linenos_style` is inline.\n.highlight\n span.linenos\n display: inline-block\n padding-left: 0\n padding-right: $code-spacing-horizontal\n margin-right: $code-spacing-horizontal\n box-shadow: -0.0625rem 0 var(--color-foreground-border) inset\n","// Inline Footnote Reference\n.footnote-reference\n font-size: var(--font-size--small--4)\n vertical-align: super\n\n// Definition list, listing the content of each note.\n// docutils <= 0.17\ndl.footnote.brackets\n font-size: var(--font-size--small)\n color: var(--color-foreground-secondary)\n\n display: grid\n grid-template-columns: max-content auto\n dt\n margin: 0\n > .fn-backref\n margin-left: 0.25rem\n\n &:after\n content: \":\"\n\n .brackets\n &:before\n content: \"[\"\n &:after\n content: \"]\"\n\n dd\n margin: 0\n padding: 0 1rem\n\n// docutils >= 0.18\naside.footnote\n font-size: var(--font-size--small)\n color: var(--color-foreground-secondary)\n\naside.footnote > span,\ndiv.citation > span\n float: left\n font-weight: 500\n padding-right: 0.25rem\n\naside.footnote > *:not(span),\ndiv.citation > p\n margin-left: 2rem\n","//\n// Figures\n//\nimg\n box-sizing: border-box\n max-width: 100%\n height: auto\n\narticle\n figure, .figure\n border-radius: 0.2rem\n\n margin: 0\n :last-child\n margin-bottom: 0\n\n .align-left\n float: left\n clear: left\n margin: 0 1rem 1rem\n\n .align-right\n float: right\n clear: right\n margin: 0 1rem 1rem\n\n .align-default,\n .align-center\n display: block\n text-align: center\n margin-left: auto\n margin-right: auto\n\n // WELL, table needs to be stylised like a table.\n table.align-default\n display: table\n text-align: initial\n",".genindex-jumpbox, .domainindex-jumpbox\n border-top: 1px solid var(--color-background-border)\n border-bottom: 1px solid var(--color-background-border)\n padding: 
0.25rem\n\n.genindex-section, .domainindex-section\n h2\n margin-top: 0.75rem\n margin-bottom: 0.5rem\n ul\n margin-top: 0\n margin-bottom: 0\n","ul,\nol\n padding-left: 1.2rem\n\n // Space lists out like paragraphs\n margin-top: 1rem\n margin-bottom: 1rem\n // reduce margins within li.\n li\n > p:first-child\n margin-top: 0.25rem\n margin-bottom: 0.25rem\n\n > p:last-child\n margin-top: 0.25rem\n\n > ul,\n > ol\n margin-top: 0.5rem\n margin-bottom: 0.5rem\n\nol\n &.arabic\n list-style: decimal\n &.loweralpha\n list-style: lower-alpha\n &.upperalpha\n list-style: upper-alpha\n &.lowerroman\n list-style: lower-roman\n &.upperroman\n list-style: upper-roman\n\n// Don't space lists out when they're \"simple\" or in a `.. toctree::`\n.simple,\n.toctree-wrapper\n li\n > ul,\n > ol\n margin-top: 0\n margin-bottom: 0\n\n// Definition Lists\n.field-list,\n.option-list,\ndl:not([class]),\ndl.simple,\ndl.footnote,\ndl.glossary\n dt\n font-weight: 500\n margin-top: 0.25rem\n + dt\n margin-top: 0\n\n .classifier::before\n content: \":\"\n margin-left: 0.2rem\n margin-right: 0.2rem\n\n dd\n > p:first-child,\n ul\n margin-top: 0.125rem\n\n ul\n margin-bottom: 0.125rem\n",".math-wrapper\n width: 100%\n overflow-x: auto\n\ndiv.math\n position: relative\n text-align: center\n\n .headerlink,\n &:focus .headerlink\n display: none\n\n &:hover .headerlink\n display: inline-block\n\n span.eqno\n position: absolute\n right: 0.5rem\n top: 50%\n transform: translate(0, -50%)\n z-index: 1\n","// Abbreviations\nabbr[title]\n cursor: help\n\n// \"Problematic\" content, as identified by Sphinx\n.problematic\n color: var(--color-problematic)\n\n// Keyboard / Mouse \"instructions\"\nkbd:not(.compound)\n margin: 0 0.2rem\n padding: 0 0.2rem\n border-radius: 0.2rem\n border: 1px solid var(--color-foreground-border)\n color: var(--color-foreground-primary)\n vertical-align: text-bottom\n\n font-size: var(--font-size--small--3)\n display: inline-block\n\n box-shadow: 0 0.0625rem 0 rgba(0, 0, 0, 
0.2), inset 0 0 0 0.125rem var(--color-background-primary)\n\n background-color: var(--color-background-secondary)\n\n// Blockquote\nblockquote\n border-left: 4px solid var(--color-background-border)\n background: var(--color-background-secondary)\n\n margin-left: 0\n margin-right: 0\n padding: 0.5rem 1rem\n\n .attribution\n font-weight: 600\n text-align: right\n\n &.pull-quote,\n &.highlights\n font-size: 1.25em\n\n &.epigraph,\n &.pull-quote\n border-left-width: 0\n border-radius: 0.5rem\n\n &.highlights\n border-left-width: 0\n background: transparent\n\n// Center align embedded-in-text images\np .reference img\n vertical-align: middle\n","p.rubric\n line-height: 1.25\n font-weight: bold\n font-size: 1.125em\n\n // For Numpy-style documentation that's got rubrics within it.\n // https://github.com/pradyunsg/furo/discussions/505\n dd &\n line-height: inherit\n font-weight: inherit\n\n font-size: var(--font-size--small)\n text-transform: uppercase\n","article .sidebar\n float: right\n clear: right\n width: 30%\n\n margin-left: 1rem\n margin-right: 0\n\n border-radius: 0.2rem\n background-color: var(--color-background-secondary)\n border: var(--color-background-border) 1px solid\n\n > *\n padding-left: 1rem\n padding-right: 1rem\n\n > ul, > ol // lists need additional padding, because bullets.\n padding-left: 2.2rem\n\n .sidebar-title\n margin: 0\n padding: 0.5rem 1rem\n border-bottom: var(--color-background-border) 1px solid\n\n font-weight: 500\n\n// TODO: subtitle\n// TODO: dedicated variables?\n","[role=main] .table-wrapper.container\n width: 100%\n overflow-x: auto\n margin-top: 1rem\n margin-bottom: 0.5rem\n padding: 0.2rem 0.2rem 0.75rem\n\ntable.docutils\n border-radius: 0.2rem\n border-spacing: 0\n border-collapse: collapse\n\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n th\n background: var(--color-table-header-background)\n\n td,\n th\n // Space things out properly\n padding: 0 0.25rem\n\n // Get the borders 
looking just-right.\n border-left: 1px solid var(--color-table-border)\n border-right: 1px solid var(--color-table-border)\n border-bottom: 1px solid var(--color-table-border)\n\n p\n margin: 0.25rem\n\n &:first-child\n border-left: none\n &:last-child\n border-right: none\n\n // MyST-parser tables set these classes for control of column alignment\n &.text-left\n text-align: left\n &.text-right\n text-align: right\n &.text-center\n text-align: center\n","@use \"../variables\" as *\n\n:target\n scroll-margin-top: 2.5rem\n\n@media (max-width: $full-width - $sidebar-width)\n :target\n scroll-margin-top: calc(2.5rem + var(--header-height))\n\n // When a heading is selected\n section > span:target\n scroll-margin-top: calc(2.8rem + var(--header-height))\n\n// Permalinks\n.headerlink\n font-weight: 100\n user-select: none\n\nh1,\nh2,\nh3,\nh4,\nh5,\nh6,\ndl dt,\np.caption,\nfigcaption p,\ntable > caption,\n.code-block-caption\n > .headerlink\n margin-left: 0.5rem\n visibility: hidden\n &:hover > .headerlink\n visibility: visible\n\n // Don't change to link-like, if someone adds the contents directive.\n > .toc-backref\n color: inherit\n text-decoration-line: none\n\n// Figure and table captions are special.\nfigure:hover > figcaption > p > .headerlink,\ntable:hover > caption > .headerlink\n visibility: visible\n\n:target >, // Regular section[id] style anchors\nspan:target ~ // Non-regular span[id] style \"extra\" anchors\n h1,\n h2,\n h3,\n h4,\n h5,\n h6\n &:nth-of-type(1)\n background-color: var(--color-highlight-on-target)\n // .headerlink\n // visibility: visible\n code.literal\n background-color: transparent\n\ntable:target > caption,\nfigure:target\n background-color: var(--color-highlight-on-target)\n\n// Inline page contents\n.this-will-duplicate-information-and-it-is-still-useful-here li :target\n background-color: var(--color-highlight-on-target)\n\n// Code block permalinks\n.literal-block-wrapper:target .code-block-caption\n background-color: 
var(--color-highlight-on-target)\n\n// When a definition list item is selected\n//\n// There isn't really an alternative to !important here, due to the\n// high-specificity of API documentation's selector.\ndt:target\n background-color: var(--color-highlight-on-target) !important\n\n// When a footnote reference is selected\n.footnote > dt:target + dd,\n.footnote-reference:target\n background-color: var(--color-highlight-on-target)\n",".guilabel\n background-color: var(--color-guilabel-background)\n border: 1px solid var(--color-guilabel-border)\n color: var(--color-guilabel-text)\n\n padding: 0 0.3em\n border-radius: 0.5em\n font-size: 0.9em\n","// This file contains the styles used for stylizing the footer that's shown\n// below the content.\n@use \"../variables\" as *\n\nfooter\n font-size: var(--font-size--small)\n display: flex\n flex-direction: column\n\n margin-top: 2rem\n\n// Bottom of page information\n.bottom-of-page\n display: flex\n align-items: center\n justify-content: space-between\n\n margin-top: 1rem\n padding-top: 1rem\n padding-bottom: 1rem\n\n color: var(--color-foreground-secondary)\n border-top: 1px solid var(--color-background-border)\n\n line-height: 1.5\n\n @media (max-width: $content-width)\n text-align: center\n flex-direction: column-reverse\n gap: 0.25rem\n\n .left-details\n font-size: var(--font-size--small)\n\n .right-details\n display: flex\n flex-direction: column\n gap: 0.25rem\n text-align: right\n\n .icons\n display: flex\n justify-content: flex-end\n gap: 0.25rem\n font-size: 1rem\n\n a\n text-decoration: none\n\n svg,\n img\n font-size: 1.125rem\n height: 1em\n width: 1em\n\n// Next/Prev page information\n.related-pages\n a\n display: flex\n align-items: center\n\n text-decoration: none\n &:hover .page-info .title\n text-decoration: underline\n color: var(--color-link)\n text-decoration-color: var(--color-link-underline)\n\n svg.furo-related-icon,\n svg.furo-related-icon > use\n flex-shrink: 0\n\n color: 
var(--color-foreground-border)\n\n width: 0.75rem\n height: 0.75rem\n margin: 0 0.5rem\n\n &.next-page\n max-width: 50%\n\n float: right\n clear: right\n text-align: right\n\n &.prev-page\n max-width: 50%\n\n float: left\n clear: left\n\n svg\n transform: rotate(180deg)\n\n.page-info\n display: flex\n flex-direction: column\n overflow-wrap: anywhere\n\n .next-page &\n align-items: flex-end\n\n .context\n display: flex\n align-items: center\n\n padding-bottom: 0.1rem\n\n color: var(--color-foreground-muted)\n font-size: var(--font-size--small)\n text-decoration: none\n","// This file contains the styles for the contents of the left sidebar, which\n// contains the navigation tree, logo, search etc.\n\n////////////////////////////////////////////////////////////////////////////////\n// Brand on top of the scrollable tree.\n////////////////////////////////////////////////////////////////////////////////\n.sidebar-brand\n display: flex\n flex-direction: column\n flex-shrink: 0\n\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n text-decoration: none\n\n.sidebar-brand-text\n color: var(--color-sidebar-brand-text)\n overflow-wrap: break-word\n margin: var(--sidebar-item-spacing-vertical) 0\n font-size: 1.5rem\n\n.sidebar-logo-container\n margin: var(--sidebar-item-spacing-vertical) 0\n\n.sidebar-logo\n margin: 0 auto\n display: block\n max-width: 100%\n\n////////////////////////////////////////////////////////////////////////////////\n// Search\n////////////////////////////////////////////////////////////////////////////////\n.sidebar-search-container\n display: flex\n align-items: center\n margin-top: var(--sidebar-search-space-above)\n\n position: relative\n\n background: var(--color-sidebar-search-background)\n &:hover,\n &:focus-within\n background: var(--color-sidebar-search-background--focus)\n\n &::before\n content: \"\"\n position: absolute\n left: var(--sidebar-item-spacing-horizontal)\n width: 
var(--sidebar-search-icon-size)\n height: var(--sidebar-search-icon-size)\n\n background-color: var(--color-sidebar-search-icon)\n mask-image: var(--icon-search)\n\n.sidebar-search\n box-sizing: border-box\n\n border: none\n border-top: 1px solid var(--color-sidebar-search-border)\n border-bottom: 1px solid var(--color-sidebar-search-border)\n\n padding-top: var(--sidebar-search-input-spacing-vertical)\n padding-bottom: var(--sidebar-search-input-spacing-vertical)\n padding-right: var(--sidebar-search-input-spacing-horizontal)\n padding-left: calc(var(--sidebar-item-spacing-horizontal) + var(--sidebar-search-input-spacing-horizontal) + var(--sidebar-search-icon-size))\n\n width: 100%\n\n color: var(--color-sidebar-search-foreground)\n background: transparent\n z-index: 10\n\n &:focus\n outline: none\n\n &::placeholder\n font-size: var(--sidebar-search-input-font-size)\n\n//\n// Hide Search Matches link\n//\n#searchbox .highlight-link\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal) 0\n margin: 0\n text-align: center\n\n a\n color: var(--color-sidebar-search-icon)\n font-size: var(--font-size--small--2)\n\n////////////////////////////////////////////////////////////////////////////////\n// Structure/Skeleton of the navigation tree (left)\n////////////////////////////////////////////////////////////////////////////////\n.sidebar-tree\n font-size: var(--sidebar-item-font-size)\n margin-top: var(--sidebar-tree-space-above)\n margin-bottom: var(--sidebar-item-spacing-vertical)\n\n ul\n padding: 0\n margin-top: 0\n margin-bottom: 0\n\n display: flex\n flex-direction: column\n\n list-style: none\n\n li\n position: relative\n margin: 0\n\n > ul\n margin-left: var(--sidebar-item-spacing-horizontal)\n\n .icon\n color: var(--color-sidebar-link-text)\n\n .reference\n box-sizing: border-box\n color: var(--color-sidebar-link-text)\n\n // Fill the parent.\n display: inline-block\n line-height: var(--sidebar-item-line-height)\n 
text-decoration: none\n\n // Don't allow long words to cause wrapping.\n overflow-wrap: anywhere\n\n height: 100%\n width: 100%\n\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n\n &:hover\n color: var(--color-sidebar-link-text)\n background: var(--color-sidebar-item-background--hover)\n\n // Add a nice little \"external-link\" arrow here.\n &.external::after\n content: url('data:image/svg+xml,')\n margin: 0 0.25rem\n vertical-align: middle\n color: var(--color-sidebar-link-text)\n\n // Make the current page reference bold.\n .current-page > .reference\n font-weight: bold\n\n label\n position: absolute\n top: 0\n right: 0\n height: var(--sidebar-item-height)\n width: var(--sidebar-expander-width)\n\n cursor: pointer\n user-select: none\n\n display: flex\n justify-content: center\n align-items: center\n\n .caption, :not(.caption) > .caption-text\n font-size: var(--sidebar-caption-font-size)\n color: var(--color-sidebar-caption-text)\n\n font-weight: bold\n text-transform: uppercase\n\n margin: var(--sidebar-caption-space-above) 0 0 0\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n\n // If it has children, add a bit more padding to wrap the content to avoid\n // overlapping with the `, :doc:`GOES-R `, :doc:`MERRA-2 `. + +Wildfire +~~~~~~~~ + +:doc:`FIRMS `, :doc:`FPA-FOD Tabular `, :doc:`FPA-FOD Weekly `, :doc:`LANDFIRE `, :doc:`MTBS `, :doc:`WFIGS `. + +Flood +~~~~~ + +:doc:`Caravan `, :doc:`FloodCastBench `, :doc:`HydroBench `, :doc:`NOAA Flood Events `, :doc:`WaterBench `. + +Earthquake +~~~~~~~~~~ + +:doc:`AEFA Forecast `, :doc:`pick-benchmark `, :doc:`SeisBench `. + +Tropical Cyclone +~~~~~~~~~~~~~~~~ + +:doc:`IBTrACS `, :doc:`TCBench Alpha `, :doc:`TropiCycloneNet-Dataset `. + +Developer Dataset Workflow +-------------------------- + +Use this section when you need the package-level registry and dataset +builder interface rather than the public catalog presentation. 
+ +Inspect an External Dataset Source +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: bash + + python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10 + +Load a Registered Dataset +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + from pyhazards.datasets import available_datasets, load_dataset + + print(available_datasets()) + data = load_dataset( + "seisbench_waveforms", + micro=True, + ).load() + print(sorted(data.splits.keys())) + +Register a Custom Dataset +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + from pyhazards.datasets import ( + DataBundle, + DataSplit, + Dataset, + FeatureSpec, + LabelSpec, + register_dataset, + ) + + class MyDataset(Dataset): + name = "my_dataset" + + def _load(self) -> DataBundle: + raise NotImplementedError("Return a populated DataBundle here.") + + register_dataset("my_dataset", MyDataset) + +Notes +~~~~~ + +- Public dataset docs are generated from cards in ``pyhazards/dataset_cards``. +- Run ``python scripts/render_dataset_docs.py`` after editing cards or generated dataset docs. +- Use :doc:`/implementation` for the full contributor workflow. + +Submodules +---------- + +pyhazards.datasets.base module +------------------------------ + +.. automodule:: pyhazards.datasets.base + :members: + :undoc-members: + :show-inheritance: + +pyhazards.datasets.registry module +----------------------------------- + +.. automodule:: pyhazards.datasets.registry + :members: + :undoc-members: + :show-inheritance: + +pyhazards.datasets.transforms package +------------------------------------- + +.. automodule:: pyhazards.datasets.transforms + :members: + :undoc-members: + :show-inheritance: + +pyhazards.datasets.hazards package +----------------------------------- + +.. automodule:: pyhazards.datasets.hazards + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: pyhazards.datasets + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/build/html/_sources/api/pyhazards.engine.rst.txt b/docs/build/html/_sources/api/pyhazards.engine.rst.txt new file mode 100644 index 00000000..a4ad03da --- /dev/null +++ b/docs/build/html/_sources/api/pyhazards.engine.rst.txt @@ -0,0 +1,37 @@ +pyhazards.engine package +======================== + +Submodules +---------- + +pyhazards.engine.trainer module +------------------------------- + +.. automodule:: pyhazards.engine.trainer + :members: + :undoc-members: + :show-inheritance: + +pyhazards.engine.distributed module +------------------------------------ + +.. automodule:: pyhazards.engine.distributed + :members: + :undoc-members: + :show-inheritance: + +pyhazards.engine.inference module +---------------------------------- + +.. automodule:: pyhazards.engine.inference + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: pyhazards.engine + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/build/html/_sources/api/pyhazards.metrics.rst.txt b/docs/build/html/_sources/api/pyhazards.metrics.rst.txt new file mode 100644 index 00000000..a79a09ac --- /dev/null +++ b/docs/build/html/_sources/api/pyhazards.metrics.rst.txt @@ -0,0 +1,7 @@ +pyhazards.metrics package +========================= + +.. automodule:: pyhazards.metrics + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/build/html/_sources/api/pyhazards.models.rst.txt b/docs/build/html/_sources/api/pyhazards.models.rst.txt new file mode 100644 index 00000000..f2ee6f21 --- /dev/null +++ b/docs/build/html/_sources/api/pyhazards.models.rst.txt @@ -0,0 +1,137 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. 
+ +pyhazards.models package +======================== + +Catalog Summary +--------------- + +This page links the public model catalog, the developer registry +workflow, and the package submodules used to implement model builders. + +For the curated browsing experience, use :doc:`/pyhazards_models`. + +Wildfire +~~~~~~~~ + +Implemented Models +++++++++++++++++++ + +:doc:`ASUFM `, :doc:`DNN-LSTM-AutoEncoder `, :doc:`FireCastNet `, :doc:`ForeFire Adapter `, :doc:`Wildfire Forecasting `, :doc:`WildfireSpreadTS `, :doc:`WRF-SFIRE Adapter `, :doc:`CNN-ASPP `. + +Earthquake +~~~~~~~~~~ + +Implemented Models +++++++++++++++++++ + +:doc:`EQNet `, :doc:`EQTransformer `, :doc:`GPD `, :doc:`PhaseNet `, :doc:`WaveCastNet `. + +Flood +~~~~~ + +Implemented Models +++++++++++++++++++ + +:doc:`EA-LSTM `, :doc:`FloodCast `, :doc:`Google Flood Forecasting `, :doc:`NeuralHydrology LSTM `, :doc:`UrbanFloodCast `, :doc:`HydroGraphNet `. + +Tropical Cyclone +~~~~~~~~~~~~~~~~ + +Implemented Models +++++++++++++++++++ + +:doc:`Hurricast `, :doc:`SAF-Net `, :doc:`TCIF-fusion `, :doc:`Tropical Cyclone MLP `, :doc:`TropiCycloneNet `. + +Experimental Adapters ++++++++++++++++++++++ + +:doc:`FourCastNet TC Adapter `, :doc:`GraphCast TC Adapter `, :doc:`Pangu TC Adapter `. + +Developer Registry Workflow +--------------------------- + +Use this section when you need the package-level builder and registry +interface rather than the public catalog presentation. + +Build a Registered Model +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + from pyhazards.models import build_model + + model = build_model( + name="phasenet", + task="regression", + in_channels=3, + ) + +Register a Custom Model +~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
code-block:: python + + import torch.nn as nn + from pyhazards.models import build_model, register_model + + def my_custom_builder(task: str, in_dim: int, out_dim: int, **kwargs) -> nn.Module: + hidden = kwargs.get("hidden_dim", 128) + return nn.Sequential( + nn.Linear(in_dim, hidden), + nn.ReLU(), + nn.Linear(hidden, out_dim), + ) + + register_model("my_mlp", my_custom_builder, defaults={"hidden_dim": 128}) + model = build_model(name="my_mlp", task="regression", in_dim=16, out_dim=1) + +Notes +~~~~~ + +- Builders receive ``task`` plus any kwargs you pass. +- ``register_model`` stores optional defaults so configs can stay small. +- Use :doc:`/implementation` for the full contributor workflow. + +Submodules +---------- + +pyhazards.models.backbones module +---------------------------------- + +.. automodule:: pyhazards.models.backbones + :members: + :undoc-members: + :show-inheritance: + +pyhazards.models.heads module +------------------------------ + +.. automodule:: pyhazards.models.heads + :members: + :undoc-members: + :show-inheritance: + +pyhazards.models.builder module +------------------------------- + +.. automodule:: pyhazards.models.builder + :members: + :undoc-members: + :show-inheritance: + +pyhazards.models.registry module +-------------------------------- + +.. automodule:: pyhazards.models.registry + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: pyhazards.models + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/build/html/_sources/api/pyhazards.rst.txt b/docs/build/html/_sources/api/pyhazards.rst.txt new file mode 100644 index 00000000..385a8563 --- /dev/null +++ b/docs/build/html/_sources/api/pyhazards.rst.txt @@ -0,0 +1,37 @@ +pyhazards package +================= + +Subpackages +----------- + +.. 
toctree:: + :maxdepth: 4 + + pyhazards.datasets + pyhazards.models + pyhazards.benchmarks + pyhazards.configs + pyhazards.reports + pyhazards.engine + pyhazards.metrics + pyhazards.utils + +Submodules +---------- + +pyhazards.interactive_map module +-------------------------------- + +.. automodule:: pyhazards.interactive_map + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: pyhazards + :members: + :undoc-members: + :show-inheritance: + :exclude-members: BenchmarkRunner, GraphTemporalDataset, graph_collate, WildfireMamba, wildfire_mamba_builder diff --git a/docs/build/html/_sources/api/pyhazards.utils.rst.txt b/docs/build/html/_sources/api/pyhazards.utils.rst.txt new file mode 100644 index 00000000..5b552aa3 --- /dev/null +++ b/docs/build/html/_sources/api/pyhazards.utils.rst.txt @@ -0,0 +1,29 @@ +pyhazards.utils package +======================= + +Submodules +---------- + +pyhazards.utils.hardware module +-------------------------------- + +.. automodule:: pyhazards.utils.hardware + :members: + :undoc-members: + :show-inheritance: + +pyhazards.utils.common module +------------------------------ + +.. automodule:: pyhazards.utils.common + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: pyhazards.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/build/html/_sources/cite.rst.txt b/docs/build/html/_sources/cite.rst.txt new file mode 100644 index 00000000..6e53ea01 --- /dev/null +++ b/docs/build/html/_sources/cite.rst.txt @@ -0,0 +1,19 @@ +How to Cite +=========== + +Use the following citation for the PyHazards software package itself. If you are +also relying on specific datasets or model papers, cite those sources from +:doc:`references` as well. + +Library Citation +---------------- + +.. 
code-block:: bibtex + + @misc{pyhazards2025, + title = {PyHazards: An Open-Source Library for AI-Powered Hazard Prediction}, + author = {Cheng et al.}, + year = {2025}, + howpublished = {\url{https://github.com/LabRAI/PyHazards}}, + note = {GitHub repository} + } diff --git a/docs/build/html/_sources/implementation.rst.txt b/docs/build/html/_sources/implementation.rst.txt new file mode 100644 index 00000000..36af24da --- /dev/null +++ b/docs/build/html/_sources/implementation.rst.txt @@ -0,0 +1,497 @@ +Implementation Guide +==================== + +Use this guide when you want to extend PyHazards itself. It is written for +contributors who are adding new datasets, new models, smoke tests, catalog +cards, or documentation updates for the public site. + +This page explains the public contributor workflow. For repository operations +and maintainer automation details, also see ``.github/IMPLEMENTATION.md``. + +Who This Guide Is For +--------------------- + +This guide assumes you already know Python and PyTorch, but you have not yet +worked inside the PyHazards codebase. It is most useful when you are doing one +of the following: + +- adding a new dataset loader or dataset inspection entrypoint, +- porting a paper or external implementation into ``pyhazards.models``, +- updating the public dataset or model catalogs and generated documentation, +- preparing a pull request that should be easy to review and merge. + +If you only want to install the library and run a first example, use +:doc:`installation` and :doc:`quick_start` instead. + +Repository Mental Model +----------------------- + +PyHazards is organized around a small set of extension points: + +- ``pyhazards.datasets`` contains dataset abstractions, the dataset registry, + and inspection entrypoints for supported data sources. +- ``pyhazards.models`` contains model builders, reusable components, and the + model registry used by ``build_model(...)``. 
+- ``pyhazards.engine`` contains the shared training and evaluation workflow. +- ``pyhazards/dataset_cards`` contains YAML cards used to generate the public + dataset catalog and per-dataset documentation pages. +- ``pyhazards/model_cards`` contains YAML cards used to generate the public + model tables and per-model documentation pages. +- ``docs/source`` contains handwritten Sphinx pages, while the committed + ``docs/`` directory contains the rendered HTML published on GitHub Pages. + +There are three separate layers to keep in mind: + +1. registry availability: + a dataset or model can be constructed from Python once it is registered; +2. catalog visibility: + a public dataset or model only appears on the website when it also has a + matching catalog card; +3. published website output: + GitHub Pages only changes after the rendered HTML in ``docs/`` is rebuilt. + +Typical Contribution Workflow +----------------------------- + +Most changes should follow the same sequence: + +1. decide whether you are extending a dataset, a model, or both; +2. implement the code in ``pyhazards/datasets`` or ``pyhazards/models``; +3. register the new entrypoint so it is discoverable from the library API; +4. add or update smoke-test coverage for the new behavior; +5. update the relevant docs source and, for public datasets or models, the + matching catalog cards; +6. run the smallest local validation commands that match the change; +7. rebuild the published docs HTML if the website output changed; +8. open a pull request with the required metadata and validation notes. + +Treat code, validation, generated docs, and published docs as one contribution. +A public dataset or model implementation is not complete if users cannot +discover it or if the website catalog still describes the old state of the +library. + +Adding a Dataset +---------------- + +Datasets are built around ``Dataset`` and ``DataBundle``. 
A dataset subclass +implements ``_load()`` and returns train/validation/test splits plus feature and +label metadata. + +The minimum pattern looks like this: + +.. code-block:: python + + import torch + from pyhazards.datasets import ( + DataBundle, + DataSplit, + Dataset, + FeatureSpec, + LabelSpec, + register_dataset, + ) + + class MyHazardDataset(Dataset): + name = "my_hazard" + + def _load(self) -> DataBundle: + x = torch.randn(1000, 16) + y = torch.randint(0, 2, (1000,)) + + splits = { + "train": DataSplit(x[:800], y[:800]), + "val": DataSplit(x[800:900], y[800:900]), + "test": DataSplit(x[900:], y[900:]), + } + + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + input_dim=16, + description="Example tabular hazard features.", + ), + label_spec=LabelSpec( + num_targets=2, + task_type="classification", + description="Binary hazard label.", + ), + ) + + register_dataset(MyHazardDataset.name, MyHazardDataset) + +Keep the following expectations in mind when you add a dataset: + +- use ``DataBundle`` to make split names, feature dimensions, and target + semantics explicit; +- keep the builder/import path lightweight so the dataset can be imported + without triggering heavy side effects; +- register the dataset with ``register_dataset(...)`` so + ``load_dataset(name=...)`` can construct it; +- if the dataset belongs in the public catalog, add or update a card in + ``pyhazards/dataset_cards`` and regenerate the dataset docs; +- prefer clear metadata over implicit conventions, especially when a model + depends on shapes, channels, graph structure, or task type. + +Dataset Inspection Entry Points +------------------------------- + +PyHazards also includes inspection modules under ``pyhazards.datasets`` for +supported external data sources. 
If you add a new dataset family, keep the +inspection module consistent with the existing ones: + +- it should be importable as ``python -m pyhazards.datasets..inspection``; +- ``--help`` should exit cleanly; +- argument parsing should work without requiring optional plotting or network + dependencies at import time; +- if the dataset belongs in the public dataset table, its inspection workflow + should be stable enough for ``scripts/verify_table_entries.py``. + +The goal is simple: users should be able to discover the dataset from the docs, +inspect it from the command line, and load it from Python through the registry. + +Dataset Cards and Generated Docs +-------------------------------- + +Public datasets are documented through cards in ``pyhazards/dataset_cards``. +These cards are the source of truth for the public dataset catalog and the +generated per-dataset detail pages. + +A typical dataset card includes: + +- the public display name and hazard family, +- a one-sentence summary and source role, +- provider, geometry, cadence, and period-of-record metadata, +- the primary source or product reference, +- the inspection command when the dataset is inspection-first, +- the registry name and example when it is public through + ``load_dataset(...)``, +- related model and benchmark links when those cross-links help users navigate + the library. + +After updating dataset cards, refresh the generated docs: + +.. code-block:: bash + + python scripts/render_dataset_docs.py + +Use the ``--check`` mode when you want to confirm the generated files are +already up to date: + +.. code-block:: bash + + python scripts/render_dataset_docs.py --check + +Adding a Model +-------------- + +Models are registered builders that can be constructed through: + +.. code-block:: python + + from pyhazards.models import build_model + + model = build_model(name="", task="", **kwargs) + +When you port a paper or external repository into PyHazards, define the library +contract first. 
Your builder should: + +- accept ``task: str``, +- accept the shape and hyperparameter arguments needed to construct the model, +- return an ``nn.Module``, +- validate unsupported tasks early with a clear error, +- accept ``**kwargs`` so extra configuration keys do not break the call path. + +The minimum pattern looks like this: + +.. code-block:: python + + from __future__ import annotations + + import torch + import torch.nn as nn + from pyhazards.models import register_model + + + class MyModel(nn.Module): + def __init__(self, in_dim: int, out_dim: int, hidden_dim: int = 128): + super().__init__() + self.net = nn.Sequential( + nn.Linear(in_dim, hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, out_dim), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 2: + raise ValueError(f"Expected input of shape (batch, features), got {tuple(x.shape)}") + return self.net(x) + + + def my_model_builder( + task: str, + in_dim: int, + out_dim: int, + hidden_dim: int = 128, + **kwargs, + ) -> nn.Module: + _ = kwargs + if task.lower() not in {"classification", "regression"}: + raise ValueError(f"MyModel does not support task={task!r}") + return MyModel(in_dim=in_dim, out_dim=out_dim, hidden_dim=hidden_dim) + + + register_model( + "my_model", + my_model_builder, + defaults={"hidden_dim": 128}, + ) + +In practice, good model ports also include: + +- a short paper-to-library mapping from the original repository into the new + PyHazards module and builder kwargs; +- explicit input-shape validation in ``forward()`` so integration failures are + easy to diagnose; +- clear task handling when the same architecture can be used for different + objectives; +- minimal defaults in the registry so ``build_model(...)`` is predictable. + +Match the Forward Signature to the Data Path +-------------------------------------------- + +PyHazards supports more than one input style. 
Some models work with plain tensor +pairs, while others expect mappings, graph batches, or custom dataset objects. +Make that contract explicit. + +As a rule: + +- if your model expects ``Tensor -> Tensor``, keep the shape assumptions simple + and document them in the model card; +- if your model expects graph or structured inputs, prefer dataset and collate + behavior that produces the mapping your ``forward()`` already consumes; +- use ``FeatureSpec``, ``LabelSpec``, and split metadata to record dimensions, + channels, and task semantics instead of burying them in comments. + +Porting Training Logic +---------------------- + +Do not copy an upstream training loop into PyHazards unless the architecture +truly depends on custom runtime behavior. In most cases you should: + +- keep the architecture inside ``nn.Module``, +- keep custom losses or helper blocks close to the model implementation, +- use ``pyhazards.engine.Trainer`` for fit, evaluate, and predict workflows, +- document intentional differences from the paper repository in the pull request. + +If the PyHazards port changes preprocessing, outputs, or optimization behavior, +state that clearly in the PR's parity notes. Review is much faster when the +intended differences are explicit. + +Model Cards and Generated Docs +------------------------------ + +Public models are documented through cards in ``pyhazards/model_cards``. A model +card is not optional when you want a model to appear on the website. + +A typical card includes: + +- the public model name and display name, +- the hazard family used for the model table, +- the source file and builder name, +- a short summary and description, +- the paper citation or technical reference, +- supported tasks, +- one runnable example, +- a synthetic smoke-test specification. + +For example: + +.. 
code-block:: yaml + + model_name: my_model + display_name: My Model + hazard: Flood + source_file: pyhazards/models/my_model.py + builder_name: my_model_builder + summary: > + Short description of the public model entrypoint. + paper: + title: Example paper title + url: https://example.com/paper + tasks: + - regression + smoke_test: + task: regression + build_kwargs: + in_dim: 16 + out_dim: 1 + input: + kind: tensor + shape: [4, 16] + expected_output: + kind: tensor + shape: [4, 1] + +Model cards drive the generated pages in :doc:`pyhazards_models`. They also +control public visibility: + +- if a model is registered but has no card, it can still be used from Python but + it will not appear in the public model tables; +- if a card sets ``include_in_public_catalog: false``, the implementation stays + in the library but is hidden from the public catalog; +- if the hazard name in the card is new, the generated model page creates a new + hazard section automatically. + +After updating a card, refresh the generated docs: + +.. code-block:: bash + + python scripts/render_model_docs.py + +Use the ``--check`` mode when you want to confirm the generated files are +already up to date: + +.. code-block:: bash + + python scripts/render_model_docs.py --check + +Validation Workflow +------------------- + +Run the smallest set of checks that covers your change. The core validation +commands in this repository are: + +.. 
code-block:: bash + + python -c "import pyhazards; print(pyhazards.__version__)" + python scripts/render_dataset_docs.py --check + python scripts/render_model_docs.py --check + python scripts/verify_table_entries.py + +Use them for the following purposes: + +- ``python -c "import pyhazards; print(pyhazards.__version__)"`` + verifies that the package still imports cleanly; +- ``python scripts/render_dataset_docs.py --check`` + verifies that generated dataset docs and catalog pages are in sync with the + current dataset cards; +- ``python scripts/render_model_docs.py --check`` + verifies that generated model docs and catalog pages are in sync with the + current model cards; +- ``python scripts/verify_table_entries.py`` + exercises dataset inspection entrypoints and runs smoke tests for cataloged + public models. + +When you changed a specific model, also run the model-scoped smoke test: + +.. code-block:: bash + + python scripts/smoke_test_models.py --models + +This uses the model card's smoke-test spec, so it is the fastest way to confirm +that a new public model can build and run with synthetic inputs. + +If your change touched the model catalog or its generation logic, also run: + +.. code-block:: bash + + python -m pytest tests/test_model_catalog.py + +If you changed runtime behavior in the training path and you have the required +hardware available, run the broader smoke path described in ``test.py`` as well. + +Preparing a Model Pull Request +------------------------------ + +Model PRs should make the implementation easy to review against the original +paper or upstream repository. 
The PR template asks for a few specific fields for +that reason: + +- ``Model Summary`` should describe the architecture and public API you are + adding, not just the file names you changed; +- ``Hazard Scenario`` should name the model table that owns the entry, and it + should explicitly call out when the PR introduces a new hazard family; +- ``Registry Name`` should list the exact ``build_model(name=...)`` entrypoints + added or changed in the PR; +- ``Paper / Source`` should link the scientific paper, source repository, or + technical reference that the implementation follows; +- ``Smoke Test`` should list the commands you ran or point to the card's + smoke-test specification; +- ``Parity Notes`` should explain intentional differences from the upstream + implementation, especially around preprocessing, outputs, or objectives. + +PR automation can only help when this metadata is present and accurate. A +catalog-backed model PR is expected to include the implementation, the registry +wiring, the model card, the smoke-test path, and refreshed generated docs. + +Registration, Catalog, and Published HTML +----------------------------------------- + +It is easy to update one layer of the repo and forget the others. Keep this +distinction in mind: + +- code registration makes a dataset or model usable from Python; +- dataset cards make a public dataset discoverable in the generated docs; +- model cards make a public model discoverable in the generated docs; +- Sphinx source updates change the documentation source tree; +- rebuilding ``docs/`` updates the committed HTML published on GitHub Pages. + +If the website output changed, rebuild the site locally: + +.. code-block:: bash + + cd docs + sphinx-build -b html source build/html + cp -r build/html/* . + +That final copy step matters in this repository because the published website is +served from the committed ``docs/`` directory, not from ``docs/source``. 
+ +Common Mistakes +--------------- + +These are the issues that most often block review: + +- the new dataset or model exists in code but was never registered; +- a public dataset changed, but ``pyhazards/dataset_cards`` or the generated + dataset docs were not updated; +- a public model was implemented without a matching card in + ``pyhazards/model_cards``; +- generated docs were not refreshed after the model card changed; +- ``docs/source`` was updated but the committed ``docs/`` HTML was not rebuilt; +- the builder does not validate unsupported tasks or accepts the wrong shape + arguments for the intended use; +- a hidden or internal model was accidentally left visible in the public + catalog; +- an inspection module imports optional heavy dependencies at module import time, + which breaks ``python -m ... --help`` in clean environments. + +Contributor Checklist +--------------------- + +Before you open a pull request, confirm all of the following: + +- the implementation lives in the correct dataset or model module; +- the new entrypoint is registered and can be constructed from the public API; +- task handling and input-shape validation are clear and actionable; +- public datasets have a complete card when they belong in the public catalog; +- generated dataset docs are refreshed and pass ``render_dataset_docs.py --check``; +- public models have a complete card with a runnable smoke-test spec; +- generated model docs are refreshed and pass ``render_model_docs.py --check``; +- dataset inspection entrypoints and public tables pass + ``scripts/verify_table_entries.py``; +- the published docs HTML in ``docs/`` was rebuilt if the visible website output + changed; +- the pull request explains the source paper, registry name, hazard scenario, + smoke-test commands, and parity notes. 
+ +Next Steps +---------- + +After you finish a contributor-oriented change: + +- browse the public catalogs in :doc:`pyhazards_datasets` and + :doc:`pyhazards_models` to confirm the new entry is discoverable; +- use :doc:`quick_start` to check that the user path still feels coherent; +- keep ``.github/IMPLEMENTATION.md`` and this page aligned when the repository + workflow changes. diff --git a/docs/build/html/_sources/index.rst.txt b/docs/build/html/_sources/index.rst.txt new file mode 100644 index 00000000..7804de80 --- /dev/null +++ b/docs/build/html/_sources/index.rst.txt @@ -0,0 +1,307 @@ +.. title:: PyHazards + +.. image:: _static/logo.png + :alt: PyHazards Icon + :width: 220px + :align: center + :class: landing-hero-logo + +.. raw:: html + + + +Overview +-------- + +PyHazards brings together public dataset catalogs, registry-based models, +benchmark families, experiment configs, and shared training or reporting +workflows across wildfire, earthquake, flood, and tropical cyclone tasks. + +It is designed for researchers and practitioners who need one coherent library +for reproducing baselines, comparing methods, and extending hazard-ML +workflows without rebuilding the software stack for each hazard family. + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid home-kicker-grid home-hero-stats + + .. grid-item-card:: Hazard Families + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 4 + + .. container:: catalog-stat-note + + Wildfire, earthquake, flood, and tropical cyclone workflows under one library. + + .. grid-item-card:: Public Datasets + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 20 + + .. container:: catalog-stat-note + + Curated dataset pages covering forcing sources and hazard-specific benchmark adapters. + + .. grid-item-card:: Implemented Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 24 + + .. 
container:: catalog-stat-note + + Public implemented baselines and variants surfaced through the model catalog. + + .. grid-item-card:: Benchmark Families + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 4 + + .. container:: catalog-stat-note + + Shared evaluator families with linked ecosystems, smoke configs, and reports. + +Start Here +---------- + +.. container:: home-section-note + + Use one of these four paths to move from overview to action quickly. + +.. grid:: 1 1 2 4 + :gutter: 2 + :class-container: catalog-recommend-grid home-link-grid + + .. grid-item-card:: Quick Start + :class-card: catalog-detail-card + + Run the first benchmark-aware workflow and verify the package. + + **Open:** :doc:`Quick Start ` + + .. grid-item-card:: Browse Datasets + :class-card: catalog-detail-card + + Explore forcing sources, benchmark adapters, and inspection entrypoints. + + **Open:** :doc:`Datasets ` + + .. grid-item-card:: Browse Models + :class-card: catalog-detail-card + + Compare implemented baselines, variants, and benchmark-linked model detail pages. + + **Open:** :doc:`Models ` + + .. grid-item-card:: Browse Benchmarks + :class-card: catalog-detail-card + + Compare hazard benchmark families, ecosystem mappings, and smoke coverage. + + **Open:** :doc:`Benchmarks ` + +Why PyHazards +------------- + +.. grid:: 1 1 2 4 + :gutter: 2 + :class-container: catalog-grid home-pillar-grid + + .. grid-item-card:: Unified Datasets + :class-card: catalog-detail-card + + Public datasets, forcing sources, and inspection surfaces are documented through one hazard-first catalog. + + .. grid-item-card:: Benchmark-aligned Evaluation + :class-card: catalog-detail-card + + Shared benchmark families, smoke configs, and report exports make model comparisons more reproducible. + + .. grid-item-card:: Registry-based Models + :class-card: catalog-detail-card + + Baselines and adapters are exposed through a consistent build surface instead of one-off scripts. + + .. 
grid-item-card:: Shared Training and Inference + :class-card: catalog-detail-card + + One engine layer supports training, evaluation, prediction, and benchmark execution across hazard tasks. + +Hazard Coverage +--------------- + +.. container:: home-section-note + + PyHazards spans four hazard families with public datasets, models, and benchmark pages designed to work together. + +.. grid:: 1 1 2 4 + :gutter: 2 + :class-container: catalog-recommend-grid home-hazard-grid + + .. grid-item-card:: Wildfire + :class-card: catalog-detail-card + + Danger forecasting, weekly forecasting, spread baselines, fuels, burn products, and active-fire sources. + + **Explore:** :doc:`Datasets ` | :doc:`Models ` + + .. grid-item-card:: Earthquake + :class-card: catalog-detail-card + + Waveform picking, dense-grid forecasting adapters, and linked benchmark ecosystems for phase-picking workflows. + + **Explore:** :doc:`Models ` | :doc:`Benchmarks ` + + .. grid-item-card:: Flood + :class-card: catalog-detail-card + + Streamflow and inundation baselines with benchmark-backed datasets, configs, and evaluation coverage. + + **Explore:** :doc:`Datasets ` | :doc:`Benchmarks ` + + .. grid-item-card:: Tropical Cyclone + :class-card: catalog-detail-card + + Track-and-intensity forecasting baselines plus shared benchmark ecosystems and experimental weather-model adapters. + + **Explore:** :doc:`Models ` | :doc:`Benchmarks ` + +Featured Example +---------------- + +.. container:: home-section-note + + Run a benchmark-aligned smoke configuration with one command, then move into the full Quick Start for model building and training workflows. + +.. code-block:: bash + + python scripts/run_benchmark.py --config pyhazards/configs/flood/hydrographnet_smoke.yaml + +.. container:: catalog-link-row + + **Next step:** :doc:`Quick Start ` for the first full workflow, or :doc:`Models ` to browse benchmark-linked baselines. + +Explore the Docs +---------------- + +.. 
grid:: 1 1 2 3 + :gutter: 2 + :class-container: catalog-recommend-grid home-link-grid + + .. grid-item-card:: Installation + :class-card: catalog-detail-card + + Set up PyHazards from PyPI or source and verify the environment. + + **Open:** :doc:`installation` + + .. grid-item-card:: Quick Start + :class-card: catalog-detail-card + + Run the shortest end-to-end workflow in the library. + + **Open:** :doc:`quick_start` + + .. grid-item-card:: Datasets + :class-card: catalog-detail-card + + Browse hazard-grouped dataset cards, detail pages, and inspection entrypoints. + + **Open:** :doc:`pyhazards_datasets` + + .. grid-item-card:: Models + :class-card: catalog-detail-card + + Compare implemented models, variants, and benchmark-linked detail pages. + + **Open:** :doc:`pyhazards_models` + + .. grid-item-card:: Benchmarks + :class-card: catalog-detail-card + + Review benchmark families, ecosystem mappings, and smoke-config coverage. + + **Open:** :doc:`pyhazards_benchmarks` + + .. grid-item-card:: Reports and Configs + :class-card: catalog-detail-card + + Load reproducible experiment YAML files and export benchmark summaries. + + **Open:** :doc:`pyhazards_configs` | :doc:`pyhazards_reports` + +For Contributors +---------------- + +PyHazards is registry-driven and uses dataset cards, model cards, and benchmark +cards to generate the public catalogs. If you plan to extend the library, use +:doc:`implementation` for the contributor workflow and :doc:`appendix_a_coverage` +for the audited gap list behind the current roadmap work. + +Citation +-------- + +If you use PyHazards in your research, please cite: + +.. code-block:: bibtex + + @misc{pyhazards2025, + title = {PyHazards: An Open-Source Library for AI-Powered Hazard Prediction}, + author = {Cheng et al.}, + year = {2025}, + howpublished = {\url{https://github.com/LabRAI/PyHazards}}, + note = {GitHub repository} + } + +Community +--------- + +Use the `RAI Lab Slack channel `_ +for project discussion and coordination. 
+ + +.. toctree:: + :maxdepth: 2 + :caption: Getting Started + :hidden: + + installation + quick_start + +.. toctree:: + :maxdepth: 1 + :caption: API Reference + :hidden: + + pyhazards_datasets + pyhazards_models + pyhazards_benchmarks + pyhazards_configs + pyhazards_reports + pyhazards_engine + pyhazards_metrics + pyhazards_utils + interactive_map + +.. toctree:: + :maxdepth: 2 + :caption: Additional Information + :hidden: + + implementation + appendix_a_coverage + cite + references + team diff --git a/docs/build/html/_sources/installation.rst.txt b/docs/build/html/_sources/installation.rst.txt new file mode 100644 index 00000000..9bd47237 --- /dev/null +++ b/docs/build/html/_sources/installation.rst.txt @@ -0,0 +1,63 @@ +Installation +============ + +Use this page to install PyHazards, verify that the package imports correctly, +and choose the right setup path for local use or contribution. PyHazards +supports Python 3.8 through 3.12 and installs with ``pip``. + +Requirements +------------ + +- Python ``>=3.8, <3.13`` +- PyTorch ``>=2.3, <3.0`` + +Install from PyPI +----------------- + +Install from PyPI: + +.. code-block:: bash + + pip install pyhazards + +GPU Install +----------- + +If you plan to run on GPU, install a matching PyTorch build first and then +install PyHazards. + +Example for CUDA 12.6: + +.. code-block:: bash + + pip install torch --index-url https://download.pytorch.org/whl/cu126 + pip install pyhazards + +Install from Source +------------------- + +Use an editable install when you are contributing code or documentation: + +.. code-block:: bash + + git clone https://github.com/LabRAI/PyHazards.git + cd PyHazards + python -m pip install -e . + +Verify the Installation +----------------------- + +Run a small import check to confirm that the package is available in the +environment: + +.. code-block:: bash + + python -c "import pyhazards; print(pyhazards.__version__)" + +You should see the installed package version printed to stdout. 
+ +Next Steps +---------- + +- Continue to :doc:`quick_start` for the first end-to-end workflow. +- See :doc:`implementation` if you are setting up a contributor workflow. diff --git a/docs/build/html/_sources/pyhazards_datasets.rst.txt b/docs/build/html/_sources/pyhazards_datasets.rst.txt new file mode 100644 index 00000000..31b4f17a --- /dev/null +++ b/docs/build/html/_sources/pyhazards_datasets.rst.txt @@ -0,0 +1,929 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +Datasets +=================== + +Browse PyHazards datasets across hazard families, compare source roles, +inspection paths, and registry surfaces, and navigate to dataset-specific +detail pages. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Groups + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 5 + + .. container:: catalog-stat-note + + Public dataset tabs grouped by the curated hazard-first taxonomy. + + .. grid-item-card:: Public Datasets + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 20 + + .. container:: catalog-stat-note + + Curated datasets surfaced on the public site. + + .. grid-item-card:: Inspection Entry Points + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 10 + + .. container:: catalog-stat-note + + Datasets with an explicit inspection command documented on the site. + + .. grid-item-card:: Registry-loadable Datasets + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 12 + + .. container:: catalog-stat-note + + Datasets with a documented public ``load_dataset(...)`` path. + + +Catalog by Hazard +----------------- + +Use the hazard tabs below to browse the public dataset catalog. Each +card keeps the summary short, then links into the detail page, the +primary source, and the most relevant inspection or registry surface. + +.. tab-set:: + :class: catalog-tabs + + .. 
tab-item:: Shared Forcing + + .. container:: catalog-section-note + + Cross-hazard meteorology and imagery sources that support multiple PyHazards workflows, inspections, and forcing pipelines. + + .. rubric:: Implemented Datasets + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: ERA5 + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + ECMWF's global reanalysis used as a high-resolution meteorological baseline for hazard experiments. + + .. container:: catalog-chip-row + + :bdg-secondary:`Reanalysis` :bdg-info:`Regular latitude-longitude grid` + + .. container:: catalog-meta-row + + **Coverage:** Global + + .. container:: catalog-meta-row + + **Update Cadence:** Daily ERA5T updates with about 5-day latency, followed by final validated releases after 2-3 months + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10`` + + .. container:: catalog-link-row + + **Details:** :doc:`ERA5 ` + + .. container:: catalog-link-row + + **Primary Source:** `Hersbach et al. (2020). The ERA5 global reanalysis. `_ + + .. grid-item-card:: GOES-R + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Rapid-refresh GOES-R satellite imagery used for smoke, fire, and weather monitoring workflows. + + .. container:: catalog-chip-row + + :bdg-secondary:`Geostationary Imagery` :bdg-info:`Raster imagery time series on the ABI fixed grid` + + .. container:: catalog-meta-row + + **Coverage:** Western Hemisphere / Americas geostationary view + + .. container:: catalog-meta-row + + **Update Cadence:** Continuous ingest as new files become available + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.goesr.inspection --path /path/to/goesr_data --max-items 10`` + + .. container:: catalog-link-row + + **Details:** :doc:`GOES-R ` + + .. 
container:: catalog-link-row + + **Primary Source:** `Schmit et al. (2017). A closer look at the ABI on the GOES-R series. `_ + + .. grid-item-card:: MERRA-2 + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Global atmospheric reanalysis from NASA GMAO used as a shared meteorological backbone for hazard modeling. + + .. container:: catalog-chip-row + + :bdg-secondary:`Reanalysis` :bdg-info:`Regular latitude-longitude grid` + + .. container:: catalog-meta-row + + **Coverage:** Global + + .. container:: catalog-meta-row + + **Update Cadence:** Published monthly with typical 2-3 week latency after month end + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.merra2.inspection 20260101`` + + .. container:: catalog-link-row + + **Details:** :doc:`MERRA-2 ` + + .. container:: catalog-link-row + + **Primary Source:** `Gelaro et al. (2017). The Modern-Era Retrospective Analysis for Research and Applications, Version 2 (MERRA-2). `_ + + + .. tab-item:: Wildfire + + .. container:: catalog-section-note + + Wildfire datasets span authoritative incident records, active-fire detections, fuels, burn severity, and forecast-ready benchmark adapters. + + .. rubric:: Implemented Datasets + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: FIRMS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + NASA's near-real-time active fire detections used for operational wildfire monitoring and event labeling. + + .. container:: catalog-chip-row + + :bdg-secondary:`Active Fire Detections` :bdg-info:`Event-based point detections` + + .. container:: catalog-meta-row + + **Coverage:** Global + + .. container:: catalog-meta-row + + **Update Cadence:** Fire maps refresh about every 5 minutes and downloadable files refresh about hourly + + .. 
container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.firms.inspection --path /path/to/firms_data --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`FIRMS ` + + .. container:: catalog-link-row + + **Primary Source:** `Schroeder et al. (2014). The New VIIRS 375 m active fire detection data product. `_ + + .. grid-item-card:: FPA-FOD Tabular + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Incident-level FPA-FOD features packaged for wildfire cause and size classification. + + .. container:: catalog-chip-row + + :bdg-secondary:`Incident Tabular` :bdg-info:`Tabular feature vectors` + + .. container:: catalog-meta-row + + **Coverage:** User-provided FPA-FOD coverage + + .. container:: catalog-meta-row + + **Update Cadence:** User-managed local inputs or deterministic micro mode + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.fpa_fod_tabular.inspection --task cause --micro`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`FPA-FOD Tabular ` + + .. container:: catalog-link-row + + **Primary Source:** `PyHazards FPA-FOD tabular adaptation for the wildfire incident classification path. `_ + + .. grid-item-card:: FPA-FOD Weekly + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Weekly FPA-FOD aggregates packaged for next-week wildfire count forecasting by size group. + + .. container:: catalog-chip-row + + :bdg-secondary:`Weekly Forecasting` :bdg-info:`Temporal tabular sequences` + + .. container:: catalog-meta-row + + **Coverage:** User-provided FPA-FOD coverage + + .. container:: catalog-meta-row + + **Update Cadence:** User-managed local inputs or deterministic micro mode + + .. 
container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.fpa_fod_weekly.inspection --micro --lookback-weeks 12`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`FPA-FOD Weekly ` + + .. container:: catalog-link-row + + **Primary Source:** `PyHazards FPA-FOD weekly adaptation for the wildfire forecasting path. `_ + + .. grid-item-card:: LANDFIRE + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Nationwide fuels, vegetation, and canopy layers used as static wildfire covariates. + + .. container:: catalog-chip-row + + :bdg-secondary:`Fuels and Vegetation` :bdg-info:`Gridded raster layers` + + .. container:: catalog-meta-row + + **Coverage:** United States + + .. container:: catalog-meta-row + + **Update Cadence:** Annual versioned update suites + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.landfire.inspection --path /path/to/landfire_data --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`LANDFIRE ` + + .. container:: catalog-link-row + + **Primary Source:** `Rollins (2009). LANDFIRE: A nationally consistent vegetation, wildland fire, and fuel assessment. `_ + + .. grid-item-card:: MTBS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + U.S. burn severity and fire perimeter products used for post-fire analysis and wildfire evaluation. + + .. container:: catalog-chip-row + + :bdg-secondary:`Burn Severity` :bdg-info:`Per-fire rasters with associated vector perimeters` + + .. container:: catalog-meta-row + + **Coverage:** United States + + .. container:: catalog-meta-row + + **Update Cadence:** Continuous mapping with quarterly releases + + .. 
container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.mtbs.inspection --path /path/to/mtbs_data --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`MTBS ` + + .. container:: catalog-link-row + + **Primary Source:** `Eidenshink et al. (2007). A project for monitoring trends in burn severity. `_ + + .. grid-item-card:: WFIGS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Interagency wildfire incident records used as authoritative wildfire ground truth across the United States. + + .. container:: catalog-chip-row + + :bdg-secondary:`Incident Records` :bdg-info:`Incident points and perimeters` + + .. container:: catalog-meta-row + + **Coverage:** United States + + .. container:: catalog-meta-row + + **Update Cadence:** Refreshed from IRWIN roughly every 5 minutes, with perimeter changes often appearing within 15 minutes + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.wfigs.inspection --path /path/to/wfigs_data --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`WFIGS ` + + .. container:: catalog-link-row + + **Primary Source:** `National Interagency Fire Center. Wildland Fire Incident Geospatial Services (WFIGS). `_ + + + .. tab-item:: Flood + + .. container:: catalog-section-note + + Flood datasets combine event records with streamflow and inundation benchmark adapters used by the public flood models. + + .. rubric:: Implemented Datasets + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Caravan + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed streamflow benchmark adapter aligned to the Caravan large-sample hydrology ecosystem. + + .. 
container:: catalog-chip-row + + :bdg-secondary:`Streamflow Benchmark` :bdg-info:`Graph-temporal basin or node sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned streamflow forecasting samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('caravan_streamflow', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Flood Benchmark `, :doc:`Caravan ` + + .. container:: catalog-link-row + + **Details:** :doc:`Caravan ` + + .. container:: catalog-link-row + + **Primary Source:** `Caravan - A global community dataset for large-sample hydrology `_ + + .. grid-item-card:: FloodCastBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed inundation benchmark adapter aligned to the FloodCastBench evaluation ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Inundation Benchmark` :bdg-info:`Raster inundation sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned flood inundation samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('floodcastbench_inundation', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Flood Benchmark `, :doc:`FloodCastBench ` + + .. container:: catalog-link-row + + **Details:** :doc:`FloodCastBench ` + + .. container:: catalog-link-row + + **Primary Source:** `FloodCastBench `_ + + .. grid-item-card:: HydroBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed streamflow diagnostics adapter aligned to the HydroBench ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Streamflow Benchmark` :bdg-info:`Graph-temporal basin or node sequences` + + .. 
container:: catalog-meta-row + + **Coverage:** Benchmark-aligned streamflow forecasting samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('hydrobench_streamflow', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Flood Benchmark `, :doc:`HydroBench ` + + .. container:: catalog-link-row + + **Details:** :doc:`HydroBench ` + + .. container:: catalog-link-row + + **Primary Source:** `HydroBench `_ + + .. grid-item-card:: NOAA Flood Events + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Historical NOAA storm-event flood records used as event labels and impact targets for flood studies. + + .. container:: catalog-chip-row + + :bdg-secondary:`Event Records` :bdg-info:`Tabular event records with administrative regions and optional point coordinates` + + .. container:: catalog-meta-row + + **Coverage:** United States + + .. container:: catalog-meta-row + + **Update Cadence:** Updated monthly, typically 75-90 days after the end of a data month + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.noaa_flood.inspection --path /path/to/noaa_flood_data --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Flood Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`NOAA Flood Events ` + + .. container:: catalog-link-row + + **Primary Source:** `NOAA National Centers for Environmental Information. Storm Events Database Documentation. `_ + + .. grid-item-card:: WaterBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed streamflow benchmark adapter aligned to the WaterBench ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Streamflow Benchmark` :bdg-info:`Graph-temporal basin or node sequences` + + .. 
container:: catalog-meta-row + + **Coverage:** Benchmark-aligned streamflow forecasting samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('waterbench_streamflow', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Flood Benchmark `, :doc:`WaterBench ` + + .. container:: catalog-link-row + + **Details:** :doc:`WaterBench ` + + .. container:: catalog-link-row + + **Primary Source:** `WaterBench: A Large-scale Benchmark Dataset for Data-driven Streamflow Forecasting `_ + + + .. tab-item:: Earthquake + + .. container:: catalog-section-note + + Earthquake datasets cover waveform-picking and forecasting adapters that align the public models with the shared earthquake benchmark. + + .. rubric:: Implemented Datasets + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: AEFA Forecast + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed dense-grid forecasting adapter aligned to the AEFA earthquake forecasting workflow. + + .. container:: catalog-chip-row + + :bdg-secondary:`Forecast Benchmark` :bdg-info:`Dense-grid wavefield tensors` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned earthquake forecasting samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('aefa_forecast', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Earthquake Benchmark `, :doc:`AEFA ` + + .. container:: catalog-link-row + + **Details:** :doc:`AEFA Forecast ` + + .. container:: catalog-link-row + + **Primary Source:** `AEFA `_ + + .. grid-item-card:: pick-benchmark + :class-card: catalog-entry-card + + .. 
container:: catalog-entry-summary + + Synthetic-backed waveform picking adapter aligned to the pick-benchmark evaluation ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Waveform Benchmark` :bdg-info:`Multichannel waveform windows` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned earthquake phase-picking samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('pick_benchmark_waveforms', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Earthquake Benchmark `, :doc:`pick-benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`pick-benchmark ` + + .. container:: catalog-link-row + + **Primary Source:** `pick-benchmark `_ + + .. grid-item-card:: SeisBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed waveform picking adapter aligned to the SeisBench ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Waveform Benchmark` :bdg-info:`Multichannel waveform windows` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned earthquake phase-picking samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('seisbench_waveforms', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Earthquake Benchmark `, :doc:`SeisBench ` + + .. container:: catalog-link-row + + **Details:** :doc:`SeisBench ` + + .. container:: catalog-link-row + + **Primary Source:** `SeisBench - A Toolbox for Machine Learning in Seismology `_ + + + .. tab-item:: Tropical Cyclone + + .. container:: catalog-section-note + + Storm datasets cover best-track archives and benchmark adapters used by the shared tropical cyclone track-intensity workflow. + + .. 
rubric:: Implemented Datasets + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: IBTrACS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed storm-track adapter aligned to the IBTrACS tropical cyclone archive. + + .. container:: catalog-chip-row + + :bdg-secondary:`Track Archive` :bdg-info:`Storm-track history sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned tropical cyclone track and intensity samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('ibtracs_tracks', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Tropical Cyclone Benchmark `, :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Details:** :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Primary Source:** `IBTrACS `_ + + .. grid-item-card:: TCBench Alpha + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed storm-track benchmark adapter aligned to the TCBench Alpha ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Track Benchmark` :bdg-info:`Storm-track history sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned tropical cyclone track and intensity samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('tcbench_alpha', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Tropical Cyclone Benchmark `, :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Details:** :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Primary Source:** `TCBench Alpha `_ + + .. 
grid-item-card:: TropiCycloneNet-Dataset + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed storm-track benchmark adapter aligned to the TropiCycloneNet-Dataset ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Track Benchmark` :bdg-info:`Storm-track history sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned tropical cyclone track and intensity samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('tropicyclonenet_dataset', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Tropical Cyclone Benchmark `, :doc:`TropiCycloneNet-Dataset ` + + .. container:: catalog-link-row + + **Details:** :doc:`TropiCycloneNet-Dataset ` + + .. container:: catalog-link-row + + **Primary Source:** `TropiCycloneNet-Dataset `_ + + + +Recommended Entry Points +------------------------ + +If you are new to PyHazards, start with one high-signal dataset per +hazard group before branching into the full catalog. + +.. grid:: 1 1 2 4 + :gutter: 2 + :class-container: catalog-recommend-grid + + .. grid-item-card:: Shared Forcing + :class-card: catalog-detail-card + + **Start with:** :doc:`ERA5 ` + + ECMWF's global reanalysis used as a high-resolution meteorological baseline for hazard experiments. + + **Primary Surface:** Inspection: ``python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10`` + + .. grid-item-card:: Wildfire + :class-card: catalog-detail-card + + **Start with:** :doc:`FPA-FOD Weekly ` + + Weekly FPA-FOD aggregates packaged for next-week wildfire count forecasting by size group. + + **Primary Surface:** Inspection: ``python -m pyhazards.datasets.fpa_fod_weekly.inspection --micro --lookback-weeks 12`` + + .. 
grid-item-card:: Flood + :class-card: catalog-detail-card + + **Start with:** :doc:`Caravan ` + + Synthetic-backed streamflow benchmark adapter aligned to the Caravan large-sample hydrology ecosystem. + + **Primary Surface:** Registry: ``load_dataset('caravan_streamflow', ...)`` + + .. grid-item-card:: Earthquake + :class-card: catalog-detail-card + + **Start with:** :doc:`SeisBench ` + + Synthetic-backed waveform picking adapter aligned to the SeisBench ecosystem. + + **Primary Surface:** Registry: ``load_dataset('seisbench_waveforms', ...)`` + + .. grid-item-card:: Tropical Cyclone + :class-card: catalog-detail-card + + **Start with:** :doc:`IBTrACS ` + + Synthetic-backed storm-track adapter aligned to the IBTrACS tropical cyclone archive. + + **Primary Surface:** Registry: ``load_dataset('ibtracs_tracks', ...)`` + + +Programmatic Use +---------------- + +.. code-block:: bash + + python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10 + +.. code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "fpa_fod_weekly", + micro=True, + lookback_weeks=12, + features="counts+time", + ).load() + print(sorted(data.splits.keys())) + +Use :doc:`api/pyhazards.datasets` for the developer dataset workflow +and package-level API lookup. Pair this page with :doc:`pyhazards_models` +and :doc:`pyhazards_benchmarks` when you need to trace datasets into +model and evaluation coverage. + +.. 
toctree:: + :maxdepth: 1 + :hidden: + + datasets/era5 + datasets/goesr + datasets/merra2 + datasets/firms + datasets/fpa_fod_tabular + datasets/fpa_fod_weekly + datasets/landfire + datasets/mtbs + datasets/wfigs + datasets/caravan_streamflow + datasets/floodcastbench_inundation + datasets/hydrobench_streamflow + datasets/noaa_flood + datasets/waterbench_streamflow + datasets/aefa_forecast + datasets/pick_benchmark_waveforms + datasets/seisbench_waveforms + datasets/ibtracs_tracks + datasets/tcbench_alpha + datasets/tropicyclonenet_dataset diff --git a/docs/build/html/_sources/pyhazards_engine.rst.txt b/docs/build/html/_sources/pyhazards_engine.rst.txt new file mode 100644 index 00000000..8e5e310a --- /dev/null +++ b/docs/build/html/_sources/pyhazards_engine.rst.txt @@ -0,0 +1,47 @@ +Engine +=================== + +Overview +-------- + +Use the engine when you want a shared interface for training, evaluation, and +prediction without rewriting the loop for every hazard task. + +Core modules +------------ + +- ``pyhazards.engine.trainer``: the ``Trainer`` class with ``fit``, + ``evaluate``, and ``predict``. +- ``pyhazards.engine.distributed``: distributed-strategy helpers. +- ``pyhazards.engine.inference``: inference utilities for large grids or + sliding-window style workflows. + +Typical Usage +------------- + +.. 
code-block:: python + + import torch + from pyhazards.engine import Trainer + from pyhazards.metrics import ClassificationMetrics + from pyhazards.models import build_model + + model = build_model(name="mlp", task="classification", in_dim=16, out_dim=2) + trainer = Trainer(model=model, metrics=[ClassificationMetrics()], mixed_precision=True) + + optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) + loss_fn = torch.nn.CrossEntropyLoss() + + trainer.fit(data_bundle, optimizer=optimizer, loss_fn=loss_fn, max_epochs=10) + results = trainer.evaluate(data_bundle, split="test") + preds = trainer.predict(data_bundle, split="test") + +Device and Distributed Notes +---------------------------- + +- ``Trainer(strategy="auto")`` uses DDP when multiple GPUs are available; otherwise runs single-device. +- ``mixed_precision=True`` enables AMP when on CUDA. +- Device selection is handled via ``pyhazards.utils.hardware.auto_device`` by default. + +Next step: pair this page with :doc:`pyhazards_metrics` and +:doc:`pyhazards_utils` when you want to customize evaluation or device behavior. diff --git a/docs/build/html/_sources/pyhazards_metrics.rst.txt b/docs/build/html/_sources/pyhazards_metrics.rst.txt new file mode 100644 index 00000000..128f5896 --- /dev/null +++ b/docs/build/html/_sources/pyhazards_metrics.rst.txt @@ -0,0 +1,29 @@ +Metrics +=================== + +Overview +-------- + +PyHazards includes small, task-oriented metric classes that accumulate +predictions and targets across a full split. + +Core Classes +------------ + +- ``MetricBase``: shared interface with ``update``, ``compute``, and ``reset``. +- ``ClassificationMetrics``: basic classification metrics such as accuracy. +- ``RegressionMetrics``: MAE and RMSE style regression summaries. +- ``SegmentationMetrics``: segmentation-oriented aggregation. + +Usage +----- + +.. 
code-block:: python + + from pyhazards.metrics import ClassificationMetrics + + metrics = [ClassificationMetrics()] + # pass to Trainer or update metrics directly + +Use this page together with :doc:`pyhazards_engine` if you want a consistent +train/evaluate workflow. diff --git a/docs/build/html/_sources/pyhazards_models.rst.txt b/docs/build/html/_sources/pyhazards_models.rst.txt new file mode 100644 index 00000000..fd46d953 --- /dev/null +++ b/docs/build/html/_sources/pyhazards_models.rst.txt @@ -0,0 +1,952 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +Models +=================== + +Browse PyHazards model implementations across hazard families, compare +scope and maturity, and navigate to model-specific detail pages. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Families + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 4 + + .. container:: catalog-stat-note + + Catalog tabs grouped by the normalized public hazard taxonomy. + + .. grid-item-card:: Implemented Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 24 + + .. container:: catalog-stat-note + + Public core baselines plus additional implemented variants. + + .. grid-item-card:: Experimental Adapters + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 3 + + .. container:: catalog-stat-note + + Prototype weather-model integrations kept separate from the stable catalog. + + .. grid-item-card:: Benchmark-linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 27 + + .. container:: catalog-stat-note + + Models with explicit benchmark-family or ecosystem links on this page. + + +Catalog by Hazard +----------------- + +Use the hazard tabs below to browse the public catalog. 
Each card keeps +the index-page summary short, then links into model-specific detail +pages and compatible benchmark coverage. + +.. tab-set:: + :class: catalog-tabs + + .. tab-item:: Wildfire + + .. container:: catalog-section-note + + Wildfire models cover danger forecasting, weekly activity forecasting, and spread prediction under the shared wildfire benchmark family. + + .. rubric:: Implemented Models + + .. container:: catalog-section-note + + This table includes both core baselines and public variants or additional implementations for the hazard family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: ASUFM + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A temporal convolution baseline for weekly wildfire activity forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Forecasting` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`ASUFM ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Paper:** `Wildfire Spread Prediction in North America Using Satellite Imagery and Vision Transformer `_ | **Repo:** `Repository `__ + + .. grid-item-card:: DNN-LSTM-AutoEncoder + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A two-stage wildfire framework with a DNN stage for incident-level cause and size prediction plus an LSTM + autoencoder stage for weekly forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Classification` :bdg-secondary:`Forecasting` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`DNN-LSTM-AutoEncoder ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Paper:** `Developing risk assessment framework for wildfire in the United States `_ + + .. 
grid-item-card:: FireCastNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact encoder-decoder baseline for wildfire spread mask prediction. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`FireCastNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `FireCastNet: Earth-as-a-Graph for Seasonal Fire Prediction `_ | **Repo:** `Repository `__ + + .. grid-item-card:: ForeFire Adapter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A lightweight simulator-style wildfire spread adapter inspired by front-propagation systems. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`ForeFire Adapter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `ForeFire: A Modular, Scriptable C++ Simulation Engine and Library for Wildland-Fire Spread `_ | **Repo:** `Repository `__ + + .. grid-item-card:: Wildfire Forecasting + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A sequence forecasting baseline for next-window wildfire activity across weekly count features. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Forecasting` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Wildfire Forecasting ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. 
container:: catalog-link-row + + **Paper:** `Wildfire Danger Prediction and Understanding with Deep Learning `_ | **Repo:** `Repository `__ + + .. grid-item-card:: WildfireSpreadTS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A temporal convolution wildfire spread baseline over short raster history windows. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `WildfireSpreadTS: A Dataset of Multi-Modal Time Series for Wildfire Spread Prediction `_ | **Repo:** `Repository `__ + + .. grid-item-card:: WRF-SFIRE Adapter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A lightweight raster wildfire spread adapter inspired by WRF-SFIRE style transport. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`WRF-SFIRE Adapter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `Coupled atmosphere-wildland fire modeling with WRF 3.3 and SFIRE 2011 `_ | **Repo:** `Repository `__ + + .. grid-item-card:: CNN-ASPP + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An explainable CNN segmentation model with an ASPP mechanism for next-day wildfire spread prediction. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. 
container:: catalog-meta-row + + **Details:** :doc:`CNN-ASPP ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `Application of Explainable Artificial Intelligence in Predicting Wildfire Spread `_ + + + .. tab-item:: Earthquake + + .. container:: catalog-section-note + + Earthquake models span phase picking and dense-grid forecasting, with detail pages linked to the shared earthquake benchmark coverage. + + .. rubric:: Implemented Models + + .. container:: catalog-section-note + + This table includes both core baselines and public variants or additional implementations for the hazard family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: EQNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A transformer-style earthquake phase-picking baseline for modern sequence modeling comparisons. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`EQNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`SeisBench ` + + .. container:: catalog-link-row + + **Paper:** `An End-To-End Earthquake Detection Method for Joint Phase Picking and Association Using Deep Learning `_ | **Repo:** `Repository `__ + + .. grid-item-card:: EQTransformer + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A bidirectional sequence encoder for joint earthquake phase picking with attention pooling over waveform windows. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-success:`Implemented` + + .. 
container:: catalog-meta-row + + **Details:** :doc:`EQTransformer ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`pick-benchmark ` + + .. container:: catalog-link-row + + **Paper:** `Earthquake Transformer-An attentive deep-learning model for simultaneous earthquake detection and phase picking `_ | **Repo:** `Repository `__ + + .. grid-item-card:: GPD + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact CNN baseline for generalized phase detection and historical earthquake picking comparisons. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`GPD ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`pick-benchmark ` + + .. container:: catalog-link-row + + **Paper:** `Generalized Seismic Phase Detection with Deep Learning `_ | **Repo:** `Repository `__ + + .. grid-item-card:: PhaseNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A lightweight phase-picking baseline that predicts P- and S-arrival indices from multichannel waveform windows. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`PhaseNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`SeisBench ` + + .. container:: catalog-link-row + + **Paper:** `PhaseNet: A Deep-Neural-Network-Based Seismic Arrival Time Picking Method `_ | **Repo:** `Repository `__ + + .. grid-item-card:: WaveCastNet + :class-card: catalog-entry-card + + .. 
container:: catalog-entry-summary + + A ConvLEM-based sequence-to-sequence model for dense-grid earthquake wavefield forecasting and early-warning style rollout experiments. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Wavefield Forecasting` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`WaveCastNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`AEFA `, :doc:`pyCSEP ` + + .. container:: catalog-link-row + + **Paper:** `Rapid wavefield forecasting for earthquake early warning via deep sequence to sequence learning `_ + + + .. tab-item:: Flood + + .. container:: catalog-section-note + + Flood models cover streamflow and inundation forecasting, ranging from sequence baselines to dense-grid flood-mapping architectures. + + .. rubric:: Implemented Models + + .. container:: catalog-section-note + + This table includes both core baselines and public variants or additional implementations for the hazard family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: EA-LSTM + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An entity-aware hydrology baseline with static-feature gating over streamflow histories. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`EA-LSTM ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WaterBench ` + + .. container:: catalog-link-row + + **Paper:** `Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets `_ | **Repo:** `Repository `__ + + .. 
grid-item-card:: FloodCast + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact spatiotemporal flood-inundation baseline for raster forecast experiments. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Inundation` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`FloodCast ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`FloodCastBench ` + + .. container:: catalog-link-row + + **Paper:** `Large-scale flood modeling and forecasting with FloodCast `_ | **Repo:** `Repository `__ + + .. grid-item-card:: Google Flood Forecasting + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A transformer-style sequence baseline for nodewise streamflow forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Google Flood Forecasting ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`Caravan ` + + .. container:: catalog-link-row + + **Paper:** `Global Flood Forecasting at a Fine Catchment Resolution using Machine Learning `_ | **Repo:** `Repository `__ + + .. grid-item-card:: NeuralHydrology LSTM + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An adapter-style LSTM baseline for nodewise streamflow forecasting on graph-temporal inputs. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`NeuralHydrology LSTM ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. 
container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`Caravan ` + + .. container:: catalog-link-row + + **Paper:** `Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets `_ | **Repo:** `Repository `__ + + .. grid-item-card:: UrbanFloodCast + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A U-Net style urban inundation baseline for dense-grid flood prediction. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Inundation` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`UrbanFloodCast ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`FloodCastBench ` + + .. container:: catalog-link-row + + **Paper:** `UrbanFloodCast: WMO Urban Flooding Forecasting Challenge `_ | **Repo:** `Repository `__ + + .. grid-item-card:: HydroGraphNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A physics-informed graph neural network for flood forecasting with interpretable KAN-style components, residual message passing, and delta-state decoding. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`HydroGraphNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`HydroBench ` + + .. container:: catalog-link-row + + **Paper:** `Interpretable physics-informed graph neural networks for flood forecasting `_ + + + .. tab-item:: Tropical Cyclone + + .. container:: catalog-section-note + + Storm models are organized under one tropical-cyclone family, including basin-specific hurricane baselines and shared all-basin forecasting models. + + .. 
rubric:: Implemented Models + + .. container:: catalog-section-note + + This table includes both core baselines and public variants or additional implementations for the hazard family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hurricast + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact multimodal storm baseline for hurricane track and intensity forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Hurricast ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Paper:** `Hurricane Forecasting: A Novel Multimodal Machine Learning Framework `_ | **Repo:** `Repository `__ + + .. grid-item-card:: SAF-Net + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A spatiotemporal tropical-cyclone baseline with an intensity-focused head and shared trajectory output. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`SAF-Net ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Paper:** `SAF-Net: A spatio-temporal deep learning method for typhoon intensity prediction `_ | **Repo:** `Repository `__ + + .. grid-item-card:: TCIF-fusion + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A knowledge-guided fusion baseline for tropical cyclone track and intensity forecasting. + + .. 
container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`TCIF-fusion ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Paper:** `Tropical cyclone intensity forecasting using model knowledge guided deep learning model `_ | **Repo:** `Repository `__ + + .. grid-item-card:: Tropical Cyclone MLP + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact MLP baseline for hurricane track and intensity forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Tropical Cyclone MLP ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Paper:** `Deep Learning Experiments for Tropical Cyclone Intensity Forecasts `_ | **Repo:** `Repository `__ + + .. grid-item-card:: TropiCycloneNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A GRU plus attention baseline for all-basin tropical cyclone forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`TropiCycloneNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`TropiCycloneNet-Dataset ` + + .. 
container:: catalog-link-row + + **Paper:** `Benchmark dataset and deep learning method for global tropical cyclone forecasting `_ | **Repo:** `Repository `__ + + .. rubric:: Experimental Adapters + + .. container:: catalog-section-note + + These entries remain public as lightweight wrapper or prototype integrations and should not be counted as stable implemented methods. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: FourCastNet TC Adapter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An experimental wrapper-style storm adapter inspired by FourCastNet forecast fields. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-warning:`Experimental Adapter` + + .. container:: catalog-meta-row + + **Details:** :doc:`FourCastNet TC Adapter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Paper:** `FourCastNet: A Global Data-driven High-resolution Weather Model using Adaptive Fourier Neural Operators `_ | **Repo:** `Repository `__ + + .. grid-item-card:: GraphCast TC Adapter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An experimental wrapper-style storm adapter inspired by GraphCast/GenCast forecast fields. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-warning:`Experimental Adapter` + + .. container:: catalog-meta-row + + **Details:** :doc:`GraphCast TC Adapter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`IBTrACS ` + + .. 
container:: catalog-link-row + + **Paper:** `GraphCast: Learning skillful medium-range global weather forecasting `_ | **Repo:** `Repository `__ + + .. grid-item-card:: Pangu TC Adapter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An experimental wrapper-style storm adapter inspired by Pangu-Weather forecast fields. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-warning:`Experimental Adapter` + + .. container:: catalog-meta-row + + **Details:** :doc:`Pangu TC Adapter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Paper:** `Accurate medium-range global weather forecasting with 3D neural networks `_ | **Repo:** `Repository `__ + + + +Recommended Entry Points +------------------------ + +If you are new to PyHazards, these four models provide the clearest +starting point for each hazard family. + +.. grid:: 1 1 2 4 + :gutter: 2 + :class-container: catalog-recommend-grid + + .. grid-item-card:: Wildfire + :class-card: catalog-detail-card + + **Start with:** :doc:`FireCastNet ` + + A compact encoder-decoder baseline for wildfire spread mask prediction. + + **Benchmark:** :doc:`Wildfire Benchmark ` + + .. grid-item-card:: Earthquake + :class-card: catalog-detail-card + + **Start with:** :doc:`PhaseNet ` + + A lightweight phase-picking baseline that predicts P- and S-arrival indices from multichannel waveform windows. + + **Benchmark:** :doc:`Earthquake Benchmark ` + + .. grid-item-card:: Flood + :class-card: catalog-detail-card + + **Start with:** :doc:`FloodCast ` + + A compact spatiotemporal flood-inundation baseline for raster forecast experiments. + + **Benchmark:** :doc:`Flood Benchmark ` + + .. 
grid-item-card:: Tropical Cyclone + :class-card: catalog-detail-card + + **Start with:** :doc:`Hurricast ` + + A compact multimodal storm baseline for hurricane track and intensity forecasting. + + **Benchmark:** :doc:`Tropical Cyclone Benchmark ` + + +Programmatic Use +---------------- + +Use :doc:`api/pyhazards.models` for the developer registry workflow, +builder examples, and package-level API lookup. Use +:doc:`pyhazards_benchmarks` to compare compatible benchmark families +before selecting a model for evaluation. + +.. toctree:: + :maxdepth: 1 + :hidden: + + modules/models_asufm + modules/models_eqnet + modules/models_eqtransformer + modules/models_firecastnet + modules/models_floodcast + modules/models_forefire + modules/models_fourcastnet_tc + modules/models_google_flood_forecasting + modules/models_gpd + modules/models_graphcast_tc + modules/models_hurricast + modules/models_hydrographnet + modules/models_neuralhydrology_ealstm + modules/models_neuralhydrology_lstm + modules/models_pangu_tc + modules/models_phasenet + modules/models_saf_net + modules/models_tcif_fusion + modules/models_tropicalcyclone_mlp + modules/models_tropicyclonenet + modules/models_urbanfloodcast + modules/models_wavecastnet + modules/models_wildfire_aspp + modules/models_wildfire_forecasting + modules/models_wildfire_fpa + modules/models_wildfirespreadts + modules/models_wrf_sfire diff --git a/docs/build/html/_sources/pyhazards_utils.rst.txt b/docs/build/html/_sources/pyhazards_utils.rst.txt new file mode 100644 index 00000000..713beb65 --- /dev/null +++ b/docs/build/html/_sources/pyhazards_utils.rst.txt @@ -0,0 +1,22 @@ +Utils +=================== + +Overview +-------- + +Utility helpers keep the rest of the library concise. Use these modules for +device selection, reproducibility, and small shared helpers. + +Submodules +---------- + +- :mod:`pyhazards.utils.hardware`: device helpers and automatic device selection. 
+- :mod:`pyhazards.utils.common`: reproducibility, logging, and shared utility + functions. + +Typical Uses +------------ + +- choose CPU or GPU behavior explicitly, +- set deterministic seeds for experiments, +- reuse small helpers instead of copying project-specific boilerplate. diff --git a/docs/build/html/_sources/quick_start.rst.txt b/docs/build/html/_sources/quick_start.rst.txt new file mode 100644 index 00000000..0a9c6f5c --- /dev/null +++ b/docs/build/html/_sources/quick_start.rst.txt @@ -0,0 +1,111 @@ +Quick Start +=========== + +Use this page after :doc:`installation` to run the first end-to-end PyHazards +workflow: verify the package, inspect example data, build a model, and execute +one short training loop. + +Step 1: Verify the Package +-------------------------- + +Confirm that Python can import the package cleanly: + +.. code-block:: bash + + python -c "import pyhazards; print(pyhazards.__version__)" + +Step 2: Inspect Example Data +---------------------------- + +Use the ERA5 inspection entrypoint to validate the bundled sample data before +training: + +.. code-block:: bash + + python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10 + +Step 3: Build a Model +--------------------- + +Instantiate ``hydrographnet`` through the unified model registry: + +.. code-block:: python + + from pyhazards.models import build_model + + model = build_model( + name="hydrographnet", + task="regression", + node_in_dim=2, + edge_in_dim=3, + out_dim=1, + ) + print(type(model).__name__) + +Step 4: Run a Short Train/Evaluate Loop +--------------------------------------- + +This example pairs the ERA5 subset with ``hydrographnet`` to confirm that the +dataset, model, and training engine work together in one workflow. + +.. 
code-block:: python + + import torch + from pyhazards.data.load_hydrograph_data import load_hydrograph_data + from pyhazards.datasets import graph_collate + from pyhazards.engine import Trainer + from pyhazards.models import build_model + + data = load_hydrograph_data("pyhazards/data/era5_subset", max_nodes=50) + + model = build_model( + name="hydrographnet", + task="regression", + node_in_dim=2, + edge_in_dim=3, + out_dim=1, + ) + + trainer = Trainer(model=model, mixed_precision=False) + optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) + loss_fn = torch.nn.MSELoss() + + trainer.fit( + data, + optimizer=optimizer, + loss_fn=loss_fn, + max_epochs=1, + batch_size=1, + collate_fn=graph_collate, + ) + + metrics = trainer.evaluate( + data, + split="train", + batch_size=1, + collate_fn=graph_collate, + ) + print(metrics) + +Step 5: Next Steps +------------------ + +- Go to :doc:`pyhazards_datasets` to browse supported datasets. +- Go to :doc:`pyhazards_models` to compare built-in models. +- Go to :doc:`implementation` to add your own dataset or model. + +Device Notes +------------ + +PyHazards uses CUDA automatically when available. To force a device: + +.. code-block:: bash + + export PYHAZARDS_DEVICE=cuda:0 + +.. code-block:: python + + from pyhazards.utils import set_device + + set_device("cuda:0") + set_device("cpu") diff --git a/docs/build/html/_sources/references.rst.txt b/docs/build/html/_sources/references.rst.txt new file mode 100644 index 00000000..71a1176b --- /dev/null +++ b/docs/build/html/_sources/references.rst.txt @@ -0,0 +1,76 @@ +References +========== + +This page collects the main dataset and model references cited throughout the +PyHazards docs. It is a project reference list, not an exhaustive bibliography. + +Dataset References +------------------ + +- Gelaro, R., McCarty, W., Suárez, M. J., et al. (2017). *The Modern-Era Retrospective Analysis for Research and Applications, Version 2 (MERRA-2)*. `[link] `__. 
+- Hersbach, H., Bell, B., Berrisford, P., et al. (2020). *The ERA5 global reanalysis*. `[link] `__. +- NOAA National Centers for Environmental Information (NCEI). *Storm Events Database Documentation*. `[link] `__. +- Schroeder, W., Oliva, P., Giglio, L., and Csiszar, I. (2014). *The New VIIRS 375 m active fire detection data product: Algorithm description and initial assessment*. `[link] `__. +- Eidenshink, J., Schwind, B., Brewer, K., Zhu, Z., Quayle, B., and Howard, S. (2007). *A project for monitoring trends in burn severity*. `[link] `__. +- Rollins, M. G. (2009). *LANDFIRE: A nationally consistent vegetation, wildland fire, and fuel assessment*. `[link] `__. +- National Interagency Fire Center (NIFC). *Wildland Fire Incident Geospatial Services (WFIGS)*. `[link] `__. +- Schmit, T. J., Griffith, P., Gunshor, M. M., et al. (2017). *A closer look at the ABI on the GOES-R series*. `[link] `__. + +Model References +---------------- + +Wildfire +~~~~~~~~ + +- *Developing risk assessment framework for wildfire in the United States*. `[paper] `__. +- *Application of Explainable Artificial Intelligence in Predicting Wildfire Spread: An ASPP-Enabled CNN Approach*. `[paper] `__. +- *Wildfire Danger Prediction and Understanding with Deep Learning*. `[paper] `__, `[repo] `__. +- *WildfireSpreadTS: A Dataset of Multi-Modal Time Series for Wildfire Spread Prediction*. `[paper] `__, `[repo] `__. +- *Wildfire Spread Prediction in North America Using Satellite Imagery and Vision Transformer*. `[paper] `__, `[repo] `__. +- *ForeFire: A Modular, Scriptable C++ Simulation Engine and Library for Wildland-Fire Spread*. `[paper] `__, `[repo] `__. +- *Coupled atmosphere-wildland fire modeling with WRF 3.3 and SFIRE 2011*. `[paper] `__, `[repo] `__. +- *FireCastNet: Earth-as-a-Graph for Seasonal Fire Prediction*. `[paper] `__, `[repo] `__. + +Earthquake +~~~~~~~~~~ + +- *Rapid wavefield forecasting for earthquake early warning via deep sequence to sequence learning*. `[paper] `__. 
+- *PhaseNet: A Deep-Neural-Network-Based Seismic Arrival Time Picking Method*. `[paper] `__, `[repo] `__. +- *Earthquake Transformer-An attentive deep-learning model for simultaneous earthquake detection and phase picking*. `[paper] `__, `[repo] `__. +- *Generalized Seismic Phase Detection with Deep Learning*. `[paper] `__, `[repo] `__. +- *An End-To-End Earthquake Detection Method for Joint Phase Picking and Association Using Deep Learning*. `[paper] `__, `[repo] `__. + +Flood +~~~~~ + +- *Interpretable physics-informed graph neural networks for flood forecasting*. `[paper] `__. +- *Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets*. `[paper] `__, `[repo] `__. +- *Large-scale flood modeling and forecasting with FloodCast*. `[paper] `__, `[repo] `__. +- *UrbanFloodCast: WMO Urban Flooding Forecasting Challenge*. `[paper] `__, `[repo] `__. +- *Global Flood Forecasting at a Fine Catchment Resolution using Machine Learning*. `[paper] `__, `[repo] `__. + +Hurricane and Tropical Cyclone +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- *Hurricane Forecasting: A Novel Multimodal Machine Learning Framework*. `[paper] `__, `[repo] `__. +- *Deep Learning Experiments for Tropical Cyclone Intensity Forecasts*. `[paper] `__, `[repo] `__. +- *Benchmark dataset and deep learning method for global tropical cyclone forecasting*. `[paper] `__, `[repo] `__. +- *SAF-Net: A spatio-temporal deep learning method for typhoon intensity prediction*. `[paper] `__, `[repo] `__. +- *Tropical cyclone intensity forecasting using model knowledge guided deep learning model*. `[paper] `__, `[repo] `__. +- *GraphCast: Learning skillful medium-range global weather forecasting*. `[paper] `__, `[repo] `__. +- *Accurate medium-range global weather forecasting with 3D neural networks*. `[paper] `__, `[repo] `__. +- *FourCastNet: A Global Data-driven High-resolution Weather Model using Adaptive Fourier Neural Operators*. 
`[paper] `__, `[repo] `__. + +Benchmark and Data Resources +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- *SeisBench - A Toolbox for Machine Learning in Seismology*. `[paper] `__, `[repo] `__. +- *pick-benchmark*. `[repo] `__. +- *pyCSEP*. `[repo] `__. +- *AEFA*. `[repo] `__. +- *Caravan - A global community dataset for large-sample hydrology*. `[paper] `__, `[repo] `__. +- *WaterBench: A Large-scale Benchmark Dataset for Data-driven Streamflow Forecasting*. `[paper] `__, `[repo] `__. +- *FloodCastBench*. `[repo] `__. +- *HydroBench*. `[repo] `__. +- *TCBench Alpha*. `[repo] `__. +- *IBTrACS*. `[product page] `__. diff --git a/docs/build/html/_sources/team.rst.txt b/docs/build/html/_sources/team.rst.txt new file mode 100644 index 00000000..0a031d10 --- /dev/null +++ b/docs/build/html/_sources/team.rst.txt @@ -0,0 +1,39 @@ +Core Team +========= + +PyHazards is maintained by researchers and engineers working on hazard-focused +machine learning, data systems, and model development. + +Lead Developer +-------------- + +- Xueqi Cheng, Florida State University (xc25@fsu.edu) + +Founder +------- + +- Yushun Dong, Florida State University + +Principal Contributors & Maintainers +------------------------------------ + +- Yangshuang Xu, Florida State University +- Runyang Xu, Florida State University +- Hugh Long, Florida State University + +Core Contributors +----------------- + +- Lex Schneier, Florida State University +- Sharan Kumar Reddy Kodudula, Florida State University +- Cristian Victoria, Florida State University +- Deyang Hsu, University of Southern California +- Dacheng Shen, University of Southern California + +What the Team Maintains +----------------------- + +- technical direction for the library, +- code review and quality checks, +- documentation and examples, +- ongoing maintenance of public releases. 
diff --git a/docs/build/html/_static/basic.css b/docs/build/html/_static/basic.css new file mode 100644 index 00000000..4738b2ed --- /dev/null +++ b/docs/build/html/_static/basic.css @@ -0,0 +1,906 @@ +/* + * Sphinx stylesheet -- basic theme. + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin-top: 
10px; +} + +ul.search li { + padding: 5px 0; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a:visited { + color: #551A8B; +} + +h1:hover > a.headerlink, +h2:hover > 
a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + 
margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, 
+figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + 
+ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: 
sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; 
+ font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/docs/build/html/_static/debug.css b/docs/build/html/_static/debug.css new file mode 100644 index 00000000..74d4aec3 --- /dev/null +++ b/docs/build/html/_static/debug.css @@ -0,0 +1,69 @@ +/* + This CSS file should be overridden by the theme authors. It's + meant for debugging and developing the skeleton that this theme provides. 
+*/ +body { + font-family: -apple-system, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, + "Apple Color Emoji", "Segoe UI Emoji"; + background: lavender; +} +.sb-announcement { + background: rgb(131, 131, 131); +} +.sb-announcement__inner { + background: black; + color: white; +} +.sb-header { + background: lightskyblue; +} +.sb-header__inner { + background: royalblue; + color: white; +} +.sb-header-secondary { + background: lightcyan; +} +.sb-header-secondary__inner { + background: cornflowerblue; + color: white; +} +.sb-sidebar-primary { + background: lightgreen; +} +.sb-main { + background: blanchedalmond; +} +.sb-main__inner { + background: antiquewhite; +} +.sb-header-article { + background: lightsteelblue; +} +.sb-article-container { + background: snow; +} +.sb-article-main { + background: white; +} +.sb-footer-article { + background: lightpink; +} +.sb-sidebar-secondary { + background: lightgoldenrodyellow; +} +.sb-footer-content { + background: plum; +} +.sb-footer-content__inner { + background: palevioletred; +} +.sb-footer { + background: pink; +} +.sb-footer__inner { + background: salmon; +} +.sb-article { + background: white; +} diff --git a/docs/build/html/_static/doctools.js b/docs/build/html/_static/doctools.js new file mode 100644 index 00000000..0398ebb9 --- /dev/null +++ b/docs/build/html/_static/doctools.js @@ -0,0 +1,149 @@ +/* + * Base JavaScript utilities for all Sphinx HTML documentation. + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. 
+ */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => 
toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/docs/build/html/_static/documentation_options.js b/docs/build/html/_static/documentation_options.js new file mode 100644 index 00000000..841b958a --- /dev/null +++ b/docs/build/html/_static/documentation_options.js @@ -0,0 +1,13 @@ +const DOCUMENTATION_OPTIONS = { + VERSION: '1.0.5', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + 
NAVIGATION_WITH_KEYS: true, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/docs/build/html/_static/file.png b/docs/build/html/_static/file.png new file mode 100644 index 00000000..a858a410 Binary files /dev/null and b/docs/build/html/_static/file.png differ diff --git a/docs/build/html/_static/github.svg b/docs/build/html/_static/github.svg new file mode 100644 index 00000000..013e0253 --- /dev/null +++ b/docs/build/html/_static/github.svg @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/docs/build/html/_static/language_data.js b/docs/build/html/_static/language_data.js new file mode 100644 index 00000000..c7fe6c6f --- /dev/null +++ b/docs/build/html/_static/language_data.js @@ -0,0 +1,192 @@ +/* + * This script contains the language-specific data used by searchtools.js, + * namely the list of stopwords, stemmer, scorer and splitter. + */ + +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; + + +/* Non-minified version is copied as a separate JS file, if available */ + +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" 
+ V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" + v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = 
/^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/docs/build/html/_static/logo.png b/docs/build/html/_static/logo.png new file mode 100644 index 00000000..3a7451eb Binary files /dev/null and b/docs/build/html/_static/logo.png differ diff --git a/docs/build/html/_static/minus.png b/docs/build/html/_static/minus.png new file mode 100644 index 00000000..d96755fd Binary files /dev/null and b/docs/build/html/_static/minus.png differ diff --git a/docs/build/html/_static/plus.png b/docs/build/html/_static/plus.png new file mode 100644 index 00000000..7107cec9 Binary files /dev/null and b/docs/build/html/_static/plus.png differ diff --git a/docs/build/html/_static/pygments.css b/docs/build/html/_static/pygments.css new file mode 100644 index 00000000..9d1083bf --- /dev/null +++ b/docs/build/html/_static/pygments.css @@ -0,0 +1,250 @@ +.highlight pre { line-height: 125%; } +.highlight td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +.highlight span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +.highlight td.linenos .special { 
color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight .hll { background-color: #fdf2e2 } +.highlight { background: #f2f2f2; color: #1E1E1E } +.highlight .c { color: #515151 } /* Comment */ +.highlight .err { color: #D71835 } /* Error */ +.highlight .k { color: #8045E5 } /* Keyword */ +.highlight .l { color: #7F4707 } /* Literal */ +.highlight .n { color: #1E1E1E } /* Name */ +.highlight .o { color: #163 } /* Operator */ +.highlight .p { color: #1E1E1E } /* Punctuation */ +.highlight .ch { color: #515151 } /* Comment.Hashbang */ +.highlight .cm { color: #515151 } /* Comment.Multiline */ +.highlight .cp { color: #515151 } /* Comment.Preproc */ +.highlight .cpf { color: #515151 } /* Comment.PreprocFile */ +.highlight .c1 { color: #515151 } /* Comment.Single */ +.highlight .cs { color: #515151 } /* Comment.Special */ +.highlight .gd { color: #00749C } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .gh { color: #00749C } /* Generic.Heading */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #00749C } /* Generic.Subheading */ +.highlight .kc { color: #8045E5 } /* Keyword.Constant */ +.highlight .kd { color: #8045E5 } /* Keyword.Declaration */ +.highlight .kn { color: #8045E5 } /* Keyword.Namespace */ +.highlight .kp { color: #8045E5 } /* Keyword.Pseudo */ +.highlight .kr { color: #8045E5 } /* Keyword.Reserved */ +.highlight .kt { color: #7F4707 } /* Keyword.Type */ +.highlight .ld { color: #7F4707 } /* Literal.Date */ +.highlight .m { color: #7F4707 } /* Literal.Number */ +.highlight .s { color: #163 } /* Literal.String */ +.highlight .na { color: #7F4707 } /* Name.Attribute */ +.highlight .nb { color: #7F4707 } /* Name.Builtin */ +.highlight .nc { color: #00749C } /* Name.Class */ +.highlight .no { color: #00749C } /* Name.Constant 
*/ +.highlight .nd { color: #7F4707 } /* Name.Decorator */ +.highlight .ni { color: #163 } /* Name.Entity */ +.highlight .ne { color: #8045E5 } /* Name.Exception */ +.highlight .nf { color: #00749C } /* Name.Function */ +.highlight .nl { color: #7F4707 } /* Name.Label */ +.highlight .nn { color: #1E1E1E } /* Name.Namespace */ +.highlight .nx { color: #1E1E1E } /* Name.Other */ +.highlight .py { color: #00749C } /* Name.Property */ +.highlight .nt { color: #00749C } /* Name.Tag */ +.highlight .nv { color: #D71835 } /* Name.Variable */ +.highlight .ow { color: #8045E5 } /* Operator.Word */ +.highlight .pm { color: #1E1E1E } /* Punctuation.Marker */ +.highlight .w { color: #1E1E1E } /* Text.Whitespace */ +.highlight .mb { color: #7F4707 } /* Literal.Number.Bin */ +.highlight .mf { color: #7F4707 } /* Literal.Number.Float */ +.highlight .mh { color: #7F4707 } /* Literal.Number.Hex */ +.highlight .mi { color: #7F4707 } /* Literal.Number.Integer */ +.highlight .mo { color: #7F4707 } /* Literal.Number.Oct */ +.highlight .sa { color: #163 } /* Literal.String.Affix */ +.highlight .sb { color: #163 } /* Literal.String.Backtick */ +.highlight .sc { color: #163 } /* Literal.String.Char */ +.highlight .dl { color: #163 } /* Literal.String.Delimiter */ +.highlight .sd { color: #163 } /* Literal.String.Doc */ +.highlight .s2 { color: #163 } /* Literal.String.Double */ +.highlight .se { color: #163 } /* Literal.String.Escape */ +.highlight .sh { color: #163 } /* Literal.String.Heredoc */ +.highlight .si { color: #163 } /* Literal.String.Interpol */ +.highlight .sx { color: #163 } /* Literal.String.Other */ +.highlight .sr { color: #D71835 } /* Literal.String.Regex */ +.highlight .s1 { color: #163 } /* Literal.String.Single */ +.highlight .ss { color: #00749C } /* Literal.String.Symbol */ +.highlight .bp { color: #7F4707 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #00749C } /* Name.Function.Magic */ +.highlight .vc { color: #D71835 } /* Name.Variable.Class */ +.highlight 
.vg { color: #D71835 } /* Name.Variable.Global */ +.highlight .vi { color: #D71835 } /* Name.Variable.Instance */ +.highlight .vm { color: #7F4707 } /* Name.Variable.Magic */ +.highlight .il { color: #7F4707 } /* Literal.Number.Integer.Long */ +@media not print { +body[data-theme="dark"] .highlight pre { line-height: 125%; } +body[data-theme="dark"] .highlight td.linenos .normal { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight span.linenos { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body[data-theme="dark"] .highlight .hll { background-color: #404040 } +body[data-theme="dark"] .highlight { background: #202020; color: #D0D0D0 } +body[data-theme="dark"] .highlight .c { color: #ABABAB; font-style: italic } /* Comment */ +body[data-theme="dark"] .highlight .err { color: #A61717; background-color: #E3D2D2 } /* Error */ +body[data-theme="dark"] .highlight .esc { color: #D0D0D0 } /* Escape */ +body[data-theme="dark"] .highlight .g { color: #D0D0D0 } /* Generic */ +body[data-theme="dark"] .highlight .k { color: #6EBF26; font-weight: bold } /* Keyword */ +body[data-theme="dark"] .highlight .l { color: #D0D0D0 } /* Literal */ +body[data-theme="dark"] .highlight .n { color: #D0D0D0 } /* Name */ +body[data-theme="dark"] .highlight .o { color: #D0D0D0 } /* Operator */ +body[data-theme="dark"] .highlight .x { color: #D0D0D0 } /* Other */ +body[data-theme="dark"] .highlight .p { color: #D0D0D0 } /* Punctuation */ +body[data-theme="dark"] .highlight .ch { color: #ABABAB; font-style: italic } /* Comment.Hashbang */ +body[data-theme="dark"] .highlight .cm { color: #ABABAB; 
font-style: italic } /* Comment.Multiline */ +body[data-theme="dark"] .highlight .cp { color: #FF3A3A; font-weight: bold } /* Comment.Preproc */ +body[data-theme="dark"] .highlight .cpf { color: #ABABAB; font-style: italic } /* Comment.PreprocFile */ +body[data-theme="dark"] .highlight .c1 { color: #ABABAB; font-style: italic } /* Comment.Single */ +body[data-theme="dark"] .highlight .cs { color: #E50808; font-weight: bold; background-color: #520000 } /* Comment.Special */ +body[data-theme="dark"] .highlight .gd { color: #FF3A3A } /* Generic.Deleted */ +body[data-theme="dark"] .highlight .ge { color: #D0D0D0; font-style: italic } /* Generic.Emph */ +body[data-theme="dark"] .highlight .ges { color: #D0D0D0; font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +body[data-theme="dark"] .highlight .gr { color: #FF3A3A } /* Generic.Error */ +body[data-theme="dark"] .highlight .gh { color: #FFF; font-weight: bold } /* Generic.Heading */ +body[data-theme="dark"] .highlight .gi { color: #589819 } /* Generic.Inserted */ +body[data-theme="dark"] .highlight .go { color: #CCC } /* Generic.Output */ +body[data-theme="dark"] .highlight .gp { color: #AAA } /* Generic.Prompt */ +body[data-theme="dark"] .highlight .gs { color: #D0D0D0; font-weight: bold } /* Generic.Strong */ +body[data-theme="dark"] .highlight .gu { color: #FFF; text-decoration: underline } /* Generic.Subheading */ +body[data-theme="dark"] .highlight .gt { color: #FF3A3A } /* Generic.Traceback */ +body[data-theme="dark"] .highlight .kc { color: #6EBF26; font-weight: bold } /* Keyword.Constant */ +body[data-theme="dark"] .highlight .kd { color: #6EBF26; font-weight: bold } /* Keyword.Declaration */ +body[data-theme="dark"] .highlight .kn { color: #6EBF26; font-weight: bold } /* Keyword.Namespace */ +body[data-theme="dark"] .highlight .kp { color: #6EBF26 } /* Keyword.Pseudo */ +body[data-theme="dark"] .highlight .kr { color: #6EBF26; font-weight: bold } /* Keyword.Reserved */ +body[data-theme="dark"] 
.highlight .kt { color: #6EBF26; font-weight: bold } /* Keyword.Type */ +body[data-theme="dark"] .highlight .ld { color: #D0D0D0 } /* Literal.Date */ +body[data-theme="dark"] .highlight .m { color: #51B2FD } /* Literal.Number */ +body[data-theme="dark"] .highlight .s { color: #ED9D13 } /* Literal.String */ +body[data-theme="dark"] .highlight .na { color: #BBB } /* Name.Attribute */ +body[data-theme="dark"] .highlight .nb { color: #2FBCCD } /* Name.Builtin */ +body[data-theme="dark"] .highlight .nc { color: #71ADFF; text-decoration: underline } /* Name.Class */ +body[data-theme="dark"] .highlight .no { color: #40FFFF } /* Name.Constant */ +body[data-theme="dark"] .highlight .nd { color: #FFA500 } /* Name.Decorator */ +body[data-theme="dark"] .highlight .ni { color: #D0D0D0 } /* Name.Entity */ +body[data-theme="dark"] .highlight .ne { color: #BBB } /* Name.Exception */ +body[data-theme="dark"] .highlight .nf { color: #71ADFF } /* Name.Function */ +body[data-theme="dark"] .highlight .nl { color: #D0D0D0 } /* Name.Label */ +body[data-theme="dark"] .highlight .nn { color: #71ADFF; text-decoration: underline } /* Name.Namespace */ +body[data-theme="dark"] .highlight .nx { color: #D0D0D0 } /* Name.Other */ +body[data-theme="dark"] .highlight .py { color: #D0D0D0 } /* Name.Property */ +body[data-theme="dark"] .highlight .nt { color: #6EBF26; font-weight: bold } /* Name.Tag */ +body[data-theme="dark"] .highlight .nv { color: #40FFFF } /* Name.Variable */ +body[data-theme="dark"] .highlight .ow { color: #6EBF26; font-weight: bold } /* Operator.Word */ +body[data-theme="dark"] .highlight .pm { color: #D0D0D0 } /* Punctuation.Marker */ +body[data-theme="dark"] .highlight .w { color: #666 } /* Text.Whitespace */ +body[data-theme="dark"] .highlight .mb { color: #51B2FD } /* Literal.Number.Bin */ +body[data-theme="dark"] .highlight .mf { color: #51B2FD } /* Literal.Number.Float */ +body[data-theme="dark"] .highlight .mh { color: #51B2FD } /* Literal.Number.Hex */ 
+body[data-theme="dark"] .highlight .mi { color: #51B2FD } /* Literal.Number.Integer */ +body[data-theme="dark"] .highlight .mo { color: #51B2FD } /* Literal.Number.Oct */ +body[data-theme="dark"] .highlight .sa { color: #ED9D13 } /* Literal.String.Affix */ +body[data-theme="dark"] .highlight .sb { color: #ED9D13 } /* Literal.String.Backtick */ +body[data-theme="dark"] .highlight .sc { color: #ED9D13 } /* Literal.String.Char */ +body[data-theme="dark"] .highlight .dl { color: #ED9D13 } /* Literal.String.Delimiter */ +body[data-theme="dark"] .highlight .sd { color: #ED9D13 } /* Literal.String.Doc */ +body[data-theme="dark"] .highlight .s2 { color: #ED9D13 } /* Literal.String.Double */ +body[data-theme="dark"] .highlight .se { color: #ED9D13 } /* Literal.String.Escape */ +body[data-theme="dark"] .highlight .sh { color: #ED9D13 } /* Literal.String.Heredoc */ +body[data-theme="dark"] .highlight .si { color: #ED9D13 } /* Literal.String.Interpol */ +body[data-theme="dark"] .highlight .sx { color: #FFA500 } /* Literal.String.Other */ +body[data-theme="dark"] .highlight .sr { color: #ED9D13 } /* Literal.String.Regex */ +body[data-theme="dark"] .highlight .s1 { color: #ED9D13 } /* Literal.String.Single */ +body[data-theme="dark"] .highlight .ss { color: #ED9D13 } /* Literal.String.Symbol */ +body[data-theme="dark"] .highlight .bp { color: #2FBCCD } /* Name.Builtin.Pseudo */ +body[data-theme="dark"] .highlight .fm { color: #71ADFF } /* Name.Function.Magic */ +body[data-theme="dark"] .highlight .vc { color: #40FFFF } /* Name.Variable.Class */ +body[data-theme="dark"] .highlight .vg { color: #40FFFF } /* Name.Variable.Global */ +body[data-theme="dark"] .highlight .vi { color: #40FFFF } /* Name.Variable.Instance */ +body[data-theme="dark"] .highlight .vm { color: #40FFFF } /* Name.Variable.Magic */ +body[data-theme="dark"] .highlight .il { color: #51B2FD } /* Literal.Number.Integer.Long */ +@media (prefers-color-scheme: dark) { +body:not([data-theme="light"]) .highlight pre { 
line-height: 125%; } +body:not([data-theme="light"]) .highlight td.linenos .normal { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight span.linenos { color: #aaaaaa; background-color: transparent; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +body:not([data-theme="light"]) .highlight .hll { background-color: #404040 } +body:not([data-theme="light"]) .highlight { background: #202020; color: #D0D0D0 } +body:not([data-theme="light"]) .highlight .c { color: #ABABAB; font-style: italic } /* Comment */ +body:not([data-theme="light"]) .highlight .err { color: #A61717; background-color: #E3D2D2 } /* Error */ +body:not([data-theme="light"]) .highlight .esc { color: #D0D0D0 } /* Escape */ +body:not([data-theme="light"]) .highlight .g { color: #D0D0D0 } /* Generic */ +body:not([data-theme="light"]) .highlight .k { color: #6EBF26; font-weight: bold } /* Keyword */ +body:not([data-theme="light"]) .highlight .l { color: #D0D0D0 } /* Literal */ +body:not([data-theme="light"]) .highlight .n { color: #D0D0D0 } /* Name */ +body:not([data-theme="light"]) .highlight .o { color: #D0D0D0 } /* Operator */ +body:not([data-theme="light"]) .highlight .x { color: #D0D0D0 } /* Other */ +body:not([data-theme="light"]) .highlight .p { color: #D0D0D0 } /* Punctuation */ +body:not([data-theme="light"]) .highlight .ch { color: #ABABAB; font-style: italic } /* Comment.Hashbang */ +body:not([data-theme="light"]) .highlight .cm { color: #ABABAB; font-style: italic } /* Comment.Multiline */ +body:not([data-theme="light"]) .highlight .cp { color: #FF3A3A; font-weight: bold } /* Comment.Preproc */ +body:not([data-theme="light"]) 
.highlight .cpf { color: #ABABAB; font-style: italic } /* Comment.PreprocFile */ +body:not([data-theme="light"]) .highlight .c1 { color: #ABABAB; font-style: italic } /* Comment.Single */ +body:not([data-theme="light"]) .highlight .cs { color: #E50808; font-weight: bold; background-color: #520000 } /* Comment.Special */ +body:not([data-theme="light"]) .highlight .gd { color: #FF3A3A } /* Generic.Deleted */ +body:not([data-theme="light"]) .highlight .ge { color: #D0D0D0; font-style: italic } /* Generic.Emph */ +body:not([data-theme="light"]) .highlight .ges { color: #D0D0D0; font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +body:not([data-theme="light"]) .highlight .gr { color: #FF3A3A } /* Generic.Error */ +body:not([data-theme="light"]) .highlight .gh { color: #FFF; font-weight: bold } /* Generic.Heading */ +body:not([data-theme="light"]) .highlight .gi { color: #589819 } /* Generic.Inserted */ +body:not([data-theme="light"]) .highlight .go { color: #CCC } /* Generic.Output */ +body:not([data-theme="light"]) .highlight .gp { color: #AAA } /* Generic.Prompt */ +body:not([data-theme="light"]) .highlight .gs { color: #D0D0D0; font-weight: bold } /* Generic.Strong */ +body:not([data-theme="light"]) .highlight .gu { color: #FFF; text-decoration: underline } /* Generic.Subheading */ +body:not([data-theme="light"]) .highlight .gt { color: #FF3A3A } /* Generic.Traceback */ +body:not([data-theme="light"]) .highlight .kc { color: #6EBF26; font-weight: bold } /* Keyword.Constant */ +body:not([data-theme="light"]) .highlight .kd { color: #6EBF26; font-weight: bold } /* Keyword.Declaration */ +body:not([data-theme="light"]) .highlight .kn { color: #6EBF26; font-weight: bold } /* Keyword.Namespace */ +body:not([data-theme="light"]) .highlight .kp { color: #6EBF26 } /* Keyword.Pseudo */ +body:not([data-theme="light"]) .highlight .kr { color: #6EBF26; font-weight: bold } /* Keyword.Reserved */ +body:not([data-theme="light"]) .highlight .kt { color: #6EBF26; 
font-weight: bold } /* Keyword.Type */ +body:not([data-theme="light"]) .highlight .ld { color: #D0D0D0 } /* Literal.Date */ +body:not([data-theme="light"]) .highlight .m { color: #51B2FD } /* Literal.Number */ +body:not([data-theme="light"]) .highlight .s { color: #ED9D13 } /* Literal.String */ +body:not([data-theme="light"]) .highlight .na { color: #BBB } /* Name.Attribute */ +body:not([data-theme="light"]) .highlight .nb { color: #2FBCCD } /* Name.Builtin */ +body:not([data-theme="light"]) .highlight .nc { color: #71ADFF; text-decoration: underline } /* Name.Class */ +body:not([data-theme="light"]) .highlight .no { color: #40FFFF } /* Name.Constant */ +body:not([data-theme="light"]) .highlight .nd { color: #FFA500 } /* Name.Decorator */ +body:not([data-theme="light"]) .highlight .ni { color: #D0D0D0 } /* Name.Entity */ +body:not([data-theme="light"]) .highlight .ne { color: #BBB } /* Name.Exception */ +body:not([data-theme="light"]) .highlight .nf { color: #71ADFF } /* Name.Function */ +body:not([data-theme="light"]) .highlight .nl { color: #D0D0D0 } /* Name.Label */ +body:not([data-theme="light"]) .highlight .nn { color: #71ADFF; text-decoration: underline } /* Name.Namespace */ +body:not([data-theme="light"]) .highlight .nx { color: #D0D0D0 } /* Name.Other */ +body:not([data-theme="light"]) .highlight .py { color: #D0D0D0 } /* Name.Property */ +body:not([data-theme="light"]) .highlight .nt { color: #6EBF26; font-weight: bold } /* Name.Tag */ +body:not([data-theme="light"]) .highlight .nv { color: #40FFFF } /* Name.Variable */ +body:not([data-theme="light"]) .highlight .ow { color: #6EBF26; font-weight: bold } /* Operator.Word */ +body:not([data-theme="light"]) .highlight .pm { color: #D0D0D0 } /* Punctuation.Marker */ +body:not([data-theme="light"]) .highlight .w { color: #666 } /* Text.Whitespace */ +body:not([data-theme="light"]) .highlight .mb { color: #51B2FD } /* Literal.Number.Bin */ +body:not([data-theme="light"]) .highlight .mf { color: #51B2FD } /* 
Literal.Number.Float */ +body:not([data-theme="light"]) .highlight .mh { color: #51B2FD } /* Literal.Number.Hex */ +body:not([data-theme="light"]) .highlight .mi { color: #51B2FD } /* Literal.Number.Integer */ +body:not([data-theme="light"]) .highlight .mo { color: #51B2FD } /* Literal.Number.Oct */ +body:not([data-theme="light"]) .highlight .sa { color: #ED9D13 } /* Literal.String.Affix */ +body:not([data-theme="light"]) .highlight .sb { color: #ED9D13 } /* Literal.String.Backtick */ +body:not([data-theme="light"]) .highlight .sc { color: #ED9D13 } /* Literal.String.Char */ +body:not([data-theme="light"]) .highlight .dl { color: #ED9D13 } /* Literal.String.Delimiter */ +body:not([data-theme="light"]) .highlight .sd { color: #ED9D13 } /* Literal.String.Doc */ +body:not([data-theme="light"]) .highlight .s2 { color: #ED9D13 } /* Literal.String.Double */ +body:not([data-theme="light"]) .highlight .se { color: #ED9D13 } /* Literal.String.Escape */ +body:not([data-theme="light"]) .highlight .sh { color: #ED9D13 } /* Literal.String.Heredoc */ +body:not([data-theme="light"]) .highlight .si { color: #ED9D13 } /* Literal.String.Interpol */ +body:not([data-theme="light"]) .highlight .sx { color: #FFA500 } /* Literal.String.Other */ +body:not([data-theme="light"]) .highlight .sr { color: #ED9D13 } /* Literal.String.Regex */ +body:not([data-theme="light"]) .highlight .s1 { color: #ED9D13 } /* Literal.String.Single */ +body:not([data-theme="light"]) .highlight .ss { color: #ED9D13 } /* Literal.String.Symbol */ +body:not([data-theme="light"]) .highlight .bp { color: #2FBCCD } /* Name.Builtin.Pseudo */ +body:not([data-theme="light"]) .highlight .fm { color: #71ADFF } /* Name.Function.Magic */ +body:not([data-theme="light"]) .highlight .vc { color: #40FFFF } /* Name.Variable.Class */ +body:not([data-theme="light"]) .highlight .vg { color: #40FFFF } /* Name.Variable.Global */ +body:not([data-theme="light"]) .highlight .vi { color: #40FFFF } /* Name.Variable.Instance */ 
+body:not([data-theme="light"]) .highlight .vm { color: #40FFFF } /* Name.Variable.Magic */ +body:not([data-theme="light"]) .highlight .il { color: #51B2FD } /* Literal.Number.Integer.Long */ +} +} \ No newline at end of file diff --git a/examples/defense/__init__.py b/docs/build/html/_static/scripts/furo-extensions.js similarity index 100% rename from examples/defense/__init__.py rename to docs/build/html/_static/scripts/furo-extensions.js diff --git a/docs/build/html/_static/scripts/furo.js b/docs/build/html/_static/scripts/furo.js new file mode 100644 index 00000000..87e1767f --- /dev/null +++ b/docs/build/html/_static/scripts/furo.js @@ -0,0 +1,3 @@ +/*! For license information please see furo.js.LICENSE.txt */ +(()=>{var t={856:function(t,e,n){var o,r;r=void 0!==n.g?n.g:"undefined"!=typeof window?window:this,o=function(){return function(t){"use strict";var e={navClass:"active",contentClass:"active",nested:!1,nestedClass:"active",offset:0,reflow:!1,events:!0},n=function(t,e,n){if(n.settings.events){var o=new CustomEvent(t,{bubbles:!0,cancelable:!0,detail:n});e.dispatchEvent(o)}},o=function(t){var e=0;if(t.offsetParent)for(;t;)e+=t.offsetTop,t=t.offsetParent;return e>=0?e:0},r=function(t){t&&t.sort(function(t,e){return o(t.content)=Math.max(document.body.scrollHeight,document.documentElement.scrollHeight,document.body.offsetHeight,document.documentElement.offsetHeight,document.body.clientHeight,document.documentElement.clientHeight)},l=function(t,e){var n=t[t.length-1];if(function(t,e){return!(!s()||!c(t.content,e,!0))}(n,e))return n;for(var o=t.length-1;o>=0;o--)if(c(t[o].content,e))return t[o]},a=function(t,e){if(e.nested&&t.parentNode){var n=t.parentNode.closest("li");n&&(n.classList.remove(e.nestedClass),a(n,e))}},i=function(t,e){if(t){var o=t.nav.closest("li");o&&(o.classList.remove(e.navClass),t.content.classList.remove(e.contentClass),a(o,e),n("gumshoeDeactivate",o,{link:t.nav,content:t.content,settings:e}))}},u=function(t,e){if(e.nested){var 
n=t.parentNode.closest("li");n&&(n.classList.add(e.nestedClass),u(n,e))}};return function(o,c){var s,a,d,f,m,v={setup:function(){s=document.querySelectorAll(o),a=[],Array.prototype.forEach.call(s,function(t){var e=document.getElementById(decodeURIComponent(t.hash.substr(1)));e&&a.push({nav:t,content:e})}),r(a)},detect:function(){var t=l(a,m);t?d&&t.content===d.content||(i(d,m),function(t,e){if(t){var o=t.nav.closest("li");o&&(o.classList.add(e.navClass),t.content.classList.add(e.contentClass),u(o,e),n("gumshoeActivate",o,{link:t.nav,content:t.content,settings:e}))}}(t,m),d=t):d&&(i(d,m),d=null)}},h=function(e){f&&t.cancelAnimationFrame(f),f=t.requestAnimationFrame(v.detect)},g=function(e){f&&t.cancelAnimationFrame(f),f=t.requestAnimationFrame(function(){r(a),v.detect()})};return v.destroy=function(){d&&i(d,m),t.removeEventListener("scroll",h,!1),m.reflow&&t.removeEventListener("resize",g,!1),a=null,s=null,d=null,f=null,m=null},m=function(){var t={};return Array.prototype.forEach.call(arguments,function(e){for(var n in e){if(!e.hasOwnProperty(n))return;t[n]=e[n]}}),t}(e,c||{}),v.setup(),v.detect(),t.addEventListener("scroll",h,!1),m.reflow&&t.addEventListener("resize",g,!1),v}}(r)}.apply(e,[]),void 0===o||(t.exports=o)}},e={};function n(o){var r=e[o];if(void 0!==r)return r.exports;var c=e[o]={exports:{}};return t[o].call(c.exports,c,c.exports,n),c.exports}n.n=t=>{var e=t&&t.__esModule?()=>t.default:()=>t;return n.d(e,{a:e}),e},n.d=(t,e)=>{for(var o in e)n.o(e,o)&&!n.o(t,o)&&Object.defineProperty(t,o,{enumerable:!0,get:e[o]})},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(t){if("object"==typeof window)return window}}(),n.o=(t,e)=>Object.prototype.hasOwnProperty.call(t,e),(()=>{"use strict";var t=n(856),e=n.n(t),o=null,r=null,c=document.documentElement.scrollTop;function s(){const t=localStorage.getItem("theme")||"auto";var e;"light"!==(e=window.matchMedia("(prefers-color-scheme: 
dark)").matches?"auto"===t?"light":"light"==t?"dark":"auto":"auto"===t?"dark":"dark"==t?"light":"auto")&&"dark"!==e&&"auto"!==e&&(console.error(`Got invalid theme mode: ${e}. Resetting to auto.`),e="auto"),document.body.dataset.theme=e,localStorage.setItem("theme",e),console.log(`Changed to ${e} mode.`)}function l(){!function(){const t=document.getElementsByClassName("theme-toggle");Array.from(t).forEach(t=>{t.addEventListener("click",s)})}(),function(){let t=0,e=!1;window.addEventListener("scroll",function(n){t=window.scrollY,e||(window.requestAnimationFrame(function(){var n;(function(t){t>0?r.classList.add("scrolled"):r.classList.remove("scrolled")})(n=t),function(t){t<64?document.documentElement.classList.remove("show-back-to-top"):tc&&document.documentElement.classList.remove("show-back-to-top"),c=t}(n),function(t){null!==o&&(0==t?o.scrollTo(0,0):Math.ceil(t)>=Math.floor(document.documentElement.scrollHeight-window.innerHeight)?o.scrollTo(0,o.scrollHeight):document.querySelector(".scroll-current"))}(n),e=!1}),e=!0)}),window.scroll()}(),null!==o&&new(e())(".toc-tree a",{reflow:!0,recursive:!0,navClass:"scroll-current",offset:()=>{let t=parseFloat(getComputedStyle(document.documentElement).fontSize);const e=r.getBoundingClientRect();return e.top+e.height+2.5*t+1}})}document.addEventListener("DOMContentLoaded",function(){document.body.parentNode.classList.remove("no-js"),r=document.querySelector("header"),o=document.querySelector(".toc-scroll"),l()})})()})(); +//# sourceMappingURL=furo.js.map \ No newline at end of file diff --git a/docs/build/html/_static/scripts/furo.js.LICENSE.txt b/docs/build/html/_static/scripts/furo.js.LICENSE.txt new file mode 100644 index 00000000..1632189c --- /dev/null +++ b/docs/build/html/_static/scripts/furo.js.LICENSE.txt @@ -0,0 +1,7 @@ +/*! + * gumshoejs v5.1.2 (patched by @pradyunsg) + * A simple, framework-agnostic scrollspy script. 
+ * (c) 2019 Chris Ferdinandi + * MIT License + * http://github.com/cferdinandi/gumshoe + */ diff --git a/docs/build/html/_static/scripts/furo.js.map b/docs/build/html/_static/scripts/furo.js.map new file mode 100644 index 00000000..3b316f3a --- /dev/null +++ b/docs/build/html/_static/scripts/furo.js.map @@ -0,0 +1 @@ +{"version":3,"file":"scripts/furo.js","mappings":";iCAAA,MAQWA,SAWS,IAAX,EAAAC,EACH,EAAAA,EACkB,oBAAXC,OACLA,OACAC,KAbO,EAAF,WACP,OAaJ,SAAUD,GACR,aAMA,IAAIE,EAAW,CAEbC,SAAU,SACVC,aAAc,SAGdC,QAAQ,EACRC,YAAa,SAGbC,OAAQ,EACRC,QAAQ,EAGRC,QAAQ,GA6BNC,EAAY,SAAUC,EAAMC,EAAMC,GAEpC,GAAKA,EAAOC,SAASL,OAArB,CAGA,IAAIM,EAAQ,IAAIC,YAAYL,EAAM,CAChCM,SAAS,EACTC,YAAY,EACZL,OAAQA,IAIVD,EAAKO,cAAcJ,EAVgB,CAWrC,EAOIK,EAAe,SAAUR,GAC3B,IAAIS,EAAW,EACf,GAAIT,EAAKU,aACP,KAAOV,GACLS,GAAYT,EAAKW,UACjBX,EAAOA,EAAKU,aAGhB,OAAOD,GAAY,EAAIA,EAAW,CACpC,EAMIG,EAAe,SAAUC,GACvBA,GACFA,EAASC,KAAK,SAAUC,EAAOC,GAG7B,OAFcR,EAAaO,EAAME,SACnBT,EAAaQ,EAAMC,UACF,EACxB,CACT,EAEJ,EAwCIC,EAAW,SAAUlB,EAAME,EAAUiB,GACvC,IAAIC,EAASpB,EAAKqB,wBACd1B,EAnCU,SAAUO,GAExB,MAA+B,mBAApBA,EAASP,OACX2B,WAAWpB,EAASP,UAItB2B,WAAWpB,EAASP,OAC7B,CA2Be4B,CAAUrB,GACvB,OAAIiB,EAEAK,SAASJ,EAAOD,OAAQ,KACvB/B,EAAOqC,aAAeC,SAASC,gBAAgBC,cAG7CJ,SAASJ,EAAOS,IAAK,KAAOlC,CACrC,EAMImC,EAAa,WACf,OACEC,KAAKC,KAAK5C,EAAOqC,YAAcrC,EAAO6C,cAnCjCF,KAAKG,IACVR,SAASS,KAAKC,aACdV,SAASC,gBAAgBS,aACzBV,SAASS,KAAKE,aACdX,SAASC,gBAAgBU,aACzBX,SAASS,KAAKP,aACdF,SAASC,gBAAgBC,aAkC7B,EAmBIU,EAAY,SAAUzB,EAAUX,GAClC,IAAIqC,EAAO1B,EAASA,EAAS2B,OAAS,GACtC,GAbgB,SAAUC,EAAMvC,GAChC,SAAI4B,MAAgBZ,EAASuB,EAAKxB,QAASf,GAAU,GAEvD,CAUMwC,CAAYH,EAAMrC,GAAW,OAAOqC,EACxC,IAAK,IAAII,EAAI9B,EAAS2B,OAAS,EAAGG,GAAK,EAAGA,IACxC,GAAIzB,EAASL,EAAS8B,GAAG1B,QAASf,GAAW,OAAOW,EAAS8B,EAEjE,EAOIC,EAAmB,SAAUC,EAAK3C,GAEpC,GAAKA,EAAST,QAAWoD,EAAIC,WAA7B,CAGA,IAAIC,EAAKF,EAAIC,WAAWE,QAAQ,MAC3BD,IAGLA,EAAGE,UAAUC,OAAOhD,EAASR,aAG7BkD,EAAiBG,EAAI7C,GAV0B,CAWjD,EAOIiD,EAAa,SAAUC,EAAOlD,GAEhC,GAAKkD,EAAL,CAGA,IAAIL,EAAKK,EAAMP,IAAIG,QAAQ,MACtBD,IAGLA,EAAGE,UAAUC,OAAOhD,
EAASX,UAC7B6D,EAAMnC,QAAQgC,UAAUC,OAAOhD,EAASV,cAGxCoD,EAAiBG,EAAI7C,GAGrBJ,EAAU,oBAAqBiD,EAAI,CACjCM,KAAMD,EAAMP,IACZ5B,QAASmC,EAAMnC,QACff,SAAUA,IAjBM,CAmBpB,EAOIoD,EAAiB,SAAUT,EAAK3C,GAElC,GAAKA,EAAST,OAAd,CAGA,IAAIsD,EAAKF,EAAIC,WAAWE,QAAQ,MAC3BD,IAGLA,EAAGE,UAAUM,IAAIrD,EAASR,aAG1B4D,EAAeP,EAAI7C,GAVS,CAW9B,EA6LA,OA1JkB,SAAUsD,EAAUC,GAKpC,IACIC,EAAU7C,EAAU8C,EAASC,EAAS1D,EADtC2D,EAAa,CAUjBA,MAAmB,WAEjBH,EAAWhC,SAASoC,iBAAiBN,GAGrC3C,EAAW,GAGXkD,MAAMC,UAAUC,QAAQC,KAAKR,EAAU,SAAUjB,GAE/C,IAAIxB,EAAUS,SAASyC,eACrBC,mBAAmB3B,EAAK4B,KAAKC,OAAO,KAEjCrD,GAGLJ,EAAS0D,KAAK,CACZ1B,IAAKJ,EACLxB,QAASA,GAEb,GAGAL,EAAaC,EACf,EAKAgD,OAAoB,WAElB,IAAIW,EAASlC,EAAUzB,EAAUX,GAG5BsE,EASDb,GAAWa,EAAOvD,UAAY0C,EAAQ1C,UAG1CkC,EAAWQ,EAASzD,GAzFT,SAAUkD,EAAOlD,GAE9B,GAAKkD,EAAL,CAGA,IAAIL,EAAKK,EAAMP,IAAIG,QAAQ,MACtBD,IAGLA,EAAGE,UAAUM,IAAIrD,EAASX,UAC1B6D,EAAMnC,QAAQgC,UAAUM,IAAIrD,EAASV,cAGrC8D,EAAeP,EAAI7C,GAGnBJ,EAAU,kBAAmBiD,EAAI,CAC/BM,KAAMD,EAAMP,IACZ5B,QAASmC,EAAMnC,QACff,SAAUA,IAjBM,CAmBpB,CAqEIuE,CAASD,EAAQtE,GAGjByD,EAAUa,GAfJb,IACFR,EAAWQ,EAASzD,GACpByD,EAAU,KAchB,GAMIe,EAAgB,SAAUvE,GAExByD,GACFxE,EAAOuF,qBAAqBf,GAI9BA,EAAUxE,EAAOwF,sBAAsBf,EAAWgB,OACpD,EAMIC,EAAgB,SAAU3E,GAExByD,GACFxE,EAAOuF,qBAAqBf,GAI9BA,EAAUxE,EAAOwF,sBAAsB,WACrChE,EAAaC,GACbgD,EAAWgB,QACb,EACF,EAkDA,OA7CAhB,EAAWkB,QAAU,WAEfpB,GACFR,EAAWQ,EAASzD,GAItBd,EAAO4F,oBAAoB,SAAUN,GAAe,GAChDxE,EAASN,QACXR,EAAO4F,oBAAoB,SAAUF,GAAe,GAItDjE,EAAW,KACX6C,EAAW,KACXC,EAAU,KACVC,EAAU,KACV1D,EAAW,IACb,EAOEA,EA3XS,WACX,IAAI+E,EAAS,CAAC,EAOd,OANAlB,MAAMC,UAAUC,QAAQC,KAAKgB,UAAW,SAAUC,GAChD,IAAK,IAAIC,KAAOD,EAAK,CACnB,IAAKA,EAAIE,eAAeD,GAAM,OAC9BH,EAAOG,GAAOD,EAAIC,EACpB,CACF,GACOH,CACT,CAkXeK,CAAOhG,EAAUmE,GAAW,CAAC,GAGxCI,EAAW0B,QAGX1B,EAAWgB,SAGXzF,EAAOoG,iBAAiB,SAAUd,GAAe,GAC7CxE,EAASN,QACXR,EAAOoG,iBAAiB,SAAUV,GAAe,GAS9CjB,CACT,CAOF,CArcW4B,CAAQvG,EAChB,UAFM,SAEN,oB,GCXDwG,EAA2B,CAAC,EAGhC,SAASC,EAAoBC,GAE5B,IAAIC,EAAeH,EAAyBE,GAC5C,QAAqBE,IAAjBD,EACH,OAAOA,EAAaE,QAGrB,IAAIC,EAASN,EAAyBE,GAAY,CAGjDG,QAAS,CAAC,GAOX,OAH
AE,EAAoBL,GAAU1B,KAAK8B,EAAOD,QAASC,EAAQA,EAAOD,QAASJ,GAGpEK,EAAOD,OACf,CCrBAJ,EAAoBO,EAAKF,IACxB,IAAIG,EAASH,GAAUA,EAAOI,WAC7B,IAAOJ,EAAiB,QACxB,IAAM,EAEP,OADAL,EAAoBU,EAAEF,EAAQ,CAAEG,EAAGH,IAC5BA,GCLRR,EAAoBU,EAAI,CAACN,EAASQ,KACjC,IAAI,IAAInB,KAAOmB,EACXZ,EAAoBa,EAAED,EAAYnB,KAASO,EAAoBa,EAAET,EAASX,IAC5EqB,OAAOC,eAAeX,EAASX,EAAK,CAAEuB,YAAY,EAAMC,IAAKL,EAAWnB,MCJ3EO,EAAoBxG,EAAI,WACvB,GAA0B,iBAAf0H,WAAyB,OAAOA,WAC3C,IACC,OAAOxH,MAAQ,IAAIyH,SAAS,cAAb,EAChB,CAAE,MAAOC,GACR,GAAsB,iBAAX3H,OAAqB,OAAOA,MACxC,CACA,CAPuB,GCAxBuG,EAAoBa,EAAI,CAACrB,EAAK6B,IAAUP,OAAOzC,UAAUqB,eAAenB,KAAKiB,EAAK6B,G,yCCK9EC,EAAY,KACZC,EAAS,KACTC,EAAgBzF,SAASC,gBAAgByF,UA4E7C,SAASC,IACP,MAAMC,EAAeC,aAAaC,QAAQ,UAAY,OAZxD,IAAkBC,EACH,WADGA,EAaIrI,OAAOsI,WAAW,gCAAgCC,QAI/C,SAAjBL,EACO,QACgB,SAAhBA,EACA,OAEA,OAIU,SAAjBA,EACO,OACgB,QAAhBA,EACA,QAEA,SA9BoB,SAATG,GAA4B,SAATA,IACzCG,QAAQC,MAAM,2BAA2BJ,yBACzCA,EAAO,QAGT/F,SAASS,KAAK2F,QAAQC,MAAQN,EAC9BF,aAAaS,QAAQ,QAASP,GAC9BG,QAAQK,IAAI,cAAcR,UA0B5B,CAmDA,SAASlC,KART,WAEE,MAAM2C,EAAUxG,SAASyG,uBAAuB,gBAChDpE,MAAMqE,KAAKF,GAASjE,QAASoE,IAC3BA,EAAI7C,iBAAiB,QAAS6B,IAElC,CAGEiB,GA/CF,WAEE,IAAIC,EAA6B,EAC7BC,GAAU,EAEdpJ,OAAOoG,iBAAiB,SAAU,SAAUuB,GAC1CwB,EAA6BnJ,OAAOqJ,QAE/BD,IACHpJ,OAAOwF,sBAAsB,WAzDnC,IAAuB8D,GArDvB,SAAgCA,GAC1BA,EAAY,EACdxB,EAAOjE,UAAUM,IAAI,YAErB2D,EAAOjE,UAAUC,OAAO,WAE5B,EAgDEyF,CADqBD,EA0DDH,GAvGtB,SAAmCG,GAC7BA,EAXmB,GAYrBhH,SAASC,gBAAgBsB,UAAUC,OAAO,oBAEtCwF,EAAYvB,EACdzF,SAASC,gBAAgBsB,UAAUM,IAAI,oBAC9BmF,EAAYvB,GACrBzF,SAASC,gBAAgBsB,UAAUC,OAAO,oBAG9CiE,EAAgBuB,CAClB,CAoCEE,CAA0BF,GAlC5B,SAA6BA,GACT,OAAdzB,IAKa,GAAbyB,EACFzB,EAAU4B,SAAS,EAAG,GAGtB9G,KAAKC,KAAK0G,IACV3G,KAAK+G,MAAMpH,SAASC,gBAAgBS,aAAehD,OAAOqC,aAE1DwF,EAAU4B,SAAS,EAAG5B,EAAU7E,cAGhBV,SAASqH,cAAc,mBAc3C,CAKEC,CAAoBN,GAwDdF,GAAU,CACZ,GAEAA,GAAU,EAEd,GACApJ,OAAO6J,QACT,CA8BEC,GA3BkB,OAAdjC,GAKJ,IAAI,IAAJ,CAAY,cAAe,CACzBrH,QAAQ,EACRuJ,WAAW,EACX5J,SAAU,iBACVI,OAAQ,KACN,IAAIyJ,EAAM9H,WAAW+H,iBAAiB3H,SAASC,iBAAiB2H,UAChE,MAAMC,EAAarC,EAAO7F,wBAC1B,OAAOkI,EAAW1H,IAAM
0H,EAAWC,OAAS,IAAMJ,EAAM,IAiB9D,CAcA1H,SAAS8D,iBAAiB,mBAT1B,WACE9D,SAASS,KAAKW,WAAWG,UAAUC,OAAO,SAE1CgE,EAASxF,SAASqH,cAAc,UAChC9B,EAAYvF,SAASqH,cAAc,eAEnCxD,GACF,E","sources":["webpack:///./src/furo/assets/scripts/gumshoe-patched.js","webpack:///webpack/bootstrap","webpack:///webpack/runtime/compat get default export","webpack:///webpack/runtime/define property getters","webpack:///webpack/runtime/global","webpack:///webpack/runtime/hasOwnProperty shorthand","webpack:///./src/furo/assets/scripts/furo.js"],"sourcesContent":["/*!\n * gumshoejs v5.1.2 (patched by @pradyunsg)\n * A simple, framework-agnostic scrollspy script.\n * (c) 2019 Chris Ferdinandi\n * MIT License\n * http://github.com/cferdinandi/gumshoe\n */\n\n(function (root, factory) {\n if (typeof define === \"function\" && define.amd) {\n define([], function () {\n return factory(root);\n });\n } else if (typeof exports === \"object\") {\n module.exports = factory(root);\n } else {\n root.Gumshoe = factory(root);\n }\n})(\n typeof global !== \"undefined\"\n ? global\n : typeof window !== \"undefined\"\n ? 
window\n : this,\n function (window) {\n \"use strict\";\n\n //\n // Defaults\n //\n\n var defaults = {\n // Active classes\n navClass: \"active\",\n contentClass: \"active\",\n\n // Nested navigation\n nested: false,\n nestedClass: \"active\",\n\n // Offset & reflow\n offset: 0,\n reflow: false,\n\n // Event support\n events: true,\n };\n\n //\n // Methods\n //\n\n /**\n * Merge two or more objects together.\n * @param {Object} objects The objects to merge together\n * @returns {Object} Merged values of defaults and options\n */\n var extend = function () {\n var merged = {};\n Array.prototype.forEach.call(arguments, function (obj) {\n for (var key in obj) {\n if (!obj.hasOwnProperty(key)) return;\n merged[key] = obj[key];\n }\n });\n return merged;\n };\n\n /**\n * Emit a custom event\n * @param {String} type The event type\n * @param {Node} elem The element to attach the event to\n * @param {Object} detail Any details to pass along with the event\n */\n var emitEvent = function (type, elem, detail) {\n // Make sure events are enabled\n if (!detail.settings.events) return;\n\n // Create a new event\n var event = new CustomEvent(type, {\n bubbles: true,\n cancelable: true,\n detail: detail,\n });\n\n // Dispatch the event\n elem.dispatchEvent(event);\n };\n\n /**\n * Get an element's distance from the top of the Document.\n * @param {Node} elem The element\n * @return {Number} Distance from the top in pixels\n */\n var getOffsetTop = function (elem) {\n var location = 0;\n if (elem.offsetParent) {\n while (elem) {\n location += elem.offsetTop;\n elem = elem.offsetParent;\n }\n }\n return location >= 0 ? 
location : 0;\n };\n\n /**\n * Sort content from first to last in the DOM\n * @param {Array} contents The content areas\n */\n var sortContents = function (contents) {\n if (contents) {\n contents.sort(function (item1, item2) {\n var offset1 = getOffsetTop(item1.content);\n var offset2 = getOffsetTop(item2.content);\n if (offset1 < offset2) return -1;\n return 1;\n });\n }\n };\n\n /**\n * Get the offset to use for calculating position\n * @param {Object} settings The settings for this instantiation\n * @return {Float} The number of pixels to offset the calculations\n */\n var getOffset = function (settings) {\n // if the offset is a function run it\n if (typeof settings.offset === \"function\") {\n return parseFloat(settings.offset());\n }\n\n // Otherwise, return it as-is\n return parseFloat(settings.offset);\n };\n\n /**\n * Get the document element's height\n * @private\n * @returns {Number}\n */\n var getDocumentHeight = function () {\n return Math.max(\n document.body.scrollHeight,\n document.documentElement.scrollHeight,\n document.body.offsetHeight,\n document.documentElement.offsetHeight,\n document.body.clientHeight,\n document.documentElement.clientHeight,\n );\n };\n\n /**\n * Determine if an element is in view\n * @param {Node} elem The element\n * @param {Object} settings The settings for this instantiation\n * @param {Boolean} bottom If true, check if element is above bottom of viewport instead\n * @return {Boolean} Returns true if element is in the viewport\n */\n var isInView = function (elem, settings, bottom) {\n var bounds = elem.getBoundingClientRect();\n var offset = getOffset(settings);\n if (bottom) {\n return (\n parseInt(bounds.bottom, 10) <\n (window.innerHeight || document.documentElement.clientHeight)\n );\n }\n return parseInt(bounds.top, 10) <= offset;\n };\n\n /**\n * Check if at the bottom of the viewport\n * @return {Boolean} If true, page is at the bottom of the viewport\n */\n var isAtBottom = function () {\n if (\n 
Math.ceil(window.innerHeight + window.pageYOffset) >=\n getDocumentHeight()\n )\n return true;\n return false;\n };\n\n /**\n * Check if the last item should be used (even if not at the top of the page)\n * @param {Object} item The last item\n * @param {Object} settings The settings for this instantiation\n * @return {Boolean} If true, use the last item\n */\n var useLastItem = function (item, settings) {\n if (isAtBottom() && isInView(item.content, settings, true)) return true;\n return false;\n };\n\n /**\n * Get the active content\n * @param {Array} contents The content areas\n * @param {Object} settings The settings for this instantiation\n * @return {Object} The content area and matching navigation link\n */\n var getActive = function (contents, settings) {\n var last = contents[contents.length - 1];\n if (useLastItem(last, settings)) return last;\n for (var i = contents.length - 1; i >= 0; i--) {\n if (isInView(contents[i].content, settings)) return contents[i];\n }\n };\n\n /**\n * Deactivate parent navs in a nested navigation\n * @param {Node} nav The starting navigation element\n * @param {Object} settings The settings for this instantiation\n */\n var deactivateNested = function (nav, settings) {\n // If nesting isn't activated, bail\n if (!settings.nested || !nav.parentNode) return;\n\n // Get the parent navigation\n var li = nav.parentNode.closest(\"li\");\n if (!li) return;\n\n // Remove the active class\n li.classList.remove(settings.nestedClass);\n\n // Apply recursively to any parent navigation elements\n deactivateNested(li, settings);\n };\n\n /**\n * Deactivate a nav and content area\n * @param {Object} items The nav item and content to deactivate\n * @param {Object} settings The settings for this instantiation\n */\n var deactivate = function (items, settings) {\n // Make sure there are items to deactivate\n if (!items) return;\n\n // Get the parent list item\n var li = items.nav.closest(\"li\");\n if (!li) return;\n\n // Remove the active class 
from the nav and content\n li.classList.remove(settings.navClass);\n items.content.classList.remove(settings.contentClass);\n\n // Deactivate any parent navs in a nested navigation\n deactivateNested(li, settings);\n\n // Emit a custom event\n emitEvent(\"gumshoeDeactivate\", li, {\n link: items.nav,\n content: items.content,\n settings: settings,\n });\n };\n\n /**\n * Activate parent navs in a nested navigation\n * @param {Node} nav The starting navigation element\n * @param {Object} settings The settings for this instantiation\n */\n var activateNested = function (nav, settings) {\n // If nesting isn't activated, bail\n if (!settings.nested) return;\n\n // Get the parent navigation\n var li = nav.parentNode.closest(\"li\");\n if (!li) return;\n\n // Add the active class\n li.classList.add(settings.nestedClass);\n\n // Apply recursively to any parent navigation elements\n activateNested(li, settings);\n };\n\n /**\n * Activate a nav and content area\n * @param {Object} items The nav item and content to activate\n * @param {Object} settings The settings for this instantiation\n */\n var activate = function (items, settings) {\n // Make sure there are items to activate\n if (!items) return;\n\n // Get the parent list item\n var li = items.nav.closest(\"li\");\n if (!li) return;\n\n // Add the active class to the nav and content\n li.classList.add(settings.navClass);\n items.content.classList.add(settings.contentClass);\n\n // Activate any parent navs in a nested navigation\n activateNested(li, settings);\n\n // Emit a custom event\n emitEvent(\"gumshoeActivate\", li, {\n link: items.nav,\n content: items.content,\n settings: settings,\n });\n };\n\n /**\n * Create the Constructor object\n * @param {String} selector The selector to use for navigation items\n * @param {Object} options User options and settings\n */\n var Constructor = function (selector, options) {\n //\n // Variables\n //\n\n var publicAPIs = {};\n var navItems, contents, current, timeout, 
settings;\n\n //\n // Methods\n //\n\n /**\n * Set variables from DOM elements\n */\n publicAPIs.setup = function () {\n // Get all nav items\n navItems = document.querySelectorAll(selector);\n\n // Create contents array\n contents = [];\n\n // Loop through each item, get it's matching content, and push to the array\n Array.prototype.forEach.call(navItems, function (item) {\n // Get the content for the nav item\n var content = document.getElementById(\n decodeURIComponent(item.hash.substr(1)),\n );\n if (!content) return;\n\n // Push to the contents array\n contents.push({\n nav: item,\n content: content,\n });\n });\n\n // Sort contents by the order they appear in the DOM\n sortContents(contents);\n };\n\n /**\n * Detect which content is currently active\n */\n publicAPIs.detect = function () {\n // Get the active content\n var active = getActive(contents, settings);\n\n // if there's no active content, deactivate and bail\n if (!active) {\n if (current) {\n deactivate(current, settings);\n current = null;\n }\n return;\n }\n\n // If the active content is the one currently active, do nothing\n if (current && active.content === current.content) return;\n\n // Deactivate the current content and activate the new content\n deactivate(current, settings);\n activate(active, settings);\n\n // Update the currently active content\n current = active;\n };\n\n /**\n * Detect the active content on scroll\n * Debounced for performance\n */\n var scrollHandler = function (event) {\n // If there's a timer, cancel it\n if (timeout) {\n window.cancelAnimationFrame(timeout);\n }\n\n // Setup debounce callback\n timeout = window.requestAnimationFrame(publicAPIs.detect);\n };\n\n /**\n * Update content sorting on resize\n * Debounced for performance\n */\n var resizeHandler = function (event) {\n // If there's a timer, cancel it\n if (timeout) {\n window.cancelAnimationFrame(timeout);\n }\n\n // Setup debounce callback\n timeout = window.requestAnimationFrame(function () {\n 
sortContents(contents);\n publicAPIs.detect();\n });\n };\n\n /**\n * Destroy the current instantiation\n */\n publicAPIs.destroy = function () {\n // Undo DOM changes\n if (current) {\n deactivate(current, settings);\n }\n\n // Remove event listeners\n window.removeEventListener(\"scroll\", scrollHandler, false);\n if (settings.reflow) {\n window.removeEventListener(\"resize\", resizeHandler, false);\n }\n\n // Reset variables\n contents = null;\n navItems = null;\n current = null;\n timeout = null;\n settings = null;\n };\n\n /**\n * Initialize the current instantiation\n */\n var init = function () {\n // Merge user options into defaults\n settings = extend(defaults, options || {});\n\n // Setup variables based on the current DOM\n publicAPIs.setup();\n\n // Find the currently active content\n publicAPIs.detect();\n\n // Setup event listeners\n window.addEventListener(\"scroll\", scrollHandler, false);\n if (settings.reflow) {\n window.addEventListener(\"resize\", resizeHandler, false);\n }\n };\n\n //\n // Initialize and return the public APIs\n //\n\n init();\n return publicAPIs;\n };\n\n //\n // Return the Constructor\n //\n\n return Constructor;\n },\n);\n","// The module cache\nvar __webpack_module_cache__ = {};\n\n// The require function\nfunction __webpack_require__(moduleId) {\n\t// Check if module is in cache\n\tvar cachedModule = __webpack_module_cache__[moduleId];\n\tif (cachedModule !== undefined) {\n\t\treturn cachedModule.exports;\n\t}\n\t// Create a new module (and put it into the cache)\n\tvar module = __webpack_module_cache__[moduleId] = {\n\t\t// no module.id needed\n\t\t// no module.loaded needed\n\t\texports: {}\n\t};\n\n\t// Execute the module function\n\t__webpack_modules__[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n\n\t// Return the exports of the module\n\treturn module.exports;\n}\n\n","// getDefaultExport function for compatibility with non-harmony modules\n__webpack_require__.n = (module) => {\n\tvar 
getter = module && module.__esModule ?\n\t\t() => (module['default']) :\n\t\t() => (module);\n\t__webpack_require__.d(getter, { a: getter });\n\treturn getter;\n};","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.g = (function() {\n\tif (typeof globalThis === 'object') return globalThis;\n\ttry {\n\t\treturn this || new Function('return this')();\n\t} catch (e) {\n\t\tif (typeof window === 'object') return window;\n\t}\n})();","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","import Gumshoe from \"./gumshoe-patched.js\";\n\n////////////////////////////////////////////////////////////////////////////////\n// Scroll Handling\n////////////////////////////////////////////////////////////////////////////////\nvar tocScroll = null;\nvar header = null;\nvar lastScrollTop = document.documentElement.scrollTop;\nconst GO_TO_TOP_OFFSET = 64;\n\nfunction scrollHandlerForHeader(positionY) {\n if (positionY > 0) {\n header.classList.add(\"scrolled\");\n } else {\n header.classList.remove(\"scrolled\");\n }\n}\n\nfunction scrollHandlerForBackToTop(positionY) {\n if (positionY < GO_TO_TOP_OFFSET) {\n document.documentElement.classList.remove(\"show-back-to-top\");\n } else {\n if (positionY < lastScrollTop) {\n document.documentElement.classList.add(\"show-back-to-top\");\n } else if (positionY > lastScrollTop) {\n document.documentElement.classList.remove(\"show-back-to-top\");\n }\n }\n lastScrollTop = positionY;\n}\n\nfunction scrollHandlerForTOC(positionY) {\n if (tocScroll === null) {\n return;\n }\n\n // top of page.\n if (positionY == 0) {\n tocScroll.scrollTo(0, 0);\n } else if (\n // bottom of page.\n Math.ceil(positionY) >=\n 
Math.floor(document.documentElement.scrollHeight - window.innerHeight)\n ) {\n tocScroll.scrollTo(0, tocScroll.scrollHeight);\n } else {\n // somewhere in the middle.\n const current = document.querySelector(\".scroll-current\");\n if (current == null) {\n return;\n }\n\n // https://github.com/pypa/pip/issues/9159 This breaks scroll behaviours.\n // // scroll the currently \"active\" heading in toc, into view.\n // const rect = current.getBoundingClientRect();\n // if (0 > rect.top) {\n // current.scrollIntoView(true); // the argument is \"alignTop\"\n // } else if (rect.bottom > window.innerHeight) {\n // current.scrollIntoView(false);\n // }\n }\n}\n\nfunction scrollHandler(positionY) {\n scrollHandlerForHeader(positionY);\n scrollHandlerForBackToTop(positionY);\n scrollHandlerForTOC(positionY);\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Theme Toggle\n////////////////////////////////////////////////////////////////////////////////\nfunction setTheme(mode) {\n if (mode !== \"light\" && mode !== \"dark\" && mode !== \"auto\") {\n console.error(`Got invalid theme mode: ${mode}. 
Resetting to auto.`);\n mode = \"auto\";\n }\n\n document.body.dataset.theme = mode;\n localStorage.setItem(\"theme\", mode);\n console.log(`Changed to ${mode} mode.`);\n}\n\nfunction cycleThemeOnce() {\n const currentTheme = localStorage.getItem(\"theme\") || \"auto\";\n const prefersDark = window.matchMedia(\"(prefers-color-scheme: dark)\").matches;\n\n if (prefersDark) {\n // Auto (dark) -> Light -> Dark\n if (currentTheme === \"auto\") {\n setTheme(\"light\");\n } else if (currentTheme == \"light\") {\n setTheme(\"dark\");\n } else {\n setTheme(\"auto\");\n }\n } else {\n // Auto (light) -> Dark -> Light\n if (currentTheme === \"auto\") {\n setTheme(\"dark\");\n } else if (currentTheme == \"dark\") {\n setTheme(\"light\");\n } else {\n setTheme(\"auto\");\n }\n }\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Setup\n////////////////////////////////////////////////////////////////////////////////\nfunction setupScrollHandler() {\n // Taken from https://developer.mozilla.org/en-US/docs/Web/API/Document/scroll_event\n let last_known_scroll_position = 0;\n let ticking = false;\n\n window.addEventListener(\"scroll\", function (e) {\n last_known_scroll_position = window.scrollY;\n\n if (!ticking) {\n window.requestAnimationFrame(function () {\n scrollHandler(last_known_scroll_position);\n ticking = false;\n });\n\n ticking = true;\n }\n });\n window.scroll();\n}\n\nfunction setupScrollSpy() {\n if (tocScroll === null) {\n return;\n }\n\n // Scrollspy -- highlight table on contents, based on scroll\n new Gumshoe(\".toc-tree a\", {\n reflow: true,\n recursive: true,\n navClass: \"scroll-current\",\n offset: () => {\n let rem = parseFloat(getComputedStyle(document.documentElement).fontSize);\n const headerRect = header.getBoundingClientRect();\n return headerRect.top + headerRect.height + 2.5 * rem + 1;\n },\n });\n}\n\nfunction setupTheme() {\n // Attach event handlers for toggling themes\n const buttons = 
document.getElementsByClassName(\"theme-toggle\");\n Array.from(buttons).forEach((btn) => {\n btn.addEventListener(\"click\", cycleThemeOnce);\n });\n}\n\nfunction setup() {\n setupTheme();\n setupScrollHandler();\n setupScrollSpy();\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Main entrypoint\n////////////////////////////////////////////////////////////////////////////////\nfunction main() {\n document.body.parentNode.classList.remove(\"no-js\");\n\n header = document.querySelector(\"header\");\n tocScroll = document.querySelector(\".toc-scroll\");\n\n setup();\n}\n\ndocument.addEventListener(\"DOMContentLoaded\", main);\n"],"names":["root","g","window","this","defaults","navClass","contentClass","nested","nestedClass","offset","reflow","events","emitEvent","type","elem","detail","settings","event","CustomEvent","bubbles","cancelable","dispatchEvent","getOffsetTop","location","offsetParent","offsetTop","sortContents","contents","sort","item1","item2","content","isInView","bottom","bounds","getBoundingClientRect","parseFloat","getOffset","parseInt","innerHeight","document","documentElement","clientHeight","top","isAtBottom","Math","ceil","pageYOffset","max","body","scrollHeight","offsetHeight","getActive","last","length","item","useLastItem","i","deactivateNested","nav","parentNode","li","closest","classList","remove","deactivate","items","link","activateNested","add","selector","options","navItems","current","timeout","publicAPIs","querySelectorAll","Array","prototype","forEach","call","getElementById","decodeURIComponent","hash","substr","push","active","activate","scrollHandler","cancelAnimationFrame","requestAnimationFrame","detect","resizeHandler","destroy","removeEventListener","merged","arguments","obj","key","hasOwnProperty","extend","setup","addEventListener","factory","__webpack_module_cache__","__webpack_require__","moduleId","cachedModule","undefined","exports","module","__webpack_modules__","n","getter","__e
sModule","d","a","definition","o","Object","defineProperty","enumerable","get","globalThis","Function","e","prop","tocScroll","header","lastScrollTop","scrollTop","cycleThemeOnce","currentTheme","localStorage","getItem","mode","matchMedia","matches","console","error","dataset","theme","setItem","log","buttons","getElementsByClassName","from","btn","setupTheme","last_known_scroll_position","ticking","scrollY","positionY","scrollHandlerForHeader","scrollHandlerForBackToTop","scrollTo","floor","querySelector","scrollHandlerForTOC","scroll","setupScrollHandler","recursive","rem","getComputedStyle","fontSize","headerRect","height"],"sourceRoot":""} \ No newline at end of file diff --git a/docs/build/html/_static/searchtools.js b/docs/build/html/_static/searchtools.js new file mode 100644 index 00000000..91f4be57 --- /dev/null +++ b/docs/build/html/_static/searchtools.js @@ -0,0 +1,635 @@ +/* + * Sphinx JavaScript utilities for the full-text search. + */ +"use strict"; + +/** + * Simple result scoring code. + */ +if (typeof Scorer === "undefined") { + var Scorer = { + // Implement the following function to further tweak the score for each result + // The function takes a result array [docname, title, anchor, descr, score, filename] + // and returns the new score. + /* + score: result => { + const [docname, title, anchor, descr, score, filename, kind] = result + return score + }, + */ + + // query matches the full name of an object + objNameMatch: 11, + // or matches in the last dotted part of the object name + objPartialMatch: 6, + // Additive scores depending on the priority of the object + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, + // Used when the priority is not in the mapping. 
+ objPrioDefault: 0, + + // query found in title + title: 15, + partialTitle: 7, + // query found in terms + term: 5, + partialTerm: 2, + }; +} + +// Global search result kind enum, used by themes to style search results. +class SearchResultKind { + static get index() { return "index"; } + static get object() { return "object"; } + static get text() { return "text"; } + static get title() { return "title"; } +} + +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms, highlightTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + const contentRoot = document.documentElement.dataset.content_root; + + const [docName, title, anchor, descr, score, _filename, kind] = item; + + let listItem = document.createElement("li"); + // Add a class representing the item's type: + // can be used by a theme's CSS selector for styling + // See SearchResultKind for the class names. 
+ listItem.classList.add(`kind-${kind}`); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = contentRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = contentRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) { + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + // highlight search terms in the description + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + } + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms, anchor) + ); + // highlight search terms in the summary + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." 
+ ); + else + Search.status.innerText = Documentation.ngettext( + "Search finished, found one page matching the search query.", + "Search finished, found ${resultCount} pages matching the search query.", + resultCount, + ).replace('${resultCount}', resultCount); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms, + highlightTerms, +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms, highlightTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms, highlightTerms), + 5 + ); + } + // search finished, update title and status message + else _finishSearch(resultCount); +}; +// Helper function used by query() to order search results. +// Each input is an array of [docname, title, anchor, descr, score, filename, kind]. +// Order the results by score (in opposite order of appearance, since the +// `_displayNextItem` function uses pop() to retrieve items) and then alphabetically. +const _orderResultsByScoreThenName = (a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 1 : -1; +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. + * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. + * This is the same as ``\W+`` in Python, preserving the surrogate pair area. 
+ */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings +} + +/** + * Search Module + */ +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString, anchor) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + for (const removalQuery of [".headerlink", "script", "style"]) { + htmlElement.querySelectorAll(removalQuery).forEach((el) => { el.remove() }); + } + if (anchor) { + const anchorContent = htmlElement.querySelector(`[role="main"] ${anchor}`); + if (anchorContent) return anchorContent.textContent; + + console.warn( + `Anchored content block not found. Sphinx search tries to obtain it via DOM query '[role=main] ${anchor}'. Check your theme or template.` + ); + } + + // if anchor not specified or not found, fall back to main content + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent) return docContent.textContent; + + console.warn( + "Content block not found. Sphinx search tries to obtain it via DOM query '[role=main]'. Check your theme or template." 
+ ); + return ""; + }, + + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); + }, + + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), + + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); + } + }, + + hasIndex: () => Search._index !== null, + + deferQuery: (query) => (Search._queued_query = query), + + stopPulse: () => (Search._pulse_status = -1), + + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { + Search._pulse_status = (Search._pulse_status + 1) % 4; + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; + pulse(); + }, + + /** + * perform a search for something (or wait until index is loaded) + */ + performSearch: (query) => { + // create the required interface elements + const searchText = document.createElement("h2"); + searchText.textContent = _("Searching"); + const searchSummary = document.createElement("p"); + searchSummary.classList.add("search-summary"); + searchSummary.innerText = ""; + const searchList = document.createElement("ul"); + searchList.setAttribute("role", "list"); + searchList.classList.add("search"); + + const out = document.getElementById("search-results"); + Search.title = out.appendChild(searchText); + Search.dots = Search.title.appendChild(document.createElement("span")); + Search.status = out.appendChild(searchSummary); + Search.output = out.appendChild(searchList); + + const searchProgress = document.getElementById("search-progress"); + // Some themes don't use the search progress node + if (searchProgress) { + searchProgress.innerText = _("Preparing search..."); + } + 
Search.startPulse(); + + // index already loaded, the browser was quick! + if (Search.hasIndex()) Search.query(query); + else Search.deferQuery(query); + }, + + _parseQuery: (query) => { + // stem the search terms and add them to the correct list + const stemmer = new Stemmer(); + const searchTerms = new Set(); + const excludedTerms = new Set(); + const highlightTerms = new Set(); + const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); + splitQuery(query.trim()).forEach((queryTerm) => { + const queryTermLower = queryTerm.toLowerCase(); + + // maybe skip this "word" + // stopwords array is from language_data.js + if ( + stopwords.indexOf(queryTermLower) !== -1 || + queryTerm.match(/^\d+$/) + ) + return; + + // stem the word + let word = stemmer.stemWord(queryTermLower); + // select the correct list + if (word[0] === "-") excludedTerms.add(word.substr(1)); + else { + searchTerms.add(word); + highlightTerms.add(queryTermLower); + } + }); + + if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js + localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) + } + + // console.debug("SEARCH: searching for:"); + // console.info("required: ", [...searchTerms]); + // console.info("excluded: ", [...excludedTerms]); + + return [query, searchTerms, excludedTerms, highlightTerms, objectTerms]; + }, + + /** + * execute search (requires search index to be loaded) + */ + _performSearch: (query, searchTerms, excludedTerms, highlightTerms, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // Collect multiple result groups to be sorted separately and then ordered. + // Each is an array of [docname, title, anchor, descr, score, filename, kind]. 
+ const normalResults = []; + const nonMainIndexResults = []; + + _removeChildren(document.getElementById("search-progress")); + + const queryLower = query.toLowerCase().trim(); + for (const [title, foundTitles] of Object.entries(allTitles)) { + if (title.toLowerCase().trim().includes(queryLower) && (queryLower.length >= title.length/2)) { + for (const [file, id] of foundTitles) { + const score = Math.round(Scorer.title * queryLower.length / title.length); + const boost = titles[file] === title ? 1 : 0; // add a boost for document titles + normalResults.push([ + docNames[file], + titles[file] !== title ? `${titles[file]} > ${title}` : title, + id !== null ? "#" + id : "", + null, + score + boost, + filenames[file], + SearchResultKind.title, + ]); + } + } + } + + // search for explicit entries in index directives + for (const [entry, foundEntries] of Object.entries(indexEntries)) { + if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { + for (const [file, id, isMain] of foundEntries) { + const score = Math.round(100 * queryLower.length / entry.length); + const result = [ + docNames[file], + titles[file], + id ? "#" + id : "", + null, + score, + filenames[file], + SearchResultKind.index, + ]; + if (isMain) { + normalResults.push(result); + } else { + nonMainIndexResults.push(result); + } + } + } + } + + // lookup as object + objectTerms.forEach((term) => + normalResults.push(...Search.performObjectSearch(term, objectTerms)) + ); + + // lookup as search terms in fulltext + normalResults.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + + // let the scorer override scores with a custom scoring function + if (Scorer.score) { + normalResults.forEach((item) => (item[4] = Scorer.score(item))); + nonMainIndexResults.forEach((item) => (item[4] = Scorer.score(item))); + } + + // Sort each group of results by score and then alphabetically by name. 
+ normalResults.sort(_orderResultsByScoreThenName); + nonMainIndexResults.sort(_orderResultsByScoreThenName); + + // Combine the result groups in (reverse) order. + // Non-main index entries are typically arbitrary cross-references, + // so display them after other results. + let results = [...nonMainIndexResults, ...normalResults]; + + // remove duplicate search results + // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept + let seen = new Set(); + results = results.reverse().reduce((acc, result) => { + let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); + if (!seen.has(resultStr)) { + acc.push(result); + seen.add(resultStr); + } + return acc; + }, []); + + return results.reverse(); + }, + + query: (query) => { + const [searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms] = Search._parseQuery(query); + const results = Search._performSearch(searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms); + + // for debugging + //Search.lastresults = results.slice(); // a copy + // console.info("search results:", Search.lastresults); + + // print the results + _displayNextItem(results, results.length, searchTerms, highlightTerms); + }, + + /** + * search for object names + */ + performObjectSearch: (object, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const objects = Search._index.objects; + const objNames = Search._index.objnames; + const titles = Search._index.titles; + + const results = []; + + const objectSearchCallback = (prefix, match) => { + const name = match[4] + const fullname = (prefix ? prefix + "." : "") + name; + const fullnameLower = fullname.toLowerCase(); + if (fullnameLower.indexOf(object) < 0) return; + + let score = 0; + const parts = fullnameLower.split("."); + + // check for different match types: exact matches of full name or + // "last name" (i.e. 
last dotted part) + if (fullnameLower === object || parts.slice(-1)[0] === object) + score += Scorer.objNameMatch; + else if (parts.slice(-1)[0].indexOf(object) > -1) + score += Scorer.objPartialMatch; // matches in last name + + const objName = objNames[match[1]][2]; + const title = titles[match[0]]; + + // If more than one term searched for, we require other words to be + // found in the name/title/description + const otherTerms = new Set(objectTerms); + otherTerms.delete(object); + if (otherTerms.size > 0) { + const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); + if ( + [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) + ) + return; + } + + let anchor = match[3]; + if (anchor === "") anchor = fullname; + else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; + + const descr = objName + _(", in ") + title; + + // add custom score for some objects according to scorer + if (Scorer.objPrio.hasOwnProperty(match[2])) + score += Scorer.objPrio[match[2]]; + else score += Scorer.objPrioDefault; + + results.push([ + docNames[match[0]], + fullname, + "#" + anchor, + descr, + score, + filenames[match[0]], + SearchResultKind.object, + ]); + }; + Object.keys(objects).forEach((prefix) => + objects[prefix].forEach((array) => + objectSearchCallback(prefix, array) + ) + ); + return results; + }, + + /** + * search for full-text terms in the index + */ + performTermsSearch: (searchTerms, excludedTerms) => { + // prepare search + const terms = Search._index.terms; + const titleTerms = Search._index.titleterms; + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + + const scoreMap = new Map(); + const fileMap = new Map(); + + // perform the search on the required terms + searchTerms.forEach((word) => { + const files = []; + // find documents, if any, containing the query word in their text/title term indices + // use Object.hasOwnProperty to avoid 
mismatching against prototype properties + const arr = [ + { files: terms.hasOwnProperty(word) ? terms[word] : undefined, score: Scorer.term }, + { files: titleTerms.hasOwnProperty(word) ? titleTerms[word] : undefined, score: Scorer.title }, + ]; + // add support for partial matches + if (word.length > 2) { + const escapedWord = _escapeRegExp(word); + if (!terms.hasOwnProperty(word)) { + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord)) + arr.push({ files: terms[term], score: Scorer.partialTerm }); + }); + } + if (!titleTerms.hasOwnProperty(word)) { + Object.keys(titleTerms).forEach((term) => { + if (term.match(escapedWord)) + arr.push({ files: titleTerms[term], score: Scorer.partialTitle }); + }); + } + } + + // no match but word was a required one + if (arr.every((record) => record.files === undefined)) return; + + // found search word in contents + arr.forEach((record) => { + if (record.files === undefined) return; + + let recordFiles = record.files; + if (recordFiles.length === undefined) recordFiles = [recordFiles]; + files.push(...recordFiles); + + // set score for the word in each file + recordFiles.forEach((file) => { + if (!scoreMap.has(file)) scoreMap.set(file, new Map()); + const fileScores = scoreMap.get(file); + fileScores.set(word, record.score); + }); + }); + + // create the mapping + files.forEach((file) => { + if (!fileMap.has(file)) fileMap.set(file, [word]); + else if (fileMap.get(file).indexOf(word) === -1) fileMap.get(file).push(word); + }); + }); + + // now check if the files don't contain excluded terms + const results = []; + for (const [file, wordList] of fileMap) { + // check if all requirements are matched + + // as search terms with length < 3 are discarded + const filteredTermCount = [...searchTerms].filter( + (term) => term.length > 2 + ).length; + if ( + wordList.length !== searchTerms.size && + wordList.length !== filteredTermCount + ) + continue; + + // ensure that none of the excluded terms is in the search 
result + if ( + [...excludedTerms].some( + (term) => + terms[term] === file || + titleTerms[term] === file || + (terms[term] || []).includes(file) || + (titleTerms[term] || []).includes(file) + ) + ) + break; + + // select one (max) score for the file. + const score = Math.max(...wordList.map((w) => scoreMap.get(file).get(w))); + // add result to the result list + results.push([ + docNames[file], + titles[file], + "", + null, + score, + filenames[file], + SearchResultKind.text, + ]); + } + return results; + }, + + /** + * helper function to return a node containing the + * search summary for a given text. keywords is a list + * of stemmed words. + */ + makeSearchSummary: (htmlText, keywords, anchor) => { + const text = Search.htmlToText(htmlText, anchor); + if (text === "") return null; + + const textLower = text.toLowerCase(); + const actualStartPosition = [...keywords] + .map((k) => textLower.indexOf(k.toLowerCase())) + .filter((i) => i > -1) + .slice(-1)[0]; + const startWithContext = Math.max(actualStartPosition - 120, 0); + + const top = startWithContext === 0 ? "" : "..."; + const tail = startWithContext + 240 < text.length ? "..." : ""; + + let summary = document.createElement("p"); + summary.classList.add("context"); + summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; + + return summary; + }, +}; + +_ready(Search.init); diff --git a/docs/build/html/_static/skeleton.css b/docs/build/html/_static/skeleton.css new file mode 100644 index 00000000..467c878c --- /dev/null +++ b/docs/build/html/_static/skeleton.css @@ -0,0 +1,296 @@ +/* Some sane resets. */ +html { + height: 100%; +} + +body { + margin: 0; + min-height: 100%; +} + +/* All the flexbox magic! 
*/ +body, +.sb-announcement, +.sb-content, +.sb-main, +.sb-container, +.sb-container__inner, +.sb-article-container, +.sb-footer-content, +.sb-header, +.sb-header-secondary, +.sb-footer { + display: flex; +} + +/* These order things vertically */ +body, +.sb-main, +.sb-article-container { + flex-direction: column; +} + +/* Put elements in the center */ +.sb-header, +.sb-header-secondary, +.sb-container, +.sb-content, +.sb-footer, +.sb-footer-content { + justify-content: center; +} +/* Put elements at the ends */ +.sb-article-container { + justify-content: space-between; +} + +/* These elements grow. */ +.sb-main, +.sb-content, +.sb-container, +article { + flex-grow: 1; +} + +/* Because padding making this wider is not fun */ +article { + box-sizing: border-box; +} + +/* The announcements element should never be wider than the page. */ +.sb-announcement { + max-width: 100%; +} + +.sb-sidebar-primary, +.sb-sidebar-secondary { + flex-shrink: 0; + width: 17rem; +} + +.sb-announcement__inner { + justify-content: center; + + box-sizing: border-box; + height: 3rem; + + overflow-x: auto; + white-space: nowrap; +} + +/* Sidebars, with checkbox-based toggle */ +.sb-sidebar-primary, +.sb-sidebar-secondary { + position: fixed; + height: 100%; + top: 0; +} + +.sb-sidebar-primary { + left: -17rem; + transition: left 250ms ease-in-out; +} +.sb-sidebar-secondary { + right: -17rem; + transition: right 250ms ease-in-out; +} + +.sb-sidebar-toggle { + display: none; +} +.sb-sidebar-overlay { + position: fixed; + top: 0; + width: 0; + height: 0; + + transition: width 0ms ease 250ms, height 0ms ease 250ms, opacity 250ms ease; + + opacity: 0; + background-color: rgba(0, 0, 0, 0.54); +} + +#sb-sidebar-toggle--primary:checked + ~ .sb-sidebar-overlay[for="sb-sidebar-toggle--primary"], +#sb-sidebar-toggle--secondary:checked + ~ .sb-sidebar-overlay[for="sb-sidebar-toggle--secondary"] { + width: 100%; + height: 100%; + opacity: 1; + transition: width 0ms ease, height 0ms ease, opacity 250ms 
ease; +} + +#sb-sidebar-toggle--primary:checked ~ .sb-container .sb-sidebar-primary { + left: 0; +} +#sb-sidebar-toggle--secondary:checked ~ .sb-container .sb-sidebar-secondary { + right: 0; +} + +/* Full-width mode */ +.drop-secondary-sidebar-for-full-width-content + .hide-when-secondary-sidebar-shown { + display: none !important; +} +.drop-secondary-sidebar-for-full-width-content .sb-sidebar-secondary { + display: none !important; +} + +/* Mobile views */ +.sb-page-width { + width: 100%; +} + +.sb-article-container, +.sb-footer-content__inner, +.drop-secondary-sidebar-for-full-width-content .sb-article, +.drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 100vw; +} + +.sb-article, +.match-content-width { + padding: 0 1rem; + box-sizing: border-box; +} + +@media (min-width: 32rem) { + .sb-article, + .match-content-width { + padding: 0 2rem; + } +} + +/* Tablet views */ +@media (min-width: 42rem) { + .sb-article-container { + width: auto; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 42rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} +@media (min-width: 46rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 46rem; + } + .sb-article, + .match-content-width { + width: 46rem; + } +} +@media (min-width: 50rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 50rem; + } + .sb-article, + .match-content-width { + width: 50rem; + } +} + +/* Tablet views */ +@media (min-width: 59rem) { + .sb-sidebar-secondary { + position: static; + } + .hide-when-secondary-sidebar-shown { + display: none !important; + } + .sb-footer-content__inner, + 
.drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 59rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} +@media (min-width: 63rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 63rem; + } + .sb-article, + .match-content-width { + width: 46rem; + } +} +@media (min-width: 67rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } + .sb-article, + .match-content-width { + width: 50rem; + } +} + +/* Desktop views */ +@media (min-width: 76rem) { + .sb-sidebar-primary { + position: static; + } + .hide-when-primary-sidebar-shown { + display: none !important; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 59rem; + } + .sb-article, + .match-content-width { + width: 42rem; + } +} + +/* Full desktop views */ +@media (min-width: 80rem) { + .sb-article, + .match-content-width { + width: 46rem; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 63rem; + } +} + +@media (min-width: 84rem) { + .sb-article, + .match-content-width { + width: 50rem; + } + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } +} + +@media (min-width: 88rem) { + .sb-footer-content__inner, + .drop-secondary-sidebar-for-full-width-content .sb-article, + .drop-secondary-sidebar-for-full-width-content .match-content-width { + width: 67rem; + } + .sb-page-width { + 
width: 88rem; + } +} diff --git a/docs/build/html/_static/sphinx_highlight.js b/docs/build/html/_static/sphinx_highlight.js new file mode 100644 index 00000000..8a96c69a --- /dev/null +++ b/docs/build/html/_static/sphinx_highlight.js @@ -0,0 +1,154 @@ +/* Highlighting utilities for Sphinx HTML documentation. */ +"use strict"; + +const SPHINX_HIGHLIGHT_ENABLED = true + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. + */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + const rest = document.createTextNode(val.substr(pos + text.length)); + parent.insertBefore( + span, + parent.insertBefore( + rest, + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + /* There may be more occurrences of search term in this node. So call this + * function recursively on the remaining fragment. 
+ */ + _highlight(rest, addItems, text, className); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const SphinxHighlight = { + + /** + * highlight the search words provided in localstorage in the text + */ + highlightSearchWords: () => { + if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight + + // get and clear terms from localstorage + const url = new URL(window.location); + const highlight = + localStorage.getItem("sphinx_highlight_terms") + || url.searchParams.get("highlight") + || ""; + localStorage.removeItem("sphinx_highlight_terms") + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + + // get individual terms from highlight string + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? 
divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + localStorage.removeItem("sphinx_highlight_terms") + }, + + initEscapeListener: () => { + // only install a listener if it is really needed + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; + if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { + SphinxHighlight.hideSearchWords(); + event.preventDefault(); + } + }); + }, +}; + +_ready(() => { + /* Do not call highlightSearchWords() when we are on the search page. + * It will highlight words from the *previous* search query. 
+ */ + if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords(); + SphinxHighlight.initEscapeListener(); +}); diff --git a/docs/build/html/_static/styles/furo-extensions.css b/docs/build/html/_static/styles/furo-extensions.css new file mode 100644 index 00000000..2d74267f --- /dev/null +++ b/docs/build/html/_static/styles/furo-extensions.css @@ -0,0 +1,2 @@ +#furo-sidebar-ad-placement{padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)}#furo-sidebar-ad-placement .ethical-sidebar{background:var(--color-background-secondary);border:none;box-shadow:none}#furo-sidebar-ad-placement .ethical-sidebar:hover{background:var(--color-background-hover)}#furo-sidebar-ad-placement .ethical-sidebar a{color:var(--color-foreground-primary)}#furo-sidebar-ad-placement .ethical-callout a{color:var(--color-foreground-secondary)!important}#furo-readthedocs-versions{background:transparent;display:block;position:static;width:100%}#furo-readthedocs-versions .rst-versions{background:#1a1c1e}#furo-readthedocs-versions .rst-current-version{background:var(--color-sidebar-item-background);cursor:unset}#furo-readthedocs-versions .rst-current-version:hover{background:var(--color-sidebar-item-background)}#furo-readthedocs-versions .rst-current-version .fa-book{color:var(--color-foreground-primary)}#furo-readthedocs-versions>.rst-other-versions{padding:0}#furo-readthedocs-versions>.rst-other-versions small{opacity:1}#furo-readthedocs-versions .injected .rst-versions{position:unset}#furo-readthedocs-versions:focus-within,#furo-readthedocs-versions:hover{box-shadow:0 0 0 1px var(--color-sidebar-background-border)}#furo-readthedocs-versions:focus-within .rst-current-version,#furo-readthedocs-versions:hover .rst-current-version{background:#1a1c1e;font-size:inherit;height:auto;line-height:inherit;padding:12px;text-align:right}#furo-readthedocs-versions:focus-within .rst-current-version .fa-book,#furo-readthedocs-versions:hover .rst-current-version 
.fa-book{color:#fff;float:left}#furo-readthedocs-versions:focus-within .fa-caret-down,#furo-readthedocs-versions:hover .fa-caret-down{display:none}#furo-readthedocs-versions:focus-within .injected,#furo-readthedocs-versions:focus-within .rst-current-version,#furo-readthedocs-versions:focus-within .rst-other-versions,#furo-readthedocs-versions:hover .injected,#furo-readthedocs-versions:hover .rst-current-version,#furo-readthedocs-versions:hover .rst-other-versions{display:block}#furo-readthedocs-versions:focus-within>.rst-current-version,#furo-readthedocs-versions:hover>.rst-current-version{display:none}.highlight:hover button.copybtn{color:var(--color-code-foreground)}.highlight button.copybtn{align-items:center;background-color:var(--color-code-background);border:none;color:var(--color-background-item);cursor:pointer;height:1.25em;right:.5rem;top:.625rem;transition:color .3s,opacity .3s;width:1.25em}.highlight button.copybtn:hover{background-color:var(--color-code-background);color:var(--color-brand-content)}.highlight button.copybtn:after{background-color:transparent;color:var(--color-code-foreground);display:none}.highlight button.copybtn.success{color:#22863a;transition:color 0s}.highlight button.copybtn.success:after{display:block}.highlight button.copybtn 
svg{padding:0}body{--sd-color-primary:var(--color-brand-primary);--sd-color-primary-highlight:var(--color-brand-content);--sd-color-primary-text:var(--color-background-primary);--sd-color-shadow:rgba(0,0,0,.05);--sd-color-card-border:var(--color-card-border);--sd-color-card-border-hover:var(--color-brand-content);--sd-color-card-background:var(--color-card-background);--sd-color-card-text:var(--color-foreground-primary);--sd-color-card-header:var(--color-card-marginals-background);--sd-color-card-footer:var(--color-card-marginals-background);--sd-color-tabs-label-active:var(--color-brand-content);--sd-color-tabs-label-hover:var(--color-foreground-muted);--sd-color-tabs-label-inactive:var(--color-foreground-muted);--sd-color-tabs-underline-active:var(--color-brand-content);--sd-color-tabs-underline-hover:var(--color-foreground-border);--sd-color-tabs-underline-inactive:var(--color-background-border);--sd-color-tabs-overline:var(--color-background-border);--sd-color-tabs-underline:var(--color-background-border)}.sd-tab-content{box-shadow:0 -2px var(--sd-color-tabs-overline),0 1px var(--sd-color-tabs-underline)}.sd-card{box-shadow:0 .1rem .25rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)}.sd-shadow-sm{box-shadow:0 .1rem .25rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-shadow-md{box-shadow:0 .3rem .75rem var(--sd-color-shadow),0 0 .0625rem rgba(0,0,0,.1)!important}.sd-shadow-lg{box-shadow:0 .6rem 1.5rem var(--sd-color-shadow),0 0 .0625rem 
rgba(0,0,0,.1)!important}.sd-card-hover:hover{transform:none}.sd-cards-carousel{gap:.25rem;padding:.25rem}body{--tabs--label-text:var(--color-foreground-muted);--tabs--label-text--hover:var(--color-foreground-muted);--tabs--label-text--active:var(--color-brand-content);--tabs--label-text--active--hover:var(--color-brand-content);--tabs--label-background:transparent;--tabs--label-background--hover:transparent;--tabs--label-background--active:transparent;--tabs--label-background--active--hover:transparent;--tabs--padding-x:0.25em;--tabs--margin-x:1em;--tabs--border:var(--color-background-border);--tabs--label-border:transparent;--tabs--label-border--hover:var(--color-foreground-muted);--tabs--label-border--active:var(--color-brand-content);--tabs--label-border--active--hover:var(--color-brand-content)}[role=main] .container{max-width:none;padding-left:0;padding-right:0}.shadow.docutils{border:none;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1)!important}.sphinx-bs .card{background-color:var(--color-background-secondary);color:var(--color-foreground)} +/*# sourceMappingURL=furo-extensions.css.map*/ \ No newline at end of file diff --git a/docs/build/html/_static/styles/furo-extensions.css.map b/docs/build/html/_static/styles/furo-extensions.css.map new file mode 100644 index 00000000..68fb7fd0 --- /dev/null +++ b/docs/build/html/_static/styles/furo-extensions.css.map @@ -0,0 +1 @@ 
+{"version":3,"file":"styles/furo-extensions.css","mappings":"AAGA,2BACE,oFACA,4CAKE,6CAHA,YACA,eAEA,CACA,kDACE,yCAEF,8CACE,sCAEJ,8CACE,kDAEJ,2BAGE,uBACA,cAHA,gBACA,UAEA,CAGA,yCACE,mBAEF,gDAEE,gDADA,YACA,CACA,sDACE,gDACF,yDACE,sCAEJ,+CACE,UACA,qDACE,UAGF,mDACE,eAEJ,yEAEE,4DAEA,mHASE,mBAPA,kBAEA,YADA,oBAGA,aADA,gBAIA,CAEA,qIAEE,WADA,UACA,CAEJ,uGACE,aAEF,iUAGE,cAEF,mHACE,aC1EJ,gCACE,mCAEF,0BAEE,mBAUA,8CACA,YAFA,mCAKA,eAZA,cAIA,YADA,YAYA,iCAdA,YAcA,CAEA,gCAEE,8CADA,gCACA,CAEF,gCAGE,6BADA,mCADA,YAEA,CAEF,kCAEE,cADA,mBACA,CACA,wCACE,cAEJ,8BACE,UCzCN,KAEE,6CAA8C,CAC9C,uDAAwD,CACxD,uDAAwD,CAGxD,iCAAsC,CAGtC,+CAAgD,CAChD,uDAAwD,CACxD,uDAAwD,CACxD,oDAAqD,CACrD,6DAA8D,CAC9D,6DAA8D,CAG9D,uDAAwD,CACxD,yDAA0D,CAC1D,4DAA6D,CAC7D,2DAA4D,CAC5D,8DAA+D,CAC/D,iEAAkE,CAClE,uDAAwD,CACxD,wDAAyD,CAG3D,gBACE,qFAGF,SACE,6EAEF,cACE,uFAEF,cACE,uFAEF,cACE,uFAGF,qBACE,eAEF,mBACE,WACA,eChDF,KACE,gDAAiD,CACjD,uDAAwD,CACxD,qDAAsD,CACtD,4DAA6D,CAC7D,oCAAqC,CACrC,2CAA4C,CAC5C,4CAA6C,CAC7C,mDAAoD,CACpD,wBAAyB,CACzB,oBAAqB,CACrB,6CAA8C,CAC9C,gCAAiC,CACjC,yDAA0D,CAC1D,uDAAwD,CACxD,8DAA+D,CCbjE,uBACE,eACA,eACA,gBAGF,iBACE,YACA,+EAGF,iBACE,mDACA","sources":["webpack:///./src/furo/assets/styles/extensions/_readthedocs.sass","webpack:///./src/furo/assets/styles/extensions/_copybutton.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-design.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-inline-tabs.sass","webpack:///./src/furo/assets/styles/extensions/_sphinx-panels.sass"],"sourcesContent":["// This file contains the styles used for tweaking how ReadTheDoc's embedded\n// contents would show up inside the theme.\n\n#furo-sidebar-ad-placement\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n .ethical-sidebar\n // Remove the border and box-shadow.\n border: none\n box-shadow: none\n // Manage the background colors.\n background: var(--color-background-secondary)\n &:hover\n background: var(--color-background-hover)\n // Ensure the text is 
legible.\n a\n color: var(--color-foreground-primary)\n\n .ethical-callout a\n color: var(--color-foreground-secondary) !important\n\n#furo-readthedocs-versions\n position: static\n width: 100%\n background: transparent\n display: block\n\n // Make the background color fit with the theme's aesthetic.\n .rst-versions\n background: rgb(26, 28, 30)\n\n .rst-current-version\n cursor: unset\n background: var(--color-sidebar-item-background)\n &:hover\n background: var(--color-sidebar-item-background)\n .fa-book\n color: var(--color-foreground-primary)\n\n > .rst-other-versions\n padding: 0\n small\n opacity: 1\n\n .injected\n .rst-versions\n position: unset\n\n &:hover,\n &:focus-within\n box-shadow: 0 0 0 1px var(--color-sidebar-background-border)\n\n .rst-current-version\n // Undo the tweaks done in RTD's CSS\n font-size: inherit\n line-height: inherit\n height: auto\n text-align: right\n padding: 12px\n\n // Match the rest of the body\n background: #1a1c1e\n\n .fa-book\n float: left\n color: white\n\n .fa-caret-down\n display: none\n\n .rst-current-version,\n .rst-other-versions,\n .injected\n display: block\n\n > .rst-current-version\n display: none\n",".highlight\n &:hover button.copybtn\n color: var(--color-code-foreground)\n\n button.copybtn\n // Align things correctly\n align-items: center\n\n height: 1.25em\n width: 1.25em\n\n top: 0.625rem // $code-spacing-vertical\n right: 0.5rem\n\n // Make it look better\n color: var(--color-background-item)\n background-color: var(--color-code-background)\n border: none\n\n // Change to cursor to make it obvious that you can click on it\n cursor: pointer\n\n // Transition smoothly, for aesthetics\n transition: color 300ms, opacity 300ms\n\n &:hover\n color: var(--color-brand-content)\n background-color: var(--color-code-background)\n\n &::after\n display: none\n color: var(--color-code-foreground)\n background-color: transparent\n\n &.success\n transition: color 0ms\n color: #22863a\n &::after\n display: block\n\n svg\n 
padding: 0\n","body\n // Colors\n --sd-color-primary: var(--color-brand-primary)\n --sd-color-primary-highlight: var(--color-brand-content)\n --sd-color-primary-text: var(--color-background-primary)\n\n // Shadows\n --sd-color-shadow: rgba(0, 0, 0, 0.05)\n\n // Cards\n --sd-color-card-border: var(--color-card-border)\n --sd-color-card-border-hover: var(--color-brand-content)\n --sd-color-card-background: var(--color-card-background)\n --sd-color-card-text: var(--color-foreground-primary)\n --sd-color-card-header: var(--color-card-marginals-background)\n --sd-color-card-footer: var(--color-card-marginals-background)\n\n // Tabs\n --sd-color-tabs-label-active: var(--color-brand-content)\n --sd-color-tabs-label-hover: var(--color-foreground-muted)\n --sd-color-tabs-label-inactive: var(--color-foreground-muted)\n --sd-color-tabs-underline-active: var(--color-brand-content)\n --sd-color-tabs-underline-hover: var(--color-foreground-border)\n --sd-color-tabs-underline-inactive: var(--color-background-border)\n --sd-color-tabs-overline: var(--color-background-border)\n --sd-color-tabs-underline: var(--color-background-border)\n\n// Tabs\n.sd-tab-content\n box-shadow: 0 -2px var(--sd-color-tabs-overline), 0 1px var(--sd-color-tabs-underline)\n\n// Shadows\n.sd-card // Have a shadow by default\n box-shadow: 0 0.1rem 0.25rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n.sd-shadow-sm\n box-shadow: 0 0.1rem 0.25rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n.sd-shadow-md\n box-shadow: 0 0.3rem 0.75rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n.sd-shadow-lg\n box-shadow: 0 0.6rem 1.5rem var(--sd-color-shadow), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n// Cards\n.sd-card-hover:hover // Don't change scale on hover\n transform: none\n\n.sd-cards-carousel // Have a bit of gap in the carousel by default\n gap: 0.25rem\n padding: 0.25rem\n","// This file contains styles to tweak sphinx-inline-tabs to work 
well with Furo.\n\nbody\n --tabs--label-text: var(--color-foreground-muted)\n --tabs--label-text--hover: var(--color-foreground-muted)\n --tabs--label-text--active: var(--color-brand-content)\n --tabs--label-text--active--hover: var(--color-brand-content)\n --tabs--label-background: transparent\n --tabs--label-background--hover: transparent\n --tabs--label-background--active: transparent\n --tabs--label-background--active--hover: transparent\n --tabs--padding-x: 0.25em\n --tabs--margin-x: 1em\n --tabs--border: var(--color-background-border)\n --tabs--label-border: transparent\n --tabs--label-border--hover: var(--color-foreground-muted)\n --tabs--label-border--active: var(--color-brand-content)\n --tabs--label-border--active--hover: var(--color-brand-content)\n","// This file contains styles to tweak sphinx-panels to work well with Furo.\n\n// sphinx-panels includes Bootstrap 4, which uses .container which can conflict\n// with docutils' `.. container::` directive.\n[role=\"main\"] .container\n max-width: initial\n padding-left: initial\n padding-right: initial\n\n// Make the panels look nicer!\n.shadow.docutils\n border: none\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1) !important\n\n// Make panel colors respond to dark mode\n.sphinx-bs .card\n background-color: var(--color-background-secondary)\n color: var(--color-foreground)\n"],"names":[],"sourceRoot":""} \ No newline at end of file diff --git a/docs/build/html/_static/styles/furo.css b/docs/build/html/_static/styles/furo.css new file mode 100644 index 00000000..592d5bff --- /dev/null +++ b/docs/build/html/_static/styles/furo.css @@ -0,0 +1,2 @@ +/*! 
normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */html{line-height:1.15;-webkit-text-size-adjust:100%}body{margin:0}main{display:block}h1{font-size:2em;margin:.67em 0}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em .625em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details{display:block}summary{display:list-item}[hidden],template{display:none}@media print{.content-icon-container,.headerlink,.mobile-header,.related-pages{display:none!important}.highlight{border:.1pt solid 
var(--color-foreground-border)}a,blockquote,dl,ol,p,pre,table,ul{page-break-inside:avoid}caption,figure,h1,h2,h3,h4,h5,h6,img{page-break-after:avoid;page-break-inside:avoid}dl,ol,ul{page-break-before:avoid}}.visually-hidden{height:1px!important;margin:-1px!important;overflow:hidden!important;padding:0!important;position:absolute!important;width:1px!important;clip:rect(0,0,0,0)!important;background:var(--color-background-primary);border:0!important;color:var(--color-foreground-primary);white-space:nowrap!important}:-moz-focusring{outline:auto}body{--font-stack:-apple-system,BlinkMacSystemFont,Segoe UI,Helvetica,Arial,sans-serif,Apple Color Emoji,Segoe UI Emoji;--font-stack--monospace:"SFMono-Regular",Menlo,Consolas,Monaco,Liberation Mono,Lucida Console,monospace;--font-stack--headings:var(--font-stack);--font-size--normal:100%;--font-size--small:87.5%;--font-size--small--2:81.25%;--font-size--small--3:75%;--font-size--small--4:62.5%;--sidebar-caption-font-size:var(--font-size--small--2);--sidebar-item-font-size:var(--font-size--small);--sidebar-search-input-font-size:var(--font-size--small);--toc-font-size:var(--font-size--small--3);--toc-font-size--mobile:var(--font-size--normal);--toc-title-font-size:var(--font-size--small--4);--admonition-font-size:0.8125rem;--admonition-title-font-size:0.8125rem;--code-font-size:var(--font-size--small--2);--api-font-size:var(--font-size--small);--header-height:calc(var(--sidebar-item-line-height) + var(--sidebar-item-spacing-vertical)*4);--header-padding:0.5rem;--sidebar-tree-space-above:1.5rem;--sidebar-caption-space-above:1rem;--sidebar-item-line-height:1rem;--sidebar-item-spacing-vertical:0.5rem;--sidebar-item-spacing-horizontal:1rem;--sidebar-item-height:calc(var(--sidebar-item-line-height) + 
var(--sidebar-item-spacing-vertical)*2);--sidebar-expander-width:var(--sidebar-item-height);--sidebar-search-space-above:0.5rem;--sidebar-search-input-spacing-vertical:0.5rem;--sidebar-search-input-spacing-horizontal:0.5rem;--sidebar-search-input-height:1rem;--sidebar-search-icon-size:var(--sidebar-search-input-height);--toc-title-padding:0.25rem 0;--toc-spacing-vertical:1.5rem;--toc-spacing-horizontal:1.5rem;--toc-item-spacing-vertical:0.4rem;--toc-item-spacing-horizontal:1rem;--icon-search:url('data:image/svg+xml;charset=utf-8,');--icon-pencil:url('data:image/svg+xml;charset=utf-8,');--icon-abstract:url('data:image/svg+xml;charset=utf-8,');--icon-info:url('data:image/svg+xml;charset=utf-8,');--icon-flame:url('data:image/svg+xml;charset=utf-8,');--icon-question:url('data:image/svg+xml;charset=utf-8,');--icon-warning:url('data:image/svg+xml;charset=utf-8,');--icon-failure:url('data:image/svg+xml;charset=utf-8,');--icon-spark:url('data:image/svg+xml;charset=utf-8,');--color-admonition-title--caution:#ff9100;--color-admonition-title-background--caution:rgba(255,145,0,.2);--color-admonition-title--warning:#ff9100;--color-admonition-title-background--warning:rgba(255,145,0,.2);--color-admonition-title--danger:#ff5252;--color-admonition-title-background--danger:rgba(255,82,82,.2);--color-admonition-title--attention:#ff5252;--color-admonition-title-background--attention:rgba(255,82,82,.2);--color-admonition-title--error:#ff5252;--color-admonition-title-background--error:rgba(255,82,82,.2);--color-admonition-title--hint:#00c852;--color-admonition-title-background--hint:rgba(0,200,82,.2);--color-admonition-title--tip:#00c852;--color-admonition-title-background--tip:rgba(0,200,82,.2);--color-admonition-title--important:#00bfa5;--color-admonition-title-background--important:rgba(0,191,165,.2);--color-admonition-title--note:#00b0ff;--color-admonition-title-background--note:rgba(0,176,255,.2);--color-admonition-title--seealso:#448aff;--color-admonition-title-background--seealso
:rgba(68,138,255,.2);--color-admonition-title--admonition-todo:grey;--color-admonition-title-background--admonition-todo:hsla(0,0%,50%,.2);--color-admonition-title:#651fff;--color-admonition-title-background:rgba(101,31,255,.2);--icon-admonition-default:var(--icon-abstract);--color-topic-title:#14b8a6;--color-topic-title-background:rgba(20,184,166,.2);--icon-topic-default:var(--icon-pencil);--color-problematic:#b30000;--color-foreground-primary:#000;--color-foreground-secondary:#5a5c63;--color-foreground-muted:#6b6f76;--color-foreground-border:#878787;--color-background-primary:#fff;--color-background-secondary:#f8f9fb;--color-background-hover:#efeff4;--color-background-hover--transparent:#efeff400;--color-background-border:#eeebee;--color-background-item:#ccc;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#0a4bff;--color-brand-content:#2757dd;--color-brand-visited:#872ee0;--color-api-background:var(--color-background-hover--transparent);--color-api-background-hover:var(--color-background-hover);--color-api-overall:var(--color-foreground-secondary);--color-api-name:var(--color-problematic);--color-api-pre-name:var(--color-problematic);--color-api-paren:var(--color-foreground-secondary);--color-api-keyword:var(--color-foreground-primary);--color-api-added:#21632c;--color-api-added-border:#38a84d;--color-api-changed:#046172;--color-api-changed-border:#06a1bc;--color-api-deprecated:#605706;--color-api-deprecated-border:#f0d90f;--color-api-removed:#b30000;--color-api-removed-border:#ff5c5c;--color-highlight-on-target:#ffc;--color-inline-code-background:var(--color-background-secondary);--color-highlighted-background:#def;--color-highlighted-text:var(--color-foreground-primary);--color-guilabel-background:#ddeeff80;--color-guilabel-border:#bedaf580;--color-guilabel-text:var(--color-foreground-primary);--color-admonition-background:transparent;--color-table-header-background:var(--color-background-secondary);--color-table
-border:var(--color-background-border);--color-card-border:var(--color-background-secondary);--color-card-background:transparent;--color-card-marginals-background:var(--color-background-secondary);--color-header-background:var(--color-background-primary);--color-header-border:var(--color-background-border);--color-header-text:var(--color-foreground-primary);--color-sidebar-background:var(--color-background-secondary);--color-sidebar-background-border:var(--color-background-border);--color-sidebar-brand-text:var(--color-foreground-primary);--color-sidebar-caption-text:var(--color-foreground-muted);--color-sidebar-link-text:var(--color-foreground-secondary);--color-sidebar-link-text--top-level:var(--color-brand-primary);--color-sidebar-item-background:var(--color-sidebar-background);--color-sidebar-item-background--current:var( --color-sidebar-item-background );--color-sidebar-item-background--hover:linear-gradient(90deg,var(--color-background-hover--transparent) 0%,var(--color-background-hover) var(--sidebar-item-spacing-horizontal),var(--color-background-hover) 100%);--color-sidebar-item-expander-background:transparent;--color-sidebar-item-expander-background--hover:var( --color-background-hover 
);--color-sidebar-search-text:var(--color-foreground-primary);--color-sidebar-search-background:var(--color-background-secondary);--color-sidebar-search-background--focus:var(--color-background-primary);--color-sidebar-search-border:var(--color-background-border);--color-sidebar-search-icon:var(--color-foreground-muted);--color-toc-background:var(--color-background-primary);--color-toc-title-text:var(--color-foreground-muted);--color-toc-item-text:var(--color-foreground-secondary);--color-toc-item-text--hover:var(--color-foreground-primary);--color-toc-item-text--active:var(--color-brand-primary);--color-content-foreground:var(--color-foreground-primary);--color-content-background:transparent;--color-link:var(--color-brand-content);--color-link-underline:var(--color-background-border);--color-link--hover:var(--color-brand-content);--color-link-underline--hover:var(--color-foreground-border);--color-link--visited:var(--color-brand-visited);--color-link-underline--visited:var(--color-background-border);--color-link--visited--hover:var(--color-brand-visited);--color-link-underline--visited--hover:var(--color-foreground-border)}.only-light{display:block!important}html body .only-dark{display:none!important}@media not 
print{body[data-theme=dark]{--color-problematic:#ee5151;--color-foreground-primary:#cfd0d0;--color-foreground-secondary:#9ca0a5;--color-foreground-muted:#81868d;--color-foreground-border:#666;--color-background-primary:#131416;--color-background-secondary:#1a1c1e;--color-background-hover:#1e2124;--color-background-hover--transparent:#1e212400;--color-background-border:#303335;--color-background-item:#444;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#3d94ff;--color-brand-content:#5ca5ff;--color-brand-visited:#b27aeb;--color-highlighted-background:#083563;--color-guilabel-background:#08356380;--color-guilabel-border:#13395f80;--color-api-keyword:var(--color-foreground-secondary);--color-highlight-on-target:#330;--color-api-added:#3db854;--color-api-added-border:#267334;--color-api-changed:#09b0ce;--color-api-changed-border:#056d80;--color-api-deprecated:#b1a10b;--color-api-deprecated-border:#6e6407;--color-api-removed:#ff7575;--color-api-removed-border:#b03b3b;--color-admonition-background:#18181a;--color-card-border:var(--color-background-secondary);--color-card-background:#18181a;--color-card-marginals-background:var(--color-background-hover)}html body[data-theme=dark] .only-light{display:none!important}body[data-theme=dark] 
.only-dark{display:block!important}@media(prefers-color-scheme:dark){body:not([data-theme=light]){--color-problematic:#ee5151;--color-foreground-primary:#cfd0d0;--color-foreground-secondary:#9ca0a5;--color-foreground-muted:#81868d;--color-foreground-border:#666;--color-background-primary:#131416;--color-background-secondary:#1a1c1e;--color-background-hover:#1e2124;--color-background-hover--transparent:#1e212400;--color-background-border:#303335;--color-background-item:#444;--color-announcement-background:#000000dd;--color-announcement-text:#eeebee;--color-brand-primary:#3d94ff;--color-brand-content:#5ca5ff;--color-brand-visited:#b27aeb;--color-highlighted-background:#083563;--color-guilabel-background:#08356380;--color-guilabel-border:#13395f80;--color-api-keyword:var(--color-foreground-secondary);--color-highlight-on-target:#330;--color-api-added:#3db854;--color-api-added-border:#267334;--color-api-changed:#09b0ce;--color-api-changed-border:#056d80;--color-api-deprecated:#b1a10b;--color-api-deprecated-border:#6e6407;--color-api-removed:#ff7575;--color-api-removed-border:#b03b3b;--color-admonition-background:#18181a;--color-card-border:var(--color-background-secondary);--color-card-background:#18181a;--color-card-marginals-background:var(--color-background-hover)}html body:not([data-theme=light]) .only-light{display:none!important}body:not([data-theme=light]) .only-dark{display:block!important}}}body[data-theme=auto] .theme-toggle svg.theme-icon-when-auto-light{display:block}@media(prefers-color-scheme:dark){body[data-theme=auto] .theme-toggle svg.theme-icon-when-auto-dark{display:block}body[data-theme=auto] .theme-toggle svg.theme-icon-when-auto-light{display:none}}body[data-theme=dark] .theme-toggle svg.theme-icon-when-dark,body[data-theme=light] .theme-toggle 
svg.theme-icon-when-light{display:block}body{font-family:var(--font-stack)}code,kbd,pre,samp{font-family:var(--font-stack--monospace)}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}article{line-height:1.5}h1,h2,h3,h4,h5,h6{border-radius:.5rem;font-family:var(--font-stack--headings);font-weight:700;line-height:1.25;margin:.5rem -.5rem;padding-left:.5rem;padding-right:.5rem}h1+p,h2+p,h3+p,h4+p,h5+p,h6+p{margin-top:0}h1{font-size:2.5em;margin-bottom:1rem}h1,h2{margin-top:1.75rem}h2{font-size:2em}h3{font-size:1.5em}h4{font-size:1.25em}h5{font-size:1.125em}h6{font-size:1em}small{font-size:80%;opacity:75%}p{margin-bottom:.75rem;margin-top:.5rem}hr.docutils{background-color:var(--color-background-border);border:0;height:1px;margin:2rem 0;padding:0}.centered{text-align:center}a{color:var(--color-link);text-decoration:underline;text-decoration-color:var(--color-link-underline)}a:visited{color:var(--color-link--visited);text-decoration-color:var(--color-link-underline--visited)}a:visited:hover{color:var(--color-link--visited--hover);text-decoration-color:var(--color-link-underline--visited--hover)}a:hover{color:var(--color-link--hover);text-decoration-color:var(--color-link-underline--hover)}a.muted-link{color:inherit}a.muted-link:hover{color:var(--color-link--hover);text-decoration-color:var(--color-link-underline--hover)}a.muted-link:hover:visited{color:var(--color-link--visited--hover);text-decoration-color:var(--color-link-underline--visited--hover)}html{overflow-x:hidden;overflow-y:scroll;scroll-behavior:smooth}.sidebar-scroll,.toc-scroll,article[role=main] *{scrollbar-color:var(--color-foreground-border) transparent;scrollbar-width:thin}body,html{height:100%}.skip-to-content,body,html{background:var(--color-background-primary);color:var(--color-foreground-primary)}.skip-to-content{border-radius:1rem;left:.25rem;padding:1rem;position:fixed;top:.25rem;transform:translateY(-200%);transition:transform .3s 
ease-in-out;z-index:40}.skip-to-content:focus-within{transform:translateY(0)}article{background:var(--color-content-background);color:var(--color-content-foreground);overflow-wrap:break-word}.page{display:flex;min-height:100%}.mobile-header{background-color:var(--color-header-background);border-bottom:1px solid var(--color-header-border);color:var(--color-header-text);display:none;height:var(--header-height);width:100%;z-index:10}.mobile-header.scrolled{border-bottom:none;box-shadow:0 0 .2rem rgba(0,0,0,.1),0 .2rem .4rem rgba(0,0,0,.2)}.mobile-header .header-center a{color:var(--color-header-text);text-decoration:none}.main{display:flex;flex:1}.sidebar-drawer{background:var(--color-sidebar-background);border-right:1px solid var(--color-sidebar-background-border);box-sizing:border-box;display:flex;justify-content:flex-end;min-width:15em;width:calc(50% - 26em)}.sidebar-container,.toc-drawer{box-sizing:border-box;width:15em}.toc-drawer{background:var(--color-toc-background);padding-right:1rem}.sidebar-sticky,.toc-sticky{display:flex;flex-direction:column;height:min(100%,100vh);height:100vh;position:sticky;top:0}.sidebar-scroll,.toc-scroll{flex-grow:1;flex-shrink:1;overflow:auto;scroll-behavior:smooth}.content{display:flex;flex-direction:column;justify-content:space-between;padding:0 3em;width:46em}.icon{display:inline-block;height:1rem;width:1rem}.icon svg{height:100%;width:100%}.announcement{align-items:center;background-color:var(--color-announcement-background);color:var(--color-announcement-text);display:flex;height:var(--header-height);overflow-x:auto}.announcement+.page{min-height:calc(100% - var(--header-height))}.announcement-content{box-sizing:border-box;min-width:100%;padding:.5rem;text-align:center;white-space:nowrap}.announcement-content a{color:var(--color-announcement-text);text-decoration-color:var(--color-announcement-text)}.announcement-content a:hover{color:var(--color-announcement-text);text-decoration-color:var(--color-link--hover)}.no-js 
.theme-toggle-container{display:none}.theme-toggle-container{display:flex}.theme-toggle{background:transparent;border:none;cursor:pointer;display:flex;padding:0}.theme-toggle svg{color:var(--color-foreground-primary);display:none;height:1.25rem;width:1.25rem}.theme-toggle-header{align-items:center;display:flex;justify-content:center}.nav-overlay-icon,.toc-overlay-icon{cursor:pointer;display:none}.nav-overlay-icon .icon,.toc-overlay-icon .icon{color:var(--color-foreground-secondary);height:1.5rem;width:1.5rem}.nav-overlay-icon,.toc-header-icon{align-items:center;justify-content:center}.toc-content-icon{height:1.5rem;width:1.5rem}.content-icon-container{display:flex;float:right;gap:.5rem;margin-bottom:1rem;margin-left:1rem;margin-top:1.5rem}.content-icon-container .edit-this-page svg,.content-icon-container .view-this-page svg{color:inherit;height:1.25rem;width:1.25rem}.sidebar-toggle{display:none;position:absolute}.sidebar-toggle[name=__toc]{left:20px}.sidebar-toggle:checked{left:40px}.overlay{background-color:rgba(0,0,0,.54);height:0;opacity:0;position:fixed;top:0;transition:width 0s,height 0s,opacity .25s ease-out;width:0}.sidebar-overlay{z-index:20}.toc-overlay{z-index:40}.sidebar-drawer{transition:left .25s ease-in-out;z-index:30}.toc-drawer{transition:right .25s ease-in-out;z-index:50}#__navigation:checked~.sidebar-overlay{height:100%;opacity:1;width:100%}#__navigation:checked~.page .sidebar-drawer{left:0;top:0}#__toc:checked~.toc-overlay{height:100%;opacity:1;width:100%}#__toc:checked~.page .toc-drawer{right:0;top:0}.back-to-top{background:var(--color-background-primary);border-radius:1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 1px 0 hsla(220,9%,46%,.502);display:none;font-size:.8125rem;left:0;margin-left:50%;padding:.5rem .75rem .5rem .5rem;position:fixed;text-decoration:none;top:1rem;transform:translateX(-50%);z-index:10}.back-to-top svg{height:1rem;width:1rem;fill:currentColor;display:inline-block}.back-to-top span{margin-left:.25rem}.show-back-to-top 
.back-to-top{align-items:center;display:flex}@media(min-width:97em){html{font-size:110%}}@media(max-width:82em){.toc-content-icon{display:flex}.toc-drawer{border-left:1px solid var(--color-background-muted);height:100vh;position:fixed;right:-15em;top:0}.toc-tree{border-left:none;font-size:var(--toc-font-size--mobile)}.sidebar-drawer{width:calc(50% - 18.5em)}}@media(max-width:67em){.content{margin-left:auto;margin-right:auto;padding:0 1em}}@media(max-width:63em){.nav-overlay-icon{display:flex}.sidebar-drawer{height:100vh;left:-15em;position:fixed;top:0;width:15em}.theme-toggle-header,.toc-header-icon{display:flex}.theme-toggle-content,.toc-content-icon{display:none}.mobile-header{align-items:center;display:flex;justify-content:space-between;position:sticky;top:0}.mobile-header .header-left,.mobile-header .header-right{display:flex;height:var(--header-height);padding:0 var(--header-padding)}.mobile-header .header-left label,.mobile-header .header-right label{height:100%;-webkit-user-select:none;-moz-user-select:none;user-select:none;width:100%}.nav-overlay-icon .icon,.theme-toggle svg{height:1.5rem;width:1.5rem}:target{scroll-margin-top:calc(var(--header-height) + 2.5rem)}.back-to-top{top:calc(var(--header-height) + .5rem)}.page{flex-direction:column;justify-content:center}}@media(max-width:48em){.content{overflow-x:auto;width:100%}}@media(max-width:46em){article[role=main] aside.sidebar{float:none;margin:1rem 0;width:100%}}.admonition,.topic{background:var(--color-admonition-background);border-radius:.2rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1);font-size:var(--admonition-font-size);margin:1rem auto;overflow:hidden;padding:0 .5rem .5rem;page-break-inside:avoid}.admonition>:nth-child(2),.topic>:nth-child(2){margin-top:0}.admonition>:last-child,.topic>:last-child{margin-bottom:0}.admonition p.admonition-title,p.topic-title{font-size:var(--admonition-title-font-size);font-weight:500;line-height:1.3;margin:0 -.5rem .5rem;padding:.4rem .5rem 
.4rem 2rem;position:relative}.admonition p.admonition-title:before,p.topic-title:before{content:"";height:1rem;left:.5rem;position:absolute;width:1rem}p.admonition-title{background-color:var(--color-admonition-title-background)}p.admonition-title:before{background-color:var(--color-admonition-title);-webkit-mask-image:var(--icon-admonition-default);mask-image:var(--icon-admonition-default);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat}p.topic-title{background-color:var(--color-topic-title-background)}p.topic-title:before{background-color:var(--color-topic-title);-webkit-mask-image:var(--icon-topic-default);mask-image:var(--icon-topic-default);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat}.admonition{border-left:.2rem solid var(--color-admonition-title)}.admonition.caution{border-left-color:var(--color-admonition-title--caution)}.admonition.caution>.admonition-title{background-color:var(--color-admonition-title-background--caution)}.admonition.caution>.admonition-title:before{background-color:var(--color-admonition-title--caution);-webkit-mask-image:var(--icon-spark);mask-image:var(--icon-spark)}.admonition.warning{border-left-color:var(--color-admonition-title--warning)}.admonition.warning>.admonition-title{background-color:var(--color-admonition-title-background--warning)}.admonition.warning>.admonition-title:before{background-color:var(--color-admonition-title--warning);-webkit-mask-image:var(--icon-warning);mask-image:var(--icon-warning)}.admonition.danger{border-left-color:var(--color-admonition-title--danger)}.admonition.danger>.admonition-title{background-color:var(--color-admonition-title-background--danger)}.admonition.danger>.admonition-title:before{background-color:var(--color-admonition-title--danger);-webkit-mask-image:var(--icon-spark);mask-image:var(--icon-spark)}.admonition.attention{border-left-color:var(--color-admonition-title--attention)}.admonition.attention>.admonition-title{background-color:var(--color-admonition-title-background--a
ttention)}.admonition.attention>.admonition-title:before{background-color:var(--color-admonition-title--attention);-webkit-mask-image:var(--icon-warning);mask-image:var(--icon-warning)}.admonition.error{border-left-color:var(--color-admonition-title--error)}.admonition.error>.admonition-title{background-color:var(--color-admonition-title-background--error)}.admonition.error>.admonition-title:before{background-color:var(--color-admonition-title--error);-webkit-mask-image:var(--icon-failure);mask-image:var(--icon-failure)}.admonition.hint{border-left-color:var(--color-admonition-title--hint)}.admonition.hint>.admonition-title{background-color:var(--color-admonition-title-background--hint)}.admonition.hint>.admonition-title:before{background-color:var(--color-admonition-title--hint);-webkit-mask-image:var(--icon-question);mask-image:var(--icon-question)}.admonition.tip{border-left-color:var(--color-admonition-title--tip)}.admonition.tip>.admonition-title{background-color:var(--color-admonition-title-background--tip)}.admonition.tip>.admonition-title:before{background-color:var(--color-admonition-title--tip);-webkit-mask-image:var(--icon-info);mask-image:var(--icon-info)}.admonition.important{border-left-color:var(--color-admonition-title--important)}.admonition.important>.admonition-title{background-color:var(--color-admonition-title-background--important)}.admonition.important>.admonition-title:before{background-color:var(--color-admonition-title--important);-webkit-mask-image:var(--icon-flame);mask-image:var(--icon-flame)}.admonition.note{border-left-color:var(--color-admonition-title--note)}.admonition.note>.admonition-title{background-color:var(--color-admonition-title-background--note)}.admonition.note>.admonition-title:before{background-color:var(--color-admonition-title--note);-webkit-mask-image:var(--icon-pencil);mask-image:var(--icon-pencil)}.admonition.seealso{border-left-color:var(--color-admonition-title--seealso)}.admonition.seealso>.admonition-title{backg
round-color:var(--color-admonition-title-background--seealso)}.admonition.seealso>.admonition-title:before{background-color:var(--color-admonition-title--seealso);-webkit-mask-image:var(--icon-info);mask-image:var(--icon-info)}.admonition.admonition-todo{border-left-color:var(--color-admonition-title--admonition-todo)}.admonition.admonition-todo>.admonition-title{background-color:var(--color-admonition-title-background--admonition-todo)}.admonition.admonition-todo>.admonition-title:before{background-color:var(--color-admonition-title--admonition-todo);-webkit-mask-image:var(--icon-pencil);mask-image:var(--icon-pencil)}.admonition-todo>.admonition-title{text-transform:uppercase}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd{margin-left:2rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd>:first-child{margin-top:.125rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list,dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) dd>:last-child{margin-bottom:.75rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list>dt{font-size:var(--font-size--small);text-transform:uppercase}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd:empty{margin-bottom:.5rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul{margin-left:-1.2rem}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list dd>ul>li>p:nth-child(2){margin-top:0}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple) .field-list 
dd>ul>li>p+p:last-child:empty{margin-bottom:0;margin-top:0}dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)>dt{color:var(--color-api-overall)}.sig:not(.sig-inline){background:var(--color-api-background);border-radius:.25rem;font-family:var(--font-stack--monospace);font-size:var(--api-font-size);font-weight:700;margin-left:-.25rem;margin-right:-.25rem;padding:.25rem .5rem .25rem 3em;text-indent:-2.5em;transition:background .1s ease-out}.sig:not(.sig-inline):hover{background:var(--color-api-background-hover)}.sig:not(.sig-inline) a.reference .viewcode-link{font-weight:400;width:4.25rem}em.property{font-style:normal}em.property:first-child{color:var(--color-api-keyword)}.sig-name{color:var(--color-api-name)}.sig-prename{color:var(--color-api-pre-name);font-weight:400}.sig-paren{color:var(--color-api-paren)}.sig-param{font-style:normal}div.deprecated,div.versionadded,div.versionchanged,div.versionremoved{border-left:.1875rem solid;border-radius:.125rem;padding-left:.75rem}div.deprecated p,div.versionadded p,div.versionchanged p,div.versionremoved p{margin-bottom:.125rem;margin-top:.125rem}div.versionadded{border-color:var(--color-api-added-border)}div.versionadded .versionmodified{color:var(--color-api-added)}div.versionchanged{border-color:var(--color-api-changed-border)}div.versionchanged .versionmodified{color:var(--color-api-changed)}div.deprecated{border-color:var(--color-api-deprecated-border)}div.deprecated .versionmodified{color:var(--color-api-deprecated)}div.versionremoved{border-color:var(--color-api-removed-border)}div.versionremoved .versionmodified{color:var(--color-api-removed)}.viewcode-back,.viewcode-link{float:right;text-align:right}.line-block{margin-bottom:.75rem;margin-top:.5rem}.line-block .line-block{margin-bottom:0;margin-top:0;padding-left:1rem}.code-block-caption,article p.caption,table>caption{font-size:var(--font-size--small);text-align:center}.toctree-wrapper.compound .caption,.toctree-wrapper.compound 
:not(.caption)>.caption-text{font-size:var(--font-size--small);margin-bottom:0;text-align:initial;text-transform:uppercase}.toctree-wrapper.compound>ul{margin-bottom:0;margin-top:0}.sig-inline,code.literal{background:var(--color-inline-code-background);border-radius:.2em;font-size:var(--font-size--small--2);padding:.1em .2em}pre.literal-block .sig-inline,pre.literal-block code.literal{font-size:inherit;padding:0}p .sig-inline,p code.literal{border:1px solid var(--color-background-border)}.sig-inline{font-family:var(--font-stack--monospace)}div[class*=" highlight-"],div[class^=highlight-]{display:flex;margin:1em 0}div[class*=" highlight-"] .table-wrapper,div[class^=highlight-] .table-wrapper,pre{margin:0;padding:0}pre{overflow:auto}article[role=main] .highlight pre{line-height:1.5}.highlight pre,pre.literal-block{font-size:var(--code-font-size);padding:.625rem .875rem}pre.literal-block{background-color:var(--color-code-background);border-radius:.2rem;color:var(--color-code-foreground);margin-bottom:1rem;margin-top:1rem}.highlight{border-radius:.2rem;width:100%}.highlight .gp,.highlight span.linenos{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none}.highlight .hll{display:block;margin-left:-.875rem;margin-right:-.875rem;padding-left:.875rem;padding-right:.875rem}.code-block-caption{background-color:var(--color-code-background);border-bottom:1px solid;border-radius:.25rem;border-bottom-left-radius:0;border-bottom-right-radius:0;border-color:var(--color-background-border);color:var(--color-code-foreground);display:flex;font-weight:300;padding:.625rem .875rem}.code-block-caption+div[class]{margin-top:0}.code-block-caption+div[class]>.highlight{border-top-left-radius:0;border-top-right-radius:0}.highlighttable{display:block;width:100%}.highlighttable tbody{display:block}.highlighttable tr{display:flex}.highlighttable 
td.linenos{background-color:var(--color-code-background);border-bottom-left-radius:.2rem;border-top-left-radius:.2rem;color:var(--color-code-foreground);padding:.625rem 0 .625rem .875rem}.highlighttable .linenodiv{box-shadow:-.0625rem 0 var(--color-foreground-border) inset;font-size:var(--code-font-size);padding-right:.875rem}.highlighttable td.code{display:block;flex:1;overflow:hidden;padding:0}.highlighttable td.code .highlight{border-bottom-left-radius:0;border-top-left-radius:0}.highlight span.linenos{box-shadow:-.0625rem 0 var(--color-foreground-border) inset;display:inline-block;margin-right:.875rem;padding-left:0;padding-right:.875rem}.footnote-reference{font-size:var(--font-size--small--4);vertical-align:super}dl.footnote.brackets{color:var(--color-foreground-secondary);display:grid;font-size:var(--font-size--small);grid-template-columns:max-content auto}dl.footnote.brackets dt{margin:0}dl.footnote.brackets dt>.fn-backref{margin-left:.25rem}dl.footnote.brackets dt:after{content:":"}dl.footnote.brackets dt .brackets:before{content:"["}dl.footnote.brackets dt .brackets:after{content:"]"}dl.footnote.brackets dd{margin:0;padding:0 1rem}aside.footnote{color:var(--color-foreground-secondary);font-size:var(--font-size--small)}aside.footnote>span,div.citation>span{float:left;font-weight:500;padding-right:.25rem}aside.footnote>:not(span),div.citation>p{margin-left:2rem}img{box-sizing:border-box;height:auto;max-width:100%}article .figure,article figure{border-radius:.2rem;margin:0}article .figure :last-child,article figure :last-child{margin-bottom:0}article .align-left{clear:left;float:left;margin:0 1rem 1rem}article .align-right{clear:right;float:right;margin:0 1rem 1rem}article .align-center,article .align-default{display:block;margin-left:auto;margin-right:auto;text-align:center}article table.align-default{display:table;text-align:initial}.domainindex-jumpbox,.genindex-jumpbox{border-bottom:1px solid var(--color-background-border);border-top:1px solid 
var(--color-background-border);padding:.25rem}.domainindex-section h2,.genindex-section h2{margin-bottom:.5rem;margin-top:.75rem}.domainindex-section ul,.genindex-section ul{margin-bottom:0;margin-top:0}ol,ul{margin-bottom:1rem;margin-top:1rem;padding-left:1.2rem}ol li>p:first-child,ul li>p:first-child{margin-bottom:.25rem;margin-top:.25rem}ol li>p:last-child,ul li>p:last-child{margin-top:.25rem}ol li>ol,ol li>ul,ul li>ol,ul li>ul{margin-bottom:.5rem;margin-top:.5rem}ol.arabic{list-style:decimal}ol.loweralpha{list-style:lower-alpha}ol.upperalpha{list-style:upper-alpha}ol.lowerroman{list-style:lower-roman}ol.upperroman{list-style:upper-roman}.simple li>ol,.simple li>ul,.toctree-wrapper li>ol,.toctree-wrapper li>ul{margin-bottom:0;margin-top:0}.field-list dt,.option-list dt,dl.footnote dt,dl.glossary dt,dl.simple dt,dl:not([class]) dt{font-weight:500;margin-top:.25rem}.field-list dt+dt,.option-list dt+dt,dl.footnote dt+dt,dl.glossary dt+dt,dl.simple dt+dt,dl:not([class]) dt+dt{margin-top:0}.field-list dt .classifier:before,.option-list dt .classifier:before,dl.footnote dt .classifier:before,dl.glossary dt .classifier:before,dl.simple dt .classifier:before,dl:not([class]) dt .classifier:before{content:":";margin-left:.2rem;margin-right:.2rem}.field-list dd ul,.field-list dd>p:first-child,.option-list dd ul,.option-list dd>p:first-child,dl.footnote dd ul,dl.footnote dd>p:first-child,dl.glossary dd ul,dl.glossary dd>p:first-child,dl.simple dd ul,dl.simple dd>p:first-child,dl:not([class]) dd ul,dl:not([class]) dd>p:first-child{margin-top:.125rem}.field-list dd ul,.option-list dd ul,dl.footnote dd ul,dl.glossary dd ul,dl.simple dd ul,dl:not([class]) dd ul{margin-bottom:.125rem}.math-wrapper{overflow-x:auto;width:100%}div.math{position:relative;text-align:center}div.math .headerlink,div.math:focus .headerlink{display:none}div.math:hover .headerlink{display:inline-block}div.math 
span.eqno{position:absolute;right:.5rem;top:50%;transform:translateY(-50%);z-index:1}abbr[title]{cursor:help}.problematic{color:var(--color-problematic)}kbd:not(.compound){background-color:var(--color-background-secondary);border:1px solid var(--color-foreground-border);border-radius:.2rem;box-shadow:0 .0625rem 0 rgba(0,0,0,.2),inset 0 0 0 .125rem var(--color-background-primary);color:var(--color-foreground-primary);display:inline-block;font-size:var(--font-size--small--3);margin:0 .2rem;padding:0 .2rem;vertical-align:text-bottom}blockquote{background:var(--color-background-secondary);border-left:4px solid var(--color-background-border);margin-left:0;margin-right:0;padding:.5rem 1rem}blockquote .attribution{font-weight:600;text-align:right}blockquote.highlights,blockquote.pull-quote{font-size:1.25em}blockquote.epigraph,blockquote.pull-quote{border-left-width:0;border-radius:.5rem}blockquote.highlights{background:transparent;border-left-width:0}p .reference img{vertical-align:middle}p.rubric{font-size:1.125em;font-weight:700;line-height:1.25}dd p.rubric{font-size:var(--font-size--small);font-weight:inherit;line-height:inherit;text-transform:uppercase}article .sidebar{background-color:var(--color-background-secondary);border:1px solid var(--color-background-border);border-radius:.2rem;clear:right;float:right;margin-left:1rem;margin-right:0;width:30%}article .sidebar>*{padding-left:1rem;padding-right:1rem}article .sidebar>ol,article .sidebar>ul{padding-left:2.2rem}article .sidebar .sidebar-title{border-bottom:1px solid var(--color-background-border);font-weight:500;margin:0;padding:.5rem 1rem}[role=main] .table-wrapper.container{margin-bottom:.5rem;margin-top:1rem;overflow-x:auto;padding:.2rem .2rem .75rem;width:100%}table.docutils{border-collapse:collapse;border-radius:.2rem;border-spacing:0;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .0625rem rgba(0,0,0,.1)}table.docutils th{background:var(--color-table-header-background)}table.docutils td,table.docutils 
th{border-bottom:1px solid var(--color-table-border);border-left:1px solid var(--color-table-border);border-right:1px solid var(--color-table-border);padding:0 .25rem}table.docutils td p,table.docutils th p{margin:.25rem}table.docutils td:first-child,table.docutils th:first-child{border-left:none}table.docutils td:last-child,table.docutils th:last-child{border-right:none}table.docutils td.text-left,table.docutils th.text-left{text-align:left}table.docutils td.text-right,table.docutils th.text-right{text-align:right}table.docutils td.text-center,table.docutils th.text-center{text-align:center}:target{scroll-margin-top:2.5rem}@media(max-width:67em){:target{scroll-margin-top:calc(2.5rem + var(--header-height))}section>span:target{scroll-margin-top:calc(2.8rem + var(--header-height))}}.headerlink{font-weight:100;-webkit-user-select:none;-moz-user-select:none;user-select:none}.code-block-caption>.headerlink,dl dt>.headerlink,figcaption p>.headerlink,h1>.headerlink,h2>.headerlink,h3>.headerlink,h4>.headerlink,h5>.headerlink,h6>.headerlink,p.caption>.headerlink,table>caption>.headerlink{margin-left:.5rem;visibility:hidden}.code-block-caption:hover>.headerlink,dl dt:hover>.headerlink,figcaption p:hover>.headerlink,h1:hover>.headerlink,h2:hover>.headerlink,h3:hover>.headerlink,h4:hover>.headerlink,h5:hover>.headerlink,h6:hover>.headerlink,p.caption:hover>.headerlink,table>caption:hover>.headerlink{visibility:visible}.code-block-caption>.toc-backref,dl dt>.toc-backref,figcaption 
p>.toc-backref,h1>.toc-backref,h2>.toc-backref,h3>.toc-backref,h4>.toc-backref,h5>.toc-backref,h6>.toc-backref,p.caption>.toc-backref,table>caption>.toc-backref{color:inherit;text-decoration-line:none}figure:hover>figcaption>p>.headerlink,table:hover>caption>.headerlink{visibility:visible}:target>h1:first-of-type,:target>h2:first-of-type,:target>h3:first-of-type,:target>h4:first-of-type,:target>h5:first-of-type,:target>h6:first-of-type,span:target~h1:first-of-type,span:target~h2:first-of-type,span:target~h3:first-of-type,span:target~h4:first-of-type,span:target~h5:first-of-type,span:target~h6:first-of-type{background-color:var(--color-highlight-on-target)}:target>h1:first-of-type code.literal,:target>h2:first-of-type code.literal,:target>h3:first-of-type code.literal,:target>h4:first-of-type code.literal,:target>h5:first-of-type code.literal,:target>h6:first-of-type code.literal,span:target~h1:first-of-type code.literal,span:target~h2:first-of-type code.literal,span:target~h3:first-of-type code.literal,span:target~h4:first-of-type code.literal,span:target~h5:first-of-type code.literal,span:target~h6:first-of-type code.literal{background-color:transparent}.literal-block-wrapper:target .code-block-caption,.this-will-duplicate-information-and-it-is-still-useful-here li :target,figure:target,table:target>caption{background-color:var(--color-highlight-on-target)}dt:target{background-color:var(--color-highlight-on-target)!important}.footnote-reference:target,.footnote>dt:target+dd{background-color:var(--color-highlight-on-target)}.guilabel{background-color:var(--color-guilabel-background);border:1px solid var(--color-guilabel-border);border-radius:.5em;color:var(--color-guilabel-text);font-size:.9em;padding:0 .3em}footer{display:flex;flex-direction:column;font-size:var(--font-size--small);margin-top:2rem}.bottom-of-page{align-items:center;border-top:1px solid 
var(--color-background-border);color:var(--color-foreground-secondary);display:flex;justify-content:space-between;line-height:1.5;margin-top:1rem;padding-bottom:1rem;padding-top:1rem}@media(max-width:46em){.bottom-of-page{flex-direction:column-reverse;gap:.25rem;text-align:center}}.bottom-of-page .left-details{font-size:var(--font-size--small)}.bottom-of-page .right-details{display:flex;flex-direction:column;gap:.25rem;text-align:right}.bottom-of-page .icons{display:flex;font-size:1rem;gap:.25rem;justify-content:flex-end}.bottom-of-page .icons a{text-decoration:none}.bottom-of-page .icons img,.bottom-of-page .icons svg{font-size:1.125rem;height:1em;width:1em}.related-pages a{align-items:center;display:flex;text-decoration:none}.related-pages a:hover .page-info .title{color:var(--color-link);text-decoration:underline;text-decoration-color:var(--color-link-underline)}.related-pages a svg.furo-related-icon,.related-pages a svg.furo-related-icon>use{color:var(--color-foreground-border);flex-shrink:0;height:.75rem;margin:0 .5rem;width:.75rem}.related-pages a.next-page{clear:right;float:right;max-width:50%;text-align:right}.related-pages a.prev-page{clear:left;float:left;max-width:50%}.related-pages a.prev-page svg{transform:rotate(180deg)}.page-info{display:flex;flex-direction:column;overflow-wrap:anywhere}.next-page .page-info{align-items:flex-end}.page-info .context{align-items:center;color:var(--color-foreground-muted);display:flex;font-size:var(--font-size--small);padding-bottom:.1rem;text-decoration:none}ul.search{list-style:none;padding-left:0}ul.search li{border-bottom:1px solid var(--color-background-border);padding:1rem 0}[role=main] .highlighted{background-color:var(--color-highlighted-background);color:var(--color-highlighted-text)}.sidebar-brand{display:flex;flex-direction:column;flex-shrink:0;padding:var(--sidebar-item-spacing-vertical) 
var(--sidebar-item-spacing-horizontal);text-decoration:none}.sidebar-brand-text{color:var(--color-sidebar-brand-text);font-size:1.5rem;overflow-wrap:break-word}.sidebar-brand-text,.sidebar-logo-container{margin:var(--sidebar-item-spacing-vertical) 0}.sidebar-logo{display:block;margin:0 auto;max-width:100%}.sidebar-search-container{align-items:center;background:var(--color-sidebar-search-background);display:flex;margin-top:var(--sidebar-search-space-above);position:relative}.sidebar-search-container:focus-within,.sidebar-search-container:hover{background:var(--color-sidebar-search-background--focus)}.sidebar-search-container:before{background-color:var(--color-sidebar-search-icon);content:"";height:var(--sidebar-search-icon-size);left:var(--sidebar-item-spacing-horizontal);-webkit-mask-image:var(--icon-search);mask-image:var(--icon-search);position:absolute;width:var(--sidebar-search-icon-size)}.sidebar-search{background:transparent;border:none;border-bottom:1px solid var(--color-sidebar-search-border);border-top:1px solid var(--color-sidebar-search-border);box-sizing:border-box;color:var(--color-sidebar-search-foreground);padding:var(--sidebar-search-input-spacing-vertical) var(--sidebar-search-input-spacing-horizontal) var(--sidebar-search-input-spacing-vertical) calc(var(--sidebar-item-spacing-horizontal) + var(--sidebar-search-input-spacing-horizontal) + var(--sidebar-search-icon-size));width:100%;z-index:10}.sidebar-search:focus{outline:none}.sidebar-search::-moz-placeholder{font-size:var(--sidebar-search-input-font-size)}.sidebar-search::placeholder{font-size:var(--sidebar-search-input-font-size)}#searchbox .highlight-link{margin:0;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal) 0;text-align:center}#searchbox .highlight-link 
a{color:var(--color-sidebar-search-icon);font-size:var(--font-size--small--2)}.sidebar-tree{font-size:var(--sidebar-item-font-size);margin-bottom:var(--sidebar-item-spacing-vertical);margin-top:var(--sidebar-tree-space-above)}.sidebar-tree ul{display:flex;flex-direction:column;list-style:none;margin-bottom:0;margin-top:0;padding:0}.sidebar-tree li{margin:0;position:relative}.sidebar-tree li>ul{margin-left:var(--sidebar-item-spacing-horizontal)}.sidebar-tree .icon,.sidebar-tree .reference{color:var(--color-sidebar-link-text)}.sidebar-tree .reference{box-sizing:border-box;display:inline-block;height:100%;line-height:var(--sidebar-item-line-height);overflow-wrap:anywhere;padding:var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal);text-decoration:none;width:100%}.sidebar-tree .reference:hover{background:var(--color-sidebar-item-background--hover);color:var(--color-sidebar-link-text)}.sidebar-tree .reference.external:after{color:var(--color-sidebar-link-text);content:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' fill='none' stroke='%23607d8b' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' viewBox='0 0 24 24'%3E%3Cpath stroke='none' d='M0 0h24v24H0z'/%3E%3Cpath d='M11 7H6a2 2 0 0 0-2 2v9a2 2 0 0 0 2 2h9a2 2 0 0 0 2-2v-5M10 14 20 4M15 4h5v5'/%3E%3C/svg%3E");margin:0 .25rem;vertical-align:middle}.sidebar-tree .current-page>.reference{font-weight:700}.sidebar-tree label{align-items:center;cursor:pointer;display:flex;height:var(--sidebar-item-height);justify-content:center;position:absolute;right:0;top:0;-webkit-user-select:none;-moz-user-select:none;user-select:none;width:var(--sidebar-expander-width)}.sidebar-tree .caption,.sidebar-tree :not(.caption)>.caption-text{color:var(--color-sidebar-caption-text);font-size:var(--sidebar-caption-font-size);font-weight:700;margin:var(--sidebar-caption-space-above) 0 0 0;padding:var(--sidebar-item-spacing-vertical) 
var(--sidebar-item-spacing-horizontal);text-transform:uppercase}.sidebar-tree li.has-children>.reference{padding-right:var(--sidebar-expander-width)}.sidebar-tree .toctree-l1>.reference,.sidebar-tree .toctree-l1>label .icon{color:var(--color-sidebar-link-text--top-level)}.sidebar-tree label{background:var(--color-sidebar-item-expander-background)}.sidebar-tree label:hover{background:var(--color-sidebar-item-expander-background--hover)}.sidebar-tree .current>.reference{background:var(--color-sidebar-item-background--current)}.sidebar-tree .current>.reference:hover{background:var(--color-sidebar-item-background--hover)}.toctree-checkbox{display:none;position:absolute}.toctree-checkbox~ul{display:none}.toctree-checkbox~label .icon svg{transform:rotate(90deg)}.toctree-checkbox:checked~ul{display:block}.toctree-checkbox:checked~label .icon svg{transform:rotate(-90deg)}.toc-title-container{padding:var(--toc-title-padding);padding-top:var(--toc-spacing-vertical)}.toc-title{color:var(--color-toc-title-text);font-size:var(--toc-title-font-size);padding-left:var(--toc-spacing-horizontal);text-transform:uppercase}.no-toc{display:none}.toc-tree-container{padding-bottom:var(--toc-spacing-vertical)}.toc-tree{border-left:1px solid var(--color-background-border);font-size:var(--toc-font-size);line-height:1.3;padding-left:calc(var(--toc-spacing-horizontal) - var(--toc-item-spacing-horizontal))}.toc-tree>ul>li:first-child{padding-top:0}.toc-tree>ul>li:first-child>ul{padding-left:0}.toc-tree>ul>li:first-child>a{display:none}.toc-tree ul{list-style-type:none;margin-bottom:0;margin-top:0;padding-left:var(--toc-item-spacing-horizontal)}.toc-tree li{padding-top:var(--toc-item-spacing-vertical)}.toc-tree li.scroll-current>.reference{color:var(--color-toc-item-text--active);font-weight:700}.toc-tree 
a.reference{color:var(--color-toc-item-text);overflow-wrap:anywhere;text-decoration:none}.toc-scroll{max-height:100vh;overflow-y:scroll}.contents:not(.this-will-duplicate-information-and-it-is-still-useful-here){background:rgba(255,0,0,.25);color:var(--color-problematic)}.contents:not(.this-will-duplicate-information-and-it-is-still-useful-here):before{content:"ERROR: Adding a table of contents in Furo-based documentation is unnecessary, and does not work well with existing styling. Add a 'this-will-duplicate-information-and-it-is-still-useful-here' class, if you want an escape hatch."}.text-align\:left>p{text-align:left}.text-align\:center>p{text-align:center}.text-align\:right>p{text-align:right} +/*# sourceMappingURL=furo.css.map*/ \ No newline at end of file diff --git a/docs/build/html/_static/styles/furo.css.map b/docs/build/html/_static/styles/furo.css.map new file mode 100644 index 00000000..280b3fef --- /dev/null +++ b/docs/build/html/_static/styles/furo.css.map @@ -0,0 +1 @@ +{"version":3,"file":"styles/furo.css","mappings":"AAAA,2EAA2E,CAU3E,KACE,gBAAiB,CACjB,6BACF,CASA,KACE,QACF,CAMA,KACE,aACF,CAOA,GACE,aAAc,CACd,cACF,CAUA,GACE,sBAAuB,CACvB,QAAS,CACT,gBACF,CAOA,IACE,+BAAiC,CACjC,aACF,CASA,EACE,4BACF,CAOA,YACE,kBAAmB,CACnB,yBAA0B,CAC1B,gCACF,CAMA,SAEE,kBACF,CAOA,cAGE,+BAAiC,CACjC,aACF,CAeA,QAEE,aAAc,CACd,aAAc,CACd,iBAAkB,CAClB,uBACF,CAEA,IACE,aACF,CAEA,IACE,SACF,CASA,IACE,iBACF,CAUA,sCAKE,mBAAoB,CACpB,cAAe,CACf,gBAAiB,CACjB,QACF,CAOA,aAEE,gBACF,CAOA,cAEE,mBACF,CAMA,gDAIE,yBACF,CAMA,wHAIE,iBAAkB,CAClB,SACF,CAMA,4GAIE,6BACF,CAMA,SACE,0BACF,CASA,OACE,qBAAsB,CACtB,aAAc,CACd,aAAc,CACd,cAAe,CACf,SAAU,CACV,kBACF,CAMA,SACE,uBACF,CAMA,SACE,aACF,CAOA,6BAEE,qBAAsB,CACtB,SACF,CAMA,kFAEE,WACF,CAOA,cACE,4BAA6B,CAC7B,mBACF,CAMA,yCACE,uBACF,CAOA,6BACE,yBAA0B,CAC1B,YACF,CASA,QACE,aACF,CAMA,QACE,iBACF,CAiBA,kBACE,YACF,CCvVA,aAcE,kEACE,uBAOF,WACE,iDAMF,kCACE,wBAEF,qCAEE,uBADA,uBACA,CAEF,SACE,wBAtBA,CCpBJ,iBAGE,qBAEA,sBACA,0BAFA,oBAHA,4BACA,oBAKA,6BAIA,2CAFA,mBACA,sCAFA,4BAG
A,CAEF,gBACE,aCPF,KCCE,mHAGA,wGAGA,wCAAyC,CAEzC,wBAAyB,CACzB,wBAAyB,CACzB,4BAA6B,CAC7B,yBAA0B,CAC1B,2BAA4B,CAG5B,sDAAuD,CACvD,gDAAiD,CACjD,wDAAyD,CAGzD,0CAA2C,CAC3C,gDAAiD,CACjD,gDAAiD,CAKjD,gCAAiC,CACjC,sCAAuC,CAGvC,2CAA4C,CAG5C,uCAAwC,CCnCxC,+FAIA,uBAAwB,CAGxB,iCAAkC,CAClC,kCAAmC,CAEnC,+BAAgC,CAChC,sCAAuC,CACvC,sCAAuC,CACvC,qGAIA,mDAAoD,CAEpD,mCAAoC,CACpC,8CAA+C,CAC/C,gDAAiD,CACjD,kCAAmC,CACnC,6DAA8D,CAG9D,6BAA8B,CAC9B,6BAA8B,CAC9B,+BAAgC,CAChC,kCAAmC,CACnC,kCAAmC,CCRjC,+jBCaA,iqCAZF,iaCXA,8KAOA,4SAWA,4SAUA,0CACA,gEAGA,0CAGA,gEAGA,yCACA,+DAIA,4CACA,kEAGA,wCAUA,8DACA,uCAGA,4DACA,sCACA,2DAGA,4CACA,kEACA,uCAGA,6DACA,2GAGA,sHAEA,yFAEA,+CACA,+EAGA,4MAOA,gCACA,sHAIA,kCACA,uEACA,gEACA,4DACA,kEAGA,2DACA,sDACA,0CACA,8CACA,wGAGA,0BACA,iCAGA,+DACA,+BACA,sCACA,+DAEA,kGACA,oCACA,yDACA,sCL3HF,kCAEA,sDAIA,0CKyHE,kEAIA,oDACA,sDAGA,oCACA,oEAEA,0DACA,qDAIA,oDACA,6DAIA,iEAIA,2DAIA,2DAGA,4DACA,gEAIA,gEAEA,gFAEA,oNASA,qDLtKE,gFAGE,4DAIF,oEKgHF,yEAEA,6DAGA,0DAEA,uDACA,qDACA,wDAIA,6DAIA,yDACA,2DAIA,uCAGA,wCACA,sDAGA,+CAGA,6DAEA,iDACA,+DAEA,wDAEA,sEAMA,0DACA,sBACA,mEL5JI,wEAEA,iCACE,+BAMN,wEAGA,iCACE,kFAEA,uEAIF,gEACE,8BAGF,qEMzDA,sCAKA,wFAKA,iCAIA,0BAWA,iCACA,4BACA,mCAGA,+BAEA,sCACA,4BAEA,mCAEA,sCAKA,sDAIA,gCAEA,gEAQF,wCAME,sBACA,kCAKA,uBAEA,gEAIA,2BAIA,mCAEA,qCACA,iCAGE,+BACA,wEAEE,iCACA,kFAGF,6BACA,0CACF,kCAEE,8BACE,8BACA,qEAEE,sCACA,wFClFN,iCAGF,2DACE,4BACA,oCAKF,8BAGE,sCACA,+DAIA,sCAEA,sDAGA,gCACA,gEAGA,+CAEA,sBACE,yCAGF,uBACA,sEAIA,aAEA,mCAIA,kEACA,aACA,oEACA,YAIA,EAQE,4HAGA,gDACE,mBACA,wCAON,wCAGE,0DACA,mBAKA,mBACA,CANA,uCAKA,iBALA,iBAWA,mBAGF,mBACE,mDAIF,+BAEE,CAEA,yBAFA,kBAMA,CAJA,GACA,aAGA,mBAEF,wBAEE,iBACA,iBAEA,OACA,aAGF,CAHE,WAGF,GAEE,oBAEA,CAJF,gBAIE,aAEA,+CAKA,UANA,WACA,cADA,SAMA,WACA,iBAEE,GAMF,wBANE,yBAMF,kDACA,WAEA,gCACA,2DAGA,iBACE,uCAEJ,kEAIE,uCAGA,yDACE,cACA,+DAEA,yDAEE,mEAMJ,kEAMA,uBACA,kBAEA,uBACA,kDAKA,0DAIA,CALA,oBAKA,WACA,WAQA,4BAFF,0CAEE,CARA,qCAsBA,CAdA,iBAEA,kBACE,aADF,4BACE,WAMF,2BAGF,qCAEE,CAXE,UAWF,+BAGA,uBAEA,SAEA,0CAIE,CANF,qCAEA,CAIE,2DACE,gBAIN,+CAIA,C
AEA,kDAKE,CAPF,8BAEA,CAOE,YACA,CAjBI,2BAGN,CAHM,WAcJ,UAGA,CAEA,2GAIF,iCAGE,8BAIA,qBACA,oBACF,uBAOI,0CAIA,CATF,6DAKE,CALF,sBASE,qCAKF,CACE,cACA,CAFF,sBAEE,CACA,+BAEA,qBAEE,WAKN,aACE,sCAGA,mBAEA,6BAMA,kCACA,CAJA,sBACA,aAEA,CAJA,eACA,MAIA,2FAEA,UAGA,YACA,sBACE,8BAEA,CALF,aACA,WAIE,OACA,oBAEF,uBACE,WAEF,YAFE,UAEF,eAgBA,kBACE,CAhBA,qDAQF,qCAGF,CAGI,YACF,CAJF,2BAGI,CAEA,eACA,qBAGA,mEAEA,qBACA,8BAIA,kBADF,kBACE,yBAEJ,oCAGI,qDAIJ,+BAGI,oCAEA,+CAQF,4CACE,yBACF,2BAOE,sBACA,CAHA,WACA,CAFF,cACE,CAJA,YAGF,CAEE,SAEA,mBAGA,kDAEE,CAJF,cAEA,cAEE,sBAEA,mBADA,YACA,uBACA,mDACE,CADF,YACE,iDAEA,uCAEN,+DAOE,mBADF,sBACE,mBAGF,aACE,sCAIA,aADF,WACE,CAKF,SACE,CAHJ,kBAEE,CAJE,gBAEJ,CAHI,iBAMA,yFAKA,aACA,eACA,cCxaJ,iBAEE,aADA,iBACA,6BAEA,kCAEA,SACA,UAIA,gCACA,CALA,SAEA,SAEA,CAJA,wEAEA,CAFA,OAKA,CAGA,mDACE,iBAGF,gCACE,CADF,UACE,aAEJ,iCAEE,CAFF,UAEE,wCAEA,WACA,WADA,UACA,CACA,4CAGA,MACA,CADA,KACA,wCACA,UAGA,CAJA,UAIA,6DAUA,0CACE,CAFF,mBAEE,wEACA,CAVA,YACA,CAMF,mBAJE,OAOA,gBAJJ,gCACE,CANE,cACA,CAHA,oBACA,CAGA,QAGJ,CAII,0BACA,CADA,UACA,wCAEJ,kBACE,0DACA,gCACE,kBACA,CADA,YACA,oEACA,2CAMF,mDAII,CALN,YACE,CANE,cAKJ,CACE,iBAII,kEACA,yCACE,kDACA,yDACE,+CACA,uBANN,CAMM,+BANN,uCACE,qDACA,4BAEE,mBADA,0CACA,CADA,qBACA,0DACE,wCACA,sGALJ,oCACA,sBACE,kBAFF,UAEE,2CACA,wFACE,cACA,kEANN,uBACE,iDACA,CADA,UACA,0DACE,wDAEE,iEACA,qEANN,sCACE,CAGE,iBAHF,gBAGE,qBACE,CAJJ,uBACA,gDACE,wDACA,6DAHF,2CACA,CADA,gBACA,eACE,CAGE,sBANN,8BACE,CAII,iBAFF,4DACA,WACE,YADF,uCACE,6EACA,2BANN,8CACE,kDACA,0CACE,8BACA,yFACE,sBACA,sFALJ,mEACA,sBACE,kEACA,6EACE,uCACA,kEALJ,qGAEE,kEACA,6EACE,uCACA,kEALJ,8CACA,uDACE,sEACA,2EACE,sCACA,iEALJ,mGACA,qCACE,oDACA,0DACE,6GACA,gDAGR,yDCvEA,sEACE,CACA,6GACE,gEACF,iGAIF,wFACE,qDAGA,mGAEE,2CAEF,4FACE,gCACF,wGACE,8DAEE,6FAIA,iJAKN,6GACE,gDAKF,yDACA,qCAGA,6BACA,kBACA,qDAKA,oCAEA,+DAGA,2CAGE,oDAIA,oEAEE,qBAGJ,wDAEE,uCAEF,kEAGA,8CAEA,uDAIF,gEAIE,6BACA,gEAIA,+CACE,0EAIF,sDAEE,+DAGF,sCACA,8BACE,oCAEJ,wBACE,4FAEE,gBAEJ,yGAGI,kBAGJ,CCnHE,2MCFF,oBAGE,wGAKA,iCACE,CADF,wBACE,8GAQA,mBCjBJ,2GAIE,mBACA,6HAMA,YACE,mI
AYF,eACA,CAHF,YAGE,4FAGE,8BAKF,uBAkBE,sCACA,CADA,qBAbA,wCAIA,CALF,8BACE,CADF,gBAKE,wCACA,CAOA,kDACA,CACA,kCAKF,6BAGA,4CACE,kDACA,eAGF,cACE,aACA,iBACA,yBACA,8BACA,WAGJ,2BACE,cAGA,+BACA,CAHA,eAGA,wCACA,YACA,iBACA,uEAGA,0BACA,2CAEA,8EAGI,qBACA,CAFF,kBAEE,4DAMJ,mCACE,4BAGA,oBAGF,4CACE,qCACA,8BACA,gBACA,+CAEA,iCAEF,iCACE,oBACA,4CACA,qCAGF,8BAEE,+BAEA,WAEA,8BACE,oBACA,CADA,gBACA,yBAKF,gBADF,YACE,CACA,iBACA,qDAEA,mDCvIJ,2FAMA,iCACE,CACA,eAEA,CAFA,mBADA,wBAIA,8BACA,gBADA,YACA,0BAEE,8CAGA,wDAIE,gFAGE,iBAEN,wCAKF,+CACE,CACA,oDAEF,kDAIE,YAEF,CAHE,YAGF,CCpCE,mFAFA,QACA,UAIA,CAHA,IAGA,gDAGE,eACA,iEAGF,wBAEE,mBAMA,6CAEF,CAJE,mBACA,CAGF,kCAGE,CARF,kBACE,CAHA,eAUA,YACA,mBACA,CAFA,UAEA,wCC/BJ,mBACE,CDkCE,wBACA,sBCpCJ,iBACE,mDACA,2CACA,sBAGA,qBCDA,6CAIE,CATJ,uBAKE,CDGE,oBACF,yDAEE,CCDE,2CAGF,CAJA,kCACE,CDJJ,aAKE,eCXJ,CDME,uBCOE,gCACE,YAEF,2CAEE,wBACA,0BAIF,iBAEA,cADF,UACE,uBAEA,iCAEA,wCAEA,6CAMA,CAYF,gCATI,4BASJ,CAZE,mCAEE,iCAUJ,4BAGE,4DADA,+BACA,CAHF,qBAGE,sCACE,OAEF,iBAHA,SAGA,iHACE,2DAKF,CANA,8EAMA,uSAEE,kBAEF,+FACE,yCCjEJ,WACA,yBAGA,uBACA,gBAEA,uCAIA,CAJA,iCAIA,uCAGA,UACE,gBACA,qBAEA,0CClBJ,gBACE,KAGF,qBACE,YAGF,CAHE,cAGF,gCAEE,mBACA,iEAEA,oCACA,wCAEA,sBACA,WAEA,CAFA,YAEA,8EAEA,mCAFA,iBAEA,6BAIA,wEAKA,sDAIE,CARF,mDAIA,CAIE,cAEF,8CAIA,oBAFE,iBAEF,8CAGE,eAEF,CAFE,YAEF,OAEE,kBAGJ,CAJI,eACA,CAFF,mBAKF,yCCjDE,oBACA,CAFA,iBAEA,uCAKE,iBACA,qCAGA,mBCZJ,CDWI,gBCXJ,6BAEE,eACA,sBAGA,eAEA,sBACA,oDACA,iGAMA,gBAFE,YAEF,8FAME,iJCnBF,YACA,gNAWE,gDAEF,iSAaE,kBACE,gHAKF,oCACE,eACF,CADE,UACF,8CACE,gDACF,wCACE,oBCtCJ,oBAEF,6BACE,QACE,kDAGF,yBACE,kDAmBA,kDAEF,CAhBA,+CAaA,CAbA,oBAaA,0FACE,CADF,gGAfF,cACE,gBACA,CAaA,0BAGA,mQACE,gBAGF,oMACE,iBACA,CAFF,eACE,CADF,gBAEE,aAGJ,iCAEE,CAFF,wCAEE,wBAUE,+VAIE,uEAHA,2BAGA,wXAKJ,iDAGF,CARM,+CACE,iDAIN,CALI,gBAQN,mHACE,gBAGF,2DACE,0EAOA,0EAGF,gBAEE,6DCjFA,kDACA,gCACA,qDAGA,qBACA,qDCDA,cACA,eAEA,yBAGF,sBAEE,iBACA,sNAWA,iBACE,kBACA,wRAgBA,kBAEA,iOAgBA,uCACE,uEAEA,kBAEF,qUAuBE,iDAIJ,CACA,geCzFF,4BAEE,CAQA,6JACA,iDAIA,sEAGA,mDAOF,iDAGE,4DAIA,8CACA,qDAEE,eAFF,cAEE,oBAEF
,uBAFE,kCAGA,eACA,iBACA,mBAIA,mDACA,CAHA,uCAEA,CAJA,0CACA,CAIA,gBAJA,gBACA,oBADA,gBAIA,wBAEJ,gBAGE,6BACA,YAHA,iBAGA,gCACA,iEAEA,6CACA,sDACA,0BADA,wBACA,0BACA,oIAIA,mBAFA,YAEA,qBACA,0CAIE,uBAEF,CAHA,yBACE,CAEF,iDACE,mFAKJ,oCACE,CANE,aAKJ,CACE,qEAIA,YAFA,WAEA,CAHA,aACA,CAEA,gBACE,4BACA,sBADA,aACA,gCAMF,oCACA,yDACA,2CAEA,qBAGE,kBAEA,CACA,mCAIF,CARE,YACA,CAOF,iCAEE,CAPA,oBACA,CAQA,oBACE,uDAEJ,sDAGA,CAHA,cAGA,0BACE,oDAIA,oCACA,4BACA,sBAGA,cAEA,oFAGA,sBAEA,yDACE,CAIF,iBAJE,wBAIF,6CAHE,6CAKA,eACA,aACA,CADA,cACA,yCAGJ,kBACE,CAKA,iDAEA,CARF,aACE,4CAGA,kBAIA,wEAGA,wDAGA,kCAOA,iDAGA,CAPF,WAEE,sCAEA,CAJF,2CACE,CAMA,qCACA,+BARF,kBACE,qCAOA,iBAsBA,sBACE,CAvBF,WAKA,CACE,0DAIF,CALA,uDACE,CANF,sBAqBA,4CACA,CALA,gRAIA,YAEE,6CAEN,mCAEE,+CASA,6EAIA,4BChNA,SDmNA,qFCnNA,gDACA,sCAGA,qCACA,sDACA,CAKA,kDAGA,CARA,0CAQA,kBAGA,YACA,sBACA,iBAFA,gBADF,YACE,CAHA,SAKA,kBAEA,SAFA,iBAEA,uEAGA,CAEE,6CAFF,oCAgBI,CAdF,yBACE,qBACF,CAGF,oBACE,CAIF,WACE,CALA,2CAGA,uBACF,CACE,mFAGE,CALF,qBAEA,UAGE,gCAIF,sDAEA,CALE,oCAKF,yCC7CJ,oCACE,CD+CA,yXAQE,sCCrDJ,wCAGA,oCACE","sources":["webpack:///./node_modules/normalize.css/normalize.css","webpack:///./src/furo/assets/styles/base/_print.sass","webpack:///./src/furo/assets/styles/base/_screen-readers.sass","webpack:///./src/furo/assets/styles/base/_theme.sass","webpack:///./src/furo/assets/styles/variables/_fonts.scss","webpack:///./src/furo/assets/styles/variables/_spacing.scss","webpack:///./src/furo/assets/styles/variables/_icons.scss","webpack:///./src/furo/assets/styles/variables/_admonitions.scss","webpack:///./src/furo/assets/styles/variables/_colors.scss","webpack:///./src/furo/assets/styles/base/_typography.sass","webpack:///./src/furo/assets/styles/_scaffold.sass","webpack:///./src/furo/assets/styles/content/_admonitions.sass","webpack:///./src/furo/assets/styles/content/_api.sass","webpack:///./src/furo/assets/styles/content/_blocks.sass","webpack:///./src/furo/assets/styles/content/_captions.sass","webpack:///./src/furo/assets/styles/content/_code.sass",
"webpack:///./src/furo/assets/styles/content/_footnotes.sass","webpack:///./src/furo/assets/styles/content/_images.sass","webpack:///./src/furo/assets/styles/content/_indexes.sass","webpack:///./src/furo/assets/styles/content/_lists.sass","webpack:///./src/furo/assets/styles/content/_math.sass","webpack:///./src/furo/assets/styles/content/_misc.sass","webpack:///./src/furo/assets/styles/content/_rubrics.sass","webpack:///./src/furo/assets/styles/content/_sidebar.sass","webpack:///./src/furo/assets/styles/content/_tables.sass","webpack:///./src/furo/assets/styles/content/_target.sass","webpack:///./src/furo/assets/styles/content/_gui-labels.sass","webpack:///./src/furo/assets/styles/components/_footer.sass","webpack:///./src/furo/assets/styles/components/_sidebar.sass","webpack:///./src/furo/assets/styles/components/_table_of_contents.sass","webpack:///./src/furo/assets/styles/_shame.sass"],"sourcesContent":["/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */\n\n/* Document\n ========================================================================== */\n\n/**\n * 1. Correct the line height in all browsers.\n * 2. Prevent adjustments of font size after orientation changes in iOS.\n */\n\nhtml {\n line-height: 1.15; /* 1 */\n -webkit-text-size-adjust: 100%; /* 2 */\n}\n\n/* Sections\n ========================================================================== */\n\n/**\n * Remove the margin in all browsers.\n */\n\nbody {\n margin: 0;\n}\n\n/**\n * Render the `main` element consistently in IE.\n */\n\nmain {\n display: block;\n}\n\n/**\n * Correct the font size and margin on `h1` elements within `section` and\n * `article` contexts in Chrome, Firefox, and Safari.\n */\n\nh1 {\n font-size: 2em;\n margin: 0.67em 0;\n}\n\n/* Grouping content\n ========================================================================== */\n\n/**\n * 1. Add the correct box sizing in Firefox.\n * 2. 
Show the overflow in Edge and IE.\n */\n\nhr {\n box-sizing: content-box; /* 1 */\n height: 0; /* 1 */\n overflow: visible; /* 2 */\n}\n\n/**\n * 1. Correct the inheritance and scaling of font size in all browsers.\n * 2. Correct the odd `em` font sizing in all browsers.\n */\n\npre {\n font-family: monospace, monospace; /* 1 */\n font-size: 1em; /* 2 */\n}\n\n/* Text-level semantics\n ========================================================================== */\n\n/**\n * Remove the gray background on active links in IE 10.\n */\n\na {\n background-color: transparent;\n}\n\n/**\n * 1. Remove the bottom border in Chrome 57-\n * 2. Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari.\n */\n\nabbr[title] {\n border-bottom: none; /* 1 */\n text-decoration: underline; /* 2 */\n text-decoration: underline dotted; /* 2 */\n}\n\n/**\n * Add the correct font weight in Chrome, Edge, and Safari.\n */\n\nb,\nstrong {\n font-weight: bolder;\n}\n\n/**\n * 1. Correct the inheritance and scaling of font size in all browsers.\n * 2. Correct the odd `em` font sizing in all browsers.\n */\n\ncode,\nkbd,\nsamp {\n font-family: monospace, monospace; /* 1 */\n font-size: 1em; /* 2 */\n}\n\n/**\n * Add the correct font size in all browsers.\n */\n\nsmall {\n font-size: 80%;\n}\n\n/**\n * Prevent `sub` and `sup` elements from affecting the line height in\n * all browsers.\n */\n\nsub,\nsup {\n font-size: 75%;\n line-height: 0;\n position: relative;\n vertical-align: baseline;\n}\n\nsub {\n bottom: -0.25em;\n}\n\nsup {\n top: -0.5em;\n}\n\n/* Embedded content\n ========================================================================== */\n\n/**\n * Remove the border on images inside links in IE 10.\n */\n\nimg {\n border-style: none;\n}\n\n/* Forms\n ========================================================================== */\n\n/**\n * 1. Change the font styles in all browsers.\n * 2. 
Remove the margin in Firefox and Safari.\n */\n\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n font-family: inherit; /* 1 */\n font-size: 100%; /* 1 */\n line-height: 1.15; /* 1 */\n margin: 0; /* 2 */\n}\n\n/**\n * Show the overflow in IE.\n * 1. Show the overflow in Edge.\n */\n\nbutton,\ninput { /* 1 */\n overflow: visible;\n}\n\n/**\n * Remove the inheritance of text transform in Edge, Firefox, and IE.\n * 1. Remove the inheritance of text transform in Firefox.\n */\n\nbutton,\nselect { /* 1 */\n text-transform: none;\n}\n\n/**\n * Correct the inability to style clickable types in iOS and Safari.\n */\n\nbutton,\n[type=\"button\"],\n[type=\"reset\"],\n[type=\"submit\"] {\n -webkit-appearance: button;\n}\n\n/**\n * Remove the inner border and padding in Firefox.\n */\n\nbutton::-moz-focus-inner,\n[type=\"button\"]::-moz-focus-inner,\n[type=\"reset\"]::-moz-focus-inner,\n[type=\"submit\"]::-moz-focus-inner {\n border-style: none;\n padding: 0;\n}\n\n/**\n * Restore the focus styles unset by the previous rule.\n */\n\nbutton:-moz-focusring,\n[type=\"button\"]:-moz-focusring,\n[type=\"reset\"]:-moz-focusring,\n[type=\"submit\"]:-moz-focusring {\n outline: 1px dotted ButtonText;\n}\n\n/**\n * Correct the padding in Firefox.\n */\n\nfieldset {\n padding: 0.35em 0.75em 0.625em;\n}\n\n/**\n * 1. Correct the text wrapping in Edge and IE.\n * 2. Correct the color inheritance from `fieldset` elements in IE.\n * 3. Remove the padding so developers are not caught out when they zero out\n * `fieldset` elements in all browsers.\n */\n\nlegend {\n box-sizing: border-box; /* 1 */\n color: inherit; /* 2 */\n display: table; /* 1 */\n max-width: 100%; /* 1 */\n padding: 0; /* 3 */\n white-space: normal; /* 1 */\n}\n\n/**\n * Add the correct vertical alignment in Chrome, Firefox, and Opera.\n */\n\nprogress {\n vertical-align: baseline;\n}\n\n/**\n * Remove the default vertical scrollbar in IE 10+.\n */\n\ntextarea {\n overflow: auto;\n}\n\n/**\n * 1. 
Add the correct box sizing in IE 10.\n * 2. Remove the padding in IE 10.\n */\n\n[type=\"checkbox\"],\n[type=\"radio\"] {\n box-sizing: border-box; /* 1 */\n padding: 0; /* 2 */\n}\n\n/**\n * Correct the cursor style of increment and decrement buttons in Chrome.\n */\n\n[type=\"number\"]::-webkit-inner-spin-button,\n[type=\"number\"]::-webkit-outer-spin-button {\n height: auto;\n}\n\n/**\n * 1. Correct the odd appearance in Chrome and Safari.\n * 2. Correct the outline style in Safari.\n */\n\n[type=\"search\"] {\n -webkit-appearance: textfield; /* 1 */\n outline-offset: -2px; /* 2 */\n}\n\n/**\n * Remove the inner padding in Chrome and Safari on macOS.\n */\n\n[type=\"search\"]::-webkit-search-decoration {\n -webkit-appearance: none;\n}\n\n/**\n * 1. Correct the inability to style clickable types in iOS and Safari.\n * 2. Change font properties to `inherit` in Safari.\n */\n\n::-webkit-file-upload-button {\n -webkit-appearance: button; /* 1 */\n font: inherit; /* 2 */\n}\n\n/* Interactive\n ========================================================================== */\n\n/*\n * Add the correct display in Edge, IE 10+, and Firefox.\n */\n\ndetails {\n display: block;\n}\n\n/*\n * Add the correct display in all browsers.\n */\n\nsummary {\n display: list-item;\n}\n\n/* Misc\n ========================================================================== */\n\n/**\n * Add the correct display in IE 10+.\n */\n\ntemplate {\n display: none;\n}\n\n/**\n * Add the correct display in IE 10.\n */\n\n[hidden] {\n display: none;\n}\n","// This file contains styles for managing print media.\n\n////////////////////////////////////////////////////////////////////////////////\n// Hide elements not relevant to print media.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n // Hide icon container.\n .content-icon-container\n display: none !important\n\n // Hide showing header links if hovering over when printing.\n .headerlink\n display: 
none !important\n\n // Hide mobile header.\n .mobile-header\n display: none !important\n\n // Hide navigation links.\n .related-pages\n display: none !important\n\n////////////////////////////////////////////////////////////////////////////////\n// Tweaks related to decolorization.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n // Apply a border around code which no longer have a color background.\n .highlight\n border: 0.1pt solid var(--color-foreground-border)\n\n////////////////////////////////////////////////////////////////////////////////\n// Avoid page break in some relevant cases.\n////////////////////////////////////////////////////////////////////////////////\n@media print\n ul, ol, dl, a, table, pre, blockquote, p\n page-break-inside: avoid\n\n h1, h2, h3, h4, h5, h6, img, figure, caption\n page-break-inside: avoid\n page-break-after: avoid\n\n ul, ol, dl\n page-break-before: avoid\n",".visually-hidden\n position: absolute !important\n width: 1px !important\n height: 1px !important\n padding: 0 !important\n margin: -1px !important\n overflow: hidden !important\n clip: rect(0,0,0,0) !important\n white-space: nowrap !important\n border: 0 !important\n color: var(--color-foreground-primary)\n background: var(--color-background-primary)\n\n:-moz-focusring\n outline: auto\n","// This file serves as the \"skeleton\" of the theming logic.\n//\n// This contains the bulk of the logic for handling dark mode, color scheme\n// toggling and the handling of color-scheme-specific hiding of elements.\n\n@use \"../variables\" as *\n\nbody\n @include fonts\n @include spacing\n @include icons\n @include admonitions\n @include default-admonition(#651fff, \"abstract\")\n @include default-topic(#14B8A6, \"pencil\")\n\n @include colors\n\n.only-light\n display: block !important\nhtml body .only-dark\n display: none !important\n\n// Ignore dark-mode hints if print media.\n@media not print\n // Enable dark-mode, if requested.\n 
body[data-theme=\"dark\"]\n @include colors-dark\n\n html & .only-light\n display: none !important\n .only-dark\n display: block !important\n\n // Enable dark mode, unless explicitly told to avoid.\n @media (prefers-color-scheme: dark)\n body:not([data-theme=\"light\"])\n @include colors-dark\n\n html & .only-light\n display: none !important\n .only-dark\n display: block !important\n\n//\n// Theme toggle presentation\n//\nbody[data-theme=\"auto\"]\n .theme-toggle svg.theme-icon-when-auto-light\n display: block\n\n @media (prefers-color-scheme: dark)\n .theme-toggle svg.theme-icon-when-auto-dark\n display: block\n .theme-toggle svg.theme-icon-when-auto-light\n display: none\n\nbody[data-theme=\"dark\"]\n .theme-toggle svg.theme-icon-when-dark\n display: block\n\nbody[data-theme=\"light\"]\n .theme-toggle svg.theme-icon-when-light\n display: block\n","// Fonts used by this theme.\n//\n// There are basically two things here -- using the system font stack and\n// defining sizes for various elements in %ages. 
We could have also used `em`\n// but %age is easier to reason about for me.\n\n@mixin fonts {\n // These are adapted from https://systemfontstack.com/\n --font-stack:\n -apple-system, BlinkMacSystemFont, Segoe UI, Helvetica, Arial, sans-serif,\n Apple Color Emoji, Segoe UI Emoji;\n --font-stack--monospace:\n \"SFMono-Regular\", Menlo, Consolas, Monaco, Liberation Mono, Lucida Console,\n monospace;\n --font-stack--headings: var(--font-stack);\n\n --font-size--normal: 100%;\n --font-size--small: 87.5%;\n --font-size--small--2: 81.25%;\n --font-size--small--3: 75%;\n --font-size--small--4: 62.5%;\n\n // Sidebar\n --sidebar-caption-font-size: var(--font-size--small--2);\n --sidebar-item-font-size: var(--font-size--small);\n --sidebar-search-input-font-size: var(--font-size--small);\n\n // Table of Contents\n --toc-font-size: var(--font-size--small--3);\n --toc-font-size--mobile: var(--font-size--normal);\n --toc-title-font-size: var(--font-size--small--4);\n\n // Admonitions\n //\n // These aren't defined in terms of %ages, since nesting these is permitted.\n --admonition-font-size: 0.8125rem;\n --admonition-title-font-size: 0.8125rem;\n\n // Code\n --code-font-size: var(--font-size--small--2);\n\n // API\n --api-font-size: var(--font-size--small);\n}\n","// Spacing for various elements on the page\n//\n// If the user wants to tweak things in a certain way, they are permitted to.\n// They also have to deal with the consequences though!\n\n@mixin spacing {\n // Header!\n --header-height: calc(\n var(--sidebar-item-line-height) + 4 *\n #{var(--sidebar-item-spacing-vertical)}\n );\n --header-padding: 0.5rem;\n\n // Sidebar\n --sidebar-tree-space-above: 1.5rem;\n --sidebar-caption-space-above: 1rem;\n\n --sidebar-item-line-height: 1rem;\n --sidebar-item-spacing-vertical: 0.5rem;\n --sidebar-item-spacing-horizontal: 1rem;\n --sidebar-item-height: calc(\n var(--sidebar-item-line-height) + 2 *#{var(--sidebar-item-spacing-vertical)}\n );\n\n --sidebar-expander-width: 
var(--sidebar-item-height); // be square\n\n --sidebar-search-space-above: 0.5rem;\n --sidebar-search-input-spacing-vertical: 0.5rem;\n --sidebar-search-input-spacing-horizontal: 0.5rem;\n --sidebar-search-input-height: 1rem;\n --sidebar-search-icon-size: var(--sidebar-search-input-height);\n\n // Table of Contents\n --toc-title-padding: 0.25rem 0;\n --toc-spacing-vertical: 1.5rem;\n --toc-spacing-horizontal: 1.5rem;\n --toc-item-spacing-vertical: 0.4rem;\n --toc-item-spacing-horizontal: 1rem;\n}\n","// Expose theme icons as CSS variables.\n\n$icons: (\n // Adapted from tabler-icons\n // url: https://tablericons.com/\n \"search\":\n url('data:image/svg+xml;charset=utf-8,'),\n // Factored out from mkdocs-material on 24-Aug-2020.\n // url: https://squidfunk.github.io/mkdocs-material/reference/admonitions/\n \"pencil\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"abstract\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"info\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"flame\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"question\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"warning\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"failure\":\n url('data:image/svg+xml;charset=utf-8,'),\n \"spark\":\n url('data:image/svg+xml;charset=utf-8,')\n);\n\n@mixin icons {\n @each $name, $glyph in $icons {\n --icon-#{$name}: #{$glyph};\n }\n}\n","@use \"sass:list\";\n// Admonitions\n\n// Structure of these is:\n// admonition-class: color \"icon-name\";\n//\n// The colors are translated into CSS variables below. 
The icons are\n// used directly in the main declarations to set the `mask-image` in\n// the title.\n\n// prettier-ignore\n$admonitions: (\n // Each of these has an reST directives for it.\n \"caution\": #ff9100 \"spark\",\n \"warning\": #ff9100 \"warning\",\n \"danger\": #ff5252 \"spark\",\n \"attention\": #ff5252 \"warning\",\n \"error\": #ff5252 \"failure\",\n \"hint\": #00c852 \"question\",\n \"tip\": #00c852 \"info\",\n \"important\": #00bfa5 \"flame\",\n \"note\": #00b0ff \"pencil\",\n \"seealso\": #448aff \"info\",\n \"admonition-todo\": #808080 \"pencil\"\n);\n\n@mixin default-admonition($color, $icon-name) {\n --color-admonition-title: #{$color};\n --color-admonition-title-background: #{rgba($color, 0.2)};\n\n --icon-admonition-default: var(--icon-#{$icon-name});\n}\n\n@mixin default-topic($color, $icon-name) {\n --color-topic-title: #{$color};\n --color-topic-title-background: #{rgba($color, 0.2)};\n\n --icon-topic-default: var(--icon-#{$icon-name});\n}\n\n@mixin admonitions {\n @each $name, $values in $admonitions {\n --color-admonition-title--#{$name}: #{list.nth($values, 1)};\n --color-admonition-title-background--#{$name}: #{rgba(\n list.nth($values, 1),\n 0.2\n )};\n }\n}\n","// Colors used throughout this theme.\n//\n// The aim is to give the user more control. 
Thus, instead of hard-coding colors\n// in various parts of the stylesheet, the approach taken is to define all\n// colors as CSS variables and reusing them in all the places.\n//\n// `colors-dark` depends on `colors` being included at a lower specificity.\n\n@mixin colors {\n --color-problematic: #b30000;\n\n // Base Colors\n --color-foreground-primary: black; // for main text and headings\n --color-foreground-secondary: #5a5c63; // for secondary text\n --color-foreground-muted: #6b6f76; // for muted text\n --color-foreground-border: #878787; // for content borders\n\n --color-background-primary: white; // for content\n --color-background-secondary: #f8f9fb; // for navigation + ToC\n --color-background-hover: #efeff4ff; // for navigation-item hover\n --color-background-hover--transparent: #efeff400;\n --color-background-border: #eeebee; // for UI borders\n --color-background-item: #ccc; // for \"background\" items (eg: copybutton)\n\n // Announcements\n --color-announcement-background: #000000dd;\n --color-announcement-text: #eeebee;\n\n // Brand colors\n --color-brand-primary: #0a4bff;\n --color-brand-content: #2757dd;\n --color-brand-visited: #872ee0;\n\n // API documentation\n --color-api-background: var(--color-background-hover--transparent);\n --color-api-background-hover: var(--color-background-hover);\n --color-api-overall: var(--color-foreground-secondary);\n --color-api-name: var(--color-problematic);\n --color-api-pre-name: var(--color-problematic);\n --color-api-paren: var(--color-foreground-secondary);\n --color-api-keyword: var(--color-foreground-primary);\n\n --color-api-added: #21632c;\n --color-api-added-border: #38a84d;\n --color-api-changed: #046172;\n --color-api-changed-border: #06a1bc;\n --color-api-deprecated: #605706;\n --color-api-deprecated-border: #f0d90f;\n --color-api-removed: #b30000;\n --color-api-removed-border: #ff5c5c;\n\n --color-highlight-on-target: #ffffcc;\n\n // Inline code background\n --color-inline-code-background: 
var(--color-background-secondary);\n\n // Highlighted text (search)\n --color-highlighted-background: #ddeeff;\n --color-highlighted-text: var(--color-foreground-primary);\n\n // GUI Labels\n --color-guilabel-background: #ddeeff80;\n --color-guilabel-border: #bedaf580;\n --color-guilabel-text: var(--color-foreground-primary);\n\n // Admonitions!\n --color-admonition-background: transparent;\n\n //////////////////////////////////////////////////////////////////////////////\n // Everything below this should be one of:\n // - var(...)\n // - *-gradient(...)\n // - special literal values (eg: transparent, none)\n //////////////////////////////////////////////////////////////////////////////\n\n // Tables\n --color-table-header-background: var(--color-background-secondary);\n --color-table-border: var(--color-background-border);\n\n // Cards\n --color-card-border: var(--color-background-secondary);\n --color-card-background: transparent;\n --color-card-marginals-background: var(--color-background-secondary);\n\n // Header\n --color-header-background: var(--color-background-primary);\n --color-header-border: var(--color-background-border);\n --color-header-text: var(--color-foreground-primary);\n\n // Sidebar (left)\n --color-sidebar-background: var(--color-background-secondary);\n --color-sidebar-background-border: var(--color-background-border);\n\n --color-sidebar-brand-text: var(--color-foreground-primary);\n --color-sidebar-caption-text: var(--color-foreground-muted);\n --color-sidebar-link-text: var(--color-foreground-secondary);\n --color-sidebar-link-text--top-level: var(--color-brand-primary);\n\n --color-sidebar-item-background: var(--color-sidebar-background);\n --color-sidebar-item-background--current: var(\n --color-sidebar-item-background\n );\n --color-sidebar-item-background--hover: linear-gradient(\n 90deg,\n var(--color-background-hover--transparent) 0%,\n var(--color-background-hover) var(--sidebar-item-spacing-horizontal),\n 
var(--color-background-hover) 100%\n );\n\n --color-sidebar-item-expander-background: transparent;\n --color-sidebar-item-expander-background--hover: var(\n --color-background-hover\n );\n\n --color-sidebar-search-text: var(--color-foreground-primary);\n --color-sidebar-search-background: var(--color-background-secondary);\n --color-sidebar-search-background--focus: var(--color-background-primary);\n --color-sidebar-search-border: var(--color-background-border);\n --color-sidebar-search-icon: var(--color-foreground-muted);\n\n // Table of Contents (right)\n --color-toc-background: var(--color-background-primary);\n --color-toc-title-text: var(--color-foreground-muted);\n --color-toc-item-text: var(--color-foreground-secondary);\n --color-toc-item-text--hover: var(--color-foreground-primary);\n --color-toc-item-text--active: var(--color-brand-primary);\n\n // Actual page contents\n --color-content-foreground: var(--color-foreground-primary);\n --color-content-background: transparent;\n\n // Links\n --color-link: var(--color-brand-content);\n --color-link-underline: var(--color-background-border);\n --color-link--hover: var(--color-brand-content);\n --color-link-underline--hover: var(--color-foreground-border);\n\n --color-link--visited: var(--color-brand-visited);\n --color-link-underline--visited: var(--color-background-border);\n --color-link--visited--hover: var(--color-brand-visited);\n --color-link-underline--visited--hover: var(--color-foreground-border);\n}\n\n@mixin colors-dark {\n --color-problematic: #ee5151;\n\n // Base Colors\n --color-foreground-primary: #cfd0d0; // for main text and headings\n --color-foreground-secondary: #9ca0a5; // for secondary text\n --color-foreground-muted: #81868d; // for muted text\n --color-foreground-border: #666666; // for content borders\n\n --color-background-primary: #131416; // for content\n --color-background-secondary: #1a1c1e; // for navigation + ToC\n --color-background-hover: #1e2124ff; // for navigation-item 
hover\n --color-background-hover--transparent: #1e212400;\n --color-background-border: #303335; // for UI borders\n --color-background-item: #444; // for \"background\" items (eg: copybutton)\n\n // Announcements\n --color-announcement-background: #000000dd;\n --color-announcement-text: #eeebee;\n\n // Brand colors\n --color-brand-primary: #3d94ff;\n --color-brand-content: #5ca5ff;\n --color-brand-visited: #b27aeb;\n\n // Highlighted text (search)\n --color-highlighted-background: #083563;\n\n // GUI Labels\n --color-guilabel-background: #08356380;\n --color-guilabel-border: #13395f80;\n\n // API documentation\n --color-api-keyword: var(--color-foreground-secondary);\n --color-highlight-on-target: #333300;\n\n --color-api-added: #3db854;\n --color-api-added-border: #267334;\n --color-api-changed: #09b0ce;\n --color-api-changed-border: #056d80;\n --color-api-deprecated: #b1a10b;\n --color-api-deprecated-border: #6e6407;\n --color-api-removed: #ff7575;\n --color-api-removed-border: #b03b3b;\n\n // Admonitions\n --color-admonition-background: #18181a;\n\n // Cards\n --color-card-border: var(--color-background-secondary);\n --color-card-background: #18181a;\n --color-card-marginals-background: var(--color-background-hover);\n}\n","// This file contains the styling for making the content throughout the page,\n// including fonts, paragraphs, headings and spacing among these elements.\n\nbody\n font-family: var(--font-stack)\npre,\ncode,\nkbd,\nsamp\n font-family: var(--font-stack--monospace)\n\n// Make fonts look slightly nicer.\nbody\n -webkit-font-smoothing: antialiased\n -moz-osx-font-smoothing: grayscale\n\n// Line height from Bootstrap 4.1\narticle\n line-height: 1.5\n\n//\n// Headings\n//\nh1,\nh2,\nh3,\nh4,\nh5,\nh6\n line-height: 1.25\n font-family: var(--font-stack--headings)\n font-weight: bold\n\n border-radius: 0.5rem\n margin-top: 0.5rem\n margin-bottom: 0.5rem\n margin-left: -0.5rem\n margin-right: -0.5rem\n padding-left: 0.5rem\n padding-right: 0.5rem\n\n 
+ p\n margin-top: 0\n\nh1\n font-size: 2.5em\n margin-top: 1.75rem\n margin-bottom: 1rem\nh2\n font-size: 2em\n margin-top: 1.75rem\nh3\n font-size: 1.5em\nh4\n font-size: 1.25em\nh5\n font-size: 1.125em\nh6\n font-size: 1em\n\nsmall\n opacity: 75%\n font-size: 80%\n\n// Paragraph\np\n margin-top: 0.5rem\n margin-bottom: 0.75rem\n\n// Horizontal rules\nhr.docutils\n height: 1px\n padding: 0\n margin: 2rem 0\n background-color: var(--color-background-border)\n border: 0\n\n.centered\n text-align: center\n\n// Links\na\n text-decoration: underline\n\n color: var(--color-link)\n text-decoration-color: var(--color-link-underline)\n\n &:visited\n color: var(--color-link--visited)\n text-decoration-color: var(--color-link-underline--visited)\n &:hover\n color: var(--color-link--visited--hover)\n text-decoration-color: var(--color-link-underline--visited--hover)\n\n &:hover\n color: var(--color-link--hover)\n text-decoration-color: var(--color-link-underline--hover)\n &.muted-link\n color: inherit\n &:hover\n color: var(--color-link--hover)\n text-decoration-color: var(--color-link-underline--hover)\n &:visited\n color: var(--color-link--visited--hover)\n text-decoration-color: var(--color-link-underline--visited--hover)\n","// This file contains the styles for the overall layouting of the documentation\n// skeleton, including the responsive changes as well as sidebar toggles.\n//\n// This is implemented as a mobile-last design, which isn't ideal, but it is\n// reasonably good-enough and I got pretty tired by the time I'd finished this\n// to move the rules around to fix this. Shouldn't take more than 3-4 hours,\n// if you know what you're doing tho.\n\n// HACK: Not all browsers account for the scrollbar width in media queries.\n// This results in horizontal scrollbars in the breakpoint where we go\n// from displaying everything to hiding the ToC. 
We accomodate for this by\n// adding a bit of padding to the TOC drawer, disabling the horizontal\n// scrollbar and allowing the scrollbars to cover the padding.\n// https://www.456bereastreet.com/archive/201301/media_query_width_and_vertical_scrollbars/\n\n// HACK: Always having the scrollbar visible, prevents certain browsers from\n// causing the content to stutter horizontally between taller-than-viewport and\n// not-taller-than-viewport pages.\n@use \"variables\" as *\n\nhtml\n overflow-x: hidden\n overflow-y: scroll\n scroll-behavior: smooth\n\n.sidebar-scroll, .toc-scroll, article[role=main] *\n scrollbar-width: thin\n scrollbar-color: var(--color-foreground-border) transparent\n\n//\n// Overalls\n//\nhtml,\nbody\n height: 100%\n color: var(--color-foreground-primary)\n background: var(--color-background-primary)\n\n.skip-to-content\n position: fixed\n padding: 1rem\n border-radius: 1rem\n left: 0.25rem\n top: 0.25rem\n z-index: 40\n background: var(--color-background-primary)\n color: var(--color-foreground-primary)\n\n transform: translateY(-200%)\n transition: transform 300ms ease-in-out\n\n &:focus-within\n transform: translateY(0%)\n\narticle\n color: var(--color-content-foreground)\n background: var(--color-content-background)\n overflow-wrap: break-word\n\n.page\n display: flex\n // fill the viewport for pages with little content.\n min-height: 100%\n\n.mobile-header\n width: 100%\n height: var(--header-height)\n background-color: var(--color-header-background)\n color: var(--color-header-text)\n border-bottom: 1px solid var(--color-header-border)\n\n // Looks like sub-script/super-script have this, and we need this to\n // be \"on top\" of those.\n z-index: 10\n\n // We don't show the header on large screens.\n display: none\n\n // Add shadow when scrolled\n &.scrolled\n border-bottom: none\n box-shadow: 0 0 0.2rem rgba(0, 0, 0, 0.1), 0 0.2rem 0.4rem rgba(0, 0, 0, 0.2)\n\n .header-center\n a\n color: var(--color-header-text)\n text-decoration: 
none\n\n.main\n display: flex\n flex: 1\n\n// Sidebar (left) also covers the entire left portion of screen.\n.sidebar-drawer\n box-sizing: border-box\n\n border-right: 1px solid var(--color-sidebar-background-border)\n background: var(--color-sidebar-background)\n\n display: flex\n justify-content: flex-end\n // These next two lines took me two days to figure out.\n width: calc((100% - #{$full-width}) / 2 + #{$sidebar-width})\n min-width: $sidebar-width\n\n// Scroll-along sidebars\n.sidebar-container,\n.toc-drawer\n box-sizing: border-box\n width: $sidebar-width\n\n.toc-drawer\n background: var(--color-toc-background)\n // See HACK described on top of this document\n padding-right: 1rem\n\n.sidebar-sticky,\n.toc-sticky\n position: sticky\n top: 0\n height: min(100%, 100vh)\n height: 100vh\n\n display: flex\n flex-direction: column\n\n.sidebar-scroll,\n.toc-scroll\n flex-grow: 1\n flex-shrink: 1\n\n overflow: auto\n scroll-behavior: smooth\n\n// Central items.\n.content\n padding: 0 $content-padding\n width: $content-width\n\n display: flex\n flex-direction: column\n justify-content: space-between\n\n.icon\n display: inline-block\n height: 1rem\n width: 1rem\n svg\n width: 100%\n height: 100%\n\n//\n// Accommodate announcement banner\n//\n.announcement\n background-color: var(--color-announcement-background)\n color: var(--color-announcement-text)\n\n height: var(--header-height)\n display: flex\n align-items: center\n overflow-x: auto\n & + .page\n min-height: calc(100% - var(--header-height))\n\n.announcement-content\n box-sizing: border-box\n padding: 0.5rem\n min-width: 100%\n white-space: nowrap\n text-align: center\n\n a\n color: var(--color-announcement-text)\n text-decoration-color: var(--color-announcement-text)\n\n &:hover\n color: var(--color-announcement-text)\n text-decoration-color: var(--color-link--hover)\n\n////////////////////////////////////////////////////////////////////////////////\n// Toggles for 
theme\n////////////////////////////////////////////////////////////////////////////////\n.no-js .theme-toggle-container // don't show theme toggle if there's no JS\n display: none\n\n.theme-toggle-container\n display: flex\n\n.theme-toggle\n display: flex\n cursor: pointer\n border: none\n padding: 0\n background: transparent\n\n.theme-toggle svg\n height: 1.25rem\n width: 1.25rem\n color: var(--color-foreground-primary)\n display: none\n\n.theme-toggle-header\n display: flex\n align-items: center\n justify-content: center\n\n////////////////////////////////////////////////////////////////////////////////\n// Toggles for elements\n////////////////////////////////////////////////////////////////////////////////\n.toc-overlay-icon, .nav-overlay-icon\n display: none\n cursor: pointer\n\n .icon\n color: var(--color-foreground-secondary)\n height: 1.5rem\n width: 1.5rem\n\n.toc-header-icon, .nav-overlay-icon\n // for when we set display: flex\n justify-content: center\n align-items: center\n\n.toc-content-icon\n height: 1.5rem\n width: 1.5rem\n\n.content-icon-container\n float: right\n display: flex\n margin-top: 1.5rem\n margin-left: 1rem\n margin-bottom: 1rem\n gap: 0.5rem\n\n .edit-this-page, .view-this-page\n svg\n color: inherit\n height: 1.25rem\n width: 1.25rem\n\n.sidebar-toggle\n position: absolute\n display: none\n// \n.sidebar-toggle[name=\"__toc\"]\n left: 20px\n.sidebar-toggle:checked\n left: 40px\n// \n\n.overlay\n position: fixed\n top: 0\n width: 0\n height: 0\n\n transition: width 0ms, height 0ms, opacity 250ms ease-out\n\n opacity: 0\n background-color: rgba(0, 0, 0, 0.54)\n.sidebar-overlay\n z-index: 20\n.toc-overlay\n z-index: 40\n\n// Keep things on top and smooth.\n.sidebar-drawer\n z-index: 30\n transition: left 250ms ease-in-out\n.toc-drawer\n z-index: 50\n transition: right 250ms ease-in-out\n\n// Show the Sidebar\n#__navigation:checked\n & ~ .sidebar-overlay\n width: 100%\n height: 100%\n opacity: 1\n & ~ .page\n .sidebar-drawer\n top: 0\n 
left: 0\n // Show the toc sidebar\n#__toc:checked\n & ~ .toc-overlay\n width: 100%\n height: 100%\n opacity: 1\n & ~ .page\n .toc-drawer\n top: 0\n right: 0\n\n////////////////////////////////////////////////////////////////////////////////\n// Back to top\n////////////////////////////////////////////////////////////////////////////////\n.back-to-top\n text-decoration: none\n\n display: none\n position: fixed\n left: 0\n top: 1rem\n padding: 0.5rem\n padding-right: 0.75rem\n border-radius: 1rem\n font-size: 0.8125rem\n\n background: var(--color-background-primary)\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), #6b728080 0px 0px 1px 0px\n\n z-index: 10\n\n margin-left: 50%\n transform: translateX(-50%)\n svg\n height: 1rem\n width: 1rem\n fill: currentColor\n display: inline-block\n\n span\n margin-left: 0.25rem\n\n .show-back-to-top &\n display: flex\n align-items: center\n\n////////////////////////////////////////////////////////////////////////////////\n// Responsive layouting\n////////////////////////////////////////////////////////////////////////////////\n// Make things a bit bigger on bigger screens.\n@media (min-width: $full-width + $sidebar-width)\n html\n font-size: 110%\n\n@media (max-width: $full-width)\n // Collapse \"toc\" into the icon.\n .toc-content-icon\n display: flex\n .toc-drawer\n position: fixed\n height: 100vh\n top: 0\n right: -$sidebar-width\n border-left: 1px solid var(--color-background-muted)\n .toc-tree\n border-left: none\n font-size: var(--toc-font-size--mobile)\n\n // Accomodate for a changed content width.\n .sidebar-drawer\n width: calc((100% - #{$full-width - $sidebar-width}) / 2 + #{$sidebar-width})\n\n@media (max-width: $content-padded-width + $sidebar-width)\n // Center the page\n .content\n margin-left: auto\n margin-right: auto\n padding: 0 $content-padding--small\n\n@media (max-width: $content-padded-width--small + $sidebar-width)\n // Collapse \"navigation\".\n .nav-overlay-icon\n display: flex\n .sidebar-drawer\n 
position: fixed\n height: 100vh\n width: $sidebar-width\n\n top: 0\n left: -$sidebar-width\n\n // Swap which icon is visible.\n .toc-header-icon, .theme-toggle-header\n display: flex\n .toc-content-icon, .theme-toggle-content\n display: none\n\n // Show the header.\n .mobile-header\n position: sticky\n top: 0\n display: flex\n justify-content: space-between\n align-items: center\n\n .header-left,\n .header-right\n display: flex\n height: var(--header-height)\n padding: 0 var(--header-padding)\n label\n height: 100%\n width: 100%\n user-select: none\n\n .nav-overlay-icon .icon,\n .theme-toggle svg\n height: 1.5rem\n width: 1.5rem\n\n // Add a scroll margin for the content\n :target\n scroll-margin-top: calc(var(--header-height) + 2.5rem)\n\n // Show back-to-top below the header\n .back-to-top\n top: calc(var(--header-height) + 0.5rem)\n\n // Accommodate for the header.\n .page\n flex-direction: column\n justify-content: center\n\n@media (max-width: $content-width + 2* $content-padding--small)\n // Content should respect window limits.\n .content\n width: 100%\n overflow-x: auto\n\n@media (max-width: $content-width)\n article[role=main] aside.sidebar\n float: none\n width: 100%\n margin: 1rem 0\n","@use \"sass:list\"\n@use \"../variables\" as *\n\n// The design here is strongly inspired by mkdocs-material.\n.admonition, .topic\n margin: 1rem auto\n padding: 0 0.5rem 0.5rem 0.5rem\n\n background: var(--color-admonition-background)\n\n border-radius: 0.2rem\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n font-size: var(--admonition-font-size)\n\n overflow: hidden\n page-break-inside: avoid\n\n // First element should have no margin, since the title has it.\n > :nth-child(2)\n margin-top: 0\n\n // Last item should have no margin, since we'll control that w/ padding\n > :last-child\n margin-bottom: 0\n\n.admonition p.admonition-title,\np.topic-title\n position: relative\n margin: 0 -0.5rem 0.5rem\n padding-left: 2rem\n 
padding-right: .5rem\n padding-top: .4rem\n padding-bottom: .4rem\n\n font-weight: 500\n font-size: var(--admonition-title-font-size)\n line-height: 1.3\n\n // Our fancy icon\n &::before\n content: \"\"\n position: absolute\n left: 0.5rem\n width: 1rem\n height: 1rem\n\n// Default styles\np.admonition-title\n background-color: var(--color-admonition-title-background)\n &::before\n background-color: var(--color-admonition-title)\n mask-image: var(--icon-admonition-default)\n mask-repeat: no-repeat\n\np.topic-title\n background-color: var(--color-topic-title-background)\n &::before\n background-color: var(--color-topic-title)\n mask-image: var(--icon-topic-default)\n mask-repeat: no-repeat\n\n//\n// Variants\n//\n.admonition\n border-left: 0.2rem solid var(--color-admonition-title)\n\n @each $type, $value in $admonitions\n &.#{$type}\n border-left-color: var(--color-admonition-title--#{$type})\n > .admonition-title\n background-color: var(--color-admonition-title-background--#{$type})\n &::before\n background-color: var(--color-admonition-title--#{$type})\n mask-image: var(--icon-#{list.nth($value, 2)})\n\n.admonition-todo > .admonition-title\n text-transform: uppercase\n","// This file stylizes the API documentation (stuff generated by autodoc). 
It's\n// deeply nested due to how autodoc structures the HTML without enough classes\n// to select the relevant items.\n\n// API docs!\ndl[class]:not(.option-list):not(.field-list):not(.footnote):not(.glossary):not(.simple)\n // Tweak the spacing of all the things!\n dd\n margin-left: 2rem\n > :first-child\n margin-top: 0.125rem\n > :last-child\n margin-bottom: 0.75rem\n\n // This is used for the arguments\n .field-list\n margin-bottom: 0.75rem\n\n // \"Headings\" (like \"Parameters\" and \"Return\")\n > dt\n text-transform: uppercase\n font-size: var(--font-size--small)\n\n dd:empty\n margin-bottom: 0.5rem\n dd > ul\n margin-left: -1.2rem\n > li\n > p:nth-child(2)\n margin-top: 0\n // When the last-empty-paragraph follows a paragraph, it doesn't need\n // to augument the existing spacing.\n > p + p:last-child:empty\n margin-top: 0\n margin-bottom: 0\n\n // Colorize the elements\n > dt\n color: var(--color-api-overall)\n\n.sig:not(.sig-inline)\n font-weight: bold\n\n font-size: var(--api-font-size)\n font-family: var(--font-stack--monospace)\n\n margin-left: -0.25rem\n margin-right: -0.25rem\n padding-top: 0.25rem\n padding-bottom: 0.25rem\n padding-right: 0.5rem\n\n // These are intentionally em, to properly match the font size.\n padding-left: 3em\n text-indent: -2.5em\n\n border-radius: 0.25rem\n\n background: var(--color-api-background)\n transition: background 100ms ease-out\n\n &:hover\n background: var(--color-api-background-hover)\n\n // adjust the size of the [source] link on the right.\n a.reference\n .viewcode-link\n font-weight: normal\n width: 4.25rem\n\nem.property\n font-style: normal\n &:first-child\n color: var(--color-api-keyword)\n.sig-name\n color: var(--color-api-name)\n.sig-prename\n font-weight: normal\n color: var(--color-api-pre-name)\n.sig-paren\n color: var(--color-api-paren)\n.sig-param\n font-style: normal\n\ndiv.versionadded,\ndiv.versionchanged,\ndiv.deprecated,\ndiv.versionremoved\n border-left: 0.1875rem solid\n border-radius: 
0.125rem\n\n padding-left: 0.75rem\n\n p\n margin-top: 0.125rem\n margin-bottom: 0.125rem\n\ndiv.versionadded\n border-color: var(--color-api-added-border)\n .versionmodified\n color: var(--color-api-added)\n\ndiv.versionchanged\n border-color: var(--color-api-changed-border)\n .versionmodified\n color: var(--color-api-changed)\n\ndiv.deprecated\n border-color: var(--color-api-deprecated-border)\n .versionmodified\n color: var(--color-api-deprecated)\n\ndiv.versionremoved\n border-color: var(--color-api-removed-border)\n .versionmodified\n color: var(--color-api-removed)\n\n// Align the [docs] and [source] to the right.\n.viewcode-link, .viewcode-back\n float: right\n text-align: right\n",".line-block\n margin-top: 0.5rem\n margin-bottom: 0.75rem\n .line-block\n margin-top: 0rem\n margin-bottom: 0rem\n padding-left: 1rem\n","// Captions\narticle p.caption,\ntable > caption,\n.code-block-caption\n font-size: var(--font-size--small)\n text-align: center\n\n// Caption above a TOCTree\n.toctree-wrapper.compound\n .caption, :not(.caption) > .caption-text\n font-size: var(--font-size--small)\n text-transform: uppercase\n\n text-align: initial\n margin-bottom: 0\n\n > ul\n margin-top: 0\n margin-bottom: 0\n","// Inline code\ncode.literal, .sig-inline\n background: var(--color-inline-code-background)\n border-radius: 0.2em\n // Make the font smaller, and use padding to recover.\n font-size: var(--font-size--small--2)\n padding: 0.1em 0.2em\n\n pre.literal-block &\n font-size: inherit\n padding: 0\n\n p &\n border: 1px solid var(--color-background-border)\n\n.sig-inline\n font-family: var(--font-stack--monospace)\n\n// Code and Literal Blocks\n$code-spacing-vertical: 0.625rem\n$code-spacing-horizontal: 0.875rem\n\n// Wraps every literal block + line numbers.\ndiv[class*=\" highlight-\"],\ndiv[class^=\"highlight-\"]\n margin: 1em 0\n display: flex\n\n .table-wrapper\n margin: 0\n padding: 0\n\npre\n margin: 0\n padding: 0\n overflow: auto\n\n // Needed to have more 
specificity than pygments' \"pre\" selector. :(\n article[role=\"main\"] .highlight &\n line-height: 1.5\n\n &.literal-block,\n .highlight &\n font-size: var(--code-font-size)\n padding: $code-spacing-vertical $code-spacing-horizontal\n\n // Make it look like all the other blocks.\n &.literal-block\n margin-top: 1rem\n margin-bottom: 1rem\n\n border-radius: 0.2rem\n background-color: var(--color-code-background)\n color: var(--color-code-foreground)\n\n// All code is always contained in this.\n.highlight\n width: 100%\n border-radius: 0.2rem\n\n // Make line numbers and prompts un-selectable.\n .gp, span.linenos\n user-select: none\n pointer-events: none\n\n // Expand the line-highlighting.\n .hll\n display: block\n margin-left: -$code-spacing-horizontal\n margin-right: -$code-spacing-horizontal\n padding-left: $code-spacing-horizontal\n padding-right: $code-spacing-horizontal\n\n/* Make code block captions be nicely integrated */\n.code-block-caption\n display: flex\n padding: $code-spacing-vertical $code-spacing-horizontal\n\n border-radius: 0.25rem\n border-bottom-left-radius: 0\n border-bottom-right-radius: 0\n font-weight: 300\n border-bottom: 1px solid\n\n background-color: var(--color-code-background)\n color: var(--color-code-foreground)\n border-color: var(--color-background-border)\n\n + div[class]\n margin-top: 0\n > .highlight\n border-top-left-radius: 0\n border-top-right-radius: 0\n\n// When `html_codeblock_linenos_style` is table.\n.highlighttable\n width: 100%\n display: block\n tbody\n display: block\n\n tr\n display: flex\n\n // Line numbers\n td.linenos\n background-color: var(--color-code-background)\n color: var(--color-code-foreground)\n padding: $code-spacing-vertical $code-spacing-horizontal\n padding-right: 0\n border-top-left-radius: 0.2rem\n border-bottom-left-radius: 0.2rem\n\n .linenodiv\n padding-right: $code-spacing-horizontal\n font-size: var(--code-font-size)\n box-shadow: -0.0625rem 0 var(--color-foreground-border) inset\n\n // 
Actual code\n td.code\n padding: 0\n display: block\n flex: 1\n overflow: hidden\n\n .highlight\n border-top-left-radius: 0\n border-bottom-left-radius: 0\n\n// When `html_codeblock_linenos_style` is inline.\n.highlight\n span.linenos\n display: inline-block\n padding-left: 0\n padding-right: $code-spacing-horizontal\n margin-right: $code-spacing-horizontal\n box-shadow: -0.0625rem 0 var(--color-foreground-border) inset\n","// Inline Footnote Reference\n.footnote-reference\n font-size: var(--font-size--small--4)\n vertical-align: super\n\n// Definition list, listing the content of each note.\n// docutils <= 0.17\ndl.footnote.brackets\n font-size: var(--font-size--small)\n color: var(--color-foreground-secondary)\n\n display: grid\n grid-template-columns: max-content auto\n dt\n margin: 0\n > .fn-backref\n margin-left: 0.25rem\n\n &:after\n content: \":\"\n\n .brackets\n &:before\n content: \"[\"\n &:after\n content: \"]\"\n\n dd\n margin: 0\n padding: 0 1rem\n\n// docutils >= 0.18\naside.footnote\n font-size: var(--font-size--small)\n color: var(--color-foreground-secondary)\n\naside.footnote > span,\ndiv.citation > span\n float: left\n font-weight: 500\n padding-right: 0.25rem\n\naside.footnote > *:not(span),\ndiv.citation > p\n margin-left: 2rem\n","//\n// Figures\n//\nimg\n box-sizing: border-box\n max-width: 100%\n height: auto\n\narticle\n figure, .figure\n border-radius: 0.2rem\n\n margin: 0\n :last-child\n margin-bottom: 0\n\n .align-left\n float: left\n clear: left\n margin: 0 1rem 1rem\n\n .align-right\n float: right\n clear: right\n margin: 0 1rem 1rem\n\n .align-default,\n .align-center\n display: block\n text-align: center\n margin-left: auto\n margin-right: auto\n\n // WELL, table needs to be stylised like a table.\n table.align-default\n display: table\n text-align: initial\n",".genindex-jumpbox, .domainindex-jumpbox\n border-top: 1px solid var(--color-background-border)\n border-bottom: 1px solid var(--color-background-border)\n padding: 
0.25rem\n\n.genindex-section, .domainindex-section\n h2\n margin-top: 0.75rem\n margin-bottom: 0.5rem\n ul\n margin-top: 0\n margin-bottom: 0\n","ul,\nol\n padding-left: 1.2rem\n\n // Space lists out like paragraphs\n margin-top: 1rem\n margin-bottom: 1rem\n // reduce margins within li.\n li\n > p:first-child\n margin-top: 0.25rem\n margin-bottom: 0.25rem\n\n > p:last-child\n margin-top: 0.25rem\n\n > ul,\n > ol\n margin-top: 0.5rem\n margin-bottom: 0.5rem\n\nol\n &.arabic\n list-style: decimal\n &.loweralpha\n list-style: lower-alpha\n &.upperalpha\n list-style: upper-alpha\n &.lowerroman\n list-style: lower-roman\n &.upperroman\n list-style: upper-roman\n\n// Don't space lists out when they're \"simple\" or in a `.. toctree::`\n.simple,\n.toctree-wrapper\n li\n > ul,\n > ol\n margin-top: 0\n margin-bottom: 0\n\n// Definition Lists\n.field-list,\n.option-list,\ndl:not([class]),\ndl.simple,\ndl.footnote,\ndl.glossary\n dt\n font-weight: 500\n margin-top: 0.25rem\n + dt\n margin-top: 0\n\n .classifier::before\n content: \":\"\n margin-left: 0.2rem\n margin-right: 0.2rem\n\n dd\n > p:first-child,\n ul\n margin-top: 0.125rem\n\n ul\n margin-bottom: 0.125rem\n",".math-wrapper\n width: 100%\n overflow-x: auto\n\ndiv.math\n position: relative\n text-align: center\n\n .headerlink,\n &:focus .headerlink\n display: none\n\n &:hover .headerlink\n display: inline-block\n\n span.eqno\n position: absolute\n right: 0.5rem\n top: 50%\n transform: translate(0, -50%)\n z-index: 1\n","// Abbreviations\nabbr[title]\n cursor: help\n\n// \"Problematic\" content, as identified by Sphinx\n.problematic\n color: var(--color-problematic)\n\n// Keyboard / Mouse \"instructions\"\nkbd:not(.compound)\n margin: 0 0.2rem\n padding: 0 0.2rem\n border-radius: 0.2rem\n border: 1px solid var(--color-foreground-border)\n color: var(--color-foreground-primary)\n vertical-align: text-bottom\n\n font-size: var(--font-size--small--3)\n display: inline-block\n\n box-shadow: 0 0.0625rem 0 rgba(0, 0, 0, 
0.2), inset 0 0 0 0.125rem var(--color-background-primary)\n\n background-color: var(--color-background-secondary)\n\n// Blockquote\nblockquote\n border-left: 4px solid var(--color-background-border)\n background: var(--color-background-secondary)\n\n margin-left: 0\n margin-right: 0\n padding: 0.5rem 1rem\n\n .attribution\n font-weight: 600\n text-align: right\n\n &.pull-quote,\n &.highlights\n font-size: 1.25em\n\n &.epigraph,\n &.pull-quote\n border-left-width: 0\n border-radius: 0.5rem\n\n &.highlights\n border-left-width: 0\n background: transparent\n\n// Center align embedded-in-text images\np .reference img\n vertical-align: middle\n","p.rubric\n line-height: 1.25\n font-weight: bold\n font-size: 1.125em\n\n // For Numpy-style documentation that's got rubrics within it.\n // https://github.com/pradyunsg/furo/discussions/505\n dd &\n line-height: inherit\n font-weight: inherit\n\n font-size: var(--font-size--small)\n text-transform: uppercase\n","article .sidebar\n float: right\n clear: right\n width: 30%\n\n margin-left: 1rem\n margin-right: 0\n\n border-radius: 0.2rem\n background-color: var(--color-background-secondary)\n border: var(--color-background-border) 1px solid\n\n > *\n padding-left: 1rem\n padding-right: 1rem\n\n > ul, > ol // lists need additional padding, because bullets.\n padding-left: 2.2rem\n\n .sidebar-title\n margin: 0\n padding: 0.5rem 1rem\n border-bottom: var(--color-background-border) 1px solid\n\n font-weight: 500\n\n// TODO: subtitle\n// TODO: dedicated variables?\n","[role=main] .table-wrapper.container\n width: 100%\n overflow-x: auto\n margin-top: 1rem\n margin-bottom: 0.5rem\n padding: 0.2rem 0.2rem 0.75rem\n\ntable.docutils\n border-radius: 0.2rem\n border-spacing: 0\n border-collapse: collapse\n\n box-shadow: 0 0.2rem 0.5rem rgba(0, 0, 0, 0.05), 0 0 0.0625rem rgba(0, 0, 0, 0.1)\n\n th\n background: var(--color-table-header-background)\n\n td,\n th\n // Space things out properly\n padding: 0 0.25rem\n\n // Get the borders 
looking just-right.\n border-left: 1px solid var(--color-table-border)\n border-right: 1px solid var(--color-table-border)\n border-bottom: 1px solid var(--color-table-border)\n\n p\n margin: 0.25rem\n\n &:first-child\n border-left: none\n &:last-child\n border-right: none\n\n // MyST-parser tables set these classes for control of column alignment\n &.text-left\n text-align: left\n &.text-right\n text-align: right\n &.text-center\n text-align: center\n","@use \"../variables\" as *\n\n:target\n scroll-margin-top: 2.5rem\n\n@media (max-width: $full-width - $sidebar-width)\n :target\n scroll-margin-top: calc(2.5rem + var(--header-height))\n\n // When a heading is selected\n section > span:target\n scroll-margin-top: calc(2.8rem + var(--header-height))\n\n// Permalinks\n.headerlink\n font-weight: 100\n user-select: none\n\nh1,\nh2,\nh3,\nh4,\nh5,\nh6,\ndl dt,\np.caption,\nfigcaption p,\ntable > caption,\n.code-block-caption\n > .headerlink\n margin-left: 0.5rem\n visibility: hidden\n &:hover > .headerlink\n visibility: visible\n\n // Don't change to link-like, if someone adds the contents directive.\n > .toc-backref\n color: inherit\n text-decoration-line: none\n\n// Figure and table captions are special.\nfigure:hover > figcaption > p > .headerlink,\ntable:hover > caption > .headerlink\n visibility: visible\n\n:target >, // Regular section[id] style anchors\nspan:target ~ // Non-regular span[id] style \"extra\" anchors\n h1,\n h2,\n h3,\n h4,\n h5,\n h6\n &:nth-of-type(1)\n background-color: var(--color-highlight-on-target)\n // .headerlink\n // visibility: visible\n code.literal\n background-color: transparent\n\ntable:target > caption,\nfigure:target\n background-color: var(--color-highlight-on-target)\n\n// Inline page contents\n.this-will-duplicate-information-and-it-is-still-useful-here li :target\n background-color: var(--color-highlight-on-target)\n\n// Code block permalinks\n.literal-block-wrapper:target .code-block-caption\n background-color: 
var(--color-highlight-on-target)\n\n// When a definition list item is selected\n//\n// There isn't really an alternative to !important here, due to the\n// high-specificity of API documentation's selector.\ndt:target\n background-color: var(--color-highlight-on-target) !important\n\n// When a footnote reference is selected\n.footnote > dt:target + dd,\n.footnote-reference:target\n background-color: var(--color-highlight-on-target)\n",".guilabel\n background-color: var(--color-guilabel-background)\n border: 1px solid var(--color-guilabel-border)\n color: var(--color-guilabel-text)\n\n padding: 0 0.3em\n border-radius: 0.5em\n font-size: 0.9em\n","// This file contains the styles used for stylizing the footer that's shown\n// below the content.\n@use \"../variables\" as *\n\nfooter\n font-size: var(--font-size--small)\n display: flex\n flex-direction: column\n\n margin-top: 2rem\n\n// Bottom of page information\n.bottom-of-page\n display: flex\n align-items: center\n justify-content: space-between\n\n margin-top: 1rem\n padding-top: 1rem\n padding-bottom: 1rem\n\n color: var(--color-foreground-secondary)\n border-top: 1px solid var(--color-background-border)\n\n line-height: 1.5\n\n @media (max-width: $content-width)\n text-align: center\n flex-direction: column-reverse\n gap: 0.25rem\n\n .left-details\n font-size: var(--font-size--small)\n\n .right-details\n display: flex\n flex-direction: column\n gap: 0.25rem\n text-align: right\n\n .icons\n display: flex\n justify-content: flex-end\n gap: 0.25rem\n font-size: 1rem\n\n a\n text-decoration: none\n\n svg,\n img\n font-size: 1.125rem\n height: 1em\n width: 1em\n\n// Next/Prev page information\n.related-pages\n a\n display: flex\n align-items: center\n\n text-decoration: none\n &:hover .page-info .title\n text-decoration: underline\n color: var(--color-link)\n text-decoration-color: var(--color-link-underline)\n\n svg.furo-related-icon,\n svg.furo-related-icon > use\n flex-shrink: 0\n\n color: 
var(--color-foreground-border)\n\n width: 0.75rem\n height: 0.75rem\n margin: 0 0.5rem\n\n &.next-page\n max-width: 50%\n\n float: right\n clear: right\n text-align: right\n\n &.prev-page\n max-width: 50%\n\n float: left\n clear: left\n\n svg\n transform: rotate(180deg)\n\n.page-info\n display: flex\n flex-direction: column\n overflow-wrap: anywhere\n\n .next-page &\n align-items: flex-end\n\n .context\n display: flex\n align-items: center\n\n padding-bottom: 0.1rem\n\n color: var(--color-foreground-muted)\n font-size: var(--font-size--small)\n text-decoration: none\n","// This file contains the styles for the contents of the left sidebar, which\n// contains the navigation tree, logo, search etc.\n\n////////////////////////////////////////////////////////////////////////////////\n// Brand on top of the scrollable tree.\n////////////////////////////////////////////////////////////////////////////////\n.sidebar-brand\n display: flex\n flex-direction: column\n flex-shrink: 0\n\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n text-decoration: none\n\n.sidebar-brand-text\n color: var(--color-sidebar-brand-text)\n overflow-wrap: break-word\n margin: var(--sidebar-item-spacing-vertical) 0\n font-size: 1.5rem\n\n.sidebar-logo-container\n margin: var(--sidebar-item-spacing-vertical) 0\n\n.sidebar-logo\n margin: 0 auto\n display: block\n max-width: 100%\n\n////////////////////////////////////////////////////////////////////////////////\n// Search\n////////////////////////////////////////////////////////////////////////////////\n.sidebar-search-container\n display: flex\n align-items: center\n margin-top: var(--sidebar-search-space-above)\n\n position: relative\n\n background: var(--color-sidebar-search-background)\n &:hover,\n &:focus-within\n background: var(--color-sidebar-search-background--focus)\n\n &::before\n content: \"\"\n position: absolute\n left: var(--sidebar-item-spacing-horizontal)\n width: 
var(--sidebar-search-icon-size)\n height: var(--sidebar-search-icon-size)\n\n background-color: var(--color-sidebar-search-icon)\n mask-image: var(--icon-search)\n\n.sidebar-search\n box-sizing: border-box\n\n border: none\n border-top: 1px solid var(--color-sidebar-search-border)\n border-bottom: 1px solid var(--color-sidebar-search-border)\n\n padding-top: var(--sidebar-search-input-spacing-vertical)\n padding-bottom: var(--sidebar-search-input-spacing-vertical)\n padding-right: var(--sidebar-search-input-spacing-horizontal)\n padding-left: calc(var(--sidebar-item-spacing-horizontal) + var(--sidebar-search-input-spacing-horizontal) + var(--sidebar-search-icon-size))\n\n width: 100%\n\n color: var(--color-sidebar-search-foreground)\n background: transparent\n z-index: 10\n\n &:focus\n outline: none\n\n &::placeholder\n font-size: var(--sidebar-search-input-font-size)\n\n//\n// Hide Search Matches link\n//\n#searchbox .highlight-link\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal) 0\n margin: 0\n text-align: center\n\n a\n color: var(--color-sidebar-search-icon)\n font-size: var(--font-size--small--2)\n\n////////////////////////////////////////////////////////////////////////////////\n// Structure/Skeleton of the navigation tree (left)\n////////////////////////////////////////////////////////////////////////////////\n.sidebar-tree\n font-size: var(--sidebar-item-font-size)\n margin-top: var(--sidebar-tree-space-above)\n margin-bottom: var(--sidebar-item-spacing-vertical)\n\n ul\n padding: 0\n margin-top: 0\n margin-bottom: 0\n\n display: flex\n flex-direction: column\n\n list-style: none\n\n li\n position: relative\n margin: 0\n\n > ul\n margin-left: var(--sidebar-item-spacing-horizontal)\n\n .icon\n color: var(--color-sidebar-link-text)\n\n .reference\n box-sizing: border-box\n color: var(--color-sidebar-link-text)\n\n // Fill the parent.\n display: inline-block\n line-height: var(--sidebar-item-line-height)\n 
text-decoration: none\n\n // Don't allow long words to cause wrapping.\n overflow-wrap: anywhere\n\n height: 100%\n width: 100%\n\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n\n &:hover\n color: var(--color-sidebar-link-text)\n background: var(--color-sidebar-item-background--hover)\n\n // Add a nice little \"external-link\" arrow here.\n &.external::after\n content: url('data:image/svg+xml,')\n margin: 0 0.25rem\n vertical-align: middle\n color: var(--color-sidebar-link-text)\n\n // Make the current page reference bold.\n .current-page > .reference\n font-weight: bold\n\n label\n position: absolute\n top: 0\n right: 0\n height: var(--sidebar-item-height)\n width: var(--sidebar-expander-width)\n\n cursor: pointer\n user-select: none\n\n display: flex\n justify-content: center\n align-items: center\n\n .caption, :not(.caption) > .caption-text\n font-size: var(--sidebar-caption-font-size)\n color: var(--color-sidebar-caption-text)\n\n font-weight: bold\n text-transform: uppercase\n\n margin: var(--sidebar-caption-space-above) 0 0 0\n padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal)\n\n // If it has children, add a bit more padding to wrap the content to avoid\n // overlapping with the `. + +Flood +~~~~~ + +:doc:`Caravan `, :doc:`FloodCastBench `, :doc:`HydroBench `, :doc:`NOAA Flood Events `, :doc:`WaterBench `. + +Earthquake +~~~~~~~~~~ + +:doc:`AEFA Forecast `, :doc:`pick-benchmark `, :doc:`SeisBench `. + +Tropical Cyclone +~~~~~~~~~~~~~~~~ + +:doc:`IBTrACS `, :doc:`TCBench Alpha `, :doc:`TropiCycloneNet-Dataset `. + +Developer Dataset Workflow +-------------------------- + +Use this section when you need the package-level registry and dataset +builder interface rather than the public catalog presentation. + +Inspect an External Dataset Source +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
code-block:: bash + + python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10 + +Load a Registered Dataset +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + from pyhazards.datasets import available_datasets, load_dataset + + print(available_datasets()) + data = load_dataset( + "seisbench_waveforms", + micro=True, + ).load() + print(sorted(data.splits.keys())) + +Register a Custom Dataset +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + from pyhazards.datasets import ( + DataBundle, + DataSplit, + Dataset, + FeatureSpec, + LabelSpec, + register_dataset, + ) + + class MyDataset(Dataset): + name = "my_dataset" + + def _load(self) -> DataBundle: + raise NotImplementedError("Return a populated DataBundle here.") + + register_dataset("my_dataset", MyDataset) + +Notes +~~~~~ + +- Public dataset docs are generated from cards in ``pyhazards/dataset_cards``. +- Run ``python scripts/render_dataset_docs.py`` after editing cards or generated dataset docs. +- Use :doc:`/implementation` for the full contributor workflow. + +Submodules +---------- + +pyhazards.datasets.base module +------------------------------ + +.. automodule:: pyhazards.datasets.base + :members: + :undoc-members: + :show-inheritance: + +pyhazards.datasets.registry module +----------------------------------- + +.. automodule:: pyhazards.datasets.registry + :members: + :undoc-members: + :show-inheritance: + +pyhazards.datasets.transforms package +------------------------------------- + +.. automodule:: pyhazards.datasets.transforms + :members: + :undoc-members: + :show-inheritance: + +pyhazards.datasets.hazards package +----------------------------------- + +.. automodule:: pyhazards.datasets.hazards + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: pyhazards.datasets + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api/pyhazards.engine.rst b/docs/source/api/pyhazards.engine.rst new file mode 100644 index 00000000..a4ad03da --- /dev/null +++ b/docs/source/api/pyhazards.engine.rst @@ -0,0 +1,37 @@ +pyhazards.engine package +======================== + +Submodules +---------- + +pyhazards.engine.trainer module +------------------------------- + +.. automodule:: pyhazards.engine.trainer + :members: + :undoc-members: + :show-inheritance: + +pyhazards.engine.distributed module +------------------------------------ + +.. automodule:: pyhazards.engine.distributed + :members: + :undoc-members: + :show-inheritance: + +pyhazards.engine.inference module +---------------------------------- + +.. automodule:: pyhazards.engine.inference + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: pyhazards.engine + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api/pyhazards.metrics.rst b/docs/source/api/pyhazards.metrics.rst new file mode 100644 index 00000000..a79a09ac --- /dev/null +++ b/docs/source/api/pyhazards.metrics.rst @@ -0,0 +1,7 @@ +pyhazards.metrics package +========================= + +.. automodule:: pyhazards.metrics + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api/pyhazards.models.rst b/docs/source/api/pyhazards.models.rst new file mode 100644 index 00000000..f2ee6f21 --- /dev/null +++ b/docs/source/api/pyhazards.models.rst @@ -0,0 +1,137 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +pyhazards.models package +======================== + +Catalog Summary +--------------- + +This page links the public model catalog, the developer registry +workflow, and the package submodules used to implement model builders. + +For the curated browsing experience, use :doc:`/pyhazards_models`. 
+ +Wildfire +~~~~~~~~ + +Implemented Models +++++++++++++++++++ + +:doc:`ASUFM `, :doc:`DNN-LSTM-AutoEncoder `, :doc:`FireCastNet `, :doc:`ForeFire Adapter `, :doc:`Wildfire Forecasting `, :doc:`WildfireSpreadTS `, :doc:`WRF-SFIRE Adapter `, :doc:`CNN-ASPP `. + +Earthquake +~~~~~~~~~~ + +Implemented Models +++++++++++++++++++ + +:doc:`EQNet `, :doc:`EQTransformer `, :doc:`GPD `, :doc:`PhaseNet `, :doc:`WaveCastNet `. + +Flood +~~~~~ + +Implemented Models +++++++++++++++++++ + +:doc:`EA-LSTM `, :doc:`FloodCast `, :doc:`Google Flood Forecasting `, :doc:`NeuralHydrology LSTM `, :doc:`UrbanFloodCast `, :doc:`HydroGraphNet `. + +Tropical Cyclone +~~~~~~~~~~~~~~~~ + +Implemented Models +++++++++++++++++++ + +:doc:`Hurricast `, :doc:`SAF-Net `, :doc:`TCIF-fusion `, :doc:`Tropical Cyclone MLP `, :doc:`TropiCycloneNet `. + +Experimental Adapters ++++++++++++++++++++++ + +:doc:`FourCastNet TC Adapter `, :doc:`GraphCast TC Adapter `, :doc:`Pangu TC Adapter `. + +Developer Registry Workflow +--------------------------- + +Use this section when you need the package-level builder and registry +interface rather than the public catalog presentation. + +Build a Registered Model +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + from pyhazards.models import build_model + + model = build_model( + name="phasenet", + task="regression", + in_channels=3, + ) + +Register a Custom Model +~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + import torch.nn as nn + from pyhazards.models import build_model, register_model + + def my_custom_builder(task: str, in_dim: int, out_dim: int, **kwargs) -> nn.Module: + hidden = kwargs.get("hidden_dim", 128) + return nn.Sequential( + nn.Linear(in_dim, hidden), + nn.ReLU(), + nn.Linear(hidden, out_dim), + ) + + register_model("my_mlp", my_custom_builder, defaults={"hidden_dim": 128}) + model = build_model(name="my_mlp", task="regression", in_dim=16, out_dim=1) + +Notes +~~~~~ + +- Builders receive ``task`` plus any kwargs you pass. 
+- ``register_model`` stores optional defaults so configs can stay small. +- Use :doc:`/implementation` for the full contributor workflow. + +Submodules +---------- + +pyhazards.models.backbones module +---------------------------------- + +.. automodule:: pyhazards.models.backbones + :members: + :undoc-members: + :show-inheritance: + +pyhazards.models.heads module +------------------------------ + +.. automodule:: pyhazards.models.heads + :members: + :undoc-members: + :show-inheritance: + +pyhazards.models.builder module +------------------------------- + +.. automodule:: pyhazards.models.builder + :members: + :undoc-members: + :show-inheritance: + +pyhazards.models.registry module +-------------------------------- + +.. automodule:: pyhazards.models.registry + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: pyhazards.models + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api/pyhazards.reports.rst b/docs/source/api/pyhazards.reports.rst new file mode 100644 index 00000000..ed2243b4 --- /dev/null +++ b/docs/source/api/pyhazards.reports.rst @@ -0,0 +1,21 @@ +pyhazards.reports package +========================= + +Submodules +---------- + +pyhazards.reports.base module +----------------------------- + +.. automodule:: pyhazards.reports.base + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: pyhazards.reports + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/api/pyhazards.rst b/docs/source/api/pyhazards.rst new file mode 100644 index 00000000..385a8563 --- /dev/null +++ b/docs/source/api/pyhazards.rst @@ -0,0 +1,37 @@ +pyhazards package +================= + +Subpackages +----------- + +.. 
toctree:: + :maxdepth: 4 + + pyhazards.datasets + pyhazards.models + pyhazards.benchmarks + pyhazards.configs + pyhazards.reports + pyhazards.engine + pyhazards.metrics + pyhazards.utils + +Submodules +---------- + +pyhazards.interactive_map module +-------------------------------- + +.. automodule:: pyhazards.interactive_map + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: pyhazards + :members: + :undoc-members: + :show-inheritance: + :exclude-members: BenchmarkRunner, GraphTemporalDataset, graph_collate, WildfireMamba, wildfire_mamba_builder diff --git a/docs/source/api/pyhazards.utils.rst b/docs/source/api/pyhazards.utils.rst new file mode 100644 index 00000000..5b552aa3 --- /dev/null +++ b/docs/source/api/pyhazards.utils.rst @@ -0,0 +1,29 @@ +pyhazards.utils package +======================= + +Submodules +---------- + +pyhazards.utils.hardware module +-------------------------------- + +.. automodule:: pyhazards.utils.hardware + :members: + :undoc-members: + :show-inheritance: + +pyhazards.utils.common module +------------------------------ + +.. automodule:: pyhazards.utils.common + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: pyhazards.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/source/appendix_a_coverage.rst b/docs/source/appendix_a_coverage.rst new file mode 100644 index 00000000..10610e49 --- /dev/null +++ b/docs/source/appendix_a_coverage.rst @@ -0,0 +1,306 @@ +.. This file is generated by scripts/render_appendix_a_docs.py. Do not edit by hand. + +Coverage Audit +============== + +Overview +-------- + +This page audits the current PyHazards implementation against the +planned methods, benchmarks, and datasets listed in ``pyhazard_plan.pdf``. +It separates implemented public entries from variant-only entries, +experimental wrappers, and items that are still missing. 
+ +Status meanings: + +- ``Implemented``: a public PyHazards adapter exists for the named method or resource. +- ``Experimental``: a lightweight wrapper exists, but it should not be counted as stable core coverage. +- ``Missing``: no aligned adapter or benchmark integration is present yet. + +Hazard Summary +-------------- + +.. list-table:: + :widths: 26 18 18 18 + :header-rows: 1 + :class: dataset-list + + * - Hazard Family + - Implemented + - Experimental + - Missing + * - Earthquake + - 8 + - 0 + - 0 + * - Wildfire + - 6 + - 0 + - 0 + * - Flood + - 8 + - 0 + - 0 + * - Hurricane / Tropical Cyclone + - 8 + - 3 + - 0 + +Method and Resource Matrix +-------------------------- + +.. list-table:: + :widths: 22 22 16 14 24 34 + :header-rows: 1 + :class: dataset-list + + * - Hazard Family + - Method / Resource + - Type + - Status + - PyHazards Mapping + - Notes + * - Earthquake + - `PhaseNet `_ + - Baseline + - ``Implemented`` + - :doc:`PhaseNet ` + - Model adapter is implemented, but the SeisBench / pick-benchmark data path is still missing. + * - Earthquake + - `EQTransformer `_ + - Baseline + - ``Implemented`` + - :doc:`EQTransformer ` + - Model adapter is implemented, but the benchmark stack remains lighter than the PDF target. + * - Earthquake + - `GPD `_ + - Baseline + - ``Implemented`` + - :doc:`GPD ` + - Model adapter is implemented behind the shared picking interface. + * - Earthquake + - `EQNet `_ + - Baseline + - ``Implemented`` + - :doc:`EQNet ` + - Model adapter is implemented behind the shared picking interface. + * - Earthquake + - `SeisBench `_ + - Benchmark / Data Ecosystem + - ``Implemented`` + - None + - A synthetic-backed SeisBench-compatible waveform adapter is registered for smoke benchmarking. + * - Earthquake + - `pick-benchmark `_ + - Benchmark + - ``Implemented`` + - None + - A synthetic-backed pick-benchmark-compatible waveform adapter is registered for smoke benchmarking. 
+ * - Earthquake + - `pyCSEP `_ + - Benchmark / Reports + - ``Implemented`` + - None + - The forecasting smoke benchmark exports a pyCSEP-style JSON artifact. + * - Earthquake + - `AEFA `_ + - Dataset / Forecast Benchmark + - ``Implemented`` + - None + - A synthetic-backed AEFA-style forecasting dataset adapter is registered. + * - Wildfire + - `wildfire_forecasting `_ + - Baseline + - ``Implemented`` + - :doc:`Wildfire Forecasting ` + - + * - Wildfire + - `WildfireSpreadTS `_ + - Baseline / Benchmark + - ``Implemented`` + - :doc:`WildfireSpreadTS ` + - + * - Wildfire + - `ASUFM `_ + - Baseline + - ``Implemented`` + - :doc:`ASUFM ` + - + * - Wildfire + - `WRF-SFIRE `_ + - Simulator Adapter + - ``Implemented`` + - :doc:`WRF-SFIRE Adapter ` + - The current adapter is lightweight and synthetic-backed rather than a full external simulator binding. + * - Wildfire + - `ForeFire `_ + - Simulator Adapter + - ``Implemented`` + - :doc:`ForeFire Adapter ` + - The current adapter is lightweight and synthetic-backed rather than a full external simulator binding. + * - Wildfire + - `FireCastNet `_ + - Optional Baseline + - ``Implemented`` + - :doc:`FireCastNet ` + - + * - Flood + - `NeuralHydrology `_ + - Baseline Family + - ``Implemented`` + - :doc:`NeuralHydrology LSTM `, :doc:`EA-LSTM ` + - The LSTM and EA-LSTM adapters are implemented, but Caravan / WaterBench benchmark backing is still missing. + * - Flood + - `Caravan `_ + - Dataset + - ``Implemented`` + - None + - A synthetic-backed Caravan adapter is registered for streamflow smoke benchmarking. + * - Flood + - `WaterBench `_ + - Dataset + - ``Implemented`` + - None + - A synthetic-backed WaterBench adapter is registered for streamflow smoke benchmarking. + * - Flood + - `FloodCast `_ + - Baseline + - ``Implemented`` + - :doc:`FloodCast ` + - The model adapter is implemented, but FloodCastBench-backed evaluation is not wired yet. 
+ * - Flood + - `FloodCastBench `_ + - Benchmark + - ``Implemented`` + - None + - A synthetic-backed FloodCastBench-style inundation adapter is registered. + * - Flood + - `UrbanFloodCast `_ + - Baseline + - ``Implemented`` + - :doc:`UrbanFloodCast ` + - The model adapter is implemented on synthetic inundation fixtures today. + * - Flood + - `HydroBench `_ + - Benchmark / Diagnostics + - ``Implemented`` + - None + - A synthetic-backed HydroBench adapter is registered for streamflow smoke benchmarking. + * - Flood + - `google-research/flood-forecasting `_ + - Reference Baseline + - ``Implemented`` + - :doc:`Google Flood Forecasting ` + - + * - Hurricane / Tropical Cyclone + - `Hurricast `_ + - Baseline + - ``Implemented`` + - :doc:`Hurricast ` + - The model adapter is implemented, but the real TCBench / IBTrACS data path is still missing. + * - Hurricane / Tropical Cyclone + - `tropicalcyclone_MLP `_ + - Baseline + - ``Implemented`` + - :doc:`Tropical Cyclone MLP ` + - The model adapter is implemented as a basin-filtered storm baseline. + * - Hurricane / Tropical Cyclone + - `TCIF-fusion `_ + - Baseline + - ``Implemented`` + - :doc:`TCIF-fusion ` + - The model adapter is implemented behind the shared storm evaluator. + * - Hurricane / Tropical Cyclone + - `SAF-Net `_ + - Baseline + - ``Implemented`` + - :doc:`SAF-Net ` + - The model adapter is implemented behind the shared storm evaluator. + * - Hurricane / Tropical Cyclone + - `TropiCycloneNet `_ + - Baseline + - ``Implemented`` + - :doc:`TropiCycloneNet ` + - The model adapter is implemented, but the public benchmark/data track remains synthetic-first. + * - Hurricane / Tropical Cyclone + - `TropiCycloneNet-Dataset `_ + - Dataset + - ``Implemented`` + - None + - A synthetic-backed TropiCycloneNet-Dataset adapter is registered. + * - Hurricane / Tropical Cyclone + - `TCBench Alpha `_ + - Benchmark + - ``Implemented`` + - None + - A synthetic-backed TCBench Alpha adapter is registered. 
+ * - Hurricane / Tropical Cyclone + - `IBTrACS `_ + - Dataset + - ``Implemented`` + - None + - A synthetic-backed IBTrACS adapter is registered. + * - Hurricane / Tropical Cyclone + - `GraphCast / GenCast `_ + - Foundation Adapter + - ``Experimental`` + - :doc:`GraphCast TC Adapter ` + - The current wrapper is intentionally lightweight and should not be counted as stable core coverage. + * - Hurricane / Tropical Cyclone + - `Pangu-Weather `_ + - Foundation Adapter + - ``Experimental`` + - :doc:`Pangu TC Adapter ` + - The current wrapper is intentionally lightweight and should not be counted as stable core coverage. + * - Hurricane / Tropical Cyclone + - `FourCastNet `_ + - Foundation Adapter + - ``Experimental`` + - :doc:`FourCastNet TC Adapter ` + - The current wrapper is intentionally lightweight and should not be counted as stable core coverage. + +Current Public Non-Core Implementations +--------------------------------------- + +These entries remain in the public catalog, but they are not counted as +part of the current core method set. + +.. list-table:: + :widths: 18 18 28 36 + :header-rows: 1 + :class: dataset-list + + * - Hazard Family + - Catalog Status + - Public Entry + - Why it is non-core + * - Wildfire + - ``variant`` + - :doc:`CNN-ASPP ` + - Implemented outside the current core method set and kept public as an additional model. + * - Earthquake + - ``variant`` + - :doc:`WaveCastNet ` + - Implemented outside the current core method set and kept public as an additional model. + * - Flood + - ``variant`` + - :doc:`HydroGraphNet ` + - Implemented outside the current core method set and kept public as an additional model. + * - Tropical Cyclone + - ``experimental`` + - :doc:`FourCastNet TC Adapter ` + - Wrapper-style experimental adapter pending stronger benchmark and dataset support. + * - Tropical Cyclone + - ``experimental`` + - :doc:`GraphCast TC Adapter ` + - Wrapper-style experimental adapter pending stronger benchmark and dataset support. 
+ * - Tropical Cyclone + - ``experimental`` + - :doc:`Pangu TC Adapter ` + - Wrapper-style experimental adapter pending stronger benchmark and dataset support. + +Execution Note +-------------- + +Use `.github/ROADMAP_EXECUTION.md `_ +as the checked-in multi-agent handoff for finishing the remaining roadmap work. diff --git a/docs/source/benchmark.rst b/docs/source/benchmark.rst deleted file mode 100644 index efde6b4f..00000000 --- a/docs/source/benchmark.rst +++ /dev/null @@ -1,5 +0,0 @@ -Benchmark -========= - -Coming soon... - diff --git a/docs/source/benchmarks/aefa.rst b/docs/source/benchmarks/aefa.rst new file mode 100644 index 00000000..725ed707 --- /dev/null +++ b/docs/source/benchmarks/aefa.rst @@ -0,0 +1,101 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +AEFA +==== + +Overview +-------- + +The AEFA alignment is implemented as a synthetic-backed dense-grid forecasting adapter used by the WaveCastNet benchmark config. + +It keeps the forecasting task and metric shape aligned without claiming a full AEFA data pipeline. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. 
container:: catalog-stat-note + + 1 model + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``earthquake`` + +**Registered class:** ``EarthquakeBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Earthquake Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`AEFA `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Wavefield Forecasting + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``mae`` + - ``mse`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``wavecastnet_benchmark_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`WaveCastNet `. diff --git a/docs/source/benchmarks/caravan.rst b/docs/source/benchmarks/caravan.rst new file mode 100644 index 00000000..291aecb3 --- /dev/null +++ b/docs/source/benchmarks/caravan.rst @@ -0,0 +1,104 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +Caravan +======= + +Overview +-------- + +The current Caravan alignment is a metadata-backed streamflow adapter layered on top of the shared synthetic graph-temporal flood dataset. + +It currently drives the public smoke runs for NeuralHydrology LSTM and Google Flood Forecasting. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. 
container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 2 + + .. container:: catalog-stat-note + + 2 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``flood`` + +**Registered class:** ``FloodBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Flood Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`Caravan - A global community dataset for large-sample hydrology `_ (`repo `__). + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Streamflow + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``mae`` + - ``rmse`` + - ``nse`` + - ``kge`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``neuralhydrology_lstm_smoke.yaml`` + - ``google_flood_forecasting_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`NeuralHydrology LSTM `, :doc:`Google Flood Forecasting `. diff --git a/docs/source/benchmarks/earthquake_benchmark.rst b/docs/source/benchmarks/earthquake_benchmark.rst new file mode 100644 index 00000000..c1beb12f --- /dev/null +++ b/docs/source/benchmarks/earthquake_benchmark.rst @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +Earthquake Benchmark +==================== + +Overview +-------- + +The earthquake benchmark family groups the picking and forecasting paths under one registered evaluator and benchmark runner entrypoint. + +Current public coverage is synthetic-backed but already exposes the same task and report shape used across the earthquake smoke configs. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Family + + .. 
container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 5 + + .. container:: catalog-stat-note + + 5 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``earthquake`` + +**Registered class:** ``EarthquakeBenchmark`` + +Mapped benchmark ecosystems +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`AEFA `, :doc:`pick-benchmark `, :doc:`pyCSEP `, :doc:`SeisBench `. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Phase Picking + - Wavefield Forecasting + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``p_pick_mae`` + - ``s_pick_mae`` + - ``precision`` + - ``recall`` + - ``f1`` + - ``mae`` + - ``mse`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``phasenet_smoke.yaml`` + - ``eqtransformer_smoke.yaml`` + - ``gpd_smoke.yaml`` + - ``eqnet_smoke.yaml`` + - ``wavecastnet_benchmark_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`PhaseNet `, :doc:`EQTransformer `, :doc:`GPD `, :doc:`EQNet `, :doc:`WaveCastNet `. + +.. dropdown:: Notes + :class-container: catalog-dropdown + + - Forecasting runs export a pyCSEP-style report artifact through the shared earthquake benchmark. 
diff --git a/docs/source/benchmarks/flood_benchmark.rst b/docs/source/benchmarks/flood_benchmark.rst new file mode 100644 index 00000000..f21f03a3 --- /dev/null +++ b/docs/source/benchmarks/flood_benchmark.rst @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +Flood Benchmark +=============== + +Overview +-------- + +The flood benchmark family keeps streamflow and inundation scoring under one shared evaluator contract while preserving hazard-task-specific metrics. + +Current public coverage is synthetic-backed, but the same family already drives the streamflow and inundation smoke configs used across the flood models. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Family + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 6 + + .. container:: catalog-stat-note + + 6 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``flood`` + +**Registered class:** ``FloodBenchmark`` + +Mapped benchmark ecosystems +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Caravan `, :doc:`FloodCastBench `, :doc:`HydroBench `, :doc:`WaterBench `. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Streamflow + - Inundation + +.. 
dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``mae`` + - ``rmse`` + - ``nse`` + - ``kge`` + - ``pixel_mae`` + - ``iou`` + - ``f1`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``hydrographnet_smoke.yaml`` + - ``neuralhydrology_lstm_smoke.yaml`` + - ``neuralhydrology_ealstm_smoke.yaml`` + - ``google_flood_forecasting_smoke.yaml`` + - ``floodcast_smoke.yaml`` + - ``urbanfloodcast_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`HydroGraphNet `, :doc:`NeuralHydrology LSTM `, :doc:`EA-LSTM `, :doc:`Google Flood Forecasting `, :doc:`FloodCast `, :doc:`UrbanFloodCast `. diff --git a/docs/source/benchmarks/floodcastbench.rst b/docs/source/benchmarks/floodcastbench.rst new file mode 100644 index 00000000..70297a31 --- /dev/null +++ b/docs/source/benchmarks/floodcastbench.rst @@ -0,0 +1,103 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +FloodCastBench +============== + +Overview +-------- + +The current FloodCastBench alignment is implemented as a synthetic raster inundation adapter used by the public inundation smoke configs. + +It documents the benchmark/data protocol behind the FloodCast and UrbanFloodCast smoke paths. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. 
container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 2 + + .. container:: catalog-stat-note + + 2 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``flood`` + +**Registered class:** ``FloodBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Flood Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`FloodCastBench `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Inundation + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``pixel_mae`` + - ``iou`` + - ``f1`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``floodcast_smoke.yaml`` + - ``urbanfloodcast_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`FloodCast `, :doc:`UrbanFloodCast `. diff --git a/docs/source/benchmarks/hydrobench.rst b/docs/source/benchmarks/hydrobench.rst new file mode 100644 index 00000000..7e4213b7 --- /dev/null +++ b/docs/source/benchmarks/hydrobench.rst @@ -0,0 +1,103 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +HydroBench +========== + +Overview +-------- + +The current HydroBench alignment uses a metadata-backed streamflow adapter over the shared synthetic flood streamflow dataset. + +It is currently exercised through the HydroGraphNet smoke benchmark path. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. 
container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + 1 model + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``flood`` + +**Registered class:** ``FloodBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Flood Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`HydroBench `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Streamflow + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``mae`` + - ``rmse`` + - ``nse`` + - ``kge`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``hydrographnet_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`HydroGraphNet `. diff --git a/docs/source/benchmarks/ibtracs.rst b/docs/source/benchmarks/ibtracs.rst new file mode 100644 index 00000000..0279ab5d --- /dev/null +++ b/docs/source/benchmarks/ibtracs.rst @@ -0,0 +1,104 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +IBTrACS +======= + +Overview +-------- + +The current IBTrACS alignment uses a metadata-backed storm-history adapter over the shared synthetic tropical-cyclone dataset. + +It is the benchmark ecosystem currently used by Hurricast and the experimental weather-model adapter smoke configs. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. 
container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 4 + + .. container:: catalog-stat-note + + 4 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``tc`` + +**Registered class:** ``TropicalCycloneBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Tropical Cyclone Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`IBTrACS `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Track + Intensity + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``track_error`` + - ``intensity_mae`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``hurricast_smoke.yaml`` + - ``graphcast_tc_smoke.yaml`` + - ``pangu_tc_smoke.yaml`` + - ``fourcastnet_tc_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`Hurricast `, :doc:`GraphCast TC Adapter `, :doc:`Pangu TC Adapter `, :doc:`FourCastNet TC Adapter `. diff --git a/docs/source/benchmarks/pick_benchmark.rst b/docs/source/benchmarks/pick_benchmark.rst new file mode 100644 index 00000000..bf9da111 --- /dev/null +++ b/docs/source/benchmarks/pick_benchmark.rst @@ -0,0 +1,105 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. 
+ +pick-benchmark +============== + +Overview +-------- + +The current pick-benchmark path reuses the synthetic waveform picking bundle and tags it as a pick-benchmark-style benchmark adapter. + +It supports the earthquake picking smoke path for the transformer and CNN picking baselines. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 2 + + .. container:: catalog-stat-note + + 2 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``earthquake`` + +**Registered class:** ``EarthquakeBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Earthquake Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`pick-benchmark `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Phase Picking + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``p_pick_mae`` + - ``s_pick_mae`` + - ``precision`` + - ``recall`` + - ``f1`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``eqtransformer_smoke.yaml`` + - ``gpd_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`EQTransformer `, :doc:`GPD `. 
diff --git a/docs/source/benchmarks/pycsep.rst b/docs/source/benchmarks/pycsep.rst new file mode 100644 index 00000000..5150e19e --- /dev/null +++ b/docs/source/benchmarks/pycsep.rst @@ -0,0 +1,106 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +pyCSEP +====== + +Overview +-------- + +The current pyCSEP alignment is implemented as a report export contract inside the shared earthquake benchmark rather than as a standalone benchmark family. + +It documents the forecasting artifact shape used by the WaveCastNet smoke config. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + 1 model + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``earthquake`` + +**Registered class:** ``EarthquakeBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Earthquake Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`pyCSEP `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Wavefield Forecasting + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``mae`` + - ``mse`` + +.. 
dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``wavecastnet_benchmark_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`WaveCastNet `. + +.. dropdown:: Notes + :class-container: catalog-dropdown + + - Current repo support is report-export alignment, not a separate pyCSEP benchmark runner. diff --git a/docs/source/benchmarks/seisbench.rst b/docs/source/benchmarks/seisbench.rst new file mode 100644 index 00000000..9fc1ba8e --- /dev/null +++ b/docs/source/benchmarks/seisbench.rst @@ -0,0 +1,105 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +SeisBench +========= + +Overview +-------- + +The current SeisBench path uses a synthetic waveform adapter that preserves the same picking task shape expected by the shared earthquake benchmark. + +It exists today as a benchmark-compatible smoke path rather than a full external SeisBench ingestion pipeline. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 2 + + .. 
container:: catalog-stat-note + + 2 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``earthquake`` + +**Registered class:** ``EarthquakeBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Earthquake Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`SeisBench - A Toolbox for Machine Learning in Seismology `_ (`repo `__). + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Phase Picking + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``p_pick_mae`` + - ``s_pick_mae`` + - ``precision`` + - ``recall`` + - ``f1`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``phasenet_smoke.yaml`` + - ``eqnet_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`PhaseNet `, :doc:`EQNet `. diff --git a/docs/source/benchmarks/tcbench_alpha.rst b/docs/source/benchmarks/tcbench_alpha.rst new file mode 100644 index 00000000..4943e4f7 --- /dev/null +++ b/docs/source/benchmarks/tcbench_alpha.rst @@ -0,0 +1,103 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +TCBench Alpha +============= + +Overview +-------- + +The current TCBench Alpha alignment uses a metadata-backed storm-history adapter over the shared synthetic tropical-cyclone dataset. + +It currently drives the tropicalcyclone_MLP, SAF-Net, and TCIF-fusion smoke configs. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. 
grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 3 + + .. container:: catalog-stat-note + + 3 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``tc`` + +**Registered class:** ``TropicalCycloneBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Tropical Cyclone Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`TCBench Alpha `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Track + Intensity + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``track_error`` + - ``intensity_mae`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``tropicalcyclone_mlp_smoke.yaml`` + - ``saf_net_smoke.yaml`` + - ``tcif_fusion_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`Tropical Cyclone MLP `, :doc:`SAF-Net `, :doc:`TCIF-fusion `. diff --git a/docs/source/benchmarks/tropical_cyclone_benchmark.rst b/docs/source/benchmarks/tropical_cyclone_benchmark.rst new file mode 100644 index 00000000..a9aa8309 --- /dev/null +++ b/docs/source/benchmarks/tropical_cyclone_benchmark.rst @@ -0,0 +1,108 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +Tropical Cyclone Benchmark +========================== + +Overview +-------- + +The tropical cyclone benchmark family is the single storm evaluator used by the hurricane-specific and all-basin tropical-cyclone smoke configs. + +Current coverage is synthetic-backed, but the same evaluator contract already scores core storm baselines and experimental weather-model adapters. + +At a Glance +----------- + +.. 
grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Family + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 8 + + .. container:: catalog-stat-note + + 8 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``tc`` + +**Registered class:** ``TropicalCycloneBenchmark`` + +Mapped benchmark ecosystems +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`IBTrACS `, :doc:`TCBench Alpha `, :doc:`TropiCycloneNet-Dataset `. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Track + Intensity + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``track_error`` + - ``intensity_mae`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``hurricast_smoke.yaml`` + - ``tropicalcyclone_mlp_smoke.yaml`` + - ``tropicyclonenet_smoke.yaml`` + - ``saf_net_smoke.yaml`` + - ``tcif_fusion_smoke.yaml`` + - ``graphcast_tc_smoke.yaml`` + - ``pangu_tc_smoke.yaml`` + - ``fourcastnet_tc_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`Hurricast `, :doc:`Tropical Cyclone MLP `, :doc:`TropiCycloneNet `, :doc:`SAF-Net `, :doc:`TCIF-fusion `, :doc:`GraphCast TC Adapter `, :doc:`Pangu TC Adapter `, :doc:`FourCastNet TC Adapter `. + +.. 
dropdown:: Notes + :class-container: catalog-dropdown + + - IBTrACS, TCBench Alpha, and TropiCycloneNet-Dataset are surfaced as the public storm benchmark ecosystems. diff --git a/docs/source/benchmarks/tropicyclonenet_dataset.rst b/docs/source/benchmarks/tropicyclonenet_dataset.rst new file mode 100644 index 00000000..50b9f92b --- /dev/null +++ b/docs/source/benchmarks/tropicyclonenet_dataset.rst @@ -0,0 +1,101 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +TropiCycloneNet-Dataset +======================= + +Overview +-------- + +The current TropiCycloneNet-Dataset alignment uses a metadata-backed storm-history adapter over the shared synthetic tropical-cyclone dataset. + +It exists today to support the public TropiCycloneNet smoke benchmark path. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. 
container:: catalog-stat-note + + 1 model + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``tc`` + +**Registered class:** ``TropicalCycloneBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Tropical Cyclone Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`TropiCycloneNet-Dataset `_. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Track + Intensity + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``track_error`` + - ``intensity_mae`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``tropicyclonenet_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`TropiCycloneNet `. diff --git a/docs/source/benchmarks/waterbench.rst b/docs/source/benchmarks/waterbench.rst new file mode 100644 index 00000000..73c5ca68 --- /dev/null +++ b/docs/source/benchmarks/waterbench.rst @@ -0,0 +1,103 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +WaterBench +========== + +Overview +-------- + +The current WaterBench alignment uses a metadata-only adapter over the shared synthetic streamflow bundle and preserves the streamflow task contract. + +It is currently exercised by the EA-LSTM smoke benchmark path. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. 
container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + 1 model + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``flood`` + +**Registered class:** ``FloodBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Flood Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`WaterBench: A Large-scale Benchmark Dataset for Data-driven Streamflow Forecasting `_ (`repo `__). + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Streamflow + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``mae`` + - ``rmse`` + - ``nse`` + - ``kge`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``neuralhydrology_ealstm_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`EA-LSTM `. diff --git a/docs/source/benchmarks/wildfire_benchmark.rst b/docs/source/benchmarks/wildfire_benchmark.rst new file mode 100644 index 00000000..24016f64 --- /dev/null +++ b/docs/source/benchmarks/wildfire_benchmark.rst @@ -0,0 +1,116 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +Wildfire Benchmark +================== + +Overview +-------- + +The wildfire benchmark family is the single scoring layer for tabular danger tasks, weekly forecasting tasks, and raster spread tasks. + +Current coverage is synthetic-backed, but it already exposes a single hazard-level evaluator contract across wildfire danger and wildfire spread smoke configs. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Family + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. 
grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 8 + + .. container:: catalog-stat-note + + 8 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``wildfire`` + +**Registered class:** ``WildfireBenchmark`` + +Mapped benchmark ecosystems +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`WildfireSpreadTS `. + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Danger + - Spread + +.. dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``accuracy`` + - ``macro_f1`` + - ``auc`` + - ``pr_auc`` + - ``mae`` + - ``rmse`` + - ``iou`` + - ``f1`` + - ``burned_area_mae`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``wildfire_danger_smoke.yaml`` + - ``wildfire_forecasting_smoke.yaml`` + - ``asufm_smoke.yaml`` + - ``wildfire_spread_smoke.yaml`` + - ``wildfirespreadts_smoke.yaml`` + - ``forefire_smoke.yaml`` + - ``wrf_sfire_smoke.yaml`` + - ``firecastnet_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`DNN-LSTM-AutoEncoder `, :doc:`Wildfire Forecasting `, :doc:`ASUFM `, :doc:`CNN-ASPP `, :doc:`WildfireSpreadTS `, :doc:`ForeFire Adapter `, :doc:`WRF-SFIRE Adapter `, :doc:`FireCastNet `. + +.. dropdown:: Notes + :class-container: catalog-dropdown + + - WildfireSpreadTS is the public Appendix-A benchmark ecosystem surfaced on this page. 
diff --git a/docs/source/benchmarks/wildfirespreadts_ecosystem.rst b/docs/source/benchmarks/wildfirespreadts_ecosystem.rst new file mode 100644 index 00000000..73f88b7c --- /dev/null +++ b/docs/source/benchmarks/wildfirespreadts_ecosystem.rst @@ -0,0 +1,106 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +WildfireSpreadTS +================ + +Overview +-------- + +WildfireSpreadTS is the public wildfire benchmark ecosystem surfaced from Appendix A. + +The current repo uses a synthetic temporal spread dataset to exercise the same spread-task contract for WildfireSpreadTS-style evaluation. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Kind + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Ecosystem + + .. container:: catalog-stat-note + + Family benchmark or external ecosystem view. + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public hazard grouping used on the benchmark index page. + + .. grid-item-card:: Support Status + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :bdg-info:`Synthetic-backed` + + .. container:: catalog-stat-note + + Current maturity of the adapter or evaluator path. + + .. grid-item-card:: Linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 5 + + .. container:: catalog-stat-note + + 5 models + + +Benchmark Mapping +----------------- + +**Shared benchmark key:** ``wildfire`` + +**Registered class:** ``WildfireBenchmark`` + +Mapped benchmark family +~~~~~~~~~~~~~~~~~~~~~~~ + +:doc:`Wildfire Benchmark ` + +Primary Source +~~~~~~~~~~~~~~ + +`WildfireSpreadTS: A Dataset of Multi-Modal Time Series for Wildfire Spread Prediction `_ (`repo `__). + +.. dropdown:: Supported Tasks + :class-container: catalog-dropdown + + - Spread + +.. 
dropdown:: Key Metrics + :class-container: catalog-dropdown + + - ``iou`` + - ``f1`` + - ``burned_area_mae`` + +.. dropdown:: Smoke Configs + :class-container: catalog-dropdown + + - ``wildfire_spread_smoke.yaml`` + - ``wildfirespreadts_smoke.yaml`` + - ``forefire_smoke.yaml`` + - ``wrf_sfire_smoke.yaml`` + - ``firecastnet_smoke.yaml`` + +.. dropdown:: Linked Models + :class-container: catalog-dropdown + + :doc:`CNN-ASPP `, :doc:`WildfireSpreadTS `, :doc:`ForeFire Adapter `, :doc:`WRF-SFIRE Adapter `, :doc:`FireCastNet `. diff --git a/docs/source/cite.rst b/docs/source/cite.rst index 16c3b82b..6e53ea01 100644 --- a/docs/source/cite.rst +++ b/docs/source/cite.rst @@ -1,12 +1,19 @@ How to Cite =========== -If you find it useful, please considering cite the following work: + +Use the following citation for the PyHazards software package itself. If you are +also relying on specific datasets or model papers, cite those sources from +:doc:`references` as well. + +Library Citation +---------------- .. 
code-block:: bibtex - @article{li2025intellectual, - title={Intellectual Property in Graph-Based Machine Learning as a Service: Attacks and Defenses}, - author={Li, Lincan and Shen, Bolin and Zhao, Chenxi and Sun, Yuxiang and Zhao, Kaixiang and Pan, Shirui and Dong, Yushun}, - journal={arXiv preprint arXiv:2508.19641}, - year={2025} - } + @misc{pyhazards2025, + title = {PyHazards: An Open-Source Library for AI-Powered Hazard Prediction}, + author = {Cheng and others}, + year = {2025}, + howpublished = {\url{https://github.com/LabRAI/PyHazards}}, + note = {GitHub repository} + } diff --git a/docs/source/conf.py b/docs/source/conf.py index 1cda1e6f..f92b29ff 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -4,10 +4,10 @@ sys.path.insert(0, os.path.abspath('../../')) -project = 'PyGIP' +project = 'PyHazards' copyright = '2025, RAILab' author = 'RAILab' -release = '1.0.0' +release = '1.0.5' # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration @@ -16,6 +16,7 @@ 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', 'sphinx.ext.autosummary', + 'sphinx_design', 'sphinx_autodoc_typehints' ] @@ -46,9 +47,16 @@ html_theme = 'furo' html_static_path = ['_static'] -html_baseurl = "https://labrai.github.io/PyGIP/" +html_baseurl = "https://labrai.github.io/PyHazards/" +html_logo = "_static/logo.png" +html_favicon = "_static/logo.png" +html_title = f"PyHazards {release} documentation" html_theme_options = { "navigation_with_keys": True, "sidebar_hide_name": False, } + +html_css_files = [ + 'custom.css', +] diff --git a/docs/source/datasets/aefa_forecast.rst b/docs/source/datasets/aefa_forecast.rst new file mode 100644 index 00000000..e116a389 --- /dev/null +++ b/docs/source/datasets/aefa_forecast.rst @@ -0,0 +1,113 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand.
+ +AEFA Forecast +============= + +Synthetic-backed dense-grid forecasting adapter aligned to the AEFA earthquake forecasting workflow. + +Overview +-------- + +AEFA Forecast is the public forecasting adapter used by the earthquake benchmark when exercising dense-grid wavefield forecasting models. + +The current implementation is synthetic-backed, but it preserves the task shape, tensor layout, and reporting surface used by the shared earthquake evaluator. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - AEFA forecasting ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Earthquake + * - Source Role + - Forecast Benchmark + * - Coverage + - Benchmark-aligned earthquake forecasting samples + * - Geometry + - Dense-grid wavefield tensors + * - Spatial Resolution + - Benchmark-defined dense sensor grid + * - Temporal Resolution + - Short history and forecast windows + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch tensors via the dataset registry + * - Registry Entry + - ``aefa_forecast`` + +Data Characteristics +-------------------- + +- Multichannel dense-grid history tensors paired with future dense-grid targets. +- Registry-backed benchmark adapter rather than a raw external archive loader. +- Intended for forecasting-path validation and report generation. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Smoke tests for WaveCastNet-style earthquake forecasting. +- Shared forecasting benchmark runs under the earthquake evaluator. +- Validation of report exports aligned to the forecasting path. + +Access +------ + +Use the links below to access the upstream source or its public documentation. 
+ +- `AEFA repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public earthquake forecasting benchmark surface rather than the private synthetic dataset name. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``aefa_forecast`` + +.. code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "aefa_forecast", + micro=True, + temporal_in=5, + temporal_out=4, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +- micro=True keeps the synthetic-backed forecasting path lightweight for validation. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Earthquake Benchmark `, :doc:`AEFA ` + +**Representative Models:** :doc:`WaveCastNet ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a benchmark adapter, not a full external AEFA ingestion pipeline. + +Reference +--------- + +- `AEFA `_. diff --git a/docs/source/datasets/caravan_streamflow.rst b/docs/source/datasets/caravan_streamflow.rst new file mode 100644 index 00000000..414bef72 --- /dev/null +++ b/docs/source/datasets/caravan_streamflow.rst @@ -0,0 +1,112 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +Caravan +======= + +Synthetic-backed streamflow benchmark adapter aligned to the Caravan large-sample hydrology ecosystem. + +Overview +-------- + +Caravan is the public flood streamflow adapter used to align PyHazards with a large-sample hydrology benchmark surface. + +The current implementation is synthetic-backed, but it preserves the streamflow forecasting contract used by the shared flood benchmark. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - Caravan community dataset surfaced through a PyHazards adapter + * - Hazard Family + - Flood + * - Source Role + - Streamflow Benchmark + * - Coverage + - Benchmark-aligned streamflow forecasting samples + * - Geometry + - Graph-temporal basin or node sequences + * - Spatial Resolution + - Basin or gauge nodes represented as graph elements + * - Temporal Resolution + - Rolling history windows for streamflow prediction + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch graph-temporal dataset objects via the dataset registry + * - Registry Entry + - ``caravan_streamflow`` + +Data Characteristics +-------------------- + +- Graph-temporal sequences with node-level targets for next-step streamflow prediction. +- Registry-backed benchmark adapter instead of a raw Caravan ingestion pipeline. +- Supports the public streamflow smoke path for NeuralHydrology LSTM and Google Flood Forecasting. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Streamflow smoke tests for benchmark-linked flood models. +- Shared flood benchmark runs with streamflow metrics such as NSE and KGE. +- Regression checks for graph-temporal basin workflows. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `Caravan paper `_ +- `Caravan repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public Caravan-aligned streamflow surface exposed by the flood benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``caravan_streamflow`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "caravan_streamflow", + micro=True, + history=4, + nodes=6, + ).load() + + train = data.get_split("train") + print(len(train.inputs), train.inputs[0].x.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Flood Benchmark `, :doc:`Caravan ` + +**Representative Models:** :doc:`NeuralHydrology LSTM `, :doc:`Google Flood Forecasting ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full Caravan downloader. + +Reference +--------- + +- `Caravan - A global community dataset for large-sample hydrology `_ (`repo `__). diff --git a/docs/source/datasets/era5.rst b/docs/source/datasets/era5.rst new file mode 100644 index 00000000..e816179a --- /dev/null +++ b/docs/source/datasets/era5.rst @@ -0,0 +1,98 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +ERA5 +==== + +ECMWF's global reanalysis used as a high-resolution meteorological baseline for hazard experiments. + +Overview +-------- + +ERA5 is ECMWF's fifth-generation global reanalysis, combining historical observations with a modern data assimilation system to produce temporally consistent atmospheric fields. + +PyHazards uses ERA5 as a shared meteorological baseline for flood, wildfire, and weather-aware graph workflows, including the HydroGraphNet example path. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - ECMWF / Copernicus Climate Change Service (C3S) + * - Hazard Family + - Shared Forcing + * - Source Role + - Reanalysis + * - Coverage + - Global + * - Geometry + - Regular latitude-longitude grid + * - Spatial Resolution + - ~0.25 deg x 0.25 deg + * - Temporal Resolution + - Hourly + * - Update Cadence + - Daily ERA5T updates with about 5-day latency, followed by final validated releases after 2-3 months + * - Period of Record + - 1940-present + * - Formats + - GRIB and NetCDF + * - Inspection CLI + - ``python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10`` + +Data Characteristics +-------------------- + +- Global hourly fields on a regular latitude-longitude grid. +- Single-level products with optional pressure-level and model-level variables. +- Common variables include near-surface meteorology, precipitation, radiation, and atmospheric state variables. +- Recent dates may mix validated ERA5 with preliminary ERA5T data. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Meteorological forcing for flood, wildfire, and extreme-weather prediction models. +- Climate variability analysis and environmental feature engineering. +- Shared reanalysis input for graph and spatiotemporal benchmark pipelines. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `ERA5 single levels `_ +- `Copernicus Climate Data Store `_ + +PyHazards Usage +--------------- + +Use the inspection command for direct file validation, then feed local ERA5 files into HydroGraphNet-style helper loaders when you need graph-temporal training inputs. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. 
+ +Related Coverage +~~~~~~~~~~~~~~~~ + +**Representative Models:** :doc:`HydroGraphNet ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10 + +Notes +----- + +- ERA5 is inspection-first in the public catalog; the downstream HydroGraphNet helper is documented here for convenience but is not a ``load_dataset(...)`` registry entry. + +Reference +--------- + +- `Hersbach et al. (2020). The ERA5 global reanalysis. `_. diff --git a/docs/source/datasets/firms.rst b/docs/source/datasets/firms.rst new file mode 100644 index 00000000..8199627c --- /dev/null +++ b/docs/source/datasets/firms.rst @@ -0,0 +1,95 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +FIRMS +===== + +NASA's near-real-time active fire detections used for operational wildfire monitoring and event labeling. + +Overview +-------- + +FIRMS distributes active fire and thermal anomaly detections derived from MODIS and VIIRS satellite sensors, with each record corresponding to a time-stamped hotspot observation. + +PyHazards uses FIRMS as a wildfire occurrence signal for operational monitoring and label construction when combined with weather and land-surface context. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NASA LANCE / FIRMS + * - Hazard Family + - Wildfire + * - Source Role + - Active Fire Detections + * - Coverage + - Global + * - Geometry + - Event-based point detections + * - Spatial Resolution + - ~375 m for VIIRS, ~1 km for MODIS + * - Temporal Resolution + - Event-based detections with multiple updates per day + * - Update Cadence + - Fire maps refresh about every 5 minutes and downloadable files refresh about hourly + * - Period of Record + - Near-real-time archive with later standard-science replacements + * - Formats + - CSV, Shapefile, GeoJSON, KML + * - Inspection CLI + - ``python -m pyhazards.datasets.firms.inspection --path /path/to/firms_data --max-items 10`` + +Data Characteristics +-------------------- + +- Global event-based point detections rather than gridded tensors. +- Latency is typically under 3 hours globally and faster for some U.S. and Canada products. +- Common attributes include location, detection time, fire radiative power, and confidence indicators. +- Near-real-time detections are later replaced by standard or science-quality products. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Operational wildfire monitoring and early detection. +- Event labeling for wildfire prediction pipelines. +- Spatiotemporal analysis of fire occurrence and activity patterns. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `FIRMS portal `_ +- `NASA Earthdata `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. 
+ +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.firms.inspection --path /path/to/firms_data --max-items 10 + +- Some archive and bulk-download routes require Earthdata login credentials. + +Reference +--------- + +- `Schroeder et al. (2014). The New VIIRS 375 m active fire detection data product. `_. diff --git a/docs/source/datasets/floodcastbench_inundation.rst b/docs/source/datasets/floodcastbench_inundation.rst new file mode 100644 index 00000000..77a95f66 --- /dev/null +++ b/docs/source/datasets/floodcastbench_inundation.rst @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +FloodCastBench +============== + +Synthetic-backed inundation benchmark adapter aligned to the FloodCastBench evaluation ecosystem. + +Overview +-------- + +FloodCastBench is the public inundation adapter used by PyHazards for raster flood prediction benchmarks. + +The current implementation is synthetic-backed, but it preserves the raster task and metric surface used by the shared flood evaluator. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - FloodCastBench ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Flood + * - Source Role + - Inundation Benchmark + * - Coverage + - Benchmark-aligned flood inundation samples + * - Geometry + - Raster inundation sequences + * - Spatial Resolution + - Benchmark-defined raster tiles + * - Temporal Resolution + - Short history windows with next-horizon inundation targets + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch tensors via the dataset registry + * - Registry Entry + - ``floodcastbench_inundation`` + +Data Characteristics +-------------------- + +- Multi-step raster inputs paired with next-horizon inundation targets. +- Registry-backed benchmark adapter rather than a raw external dataset ingestion path. +- Intended for pixel-level evaluation such as IoU and pixel MAE. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Smoke tests for FloodCast and UrbanFloodCast. +- Shared flood benchmark runs on inundation tasks. +- Regression checks for raster flood prediction outputs. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `FloodCastBench repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public FloodCastBench-aligned inundation surface exposed by the flood benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``floodcastbench_inundation`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "floodcastbench_inundation", + micro=True, + history=4, + channels=3, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Flood Benchmark `, :doc:`FloodCastBench ` + +**Representative Models:** :doc:`FloodCast `, :doc:`UrbanFloodCast ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full FloodCastBench ingestion pipeline. + +Reference +--------- + +- `FloodCastBench `_. diff --git a/docs/source/datasets/fpa_fod_tabular.rst b/docs/source/datasets/fpa_fod_tabular.rst new file mode 100644 index 00000000..3f0a549d --- /dev/null +++ b/docs/source/datasets/fpa_fod_tabular.rst @@ -0,0 +1,115 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +FPA-FOD Tabular +=============== + +Incident-level FPA-FOD features packaged for wildfire cause and size classification. + +Overview +-------- + +FPA-FOD Tabular converts one wildfire incident record into one feature vector for classification tasks such as incident cause prediction and grouped size prediction. + +PyHazards exposes it as a loadable dataset with a deterministic micro mode so the full source database is not required for smoke tests or quick experimentation. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - Fire Program Analysis Fire-Occurrence Database (FPA-FOD) adaptation in PyHazards + * - Hazard Family + - Wildfire + * - Source Role + - Incident Tabular + * - Coverage + - User-provided FPA-FOD coverage + * - Geometry + - Tabular feature vectors + * - Spatial Resolution + - Incident-level records + * - Temporal Resolution + - Event-based + * - Update Cadence + - User-managed local inputs or deterministic micro mode + * - Period of Record + - Depends on the supplied FPA-FOD source files + * - Formats + - SQLite, DB, CSV, and Parquet inputs + * - Inspection CLI + - ``python -m pyhazards.datasets.fpa_fod_tabular.inspection --task cause --micro`` + * - Registry Entry + - ``fpa_fod_tabular`` + +Data Characteristics +-------------------- + +- Supports task='cause' and task='size' classification targets. +- Accepts SQLite, DB, CSV, and Parquet sources. +- Micro mode keeps the path deterministic and lightweight for validation. +- Returned splits follow the standard DataBundle contract with tabular inputs and integer targets. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Wildfire cause classification experiments. +- Grouped fire size classification from incident records. +- Lightweight smoke and regression tests for the wildfire tabular path. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `PyHazards public dataset catalog `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``fpa_fod_tabular`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "fpa_fod_tabular", + task="cause", + micro=True, + normalize=True, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +- region='US' uses all available states, while region='CA' restricts to California incidents. +- cause_mode='paper5' preserves the five consolidated cause groups used by the public wildfire tabular path. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +**Representative Models:** :doc:`DNN-LSTM-AutoEncoder ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.fpa_fod_tabular.inspection --task cause --micro + +Reference +--------- + +- `PyHazards FPA-FOD tabular adaptation for the wildfire incident classification path. `_. diff --git a/docs/source/datasets/fpa_fod_weekly.rst b/docs/source/datasets/fpa_fod_weekly.rst new file mode 100644 index 00000000..6dfa3f7f --- /dev/null +++ b/docs/source/datasets/fpa_fod_weekly.rst @@ -0,0 +1,115 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +FPA-FOD Weekly +============== + +Weekly FPA-FOD aggregates packaged for next-week wildfire count forecasting by size group. + +Overview +-------- + +FPA-FOD Weekly builds rolling lookback windows from weekly wildfire incident counts and predicts next-week counts for grouped size classes. + +PyHazards exposes it as a loadable forecasting dataset with a micro mode so sequence models can be validated without the full source archive. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - Fire Program Analysis Fire-Occurrence Database (FPA-FOD) adaptation in PyHazards + * - Hazard Family + - Wildfire + * - Source Role + - Weekly Forecasting + * - Coverage + - User-provided FPA-FOD coverage + * - Geometry + - Temporal tabular sequences + * - Spatial Resolution + - Weekly aggregate windows + * - Temporal Resolution + - Weekly + * - Update Cadence + - User-managed local inputs or deterministic micro mode + * - Period of Record + - Depends on the supplied FPA-FOD source files + * - Formats + - SQLite, DB, CSV, and Parquet inputs + * - Inspection CLI + - ``python -m pyhazards.datasets.fpa_fod_weekly.inspection --micro --lookback-weeks 12`` + * - Registry Entry + - ``fpa_fod_weekly`` + +Data Characteristics +-------------------- + +- Predicts next-week counts for grouped size classes A/B/C/D/EFG. +- Supports feature modes with counts only or counts plus seasonal time features. +- Uses chronological splits to preserve the forecasting setting. +- Returned splits follow the DataBundle contract with sequence inputs and floating-point targets. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Weekly wildfire forecasting experiments. +- Sequence-model smoke tests for wildfire activity prediction. +- Lightweight benchmarking of tabular temporal wildfire baselines. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `PyHazards public dataset catalog `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``fpa_fod_weekly`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "fpa_fod_weekly", + micro=True, + features="counts+time", + lookback_weeks=12, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +- features='counts' uses only the five weekly count channels. +- features='counts+time' adds sinusoidal week-of-year features for seasonality. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +**Representative Models:** :doc:`DNN-LSTM-AutoEncoder `, :doc:`Wildfire Forecasting `, :doc:`ASUFM ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.fpa_fod_weekly.inspection --micro --lookback-weeks 12 + +Reference +--------- + +- `PyHazards FPA-FOD weekly adaptation for the wildfire forecasting path. `_. diff --git a/docs/source/datasets/frap_fire_perimeters.rst b/docs/source/datasets/frap_fire_perimeters.rst new file mode 100644 index 00000000..f856f250 --- /dev/null +++ b/docs/source/datasets/frap_fire_perimeters.rst @@ -0,0 +1,99 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +FRAP Fire Perimeters +==================== + +California's authoritative historical fire perimeter archive maintained by CAL FIRE FRAP. + +Overview +-------- + +FRAP Fire Perimeters is CAL FIRE's statewide historical perimeter archive for large fires and other mapped wildfire events in California. + +In PyHazards it serves as a regional authoritative perimeter source for wildfire evaluation, event backfilling, and comparison against national incident feeds such as WFIGS or satellite detections such as FIRMS. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - CAL FIRE / Fire and Resource Assessment Program (FRAP) + * - Hazard Family + - Wildfire + * - Source Role + - Historical Perimeters + * - Coverage + - California + * - Geometry + - Vector fire perimeter polygons + * - Spatial Resolution + - Event-level polygon geometries + * - Temporal Resolution + - Event-based historical perimeter archive + * - Update Cadence + - Annual spring releases with new fire-season perimeters + * - Period of Record + - Historical California fire perimeter archive spanning multiple decades + * - Formats + - Shapefile, file geodatabase downloads, and zipped GIS packages + * - Inspection CLI + - ``ogrinfo -so "/path/to/FRAP_Fire_Perimeters/shapefile/California_Fire_Perimeters_(all).shp" "California_Fire_Perimeters_(all)"`` + +Data Characteristics +-------------------- + +- Statewide polygon archive focused on historical fire perimeters. +- More suitable for perimeter validation and retrospective analysis than for near-real-time detection. +- Includes known completeness limitations for older fires and should be interpreted with source caveats in mind. +- Complements national incident feeds by providing California-specific historical depth. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Historical wildfire perimeter validation in California. +- Regional benchmark label curation and retrospective fire footprint analysis. +- Cross-checking incident records against mapped burn extents. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `CAL FIRE FRAP Fire Perimeters `_ +- `CAL FIRE Fire Perimeters metadata `_ + +PyHazards Usage +--------------- + +Use the local shapefile or zipped archive as an external inspection-first source when you need California-specific historical perimeters in wildfire workflows.
+ +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + ogrinfo -so "/path/to/FRAP_Fire_Perimeters/shapefile/California_Fire_Perimeters_(all).shp" "California_Fire_Perimeters_(all)" + +Notes +----- + +- FRAP is especially useful when you want a California-specific historical perimeter reference in addition to national feeds. +- Replace ``/path/to/FRAP_Fire_Perimeters`` with the location of your local FRAP download. + +Reference +--------- + +- `CAL FIRE FRAP Fire Perimeters `_. diff --git a/docs/source/datasets/geomac_historical.rst b/docs/source/datasets/geomac_historical.rst new file mode 100644 index 00000000..8437c2f8 --- /dev/null +++ b/docs/source/datasets/geomac_historical.rst @@ -0,0 +1,98 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +GeoMAC Historical +================= + +Historical GeoMAC wildfire perimeters preserved as a legacy U.S. perimeter archive for long-horizon evaluation. + +Overview +-------- + +GeoMAC Historical packages legacy wildfire perimeter archives that predate newer interagency operational feeds. + +In PyHazards it acts as a historical archive source for long-range retrospective wildfire evaluation, especially when you need older national perimeter context before newer incident systems became standard. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - Legacy GeoMAC / USGS-hosted historical archive + * - Hazard Family + - Wildfire + * - Source Role + - Historical Perimeters + * - Coverage + - United States + * - Geometry + - Archived wildfire perimeter polygons + * - Spatial Resolution + - Event-level perimeter geometries + * - Temporal Resolution + - Event-based archive + * - Update Cadence + - Legacy archive; local copy is static + * - Period of Record + - Local archive includes 2000-2018 plus 2019 packages + * - Formats + - ZIP archives containing GIS perimeter products + * - Inspection CLI + - ``unzip -l "/path/to/GeoMAC_Historical/Historic_Geomac_Perimeters_All_Years_2000_2018/Historic_Geomac_Perimeters_All_Years_2000_2018.zip" | head`` + +Data Characteristics +-------------------- + +- Legacy archive rather than a live operational feed. +- Useful for extending historical perimeter coverage when evaluating older wildfire seasons. +- Typically consumed after extraction into standard GIS formats. +- Best paired with newer systems such as WFIGS for post-2019 workflows. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Long-horizon historical wildfire perimeter studies. +- Retrospective perimeter benchmarking across older U.S. wildfire seasons. +- Gap-filling historical archives before newer interagency feeds. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `USGS Data Series 612: GeoMAC wildfire perimeters `_ + +PyHazards Usage +--------------- + +Use the local archives as an external inspection-first source when older U.S. wildfire perimeter history is needed. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. 
+ +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + unzip -l "/path/to/GeoMAC_Historical/Historic_Geomac_Perimeters_All_Years_2000_2018/Historic_Geomac_Perimeters_All_Years_2000_2018.zip" | head + +Notes +----- + +- GeoMAC Historical is a legacy archive and should be treated as a historical reference rather than a live feed. +- Replace ``/path/to/GeoMAC_Historical`` with the location of your local GeoMAC archive. + +Reference +--------- + +- `USGS Data Series 612: GeoMAC wildfire perimeters `_. diff --git a/docs/source/datasets/goes_geocolor.rst b/docs/source/datasets/goes_geocolor.rst new file mode 100644 index 00000000..8302f579 --- /dev/null +++ b/docs/source/datasets/goes_geocolor.rst @@ -0,0 +1,85 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +GOES GeoColor +============= + +NOAA GOES-East/West GeoColor imagery source used for visual fire-scene context. + +Overview +-------- + +GOES GeoColor imagery combines visible and infrared channels into an easy-to-interpret geostationary imagery product. + +In PyHazards it acts as wildfire scene context imagery for visual verification, event inspection, and qualitative comparison against fire and smoke products. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NOAA GOES / CIRA GeoColor imagery services + * - Hazard Family + - Shared Forcing + * - Source Role + - Satellite Imagery Context + * - Coverage + - GOES-East/West views over the Americas + * - Geometry + - Geostationary imagery time series + * - Spatial Resolution + - ABI imagery resolution on the fixed grid + * - Temporal Resolution + - About every 10 minutes + * - Update Cadence + - Continuous ingest as new imagery becomes available + * - Period of Record + - Local copy spans 2017-2026 with GOES-18 subset on disk + * - Formats + - Image products and derived imagery files + * - Inspection CLI + - ``find /home/runyang/ryang/GOES_GeoColor_CIRA -maxdepth 3 -type f | head`` + +Data Characteristics +-------------------- + +- Visual-context imagery rather than direct fire detections. +- Useful for scene interpretation, plume verification, and rapid event review. +- High temporal refresh over the geostationary domain. +- Best paired with GOES-R FDCF or HMS smoke products. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Visual wildfire scene context. +- Smoke and plume inspection. +- Manual event triage and QA. + +Access +------ + +- `CIRA Slider `_ + +PyHazards Usage +--------------- + +Use this imagery archive as an inspection-first visual context source. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +.. code-block:: bash + + find /home/runyang/ryang/GOES_GeoColor_CIRA -maxdepth 3 -type f | head + +Notes +----- + +- Local copy detected at ``/home/runyang/ryang/GOES_GeoColor_CIRA``. diff --git a/docs/source/datasets/goesr.rst b/docs/source/datasets/goesr.rst new file mode 100644 index 00000000..a496cacb --- /dev/null +++ b/docs/source/datasets/goesr.rst @@ -0,0 +1,88 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. 
+ +GOES-R +====== + +Rapid-refresh GOES-R satellite imagery used for smoke, fire, and weather monitoring workflows. + +Overview +-------- + +GOES-R provides high-frequency geostationary observations from the Advanced Baseline Imager, enabling continuous monitoring of atmospheric and surface processes across the Americas. + +PyHazards uses it as rapid-refresh imagery for smoke, fire evolution, ignition monitoring, and operational situational awareness workflows. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NOAA GOES-R Program + * - Hazard Family + - Shared Forcing + * - Source Role + - Geostationary Imagery + * - Coverage + - Western Hemisphere / Americas geostationary view + * - Geometry + - Raster imagery time series on the ABI fixed grid + * - Spatial Resolution + - ~0.5-2 km depending on spectral band + * - Temporal Resolution + - 1-10 minute refresh depending on sector and mode + * - Update Cadence + - Continuous ingest as new files become available + * - Period of Record + - Ongoing operational satellite archive + * - Formats + - NetCDF + * - Inspection CLI + - ``python -m pyhazards.datasets.goesr.inspection --path /path/to/goesr_data --max-items 10`` + +Data Characteristics +-------------------- + +- Raster time series rather than event records. +- Typical Mode 6 scan cadence is 10 minutes for Full Disk, 5 minutes for CONUS, and 1 minute for mesoscale sectors. +- Common products include visible and infrared imagery, brightness temperature, and fire-related thermal context. +- Distribution latency depends on the access route even when observations are near real time. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Early detection and monitoring of wildfire ignition and growth. +- Smoke and fire evolution analysis at high temporal resolution. +- Real-time situational awareness workflows. + +Access +------ + +Use the links below to access the upstream source or its public documentation. 
+ +- `GOES-R Program `_ +- `NOAA Open Data Dissemination `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.goesr.inspection --path /path/to/goesr_data --max-items 10 + +Reference +--------- + +- `Schmit et al. (2017). A closer look at the ABI on the GOES-R series. `_. diff --git a/docs/source/datasets/goesr_fdcf.rst b/docs/source/datasets/goesr_fdcf.rst new file mode 100644 index 00000000..e24ac806 --- /dev/null +++ b/docs/source/datasets/goesr_fdcf.rst @@ -0,0 +1,99 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +GOES-R FDCF +=========== + +GOES-R ABI Fire/Hot Spot Characterization files used for high-frequency active-fire monitoring across the Americas. + +Overview +-------- + +GOES-R FDCF is the ABI Fire/Hot Spot Characterization product from the GOES-R series, providing rapid-refresh geostationary active-fire and hot-spot information. + +In PyHazards it serves as a wildfire-specific geostationary fire-monitoring source that complements FIRMS with much higher refresh frequency over the GOES-East and GOES-West domains. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NOAA GOES-R Series / ABI + * - Hazard Family + - Wildfire + * - Source Role + - Geostationary Active Fire + * - Coverage + - GOES-East and GOES-West full-disk views over the Americas + * - Geometry + - Geostationary raster NetCDF time series + * - Spatial Resolution + - Product pixels at geostationary ABI resolution (roughly kilometer-scale at nadir) + * - Temporal Resolution + - About every 10 minutes for full-disk scans + * - Update Cadence + - Continuous operational production as new scans arrive + * - Period of Record + - GOES-16 and GOES-18 operational era + * - Formats + - NetCDF + * - Inspection CLI + - ``python -m pyhazards.datasets.goesr.inspection --path /home/runyang/ryang/GOES_FDCF_G16/2024 --max-items 10`` + +Data Characteristics +-------------------- + +- Geostationary fire monitoring with much higher temporal refresh than polar-orbiting active-fire products. +- Product is especially useful for tracking rapidly evolving wildfire activity. +- Domain is regional rather than global, tied to GOES-East and GOES-West views. +- Best used alongside FIRMS, incident records, and perimeter archives. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Rapid-refresh wildfire activity monitoring. +- Temporal alignment of fire activity with smoke and weather products. +- Cross-checking high-frequency fire dynamics against FIRMS hotspots. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `GOES-R Fire/Hot Spot Characterization product `_ +- `GOES-R product page at NOAA STAR `_ + +PyHazards Usage +--------------- + +Use the local GOES-East and GOES-West NetCDF archive as an external inspection-first source for high-frequency wildfire monitoring workflows. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. 
+ +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.goesr.inspection --path /home/runyang/ryang/GOES_FDCF_G16/2024 --max-items 10 + +Notes +----- + +- GOES-R FDCF complements FIRMS by trading lower spatial precision for much higher temporal refresh. +- Local copies detected at ``/home/runyang/ryang/GOES_FDCF_G16`` and ``/home/runyang/ryang/GOES_FDCF_G18``. + +Reference +--------- + +- `GOES-R Fire/Hot Spot Characterization product `_. diff --git a/docs/source/datasets/hms_smoke.rst b/docs/source/datasets/hms_smoke.rst new file mode 100644 index 00000000..6f70a5fb --- /dev/null +++ b/docs/source/datasets/hms_smoke.rst @@ -0,0 +1,99 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +HMS Smoke +========= + +NOAA analyst-drawn smoke plume polygons used for smoke tracking, verification, and wildfire smoke exposure analysis. + +Overview +-------- + +HMS Smoke is part of NOAA's Hazard Mapping System, where analysts blend multiple satellite streams to delineate visible smoke plume extent. + +In PyHazards it serves as a smoke-impact companion dataset for wildfire analysis, useful for plume verification, smoke transport evaluation, and exposure-aware wildfire workflows. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NOAA NESDIS / Hazard Mapping System (HMS) + * - Hazard Family + - Wildfire + * - Source Role + - Smoke Plumes + * - Coverage + - North America, Hawaii, and the Caribbean + * - Geometry + - Analyst-drawn smoke polygons + * - Spatial Resolution + - Vector plume extents with analyst-interpreted boundaries + * - Temporal Resolution + - Sub-daily plume updates + * - Update Cadence + - Near-real-time analyst updates during active smoke events + * - Period of Record + - Ongoing operational archive with historical yearly packages + * - Formats + - Shapefile and zipped archive packages + * - Inspection CLI + - ``ogrinfo -so "/home/runyang/ryang/HMS_Smoke/2024/shapefile/hms_smoke2024.shp" hms_smoke2024`` + +Data Characteristics +-------------------- + +- Polygon smoke extents rather than fire detections or perimeters. +- Interpreted product derived from multiple satellite views and analyst QA. +- Useful for smoke verification and impact mapping, not just fire ignition or spread. +- Complements FIRMS, GOES fire products, and incident perimeter archives. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Smoke plume verification and event analysis. +- Exposure-aware wildfire impact studies. +- Cross-checking smoke extent against active-fire and perimeter products. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `NOAA HMS Fire and Smoke Analysis `_ +- `NASA ARSET overview mentioning HMS smoke product `_ + +PyHazards Usage +--------------- + +Use the local shapefile archive as an external inspection-first source for smoke-plume-aware wildfire workflows. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. 
+ +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + ogrinfo -so "/home/runyang/ryang/HMS_Smoke/2024/shapefile/hms_smoke2024.shp" hms_smoke2024 + +Notes +----- + +- HMS Smoke is especially useful when you want smoke impact context, not only fire occurrence. +- Local copy detected at ``/home/runyang/ryang/HMS_Smoke``. + +Reference +--------- + +- `NOAA HMS Fire and Smoke Analysis `_. diff --git a/docs/source/datasets/hpwren_weather.rst b/docs/source/datasets/hpwren_weather.rst new file mode 100644 index 00000000..c475716c --- /dev/null +++ b/docs/source/datasets/hpwren_weather.rst @@ -0,0 +1,85 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +HPWREN Weather +============== + +Public HPWREN station feeds for metadata, realtime weather observations, and recent local weather context. + +Overview +-------- + +HPWREN provides wildfire-relevant station observations and station metadata from Southern California mountain and foothill environments. + +In PyHazards it serves as a local weather-station context source for wildfire operations, station-based validation, and regional feature engineering. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - HPWREN / University of California San Diego + * - Hazard Family + - Shared Forcing + * - Source Role + - Weather Stations + * - Coverage + - HPWREN station network footprint + * - Geometry + - Station points with tabular observations + * - Spatial Resolution + - Station-level observations + * - Temporal Resolution + - Minutes to hourly depending on station/feed + * - Update Cadence + - Real-time operational updates plus archived monthly summaries + * - Period of Record + - Local copy spans 2000-2026 + * - Formats + - Text, CSV-style tables, and station metadata files + * - Inspection CLI + - ``find /home/runyang/ryang/HPWREN_Weather -maxdepth 2 -type f | head`` + +Data Characteristics +-------------------- + +- Station-based observations rather than gridded forecasts. +- Useful for local fire-weather context and sanity checks against model forcing. +- Includes metadata, real-time feeds, and historical monthly directories. +- Best used together with gridded forecast products such as HRRR or NDFD. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Local fire-weather validation. +- Station-context feature engineering. +- Regional monitoring dashboards for wildfire operations. + +Access +------ + +- `HPWREN `_ + +PyHazards Usage +--------------- + +Use this local station archive as an inspection-first source for wildfire weather context and QA workflows. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +.. code-block:: bash + + find /home/runyang/ryang/HPWREN_Weather -maxdepth 2 -type f | head + +Notes +----- + +- Local copy detected at ``/home/runyang/ryang/HPWREN_Weather``. diff --git a/docs/source/datasets/hrrr.rst b/docs/source/datasets/hrrr.rst new file mode 100644 index 00000000..c77866ff --- /dev/null +++ b/docs/source/datasets/hrrr.rst @@ -0,0 +1,86 @@ +.. This file is generated by scripts/render_dataset_docs.py. 
Do not edit by hand. + +HRRR +==== + +NOAA HRRR forecast layers used for dynamic short-range wildfire weather features. + +Overview +-------- + +HRRR is NOAA's high-resolution rapid-refresh forecast system, updated hourly and designed for short-range weather prediction at convection-allowing resolution. + +In PyHazards it serves as a short-range weather forecast backbone for wildfire prediction and operational forecast feature extraction. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NOAA HRRR + * - Hazard Family + - Shared Forcing + * - Source Role + - Weather Forecast + * - Coverage + - CONUS-focused forecast domain + * - Geometry + - Gridded forecast fields + * - Spatial Resolution + - About 3 km + * - Temporal Resolution + - Hourly model cycles with short forecast lead times + * - Update Cadence + - Hourly + * - Period of Record + - Local copy spans 2014-2026 with 2024 archive on disk + * - Formats + - GRIB2 and derivative archives + * - Inspection CLI + - ``find /home/runyang/ryang/HRRR/2024 -maxdepth 3 -type f | head`` + +Data Characteristics +-------------------- + +- Short-range numerical weather prediction rather than reanalysis. +- High spatial and temporal refresh for dynamic fire-weather context. +- Useful for forecast-aware wildfire features such as wind, humidity, and precipitation. +- Often paired with NDFD for operational forecast context. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Short-range wildfire weather forecasting features. +- Forecast forcing for next-day wildfire benchmark experiments. +- Operational fire-weather context analysis. + +Access +------ + +- `HRRR official page `_ +- `NOAA HRRR open-data listing `_ + +PyHazards Usage +--------------- + +Use the local archive as an inspection-first short-range forecast source. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +.. 
code-block:: bash + + find /home/runyang/ryang/HRRR/2024 -maxdepth 3 -type f | head + +Notes +----- + +- Local copy detected at ``/home/runyang/ryang/HRRR``. diff --git a/docs/source/datasets/hydrobench_streamflow.rst b/docs/source/datasets/hydrobench_streamflow.rst new file mode 100644 index 00000000..7bcc3e1b --- /dev/null +++ b/docs/source/datasets/hydrobench_streamflow.rst @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +HydroBench +========== + +Synthetic-backed streamflow diagnostics adapter aligned to the HydroBench ecosystem. + +Overview +-------- + +HydroBench is the public flood adapter used for streamflow diagnostics and HydroGraphNet-aligned benchmark runs. + +The current implementation is synthetic-backed, but it preserves the streamflow task and metric contract exposed by the shared flood benchmark. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - HydroBench ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Flood + * - Source Role + - Streamflow Benchmark + * - Coverage + - Benchmark-aligned streamflow forecasting samples + * - Geometry + - Graph-temporal basin or node sequences + * - Spatial Resolution + - Basin or gauge nodes represented as graph elements + * - Temporal Resolution + - Rolling history windows for streamflow prediction + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch graph-temporal dataset objects via the dataset registry + * - Registry Entry + - ``hydrobench_streamflow`` + +Data Characteristics +-------------------- + +- Graph-temporal sequences with node-level targets for next-step streamflow prediction. +- Registry-backed benchmark adapter rather than a raw HydroBench dataset ingestion path. +- Intended for HydroGraphNet smoke runs and flood benchmark diagnostics. 
+ +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- HydroGraphNet smoke tests. +- Shared flood benchmark runs with HydroBench-aligned metrics. +- Diagnostics for graph-based flood forecasting experiments. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `HydroBench repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public HydroBench-aligned streamflow surface exposed by the flood benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``hydrobench_streamflow`` + +.. code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "hydrobench_streamflow", + micro=True, + history=4, + nodes=6, + ).load() + + train = data.get_split("train") + print(len(train.inputs), train.inputs[0].x.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Flood Benchmark `, :doc:`HydroBench ` + +**Representative Models:** :doc:`HydroGraphNet ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full HydroBench downloader. + +Reference +--------- + +- `HydroBench `_. diff --git a/docs/source/datasets/ibtracs_tracks.rst b/docs/source/datasets/ibtracs_tracks.rst new file mode 100644 index 00000000..a3dbf0e3 --- /dev/null +++ b/docs/source/datasets/ibtracs_tracks.rst @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +IBTrACS +======= + +Synthetic-backed storm-track adapter aligned to the IBTrACS tropical cyclone archive. + +Overview +-------- + +IBTrACS is the public storm-track adapter used by PyHazards for shared tropical cyclone benchmark runs. 
+ +The current implementation is synthetic-backed, but it preserves the track-intensity forecasting surface used by the shared tropical cyclone evaluator. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NOAA NCEI International Best Track Archive for Climate Stewardship surfaced through a PyHazards adapter + * - Hazard Family + - Tropical Cyclone + * - Source Role + - Track Archive + * - Coverage + - Benchmark-aligned tropical cyclone track and intensity samples + * - Geometry + - Storm-track history sequences + * - Spatial Resolution + - Storm-centered best-track sequences + * - Temporal Resolution + - Historical track windows with forecast horizons + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch tensors via the dataset registry + * - Registry Entry + - ``ibtracs_tracks`` + +Data Characteristics +-------------------- + +- Storm-history sequences with future latitude, longitude, and intensity targets. +- Registry-backed benchmark adapter rather than a raw IBTrACS archive loader. +- Supports both basin-specific hurricane models and broader tropical cyclone adapters. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Hurricast smoke tests. +- Shared tropical cyclone benchmark runs for track and intensity prediction. +- Benchmark-aligned validation for weather-model storm adapters. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `IBTrACS product page `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public IBTrACS-aligned storm-track surface exposed by the tropical cyclone benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``ibtracs_tracks`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "ibtracs_tracks", + micro=True, + history=6, + horizon=5, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Tropical Cyclone Benchmark `, :doc:`IBTrACS ` + +**Representative Models:** :doc:`Hurricast `, :doc:`GraphCast TC Adapter `, :doc:`Pangu TC Adapter `, :doc:`FourCastNet TC Adapter ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full IBTrACS ingestion pipeline. + +Reference +--------- + +- `IBTrACS `_. diff --git a/docs/source/datasets/landfire.rst b/docs/source/datasets/landfire.rst new file mode 100644 index 00000000..42569a64 --- /dev/null +++ b/docs/source/datasets/landfire.rst @@ -0,0 +1,93 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +LANDFIRE +======== + +Nationwide fuels, vegetation, and canopy layers used as static wildfire covariates. + +Overview +-------- + +LANDFIRE provides nationwide maps of vegetation, fuels, canopy structure, and fire regime information derived from remote sensing, field observations, and ecological modeling. + +PyHazards uses it as static landscape context for wildfire spread, behavior, and risk-oriented workflows. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - U.S. 
Forest Service LANDFIRE Program + * - Hazard Family + - Wildfire + * - Source Role + - Fuels and Vegetation + * - Coverage + - United States + * - Geometry + - Gridded raster layers + * - Spatial Resolution + - ~30 m + * - Temporal Resolution + - Static or slowly varying versioned releases + * - Update Cadence + - Annual versioned update suites + * - Period of Record + - Versioned annual releases + * - Formats + - GeoTIFF and related GIS packages + * - Inspection CLI + - ``python -m pyhazards.datasets.landfire.inspection --path /path/to/landfire_data --max-items 10`` + +Data Characteristics +-------------------- + +- Raster covariates rather than event records. +- Versioned annual releases intended to stay current to the previous year. +- Common layers include fuel models, vegetation type, canopy metrics, and fire regime products. +- Distributed in projected coordinate systems with product-specific metadata. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Fuel characterization for wildfire behavior and spread modeling. +- Landscape-scale wildfire risk assessment. +- Static feature layers for machine-learning wildfire models. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `LANDFIRE data access `_ +- `LANDFIRE program overview `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.landfire.inspection --path /path/to/landfire_data --max-items 10 + +Reference +--------- + +- `Rollins (2009). 
LANDFIRE: A nationally consistent vegetation, wildland fire, and fuel assessment. `_. diff --git a/docs/source/datasets/landscan_population.rst b/docs/source/datasets/landscan_population.rst new file mode 100644 index 00000000..07c58548 --- /dev/null +++ b/docs/source/datasets/landscan_population.rst @@ -0,0 +1,86 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +LandScan Population +=================== + +ORNL LandScan population package used for population-at-risk and exposure context in wildfire workflows. + +Overview +-------- + +LandScan is ORNL's global gridded population product designed for estimating ambient population distribution. + +In PyHazards it serves as a wildfire exposure and population-at-risk context layer for evaluation, risk modeling, and human-impact analysis. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - Oak Ridge National Laboratory (ORNL) + * - Hazard Family + - Wildfire + * - Source Role + - Population Exposure Context + * - Coverage + - Global + * - Geometry + - Gridded population rasters + * - Spatial Resolution + - About 30 arc-seconds globally + * - Temporal Resolution + - Annual releases + * - Update Cadence + - Release-based / annual + * - Period of Record + - Local copy includes LandScan Global 2024 + * - Formats + - GeoTIFF and extracted raster packages + * - Inspection CLI + - ``find /home/runyang/ryang/LandScan_Global_2024 -maxdepth 3 -type f | head`` + +Data Characteristics +-------------------- + +- Population exposure raster rather than wildfire observations. +- Useful for population-at-risk analysis and human-impact context. +- Global rather than wildfire-specific, but often highly relevant in hazard studies. +- Best paired with incident or perimeter data. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Population-at-risk context for wildfire events. +- Exposure-aware risk mapping. +- Human-impact summaries for benchmark analysis. 
+ +Access +------ + +- `ORNL LandScan Viewer `_ +- `LandScan Global 2024 dataset entry `_ + +PyHazards Usage +--------------- + +Use this local raster package as an inspection-first population context source. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +.. code-block:: bash + + find /home/runyang/ryang/LandScan_Global_2024 -maxdepth 3 -type f | head + +Notes +----- + +- Local copy detected at ``/home/runyang/ryang/LandScan_Global_2024``. diff --git a/docs/source/datasets/merra2.rst b/docs/source/datasets/merra2.rst new file mode 100644 index 00000000..a141c69d --- /dev/null +++ b/docs/source/datasets/merra2.rst @@ -0,0 +1,90 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +MERRA-2 +======= + +Global atmospheric reanalysis from NASA GMAO used as a shared meteorological backbone for hazard modeling. + +Overview +-------- + +MERRA-2 is a global atmospheric reanalysis that assimilates satellite and conventional observations into a numerical weather prediction system to produce gridded, time-continuous estimates of the atmospheric state. + +In PyHazards it serves as a shared forcing and covariate source for weather-aware hazard workflows, especially when a project needs a stable long historical archive. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NASA Global Modeling and Assimilation Office (GMAO) + * - Hazard Family + - Shared Forcing + * - Source Role + - Reanalysis + * - Coverage + - Global + * - Geometry + - Regular latitude-longitude grid + * - Spatial Resolution + - ~0.5 deg x 0.625 deg + * - Temporal Resolution + - Hourly + * - Update Cadence + - Published monthly with typical 2-3 week latency after month end + * - Period of Record + - 1980-present + * - Formats + - NetCDF4 + * - Inspection CLI + - ``python -m pyhazards.datasets.merra2.inspection 20260101`` + +Data Characteristics +-------------------- + +- Global coverage on a regular latitude-longitude grid. +- Hourly meteorology with derived 3-hourly, daily, and monthly products. +- Surface fields plus multi-level atmospheric profiles. +- Common variables include near-surface temperature, humidity, wind, precipitation, and surface fluxes. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Meteorological forcing for wildfire and multi-hazard prediction models. +- Climate diagnostics and long-horizon environmental covariates. +- Shared weather backbone for weather-climate benchmark pipelines. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `MERRA-2 overview `_ +- `NASA Earthdata `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.merra2.inspection 20260101 + +- Earthdata credentials are required when raw files are not already available locally. + +Reference +--------- + +- `Gelaro et al. (2017). 
The Modern-Era Retrospective Analysis for Research and Applications, Version 2 (MERRA-2). `_. diff --git a/docs/source/datasets/mtbs.rst b/docs/source/datasets/mtbs.rst new file mode 100644 index 00000000..e1daddf0 --- /dev/null +++ b/docs/source/datasets/mtbs.rst @@ -0,0 +1,93 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +MTBS +==== + +U.S. burn severity and fire perimeter products used for post-fire analysis and wildfire evaluation. + +Overview +-------- + +MTBS maps wildfire perimeters and burn severity across the United States using Landsat imagery and standardized spectral change products such as dNBR and RdNBR. + +In PyHazards it acts as a post-fire assessment source for burn extent, severity, and long-term wildfire regime studies. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - U.S. Geological Survey and USDA Forest Service MTBS program + * - Hazard Family + - Wildfire + * - Source Role + - Burn Severity + * - Coverage + - United States + * - Geometry + - Per-fire rasters with associated vector perimeters + * - Spatial Resolution + - 30 m + * - Temporal Resolution + - Fire-event and fire-year products + * - Update Cadence + - Continuous mapping with quarterly releases + * - Period of Record + - 1984-near present + * - Formats + - GeoTIFF, Shapefile, File Geodatabase + * - Inspection CLI + - ``python -m pyhazards.datasets.mtbs.inspection --path /path/to/mtbs_data --max-items 10`` + +Data Characteristics +-------------------- + +- Event-based raster layers with vector perimeters for individual fires. +- Historical archive from 1984 onward, expanded through quarterly releases. +- Includes burn severity classes and supporting spectral severity products. +- Product availability depends on Landsat imagery timing and production workflow rather than near-real-time ingest. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Post-fire burn severity and impact assessment. 
+- Long-term wildfire regime and trend analysis. +- Model evaluation for fire extent and severity prediction. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `MTBS data portal <https://www.mtbs.gov/>`_ +- `USGS MTBS overview <https://www.usgs.gov/centers/eros/science/monitoring-trends-burn-severity>`_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.mtbs.inspection --path /path/to/mtbs_data --max-items 10 + +Reference +--------- + +- `Eidenshink et al. (2007). A project for monitoring trends in burn severity. <https://doi.org/10.4996/fireecology.0301003>`_. diff --git a/docs/source/datasets/nasa_gibs.rst b/docs/source/datasets/nasa_gibs.rst new file mode 100644 index 00000000..ab5be0f2 --- /dev/null +++ b/docs/source/datasets/nasa_gibs.rst @@ -0,0 +1,85 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +NASA GIBS +========= + +NASA EOSDIS global imagery via WMS/WMTS used for daily true-color satellite imagery. + +Overview +-------- + +NASA GIBS provides easy-to-browse global imagery layers from EOSDIS through map tile and imagery services. + +In PyHazards it acts as daily wildfire scene imagery context and a lightweight remote-sensing browse layer for event inspection. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NASA EOSDIS / GIBS + * - Hazard Family + - Shared Forcing + * - Source Role + - Satellite Imagery Context + * - Coverage + - Global + * - Geometry + - Tiled imagery and browse layers + * - Spatial Resolution + - Product-dependent imagery resolutions + * - Temporal Resolution + - Daily imagery products + * - Update Cadence + - Daily + * - Period of Record + - Local copy spans 2000-2026 with 2024 imagery subset on disk + * - Formats + - WMTS/WMS layers and downloaded imagery tiles + * - Inspection CLI + - ``find /home/runyang/ryang/NASA_GIBS_2024 -maxdepth 3 -type f | head`` + +Data Characteristics +-------------------- + +- Browse-oriented imagery service rather than analysis-ready tensors. +- Useful for qualitative inspection and event context. +- Global daily coverage across multiple EOSDIS imagery layers. +- Best paired with analytical fire or smoke products when building workflows. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Daily true-color wildfire imagery context. +- Manual inspection of fire events and plume signatures. +- Remote-sensing browse support for benchmark QA. + +Access +------ + +- `NASA GIBS overview `_ + +PyHazards Usage +--------------- + +Use this imagery archive as an inspection-first context source rather than a registry dataset. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +.. code-block:: bash + + find /home/runyang/ryang/NASA_GIBS_2024 -maxdepth 3 -type f | head + +Notes +----- + +- Local copy detected at ``/home/runyang/ryang/NASA_GIBS_2024``. diff --git a/docs/source/datasets/ndfd.rst b/docs/source/datasets/ndfd.rst new file mode 100644 index 00000000..93053c6d --- /dev/null +++ b/docs/source/datasets/ndfd.rst @@ -0,0 +1,85 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. 
+ +NDFD +==== + +NOAA NDFD grids and warning bulletins used for wildfire forecast context, watches, warnings, and advisories. + +Overview +-------- + +The National Digital Forecast Database packages official National Weather Service forecast grids and public hazard products. + +In PyHazards it provides operational forecast layers and warning context for wildfire-weather workflows, including critical fire weather and watches/warnings fields. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NOAA National Weather Service / NDFD + * - Hazard Family + - Shared Forcing + * - Source Role + - Weather Forecast and Watches/Warnings + * - Coverage + - United States public forecast grids + * - Geometry + - Gridded forecast layers and bulletin products + * - Spatial Resolution + - Forecast grid products with variable regional resolution + * - Temporal Resolution + - Hourly to daily depending on field + * - Update Cadence + - Issue-based for hazards and routine forecast refresh for grids + * - Period of Record + - Local copy spans 2000-2026 + * - Formats + - GRIB2, text, and derived grids + * - Inspection CLI + - ``find /home/runyang/ryang/NDFD -maxdepth 2 -type d | head`` + +Data Characteristics +-------------------- + +- Official forecast grids rather than model reanalysis. +- Includes fire-weather relevant variables and warning/advisory products. +- Useful for operational context and downstream feature extraction. +- Complements HRRR when both official forecast products and model guidance are needed. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Fire-weather forecast feature engineering. +- Watches, warnings, and advisories context. +- Operational wildfire decision-support pipelines. + +Access +------ + +- `NDFD / digital.weather.gov `_ + +PyHazards Usage +--------------- + +Use the local NDFD archive as an inspection-first operational forecast source. 
+ +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +.. code-block:: bash + + find /home/runyang/ryang/NDFD -maxdepth 2 -type d | head + +Notes +----- + +- Local copy detected at ``/home/runyang/ryang/NDFD``. diff --git a/docs/source/datasets/noaa_flood.rst b/docs/source/datasets/noaa_flood.rst new file mode 100644 index 00000000..a9c79ccc --- /dev/null +++ b/docs/source/datasets/noaa_flood.rst @@ -0,0 +1,94 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +NOAA Flood Events +================= + +Historical NOAA storm-event flood records used as event labels and impact targets for flood studies. + +Overview +-------- + +NOAA Flood Events are derived from the NOAA Storm Events Database and document the timing, location, and impacts of severe flood-related events across the United States. + +In PyHazards they function as event-level labels or targets for flood occurrence and impact analysis, especially when paired with meteorological drivers. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NOAA National Centers for Environmental Information (NCEI) + * - Hazard Family + - Flood + * - Source Role + - Event Records + * - Coverage + - United States + * - Geometry + - Tabular event records with administrative regions and optional point coordinates + * - Spatial Resolution + - County or zone level reporting, with points when available + * - Temporal Resolution + - Event-based + * - Update Cadence + - Updated monthly, typically 75-90 days after the end of a data month + * - Period of Record + - 1950-present + * - Formats + - Web query, bulk CSV, and database extracts + * - Inspection CLI + - ``python -m pyhazards.datasets.noaa_flood.inspection --path /path/to/noaa_flood_data --max-items 10`` + +Data Characteristics +-------------------- + +- Event-based tabular records rather than gridded tensors. 
+- Historical archive appended as new months are processed and validated. +- Typical attributes include event timing, location, narratives, and reported damages. +- Very recent months may be unavailable because of reporting and validation lag. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Flood occurrence and frequency analysis. +- Impact and damage assessment studies. +- Supervised learning with event records as flood targets. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `Storm Events Database <https://www.ncdc.noaa.gov/stormevents/>`_ +- `Storm Events bulk download <https://www.ncei.noaa.gov/pub/data/swdi/stormevents/csvfiles/>`_ +- `NOAA NCEI <https://www.ncei.noaa.gov/>`_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Flood Benchmark ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.noaa_flood.inspection --path /path/to/noaa_flood_data --max-items 10 + +Reference +--------- + +- `NOAA National Centers for Environmental Information. Storm Events Database Documentation. <https://www.ncei.noaa.gov/pub/data/swdi/stormevents/csvfiles/Storm-Data-Bulk-csv-Format.pdf>`_. diff --git a/docs/source/datasets/nohrsc_snodas.rst b/docs/source/datasets/nohrsc_snodas.rst new file mode 100644 index 00000000..9b7f4ecd --- /dev/null +++ b/docs/source/datasets/nohrsc_snodas.rst @@ -0,0 +1,85 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +NOHRSC SNODAS +============= + +NOAA snow-analysis / SNODAS daily archives used as snow-condition context for wildfire-weather studies. + +Overview +-------- + +SNODAS is the Snow Data Assimilation System distributed by NOAA NOHRSC, providing daily gridded snow-condition products. 
+ +In PyHazards it supplies snow-state context that can matter for seasonal fuel curing, hydrologic carryover, and mountain wildfire-weather workflows. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NOAA NOHRSC / SNODAS + * - Hazard Family + - Shared Forcing + * - Source Role + - Snow Analysis + * - Coverage + - Continental United States + * - Geometry + - Gridded raster fields + * - Spatial Resolution + - About 1 km + * - Temporal Resolution + - Daily + * - Update Cadence + - Daily + * - Period of Record + - Local copy spans 2003-2026 with 2024 archive on disk + * - Formats + - Gridded archives and derived masks + * - Inspection CLI + - ``find /home/runyang/ryang/NOHRSC_SNODAS_masked_2024 -maxdepth 2 -type d | head`` + +Data Characteristics +-------------------- + +- Daily snow-condition product rather than direct wildfire observations. +- Useful as seasonal context for fuel and landscape state. +- Best integrated with weather forcing and topographic context. +- Particularly relevant for mountain and snow-affected regions. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Snow-condition context for wildfire feature engineering. +- Seasonal carryover analysis. +- Landscape-state covariates in western U.S. wildfire workflows. + +Access +------ + +- `NOHRSC archived data and SNODAS description `_ + +PyHazards Usage +--------------- + +Use the local daily archive as an inspection-first forcing source when snow state matters for wildfire context. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +.. code-block:: bash + + find /home/runyang/ryang/NOHRSC_SNODAS_masked_2024 -maxdepth 2 -type d | head + +Notes +----- + +- Local copy detected at ``/home/runyang/ryang/NOHRSC_SNODAS_masked_2024``. 
diff --git a/docs/source/datasets/pick_benchmark_waveforms.rst b/docs/source/datasets/pick_benchmark_waveforms.rst new file mode 100644 index 00000000..008adadf --- /dev/null +++ b/docs/source/datasets/pick_benchmark_waveforms.rst @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +pick-benchmark +============== + +Synthetic-backed waveform picking adapter aligned to the pick-benchmark evaluation ecosystem. + +Overview +-------- + +pick-benchmark is the public waveform adapter used by the earthquake benchmark for transformer and CNN picking baselines. + +The current implementation is synthetic-backed, but it preserves the phase-picking task shape, labels, and metrics expected by the shared earthquake evaluator. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - pick-benchmark ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Earthquake + * - Source Role + - Waveform Benchmark + * - Coverage + - Benchmark-aligned earthquake phase-picking samples + * - Geometry + - Multichannel waveform windows + * - Spatial Resolution + - Benchmark-defined waveform channels and sample windows + * - Temporal Resolution + - Short waveform windows with phase-pick targets + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch tensors via the dataset registry + * - Registry Entry + - ``pick_benchmark_waveforms`` + +Data Characteristics +-------------------- + +- Multichannel waveform windows paired with P- and S-arrival sample targets. +- Registry-backed benchmark adapter rather than a raw external waveform ingestion path. +- Intended for phase-picking validation and smoke tests. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- EQTransformer and GPD smoke tests. +- Shared earthquake picking benchmark runs. 
+- Regression checks for waveform-based picking models. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `pick-benchmark repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public pick-benchmark-aligned waveform surface exposed by the earthquake benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``pick_benchmark_waveforms`` + +.. code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "pick_benchmark_waveforms", + micro=True, + channels=3, + length=256, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Earthquake Benchmark `, :doc:`pick-benchmark ` + +**Representative Models:** :doc:`EQTransformer `, :doc:`GPD ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full pick-benchmark downloader. + +Reference +--------- + +- `pick-benchmark `_. diff --git a/docs/source/datasets/seisbench_waveforms.rst b/docs/source/datasets/seisbench_waveforms.rst new file mode 100644 index 00000000..5fb54c8f --- /dev/null +++ b/docs/source/datasets/seisbench_waveforms.rst @@ -0,0 +1,112 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +SeisBench +========= + +Synthetic-backed waveform picking adapter aligned to the SeisBench ecosystem. + +Overview +-------- + +SeisBench is the public waveform adapter used by PyHazards for the earthquake picking path. + +The current implementation is synthetic-backed, but it preserves the picking task shape, labels, and metrics expected by the shared earthquake evaluator. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - SeisBench ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Earthquake + * - Source Role + - Waveform Benchmark + * - Coverage + - Benchmark-aligned earthquake phase-picking samples + * - Geometry + - Multichannel waveform windows + * - Spatial Resolution + - Benchmark-defined waveform channels and sample windows + * - Temporal Resolution + - Short waveform windows with phase-pick targets + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch tensors via the dataset registry + * - Registry Entry + - ``seisbench_waveforms`` + +Data Characteristics +-------------------- + +- Multichannel waveform windows paired with P- and S-arrival sample targets. +- Registry-backed benchmark adapter rather than a raw external waveform ingestion path. +- Intended for phase-picking validation and smoke tests. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- PhaseNet and EQNet smoke tests. +- Shared earthquake benchmark runs on picking tasks. +- Regression checks for waveform-based seismic models. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `SeisBench paper `_ +- `SeisBench repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public SeisBench-aligned waveform surface exposed by the earthquake benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``seisbench_waveforms`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "seisbench_waveforms", + micro=True, + channels=3, + length=256, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Earthquake Benchmark `, :doc:`SeisBench ` + +**Representative Models:** :doc:`PhaseNet `, :doc:`EQNet ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full SeisBench ingestion pipeline. + +Reference +--------- + +- `SeisBench - A Toolbox for Machine Learning in Seismology `_ (`repo `__). diff --git a/docs/source/datasets/spot_forecast.rst b/docs/source/datasets/spot_forecast.rst new file mode 100644 index 00000000..7e5e8030 --- /dev/null +++ b/docs/source/datasets/spot_forecast.rst @@ -0,0 +1,87 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +Spot Forecast +============= + +NOAA NWS spot forecast products used for incident-specific forecast guidance and operational fire-weather context. + +Overview +-------- + +Spot Forecast products are incident-focused weather forecast products prepared by the National Weather Service for wildfire and emergency operations. + +In PyHazards they provide operational fire-weather context for incident timelines, analyst review, and retrospective comparison with model-based forecast sources. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - NOAA National Weather Service + * - Hazard Family + - Shared Forcing + * - Source Role + - Incident Forecast Guidance + * - Coverage + - Incident-specific forecast products + * - Geometry + - Text and bulletin-style forecast products + * - Spatial Resolution + - Incident/request level + * - Temporal Resolution + - Issue-based + * - Update Cadence + - Generated when requested for active incidents + * - Period of Record + - Local copy spans 2000-2026 + * - Formats + - Text products and support lists + * - Inspection CLI + - ``find /home/runyang/ryang/Spot_Forecast_Current -maxdepth 2 -type f | head`` + +Data Characteristics +-------------------- + +- Operational forecast guidance rather than retrospective climate data. +- Highly incident-specific and request-driven. +- Useful for contextualizing decisions and incident weather expectations. +- Best interpreted alongside broader forecast grids and observations. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Incident-level fire-weather context. +- Retrospective comparison of model forecast versus operational forecast guidance. +- Operational timeline reconstruction. + +Access +------ + +- `NWS Spot Forecast page `_ +- `NWS fire weather resources `_ + +PyHazards Usage +--------------- + +Use this product archive as an inspection-first operational context source rather than a direct ``load_dataset(...)`` path. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +.. code-block:: bash + + find /home/runyang/ryang/Spot_Forecast_Current -maxdepth 2 -type f | head + +Notes +----- + +- Spot Forecast is best treated as operations context, not a uniform gridded forcing dataset. +- Local copy detected at ``/home/runyang/ryang/Spot_Forecast_Current``. 
diff --git a/docs/source/datasets/synoptic_weather.rst b/docs/source/datasets/synoptic_weather.rst new file mode 100644 index 00000000..a9e6b9d4 --- /dev/null +++ b/docs/source/datasets/synoptic_weather.rst @@ -0,0 +1,85 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +Synoptic Weather +================ + +Synoptic station metadata and snapshots used for weather-station context in wildfire workflows. + +Overview +-------- + +Synoptic aggregates real-time and historical weather station observations and metadata through a common API and bulk-access workflow. + +In PyHazards it serves as station-based weather context for wildfire operations, event review, and local observation cross-checks. + +At a Glance +----------- + +.. list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - Synoptic Data + * - Hazard Family + - Shared Forcing + * - Source Role + - Weather Stations + * - Coverage + - Multi-network station coverage where access is available + * - Geometry + - Station points with tabular observations and metadata + * - Spatial Resolution + - Station-level observations + * - Temporal Resolution + - Minutes to hourly depending on station/network + * - Update Cadence + - Near-real-time for current feeds; historical access depends on plan tier + * - Period of Record + - Local copy spans 2000-2026 in current snapshots + * - Formats + - JSON/CSV-style outputs and metadata tables + * - Inspection CLI + - ``find /home/runyang/ryang/Synoptic_Weather_Current -maxdepth 2 -type f | head`` + +Data Characteristics +-------------------- + +- Station-based observations rather than gridded forecasts. +- Useful for local weather context and network metadata. +- Historical completeness depends on the access tier available at download time. +- Complements HPWREN and forecast grids. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Weather-station context for wildfire workflows. +- Cross-checking forecast grids against local observations. 
+- Metadata inspection for station-network selection. + +Access +------ + +- `Synoptic Weather API `_ + +PyHazards Usage +--------------- + +Use the local snapshots as an inspection-first station-context source. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +.. code-block:: bash + + find /home/runyang/ryang/Synoptic_Weather_Current -maxdepth 2 -type f | head + +Notes +----- + +- Local copies detected at ``/home/runyang/ryang/Synoptic_Weather_Current`` and related Synoptic directories. diff --git a/docs/source/datasets/tcbench_alpha.rst b/docs/source/datasets/tcbench_alpha.rst new file mode 100644 index 00000000..a0989b29 --- /dev/null +++ b/docs/source/datasets/tcbench_alpha.rst @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +TCBench Alpha +============= + +Synthetic-backed storm-track benchmark adapter aligned to the TCBench Alpha ecosystem. + +Overview +-------- + +TCBench Alpha is the public storm adapter used by several tropical cyclone baselines on the shared track-intensity evaluator. + +The current implementation is synthetic-backed, but it preserves the task, metric, and reporting surface used by the shared tropical cyclone benchmark. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - TCBench Alpha ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Tropical Cyclone + * - Source Role + - Track Benchmark + * - Coverage + - Benchmark-aligned tropical cyclone track and intensity samples + * - Geometry + - Storm-track history sequences + * - Spatial Resolution + - Storm-centered best-track sequences + * - Temporal Resolution + - Historical track windows with forecast horizons + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch tensors via the dataset registry + * - Registry Entry + - ``tcbench_alpha`` + +Data Characteristics +-------------------- + +- Storm-history sequences with future latitude, longitude, and intensity targets. +- Registry-backed benchmark adapter rather than a raw external benchmark ingestion path. +- Intended for benchmark-linked track-intensity forecasting runs. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Tropical Cyclone MLP, SAF-Net, and TCIF-fusion smoke tests. +- Shared tropical cyclone benchmark runs. +- Regression checks for storm-track baselines. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `TCBench Alpha repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public TCBench Alpha-aligned storm surface exposed by the tropical cyclone benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``tcbench_alpha`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "tcbench_alpha", + micro=True, + history=6, + horizon=5, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Tropical Cyclone Benchmark `, :doc:`TCBench Alpha ` + +**Representative Models:** :doc:`Tropical Cyclone MLP `, :doc:`SAF-Net `, :doc:`TCIF-fusion ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full TCBench Alpha ingestion pipeline. + +Reference +--------- + +- `TCBench Alpha `_. diff --git a/docs/source/datasets/tropicyclonenet_dataset.rst b/docs/source/datasets/tropicyclonenet_dataset.rst new file mode 100644 index 00000000..41508b57 --- /dev/null +++ b/docs/source/datasets/tropicyclonenet_dataset.rst @@ -0,0 +1,111 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +TropiCycloneNet-Dataset +======================= + +Synthetic-backed storm-track benchmark adapter aligned to the TropiCycloneNet-Dataset ecosystem. + +Overview +-------- + +TropiCycloneNet-Dataset is the public storm adapter used by the TropiCycloneNet model path on the shared track-intensity evaluator. + +The current implementation is synthetic-backed, but it preserves the task, metric, and reporting surface used by the shared tropical cyclone benchmark. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - TropiCycloneNet-Dataset ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Tropical Cyclone + * - Source Role + - Track Benchmark + * - Coverage + - Benchmark-aligned tropical cyclone track and intensity samples + * - Geometry + - Storm-track history sequences + * - Spatial Resolution + - Storm-centered best-track sequences + * - Temporal Resolution + - Historical track windows with forecast horizons + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch tensors via the dataset registry + * - Registry Entry + - ``tropicyclonenet_dataset`` + +Data Characteristics +-------------------- + +- Storm-history sequences with future latitude, longitude, and intensity targets. +- Registry-backed benchmark adapter rather than a raw external dataset ingestion path. +- Intended for benchmark-linked storm forecasting smoke runs. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- TropiCycloneNet smoke tests. +- Shared tropical cyclone benchmark runs. +- Regression checks for track-intensity prediction models. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `TropiCycloneNet-Dataset repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public TropiCycloneNet-Dataset-aligned storm surface exposed by the tropical cyclone benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``tropicyclonenet_dataset`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "tropicyclonenet_dataset", + micro=True, + history=6, + horizon=5, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Tropical Cyclone Benchmark `, :doc:`TropiCycloneNet-Dataset ` + +**Representative Models:** :doc:`TropiCycloneNet ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full TropiCycloneNet-Dataset downloader. + +Reference +--------- + +- `TropiCycloneNet-Dataset `_. diff --git a/docs/source/datasets/waterbench_streamflow.rst b/docs/source/datasets/waterbench_streamflow.rst new file mode 100644 index 00000000..e9530966 --- /dev/null +++ b/docs/source/datasets/waterbench_streamflow.rst @@ -0,0 +1,112 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +WaterBench +========== + +Synthetic-backed streamflow benchmark adapter aligned to the WaterBench ecosystem. + +Overview +-------- + +WaterBench is the public flood streamflow adapter used by the EA-LSTM path on the shared flood benchmark. + +The current implementation is synthetic-backed, but it preserves the streamflow forecasting contract expected by the shared evaluator. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - WaterBench ecosystem surfaced through a PyHazards adapter + * - Hazard Family + - Flood + * - Source Role + - Streamflow Benchmark + * - Coverage + - Benchmark-aligned streamflow forecasting samples + * - Geometry + - Graph-temporal basin or node sequences + * - Spatial Resolution + - Basin or gauge nodes represented as graph elements + * - Temporal Resolution + - Rolling history windows for streamflow prediction + * - Update Cadence + - Generated locally for smoke and benchmark-alignment runs + * - Period of Record + - Synthetic-backed benchmark adapter + * - Formats + - PyTorch graph-temporal dataset objects via the dataset registry + * - Registry Entry + - ``waterbench_streamflow`` + +Data Characteristics +-------------------- + +- Graph-temporal sequences with node-level targets for next-step streamflow prediction. +- Registry-backed benchmark adapter rather than a raw WaterBench ingestion pipeline. +- Intended for benchmark-linked streamflow smoke runs. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- EA-LSTM smoke tests. +- Shared flood benchmark runs with streamflow metrics. +- Regression checks for basin-scale forecasting experiments. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `WaterBench abstract `_ +- `WaterBench repository `_ + +PyHazards Usage +--------------- + +Use this adapter when you want the public WaterBench-aligned streamflow surface exposed by the flood benchmark. + +Registry Workflow +~~~~~~~~~~~~~~~~~ + +Primary dataset name: ``waterbench_streamflow`` + +.. 
code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "waterbench_streamflow", + micro=True, + history=4, + nodes=6, + ).load() + + train = data.get_split("train") + print(len(train.inputs), train.inputs[0].x.shape) + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Flood Benchmark `, :doc:`WaterBench ` + +**Representative Models:** :doc:`EA-LSTM ` + +Inspection Workflow +------------------- + +This dataset is currently surfaced as a registry-backed benchmark adapter, +so there is no standalone inspection CLI documented for it. + +Notes +----- + +- This is a synthetic-backed benchmark adapter rather than a full WaterBench downloader. + +Reference +--------- + +- `WaterBench: A Large-scale Benchmark Dataset for Data-driven Streamflow Forecasting `_ (`repo `__). diff --git a/docs/source/datasets/wfigs.rst b/docs/source/datasets/wfigs.rst new file mode 100644 index 00000000..1193d849 --- /dev/null +++ b/docs/source/datasets/wfigs.rst @@ -0,0 +1,93 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +WFIGS +===== + +Interagency wildfire incident records used as authoritative wildfire ground truth across the United States. + +Overview +-------- + +WFIGS aggregates geospatial information on active and historical wildland fire incidents, representing officially reported incidents rather than satellite-detected hotspots. + +In PyHazards it acts as an authoritative wildfire ground-truth source for validation, labeling, and comparison against remote-sensing detections. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - National Interagency Fire Center (NIFC) / interagency WFIGS + * - Hazard Family + - Wildfire + * - Source Role + - Incident Records + * - Coverage + - United States + * - Geometry + - Incident points and perimeters + * - Spatial Resolution + - Event-level vector geometries + * - Temporal Resolution + - Event-based with live operational updates + * - Update Cadence + - Refreshed from IRWIN roughly every 5 minutes, with perimeter changes often appearing within 15 minutes + * - Period of Record + - Historical archive plus ongoing incidents + * - Formats + - ArcGIS REST services, GeoJSON, and Shapefile downloads + * - Inspection CLI + - ``python -m pyhazards.datasets.wfigs.inspection --path /path/to/wfigs_data --max-items 10`` + +Data Characteristics +-------------------- + +- Event-based incident records with point and polygon geometries. +- Operational data that can change as incidents evolve and records are reconciled. +- Common fields include incident identifiers, timing, status, location, and fire size. +- Current and year-to-date layers follow different retention rules. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- Ground-truth labeling of wildfire occurrence. +- Validation of satellite-based fire detection products. +- Analysis of ignition timing and incident geography. + +Access +------ + +Use the links below to access the upstream source or its public documentation. + +- `NIFC Open Data WFIGS layers `_ +- `National Interagency Fire Center `_ + +PyHazards Usage +--------------- + +Use this dataset through the public inspection or registry surface documented below. + +This dataset is currently documented as an external or inspection-first +source rather than a public ``load_dataset(...)`` entrypoint. 
+ +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +Use the documented inspection path below to validate local files before training or analysis. + +.. code-block:: bash + + python -m pyhazards.datasets.wfigs.inspection --path /path/to/wfigs_data --max-items 10 + +Reference +--------- + +- `National Interagency Fire Center. Wildland Fire Incident Geospatial Services (WFIGS). `_. diff --git a/docs/source/datasets/wrc_housing_density.rst b/docs/source/datasets/wrc_housing_density.rst new file mode 100644 index 00000000..cc500292 --- /dev/null +++ b/docs/source/datasets/wrc_housing_density.rst @@ -0,0 +1,85 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +WRC Housing Density +=================== + +USDA Forest Service housing-density raster used for exposure and population-at-risk context. + +Overview +-------- + +This housing-density raster is part of the Wildfire Risk to Communities ecosystem and encodes where homes or housing units are concentrated in fire-prone landscapes. + +In PyHazards it serves as a static exposure covariate for wildfire risk, WUI context, and population-at-risk analysis. + +At a Glance +----------- + +.. 
list-table:: + :widths: 28 72 + :stub-columns: 1 + + * - Provider + - USDA Forest Service / Wildfire Risk to Communities + * - Hazard Family + - Wildfire + * - Source Role + - Exposure and Community Context + * - Coverage + - United States + * - Geometry + - Raster exposure layers + * - Spatial Resolution + - About 30 m + * - Temporal Resolution + - Static or release-based + * - Update Cadence + - Release-based + * - Period of Record + - Local copy corresponds to 2018-era housing density package + * - Formats + - Raster packages and extracted tiles + * - Inspection CLI + - ``find /path/to/WRC_Housing_Density -maxdepth 3 -type f | head`` + +Data Characteristics +-------------------- + +- Exposure-focused raster rather than fire detections. +- Useful for WUI and human-exposure context in wildfire modeling. +- Complements fuels, perimeters, and population layers. +- Supports risk interpretation rather than direct fire labeling. + +Typical Use Cases +~~~~~~~~~~~~~~~~~ + +- WUI and housing-exposure covariates. +- Community wildfire risk context. +- Population-at-risk and exposure analysis. + +Access +------ + +- `Wildfire Risk to Communities datasets `_ + +PyHazards Usage +--------------- + +Use this local raster package as an inspection-first wildfire exposure layer. + +Related Coverage +~~~~~~~~~~~~~~~~ + +**Benchmarks:** :doc:`Wildfire Benchmark ` + +Inspection Workflow +------------------- + +.. code-block:: bash + + find /path/to/WRC_Housing_Density -maxdepth 3 -type f | head + +Notes +----- + +- Replace ``/path/to/WRC_Housing_Density`` with the location of your local copy of the housing-density package. diff --git a/docs/source/implementation.rst b/docs/source/implementation.rst index b9913092..36af24da 100644 --- a/docs/source/implementation.rst +++ b/docs/source/implementation.rst @@ -1,238 +1,497 @@ -Implementation -============== - -PyGIP is built to be modular and extensible, allowing contributors to implement their own attack and defense strategies. 
-Below, we detail how to extend the framework by implementing custom attack and defense classes, with a focus on how to -leverage the provided dataset structure. - -Dataset -------- - -The ``Dataset`` class standardizes the data format across PyGIP. Here’s its structure: +Implementation Guide +==================== + +Use this guide when you want to extend PyHazards itself. It is written for +contributors who are adding new datasets, new models, smoke tests, catalog +cards, or documentation updates for the public site. + +This page explains the public contributor workflow. For repository operations +and maintainer automation details, also see ``.github/IMPLEMENTATION.md``. + +Who This Guide Is For +--------------------- + +This guide assumes you already know Python and PyTorch, but you have not yet +worked inside the PyHazards codebase. It is most useful when you are doing one +of the following: + +- adding a new dataset loader or dataset inspection entrypoint, +- porting a paper or external implementation into ``pyhazards.models``, +- updating the public dataset or model catalogs and generated documentation, +- preparing a pull request that should be easy to review and merge. + +If you only want to install the library and run a first example, use +:doc:`installation` and :doc:`quick_start` instead. + +Repository Mental Model +----------------------- + +PyHazards is organized around a small set of extension points: + +- ``pyhazards.datasets`` contains dataset abstractions, the dataset registry, + and inspection entrypoints for supported data sources. +- ``pyhazards.models`` contains model builders, reusable components, and the + model registry used by ``build_model(...)``. +- ``pyhazards.engine`` contains the shared training and evaluation workflow. +- ``pyhazards/dataset_cards`` contains YAML cards used to generate the public + dataset catalog and per-dataset documentation pages. 
+- ``pyhazards/model_cards`` contains YAML cards used to generate the public + model tables and per-model documentation pages. +- ``docs/source`` contains handwritten Sphinx pages, while the committed + ``docs/`` directory contains the rendered HTML published on GitHub Pages. + +There are three separate layers to keep in mind: + +1. registry availability: + a dataset or model can be constructed from Python once it is registered; +2. catalog visibility: + a public dataset or model only appears on the website when it also has a + matching catalog card; +3. published website output: + GitHub Pages only changes after the rendered HTML in ``docs/`` is rebuilt. + +Typical Contribution Workflow +----------------------------- + +Most changes should follow the same sequence: + +1. decide whether you are extending a dataset, a model, or both; +2. implement the code in ``pyhazards/datasets`` or ``pyhazards/models``; +3. register the new entrypoint so it is discoverable from the library API; +4. add or update smoke-test coverage for the new behavior; +5. update the relevant docs source and, for public datasets or models, the + matching catalog cards; +6. run the smallest local validation commands that match the change; +7. rebuild the published docs HTML if the website output changed; +8. open a pull request with the required metadata and validation notes. + +Treat code, validation, generated docs, and published docs as one contribution. +A public dataset or model implementation is not complete if users cannot +discover it or if the website catalog still describes the old state of the +library. + +Adding a Dataset +---------------- + +Datasets are built around ``Dataset`` and ``DataBundle``. A dataset subclass +implements ``_load()`` and returns train/validation/test splits plus feature and +label metadata. + +The minimum pattern looks like this: .. 
code-block:: python - class Dataset(object): - def __init__(self, api_type='dgl', path='./data'): - assert api_type in {'dgl', 'pyg'}, 'API type must be dgl or pyg' - self.api_type = api_type - self.path = path - self.dataset_name = self.get_name() - - # DGLGraph or PyGData - self.graph_dataset = None - self.graph_data = None + import torch + from pyhazards.datasets import ( + DataBundle, + DataSplit, + Dataset, + FeatureSpec, + LabelSpec, + register_dataset, + ) + + class MyHazardDataset(Dataset): + name = "my_hazard" + + def _load(self) -> DataBundle: + x = torch.randn(1000, 16) + y = torch.randint(0, 2, (1000,)) + + splits = { + "train": DataSplit(x[:800], y[:800]), + "val": DataSplit(x[800:900], y[800:900]), + "test": DataSplit(x[900:], y[900:]), + } - # meta data - self.num_nodes = 0 - self.num_features = 0 - self.num_classes = 0 + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + input_dim=16, + description="Example tabular hazard features.", + ), + label_spec=LabelSpec( + num_targets=2, + task_type="classification", + description="Binary hazard label.", + ), + ) -**Importance**: We are currently using the default ``api_type='pyg'`` to load the data. It is important to note that when -``api_type='pyg'``, ``self.graph_data`` should be an instance of ``torch_geometric.data.Data``. In your implementation, make -sure to use our defined Dataset class to build your code. 
+ register_dataset(MyHazardDataset.name, MyHazardDataset) -Device ------- +Keep the following expectations in mind when you add a dataset: -To ensure consistency and simplicity when managing CUDA devices across attacks and defenses, we follow the convention -below: +- use ``DataBundle`` to make split names, feature dimensions, and target + semantics explicit; +- keep the builder/import path lightweight so the dataset can be imported + without triggering heavy side effects; +- register the dataset with ``register_dataset(...)`` so + ``load_dataset(name=...)`` can construct it; +- if the dataset belongs in the public catalog, add or update a card in + ``pyhazards/dataset_cards`` and regenerate the dataset docs; +- prefer clear metadata over implicit conventions, especially when a model + depends on shapes, channels, graph structure, or task type. -- Both ``BaseAttack`` and ``BaseDefense`` define the device attribute ``self.device`` in their ``__init__()`` method. -- Subclasses should not manually redefine or modify the device logic. -- If you are implementing a custom attack or defense class, simply inherit from ``BaseAttack`` or ``BaseDefense``. -- You can directly access the device using: ``x = x.to(self.device)`` +Dataset Inspection Entry Points +------------------------------- -Implementing Attack -------------------- +PyHazards also includes inspection modules under ``pyhazards.datasets`` for +supported external data sources. If you add a new dataset family, keep the +inspection module consistent with the existing ones: -To create a custom attack, you need to extend the abstract base class ``BaseAttack``. 
Here’s the structure -of ``BaseAttack``: +- it should be importable as ``python -m pyhazards.datasets.<name>.inspection``; +- ``--help`` should exit cleanly; +- argument parsing should work without requiring optional plotting or network + dependencies at import time; +- if the dataset belongs in the public dataset table, its inspection workflow + should be stable enough for ``scripts/verify_table_entries.py``. -.. code-block:: python +The goal is simple: users should be able to discover the dataset from the docs, +inspect it from the command line, and load it from Python through the registry. - class BaseAttack(ABC): - supported_api_types = set() - supported_datasets = set() +Dataset Cards and Generated Docs +-------------------------------- - def __init__(self, dataset: Dataset, attack_node_fraction: float = None, model_path: str = None, - device: Optional[Union[str, torch.device]] = None): - self.device = torch.device(device) if device else get_device() - print(f"Using device: {self.device}") +Public datasets are documented through cards in ``pyhazards/dataset_cards``. +These cards are the source of truth for the public dataset catalog and the +generated per-dataset detail pages. - # graph data - self.dataset = dataset - self.graph_dataset = dataset.graph_dataset - self.graph_data = dataset.graph_data +A typical dataset card includes: - # meta data - self.num_nodes = dataset.num_nodes - self.num_features = dataset.num_features - self.num_classes = dataset.num_classes +- the public display name and hazard family, +- a one-sentence summary and source role, +- provider, geometry, cadence, and period-of-record metadata, +- the primary source or product reference, +- the inspection command when the dataset is inspection-first, +- the registry name and example when it is public through + ``load_dataset(...)``, +- related model and benchmark links when those cross-links help users navigate + the library. 
- # params - self.attack_node_fraction = attack_node_fraction - self.model_path = model_path +After updating dataset cards, refresh the generated docs: - self._check_dataset_compatibility() +.. code-block:: bash -To implement your own attack: + python scripts/render_dataset_docs.py -1. **Inherit from ``BaseAttack``**: - Create a new class that inherits from ``BaseAttack``. You’ll need to provide the following required parameters in the - constructor: +Use the ``--check`` mode when you want to confirm the generated files are +already up to date: - - ``dataset``: An instance of the ``Dataset`` class (see below for details). - - ``attack_node_fraction``: A float between 0 and 1 representing the fraction of nodes to attack. - - ``model_path`` (optional): A string specifying the path to a pre-trained model (defaults to ``None``). +.. code-block:: bash - You need to implement the following methods: + python scripts/render_dataset_docs.py --check - - ``attack()``: Add main attack logic here. If multiple attack types are supported, define the attack type as an optional - argument to this function. For each specific attack type, implement a corresponding helper function such as - ``_attack_type1()`` or ``_attack_type2()``, and call the appropriate helper inside ``attack()`` based on the given method name. - - ``_load_model()``: Load victim model. - - ``_train_target_model()``: Train victim model. - - ``_train_attack_model()``: Train attack model. - - ``_helper_func()`` (optional): Add your helper functions based on your needs, but keep the methods private. +Adding a Model +-------------- -2. **Implement the ``attack()`` Method**: - Override the abstract ``attack()`` method with your attack logic, and return a dict of results. For example: +Models are registered builders that can be constructed through: .. 
code-block:: python - class MyCustomAttack(BaseAttack): - supported_api_types = {"pyg"} # "pyg" or "dgl" - supported_datasets = {"Cora"} # you can leave this blank if your method supports all datasets - - def __init__(self, dataset: Dataset, attack_node_fraction: float, model_path: str = None): - super().__init__(dataset, attack_node_fraction, model_path) - # Additional initialization if needed - - def attack(self): - # Example: Access the graph and perform an attack - print(f"Attacking {self.attack_node_fraction * 100}% of nodes") - num_nodes = self.graph.num_nodes() - print(f"Graph has {num_nodes} nodes") - # Add your attack logic here - return { - 'metric1': 'metric1 here', - 'metric2': 'metric2 here' - } + from pyhazards.models import build_model - def _load_model(self): - # add your logic here - pass + model = build_model(name="", task="", **kwargs) - def _train_target_model(self): - # add your logic here - pass +When you port a paper or external repository into PyHazards, define the library +contract first. Your builder should: - def _train_attack_model(self): - # add your logic here - pass +- accept ``task: str``, +- accept the shape and hyperparameter arguments needed to construct the model, +- return an ``nn.Module``, +- validate unsupported tasks early with a clear error, +- accept ``**kwargs`` so extra configuration keys do not break the call path. -Implementing Defense --------------------- - -To create a custom defense, you need to extend the abstract base class ``BaseDefense``. Here’s the structure -of ``BaseDefense``: +The minimum pattern looks like this: .. 
code-block:: python - class BaseDefense(ABC): - supported_api_types = set() - supported_datasets = set() - - def __init__(self, dataset: Dataset, attack_node_fraction: float, - device: Optional[Union[str, torch.device]] = None): - self.device = torch.device(device) if device else get_device() - print(f"Using device: {self.device}") - - # graph data - self.dataset = dataset - self.graph_dataset = dataset.graph_dataset - self.graph_data = dataset.graph_data - - # meta data - self.num_nodes = dataset.num_nodes - self.num_features = dataset.num_features - self.num_classes = dataset.num_classes + from __future__ import annotations - # params - self.attack_node_fraction = attack_node_fraction + import torch + import torch.nn as nn + from pyhazards.models import register_model - self._check_dataset_compatibility() - -To implement your own defense: -1. **Inherit from ``BaseDefense``**: - Create a new class that inherits from ``BaseDefense``. You’ll need to provide the following required parameters in the - constructor: + class MyModel(nn.Module): + def __init__(self, in_dim: int, out_dim: int, hidden_dim: int = 128): + super().__init__() + self.net = nn.Sequential( + nn.Linear(in_dim, hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, out_dim), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 2: + raise ValueError(f"Expected input of shape (batch, features), got {tuple(x.shape)}") + return self.net(x) + + + def my_model_builder( + task: str, + in_dim: int, + out_dim: int, + hidden_dim: int = 128, + **kwargs, + ) -> nn.Module: + _ = kwargs + if task.lower() not in {"classification", "regression"}: + raise ValueError(f"MyModel does not support task={task!r}") + return MyModel(in_dim=in_dim, out_dim=out_dim, hidden_dim=hidden_dim) + + + register_model( + "my_model", + my_model_builder, + defaults={"hidden_dim": 128}, + ) + +In practice, good model ports also include: + +- a short paper-to-library mapping from the original repository into the new + 
PyHazards module and builder kwargs; +- explicit input-shape validation in ``forward()`` so integration failures are + easy to diagnose; +- clear task handling when the same architecture can be used for different + objectives; +- minimal defaults in the registry so ``build_model(...)`` is predictable. + +Match the Forward Signature to the Data Path +-------------------------------------------- + +PyHazards supports more than one input style. Some models work with plain tensor +pairs, while others expect mappings, graph batches, or custom dataset objects. +Make that contract explicit. + +As a rule: + +- if your model expects ``Tensor -> Tensor``, keep the shape assumptions simple + and document them in the model card; +- if your model expects graph or structured inputs, prefer dataset and collate + behavior that produces the mapping your ``forward()`` already consumes; +- use ``FeatureSpec``, ``LabelSpec``, and split metadata to record dimensions, + channels, and task semantics instead of burying them in comments. + +Porting Training Logic +---------------------- + +Do not copy an upstream training loop into PyHazards unless the architecture +truly depends on custom runtime behavior. In most cases you should: + +- keep the architecture inside ``nn.Module``, +- keep custom losses or helper blocks close to the model implementation, +- use ``pyhazards.engine.Trainer`` for fit, evaluate, and predict workflows, +- document intentional differences from the paper repository in the pull request. + +If the PyHazards port changes preprocessing, outputs, or optimization behavior, +state that clearly in the PR's parity notes. Review is much faster when the +intended differences are explicit. + +Model Cards and Generated Docs +------------------------------ + +Public models are documented through cards in ``pyhazards/model_cards``. A model +card is not optional when you want a model to appear on the website. 
+ +A typical card includes: + +- the public model name and display name, +- the hazard family used for the model table, +- the source file and builder name, +- a short summary and description, +- the paper citation or technical reference, +- supported tasks, +- one runnable example, +- a synthetic smoke-test specification. + +For example: + +.. code-block:: yaml + + model_name: my_model + display_name: My Model + hazard: Flood + source_file: pyhazards/models/my_model.py + builder_name: my_model_builder + summary: > + Short description of the public model entrypoint. + paper: + title: Example paper title + url: https://example.com/paper + tasks: + - regression + smoke_test: + task: regression + build_kwargs: + in_dim: 16 + out_dim: 1 + input: + kind: tensor + shape: [4, 16] + expected_output: + kind: tensor + shape: [4, 1] + +Model cards drive the generated pages in :doc:`pyhazards_models`. They also +control public visibility: + +- if a model is registered but has no card, it can still be used from Python but + it will not appear in the public model tables; +- if a card sets ``include_in_public_catalog: false``, the implementation stays + in the library but is hidden from the public catalog; +- if the hazard name in the card is new, the generated model page creates a new + hazard section automatically. + +After updating a card, refresh the generated docs: + +.. code-block:: bash + + python scripts/render_model_docs.py + +Use the ``--check`` mode when you want to confirm the generated files are +already up to date: + +.. code-block:: bash + + python scripts/render_model_docs.py --check + +Validation Workflow +------------------- + +Run the smallest set of checks that covers your change. The core validation +commands in this repository are: - - ``dataset``: An instance of the ``Dataset`` class (see below for details). - - ``attack_node_fraction``: A float between 0 and 1 representing the fraction of nodes to attack. 
- - ``model_path`` (optional): A string specifying the path to a pre-trained model (defaults to ``None``). +.. code-block:: bash - You need to implement the following methods: + python -c "import pyhazards; print(pyhazards.__version__)" + python scripts/render_dataset_docs.py --check + python scripts/render_model_docs.py --check + python scripts/verify_table_entries.py - - ``defense()``: Add main defense logic here. If multiple defense types are supported, define the defense type as an - optional argument to this function. For each specific defense type, implement a corresponding helper function such as - ``_defense_type1()`` or ``_defense_type2()``, and call the appropriate helper inside ``defense()``. - - ``_load_model()``: Load victim model. - - ``_train_target_model()``: Train victim model. - - ``_train_defense_model()``: Train defense model. - - ``_train_surrogate_model()``: Train attack model. - - ``_helper_func()`` (optional): Add your helper functions based on your needs, but keep the methods private. +Use them for the following purposes: -2. **Implement the ``defense()`` Method**: - Override the abstract ``defense()`` method with your defense logic, and return a dict of results. For example: +- ``python -c "import pyhazards; print(pyhazards.__version__)"`` + verifies that the package still imports cleanly; +- ``python scripts/render_dataset_docs.py --check`` + verifies that generated dataset docs and catalog pages are in sync with the + current dataset cards; +- ``python scripts/render_model_docs.py --check`` + verifies that generated model docs and catalog pages are in sync with the + current model cards; +- ``python scripts/verify_table_entries.py`` + exercises dataset inspection entrypoints and runs smoke tests for cataloged + public models. + +When you changed a specific model, also run the model-scoped smoke test: + +.. code-block:: bash + + python scripts/smoke_test_models.py --models -.. 
code-block:: python +This uses the model card's smoke-test spec, so it is the fastest way to confirm +that a new public model can build and run with synthetic inputs. - class MyCustomDefense(BaseDefense): - supported_api_types = {"pyg"} # "pyg" or "dgl" - supported_datasets = {"Cora"} # you can leave this blank if your method supports all datasets - - def defend(self): - # Step 1: Train target model - target_model = self._train_target_model() - # Step 2: Attack target model - attack = MyCustomAttack(self.dataset, attack_node_fraction=0.3) - attack.attack(target_model) - # Step 3: Train defense model - defense_model = self._train_defense_model() - # Step 4: Test defense against attack - attack = MyCustomAttack(self.dataset, attack_node_fraction=0.3) - attack.attack(defense_model) - # Print performance metrics - - def _load_model(self): - # add your logic here - pass - - def _train_target_model(self): - # add your logic here - pass - - def _train_defense_model(self): - # add your logic here - pass - - def _train_surrogate_model(self): - # add your logic here - pass - -Miscellaneous Tips ------------------- - -- **Reference Implementation**: The ``ModelExtractionAttack0`` class is a fully implemented attack example. Study it for - inspiration or as a template. -- **Flexibility**: Add as many helper functions as needed within your class to keep your code clean and modular. -- **Backbone Models**: We provide several basic backbone models like ``GCN, GraphSAGE``. You can use or add more - at ``from models.nn import GraphSAGE``. -- **Example Scripts**: Please provide an example script in the ``examples/`` folder demonstrating how to run your code. This - will significantly speed up our code review process. - -By following these guidelines, you can seamlessly integrate your custom attack or defense strategies into PyGIP. Happy -coding! \ No newline at end of file +If your change touched the model catalog or its generation logic, also run: + +.. 
code-block:: bash + + python -m pytest tests/test_model_catalog.py + +If you changed runtime behavior in the training path and you have the required +hardware available, run the broader smoke path described in ``test.py`` as well. + +Preparing a Model Pull Request +------------------------------ + +Model PRs should make the implementation easy to review against the original +paper or upstream repository. The PR template asks for a few specific fields for +that reason: + +- ``Model Summary`` should describe the architecture and public API you are + adding, not just the file names you changed; +- ``Hazard Scenario`` should name the model table that owns the entry, and it + should explicitly call out when the PR introduces a new hazard family; +- ``Registry Name`` should list the exact ``build_model(name=...)`` entrypoints + added or changed in the PR; +- ``Paper / Source`` should link the scientific paper, source repository, or + technical reference that the implementation follows; +- ``Smoke Test`` should list the commands you ran or point to the card's + smoke-test specification; +- ``Parity Notes`` should explain intentional differences from the upstream + implementation, especially around preprocessing, outputs, or objectives. + +PR automation can only help when this metadata is present and accurate. A +catalog-backed model PR is expected to include the implementation, the registry +wiring, the model card, the smoke-test path, and refreshed generated docs. + +Registration, Catalog, and Published HTML +----------------------------------------- + +It is easy to update one layer of the repo and forget the others. 
Keep this +distinction in mind: + +- code registration makes a dataset or model usable from Python; +- dataset cards make a public dataset discoverable in the generated docs; +- model cards make a public model discoverable in the generated docs; +- Sphinx source updates change the documentation source tree; +- rebuilding ``docs/`` updates the committed HTML published on GitHub Pages. + +If the website output changed, rebuild the site locally: + +.. code-block:: bash + + cd docs + sphinx-build -b html source build/html + cp -r build/html/* . + +That final copy step matters in this repository because the published website is +served from the committed ``docs/`` directory, not from ``docs/source``. + +Common Mistakes +--------------- + +These are the issues that most often block review: + +- the new dataset or model exists in code but was never registered; +- a public dataset changed, but ``pyhazards/dataset_cards`` or the generated + dataset docs were not updated; +- a public model was implemented without a matching card in + ``pyhazards/model_cards``; +- generated docs were not refreshed after the model card changed; +- ``docs/source`` was updated but the committed ``docs/`` HTML was not rebuilt; +- the builder does not validate unsupported tasks or accepts the wrong shape + arguments for the intended use; +- a hidden or internal model was accidentally left visible in the public + catalog; +- an inspection module imports optional heavy dependencies at module import time, + which breaks ``python -m ... --help`` in clean environments. 
+ +Contributor Checklist +--------------------- + +Before you open a pull request, confirm all of the following: + +- the implementation lives in the correct dataset or model module; +- the new entrypoint is registered and can be constructed from the public API; +- task handling and input-shape validation are clear and actionable; +- public datasets have a complete card when they belong in the public catalog; +- generated dataset docs are refreshed and pass ``render_dataset_docs.py --check``; +- public models have a complete card with a runnable smoke-test spec; +- generated model docs are refreshed and pass ``render_model_docs.py --check``; +- dataset inspection entrypoints and public tables pass + ``scripts/verify_table_entries.py``; +- the published docs HTML in ``docs/`` was rebuilt if the visible website output + changed; +- the pull request explains the source paper, registry name, hazard scenario, + smoke-test commands, and parity notes. + +Next Steps +---------- + +After you finish a contributor-oriented change: + +- browse the public catalogs in :doc:`pyhazards_datasets` and + :doc:`pyhazards_models` to confirm the new entry is discoverable; +- use :doc:`quick_start` to check that the user path still feels coherent; +- keep ``.github/IMPLEMENTATION.md`` and this page aligned when the repository + workflow changes. diff --git a/docs/source/index.rst b/docs/source/index.rst index 13b8bf78..7804de80 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,141 +1,275 @@ +.. title:: PyHazards + +.. image:: _static/logo.png + :alt: PyHazards Icon + :width: 220px + :align: center + :class: landing-hero-logo + .. raw:: html -
- PyGIP Icon + -.. image:: https://img.shields.io/pypi/v/PyGIP - :target: https://pypi.org/project/PyGIP - :alt: PyPI Version +Overview +-------- -.. image:: https://img.shields.io/github/actions/workflow/status/LabRAI/PyGIP/docs.yml - :target: https://github.com/LabRAI/PyGIP/actions - :alt: Build Status +PyHazards brings together public dataset catalogs, registry-based models, +benchmark families, experiment configs, and shared training or reporting +workflows across wildfire, earthquake, flood, and tropical cyclone tasks. -.. image:: https://img.shields.io/github/license/LabRAI/PyGIP.svg - :target: https://github.com/LabRAI/PyGIP/blob/main/LICENSE - :alt: License +It is designed for researchers and practitioners who need one coherent library +for reproducing baselines, comparing methods, and extending hazard-ML +workflows without rebuilding the software stack for each hazard family. -.. image:: https://img.shields.io/pypi/dm/pygip - :target: https://github.com/LabRAI/PyGIP - :alt: PyPI Downloads +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid home-kicker-grid home-hero-stats -.. image:: https://img.shields.io/github/issues/LabRAI/PyGIP - :target: https://github.com/LabRAI/PyGIP - :alt: Issues + .. grid-item-card:: Hazard Families + :class-card: catalog-stat-card -.. image:: https://img.shields.io/github/issues-pr/LabRAI/PyGIP - :target: https://github.com/LabRAI/PyGIP - :alt: Pull Requests + .. container:: catalog-stat-value -.. image:: https://img.shields.io/github/stars/LabRAI/PyGIP - :target: https://github.com/LabRAI/PyGIP - :alt: Stars + 4 -.. image:: https://img.shields.io/github/forks/LabRAI/PyGIP - :target: https://github.com/LabRAI/PyGIP - :alt: GitHub forks + .. container:: catalog-stat-note -.. image:: _static/github.svg - :target: https://github.com/LabRAI/PyGIP - :alt: GitHub + Wildfire, earthquake, flood, and tropical cyclone workflows under one library. ----- + .. 
grid-item-card:: Public Datasets + :class-card: catalog-stat-card -**PyGIP** is a comprehensive Python library focused on model extraction attacks and defenses in Graph Neural Networks (GNNs). Built on PyTorch, PyTorch Geometric, and DGL, the library offers a robust framework for understanding, implementing, and defending against attacks targeting GNN models. + .. container:: catalog-stat-value -**PyGIP is featured for:** + 20 -- **Extensive Attack Implementations**: Multiple strategies for GNN model extraction attacks, including fidelity and accuracy evaluation. -- **Defensive Techniques**: Tools for creating robust defense mechanisms, such as watermarking graphs and inserting synthetic nodes. -- **Unified API**: Intuitive APIs for both attacks and defenses. -- **Integration with PyTorch/DGL**: Seamlessly integrates with PyTorch Geometric and DGL for scalable graph processing. -- **Customizable**: Supports user-defined attack and defense configurations. + .. container:: catalog-stat-note -**Quick Start Example:** + Curated dataset pages covering forcing sources and hazard-specific benchmark adapters. -Model Extraction Attack Example with 5 Lines of Code: + .. grid-item-card:: Implemented Models + :class-card: catalog-stat-card -.. code-block:: python + .. container:: catalog-stat-value - from datasets import Cora - from models.attack import ModelExtractionAttack0 + 24 - # Load the Cora dataset - dataset = Cora() + .. container:: catalog-stat-note - # Initialize the attack with a sampling ratio of 0.25 - mea = ModelExtractionAttack0(dataset, 0.25) + Public implemented baselines and variants surfaced through the model catalog. - # Execute the attack - mea.attack() + .. grid-item-card:: Benchmark Families + :class-card: catalog-stat-card -Attack Modules ---------------- + .. container:: catalog-stat-value + + 4 + + .. container:: catalog-stat-note + + Shared evaluator families with linked ecosystems, smoke configs, and reports. + +Start Here +---------- + +.. 
container:: home-section-note + + Use one of these four paths to move from overview to action quickly. + +.. grid:: 1 1 2 4 + :gutter: 2 + :class-container: catalog-recommend-grid home-link-grid + + .. grid-item-card:: Quick Start + :class-card: catalog-detail-card + + Run the first benchmark-aware workflow and verify the package. + + **Open:** :doc:`Quick Start ` + + .. grid-item-card:: Browse Datasets + :class-card: catalog-detail-card + + Explore forcing sources, benchmark adapters, and inspection entrypoints. + + **Open:** :doc:`Datasets ` -.. list-table:: - :header-rows: 1 + .. grid-item-card:: Browse Models + :class-card: catalog-detail-card - * - Class Name - - Reference + Compare implemented baselines, variants, and benchmark-linked model detail pages. - * - :doc:`MEA <_autosummary/attack/pygip.models.attack.mea.MEA>` - - Wu, Bang, et al. "Model extraction attacks on graph neural networks: Taxonomy and realisation." Proceedings of the 2022 ACM on Asia conference on computer and communications security. 2022. + **Open:** :doc:`Models ` - * - :doc:`AdvMEA <_autosummary/attack/pygip.models.attack.AdvMEA>` - - DeFazio, David, and Arti Ramesh. "Adversarial model extraction on graph neural networks." arXiv preprint arXiv:1912.07721 (2019). + .. grid-item-card:: Browse Benchmarks + :class-card: catalog-detail-card - * - :doc:`CEGA <_autosummary/attack/pygip.models.attack.CEGA>` - - Wang, Zebin, et al. "CEGA: A Cost-Effective Approach for Graph-Based Model Extraction and Acquisition." arXiv preprint arXiv:2506.17709 (2025). + Compare hazard benchmark families, ecosystem mappings, and smoke coverage. - * - :doc:`DataFreeMEA <_autosummary/attack/pygip.models.attack.DataFreeMEA>` - - Zhuang, Yuanxin, et al. "Unveiling the Secrets without Data: Can Graph Neural Networks Be Exploited through {Data-Free} Model Extraction Attacks?." 33rd USENIX Security Symposium (USENIX Security 24). 2024. 
+ **Open:** :doc:`Benchmarks ` - * - :doc:`Realistic <_autosummary/attack/pygip.models.attack.Realistic>` - - Guan, Faqian, et al. "A realistic model extraction attack against graph neural networks." Knowledge-Based Systems 300 (2024): 112144. +Why PyHazards +------------- +.. grid:: 1 1 2 4 + :gutter: 2 + :class-container: catalog-grid home-pillar-grid -Defense Modules + .. grid-item-card:: Unified Datasets + :class-card: catalog-detail-card + + Public datasets, forcing sources, and inspection surfaces are documented through one hazard-first catalog. + + .. grid-item-card:: Benchmark-aligned Evaluation + :class-card: catalog-detail-card + + Shared benchmark families, smoke configs, and report exports make model comparisons more reproducible. + + .. grid-item-card:: Registry-based Models + :class-card: catalog-detail-card + + Baselines and adapters are exposed through a consistent build surface instead of one-off scripts. + + .. grid-item-card:: Shared Training and Inference + :class-card: catalog-detail-card + + One engine layer supports training, evaluation, prediction, and benchmark execution across hazard tasks. + +Hazard Coverage --------------- -.. list-table:: - :header-rows: 1 +.. container:: home-section-note + + PyHazards spans four hazard families with public datasets, models, and benchmark pages designed to work together. + +.. grid:: 1 1 2 4 + :gutter: 2 + :class-container: catalog-recommend-grid home-hazard-grid + + .. grid-item-card:: Wildfire + :class-card: catalog-detail-card + + Danger forecasting, weekly forecasting, spread baselines, fuels, burn products, and active-fire sources. + + **Explore:** :doc:`Datasets ` | :doc:`Models ` + + .. grid-item-card:: Earthquake + :class-card: catalog-detail-card + + Waveform picking, dense-grid forecasting adapters, and linked benchmark ecosystems for phase-picking workflows. + + **Explore:** :doc:`Models ` | :doc:`Benchmarks ` - * - Class Name - - Reference + .. 
grid-item-card:: Flood + :class-card: catalog-detail-card - * - :doc:`RandomWM <_autosummary/defense/pygip.models.defense.RandomWM>` - - Zhao, Xiangyu, Hanzhou Wu, and Xinpeng Zhang. "Watermarking graph neural networks by random graphs." 2021 9th International Symposium on Digital Forensics and Security (ISDFS). IEEE, 2021. + Streamflow and inundation baselines with benchmark-backed datasets, configs, and evaluation coverage. - * - :doc:`BackdoorWM <_autosummary/defense/pygip.models.defense.BackdoorWM>` - - Xu, Jing, et al. "Watermarking graph neural networks based on backdoor attacks." 2023 IEEE 8th European Symposium on Security and Privacy (EuroS&P). IEEE, 2023. + **Explore:** :doc:`Datasets ` | :doc:`Benchmarks ` - * - :doc:`SurviveWM <_autosummary/defense/pygip.models.defense.SurviveWM>` - - Wang, Haiming, et al. "Making Watermark Survive Model Extraction Attacks in Graph Neural Networks." ICC 2023-IEEE International Conference on Communications. IEEE, 2023. + .. grid-item-card:: Tropical Cyclone + :class-card: catalog-detail-card - * - :doc:`ImperceptibleWM <_autosummary/defense/pygip.models.defense.ImperceptibleWM>` - - Zhang, Linji, et al. "An imperceptible and owner-unique watermarking method for graph neural networks." Proceedings of the ACM Turing Award Celebration Conference-China 2024. 2024. + Track-and-intensity forecasting baselines plus shared benchmark ecosystems and experimental weather-model adapters. - * - :doc:`ATOM <_autosummary/defense/pygip.models.defense.atom.ATOM>` - - Cheng, Zhan, et al. "Atom: A framework of detecting query-based model extraction attacks for graph neural networks." Proceedings of the 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining V. 2. 2025. + **Explore:** :doc:`Models ` | :doc:`Benchmarks ` - * - :doc:`Integrity <_autosummary/defense/pygip.models.defense.Integrity>` - - Wu, Bang, et al. "Securing graph neural networks in mlaas: A comprehensive realization of query-based integrity verification." 
2024 IEEE Symposium on Security and Privacy (SP). IEEE, 2024. +Featured Example +---------------- +.. container:: home-section-note -How to Cite ------------ + Run a benchmark-aligned smoke configuration with one command, then move into the full Quick Start for model building and training workflows. -If you find it useful, please considering cite the following work: +.. code-block:: bash + + python scripts/run_benchmark.py --config pyhazards/configs/flood/hydrographnet_smoke.yaml + +.. container:: catalog-link-row + + **Next step:** :doc:`Quick Start ` for the first full workflow, or :doc:`Models ` to browse benchmark-linked baselines. + +Explore the Docs +---------------- + +.. grid:: 1 1 2 3 + :gutter: 2 + :class-container: catalog-recommend-grid home-link-grid + + .. grid-item-card:: Installation + :class-card: catalog-detail-card + + Set up PyHazards from PyPI or source and verify the environment. + + **Open:** :doc:`installation` + + .. grid-item-card:: Quick Start + :class-card: catalog-detail-card + + Run the shortest end-to-end workflow in the library. + + **Open:** :doc:`quick_start` + + .. grid-item-card:: Datasets + :class-card: catalog-detail-card + + Browse hazard-grouped dataset cards, detail pages, and inspection entrypoints. + + **Open:** :doc:`pyhazards_datasets` + + .. grid-item-card:: Models + :class-card: catalog-detail-card + + Compare implemented models, variants, and benchmark-linked detail pages. + + **Open:** :doc:`pyhazards_models` + + .. grid-item-card:: Benchmarks + :class-card: catalog-detail-card + + Review benchmark families, ecosystem mappings, and smoke-config coverage. + + **Open:** :doc:`pyhazards_benchmarks` + + .. grid-item-card:: Reports and Configs + :class-card: catalog-detail-card + + Load reproducible experiment YAML files and export benchmark summaries. 
+ + **Open:** :doc:`pyhazards_configs` | :doc:`pyhazards_reports` + +For Contributors +---------------- + +PyHazards is registry-driven and uses dataset cards, model cards, and benchmark +cards to generate the public catalogs. If you plan to extend the library, use +:doc:`implementation` for the contributor workflow and :doc:`appendix_a_coverage` +for the audited gap list behind the current roadmap work. + +Citation +-------- + +If you use PyHazards in your research, please cite: .. code-block:: bibtex - @article{li2025intellectual, - title={Intellectual Property in Graph-Based Machine Learning as a Service: Attacks and Defenses}, - author={Li, Lincan and Shen, Bolin and Zhao, Chenxi and Sun, Yuxiang and Zhao, Kaixiang and Pan, Shirui and Dong, Yushun}, - journal={arXiv preprint arXiv:2508.19641}, - year={2025} - } + @misc{pyhazards2025, + title = {PyHazards: An Open-Source Library for AI-Powered Hazard Prediction}, + author = {Cheng et al.}, + year = {2025}, + howpublished = {\url{https://github.com/LabRAI/PyHazards}}, + note = {GitHub repository} + } + +Community +--------- + +Use the `RAI Lab Slack channel `_ +for project discussion and coordination. .. toctree:: @@ -145,17 +279,21 @@ If you find it useful, please considering cite the following work: installation quick_start - benchmark .. toctree:: :maxdepth: 1 :caption: API Reference :hidden: - pygip_datasets - pygip_models_attack - pygip_models_defense - pygip_utils + pyhazards_datasets + pyhazards_models + pyhazards_benchmarks + pyhazards_configs + pyhazards_reports + pyhazards_engine + pyhazards_metrics + pyhazards_utils + interactive_map .. 
toctree:: :maxdepth: 2 @@ -163,13 +301,7 @@ If you find it useful, please considering cite the following work: :hidden: implementation + appendix_a_coverage cite + references team - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/docs/source/installation.rst b/docs/source/installation.rst index 8cb76f09..9bd47237 100644 --- a/docs/source/installation.rst +++ b/docs/source/installation.rst @@ -1,43 +1,63 @@ Installation ============ -PyGIP requires Python 3.8+ and can be installed using pip. We recommend using a conda environment for installation. +Use this page to install PyHazards, verify that the package imports correctly, +and choose the right setup path for local use or contribution. PyHazards +supports Python 3.8 through 3.12 and installs with ``pip``. -Installing PyGIP ----------------- +Requirements +------------ + +- Python ``>=3.8, <3.13`` +- PyTorch ``>=2.3, <3.0`` -To get started with PyGIP, set up your environment by installing the required dependencies: +Install from PyPI +----------------- + +Install from PyPI: .. code-block:: bash - pip install -r requirements.txt + pip install pyhazards -Ensure you have Python installed (version 3.8 or higher recommended) along with the necessary libraries listed -in `requirements.txt`. +GPU Install +----------- -Specifically, using following command to install `dgl 2.2.1` and ensure your `pytorch==2.3.0`. +If you plan to run on GPU, install a matching PyTorch build first and then +install PyHazards. -For CPU -~~~~~~~~~~~~~ +Example for CUDA 12.6: .. code-block:: bash - pip install dgl==2.2.1 -f https://data.dgl.ai/wheels/torch-2.3/repo.html + pip install torch --index-url https://download.pytorch.org/whl/cu126 + pip install pyhazards + +Install from Source +------------------- -For GPU (cuda 12.1) -~~~~~~~~~~~~~ +Use an editable install when you are contributing code or documentation: .. 
code-block:: bash - pip install dgl==2.2.1 -f https://data.dgl.ai/wheels/torch-2.3/cu121/repo.html + git clone https://github.com/LabRAI/PyHazards.git + cd PyHazards + python -m pip install -e . +Verify the Installation +----------------------- +Run a small import check to confirm that the package is available in the +environment: -Requirements ------------- +.. code-block:: bash + + python -c "import pyhazards; print(pyhazards.__version__)" + +You should see the installed package version printed to stdout. + +Next Steps +---------- -- Python >= 3.8 -- PyTorch == 2.3 -- torch-geometric >= 2.6.0 -- dgl == 2.2.1 -- CUDA 12.1 +- Continue to :doc:`quick_start` for the first end-to-end workflow. +- See :doc:`implementation` if you are setting up a contributor workflow. diff --git a/docs/source/interactive_map.rst b/docs/source/interactive_map.rst new file mode 100644 index 00000000..b3b5798e --- /dev/null +++ b/docs/source/interactive_map.rst @@ -0,0 +1,60 @@ +Wildfire Interactive Map +======================== + +PyHazards includes a lightweight launcher for the external **RAI Fire** +interactive wildfire map. This companion site is specific to wildfire use +cases; it is not a general interactive map for every hazard domain in +PyHazards. + +Use it when you want a browser-based wildfire view without leaving the broader +PyHazards workflow. + +What This Page Covers +--------------------- + +- the live wildfire-focused RAI Fire website, +- the built-in launcher command, +- the small Python helper exposed by the package. + +Live Website +------------ + +- `RAI Fire `_ +- `Source repository `_ + +Command Line +------------ + +Open the website from the library with: + +.. code-block:: bash + + python -m pyhazards map + +The command prints the URL and, when possible, opens it in your default browser. + +Python API +---------- + +.. 
code-block:: python + + from pyhazards import open_interactive_map + + url = open_interactive_map() + print(url) + +Notes +----- + +The interactive map is an external companion application for wildfire +visualization. PyHazards links to it and provides a launcher, but it does not +host the web application inside the Python package itself. + +Module Reference +---------------- + +.. automodule:: pyhazards.interactive_map + :members: + :undoc-members: + :show-inheritance: + :no-index: diff --git a/docs/source/modules/models_asufm.rst b/docs/source/modules/models_asufm.rst new file mode 100644 index 00000000..0e136dd5 --- /dev/null +++ b/docs/source/modules/models_asufm.rst @@ -0,0 +1,58 @@ +ASUFM +===== + +Description +----------- + +``asufm`` is a self-contained PyHazards port of the ASUFM wildfire model family: +an Attention Swin U-Net with focal modulation for wildfire spread prediction. + +This module follows the official ASUFM configuration pattern with: + +- ``image_size=64`` +- ``patch_size=4`` +- ``in_channels=6`` +- ``embed_dim=96`` +- ``depths=(2, 2, 2, 2)`` +- ``num_heads=(3, 6, 12, 24)`` +- focal modulation in the encoder +- attention-gated skip connections in the decoder + +Paper / source +-------------- + +- `Wildfire Spread Prediction in North America Using Satellite Imagery and Vision Transformer `_ +- `Official repository `_ + +Paper parity note +----------------- + +This PyHazards implementation preserves the main architectural ideas from the +official repository while staying dependency-free inside the main library. +It intentionally replaces the original ``timm``/``einops``-based components +with a native PyTorch implementation of: + +- patch embedding +- hierarchical Swin-style window attention +- focal modulation in encoder blocks +- U-Net-style decoder with spatially gated skip connections + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="asufm", + task="segmentation", + image_size=64, + in_channels=6, + out_dim=1, + ) + + x = torch.randn(2, 6, 64, 64) + logits = model(x) + print(logits.shape) diff --git a/docs/source/modules/models_attention_unet.rst b/docs/source/modules/models_attention_unet.rst new file mode 100644 index 00000000..3fdcf7f1 --- /dev/null +++ b/docs/source/modules/models_attention_unet.rst @@ -0,0 +1,42 @@ +Attention U-Net +=============== + +Description +----------- + +``attention_unet`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.attention_unet_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. + +Example of how to use it +------------------------ + +.. 
code-block:: python
+
+    import torch
+    from pyhazards.models import build_model
+
+    model = build_model(
+        name="attention_unet",
+        task="segmentation",
+    )
+
+    # build_model(...) above fixes task="segmentation", so use a 4D
+    # raster batch directly; the original classification branch here
+    # compared two string literals and could never run.
+    x = torch.randn(2, 1, 32, 32)
+    out = model(x)
+    print(type(out))
diff --git a/docs/source/modules/models_convgru_trajgru.rst b/docs/source/modules/models_convgru_trajgru.rst
new file mode 100644
index 00000000..5a3b9a4d
--- /dev/null
+++ b/docs/source/modules/models_convgru_trajgru.rst
@@ -0,0 +1,42 @@
+ConvGRU / TrajGRU
+=================
+
+Description
+-----------
+
+``convgru_trajgru`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline.
+
+This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation.
+
+It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``.
+
+Paper / source
+--------------
+
+- Promoted from the wildfire benchmark Track-O model family in PyHazards.
+- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.convgru_trajgru_track_o``.
+
+Paper parity note
+-----------------
+
+This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer.
+
+Example of how to use it
+------------------------
+
+.. 
code-block:: python
+
+    import torch
+    from pyhazards.models import build_model
+
+    model = build_model(
+        name="convgru_trajgru",
+        task="segmentation",
+    )
+
+    # build_model(...) above fixes task="segmentation", so use a 4D
+    # raster batch directly; the original classification branch here
+    # compared two string literals and could never run.
+    x = torch.randn(2, 1, 32, 32)
+    out = model(x)
+    print(type(out))
diff --git a/docs/source/modules/models_convlstm.rst b/docs/source/modules/models_convlstm.rst
new file mode 100644
index 00000000..ff612614
--- /dev/null
+++ b/docs/source/modules/models_convlstm.rst
@@ -0,0 +1,42 @@
+ConvLSTM
+========
+
+Description
+-----------
+
+``convlstm`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline.
+
+This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation.
+
+It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``.
+
+Paper / source
+--------------
+
+- Promoted from the wildfire benchmark Track-O model family in PyHazards.
+- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.convlstm_track_o``.
+
+Paper parity note
+-----------------
+
+This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer.
+
+Example of how to use it
+------------------------
+
+.. 
code-block:: python
+
+    import torch
+    from pyhazards.models import build_model
+
+    model = build_model(
+        name="convlstm",
+        task="segmentation",
+    )
+
+    # build_model(...) above fixes task="segmentation", so use a 4D
+    # raster batch directly; the original classification branch here
+    # compared two string literals and could never run.
+    x = torch.randn(2, 1, 32, 32)
+    out = model(x)
+    print(type(out))
diff --git a/docs/source/modules/models_deep_ensemble.rst b/docs/source/modules/models_deep_ensemble.rst
new file mode 100644
index 00000000..b0a8261e
--- /dev/null
+++ b/docs/source/modules/models_deep_ensemble.rst
@@ -0,0 +1,42 @@
+Deep Ensemble
+=============
+
+Description
+-----------
+
+``deep_ensemble`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline.
+
+This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation.
+
+It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``.
+
+Paper / source
+--------------
+
+- Promoted from the wildfire benchmark Track-O model family in PyHazards.
+- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.deep_ensemble_track_o``.
+
+Paper parity note
+-----------------
+
+This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer.
+
+Example of how to use it
+------------------------
+
+.. 
code-block:: python
+
+    import torch
+    from pyhazards.models import build_model
+
+    model = build_model(
+        name="deep_ensemble",
+        task="segmentation",
+    )
+
+    # build_model(...) above fixes task="segmentation", so use a 4D
+    # raster batch directly; the original classification branch here
+    # compared two string literals and could never run.
+    x = torch.randn(2, 1, 32, 32)
+    out = model(x)
+    print(type(out))
diff --git a/docs/source/modules/models_deeplabv3p.rst b/docs/source/modules/models_deeplabv3p.rst
new file mode 100644
index 00000000..fb4a9503
--- /dev/null
+++ b/docs/source/modules/models_deeplabv3p.rst
@@ -0,0 +1,42 @@
+DeepLabv3+
+==========
+
+Description
+-----------
+
+``deeplabv3p`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline.
+
+This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation.
+
+It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``.
+
+Paper / source
+--------------
+
+- Promoted from the wildfire benchmark Track-O model family in PyHazards.
+- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.deeplabv3p_track_o``.
+
+Paper parity note
+-----------------
+
+This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer.
+
+Example of how to use it
+------------------------
+
+.. 
code-block:: python
+
+    import torch
+    from pyhazards.models import build_model
+
+    model = build_model(
+        name="deeplabv3p",
+        task="segmentation",
+    )
+
+    # build_model(...) above fixes task="segmentation", so use a 4D
+    # raster batch directly; the original classification branch here
+    # compared two string literals and could never run.
+    x = torch.randn(2, 1, 32, 32)
+    out = model(x)
+    print(type(out))
diff --git a/docs/source/modules/models_earthfarseer.rst b/docs/source/modules/models_earthfarseer.rst
new file mode 100644
index 00000000..85cc2822
--- /dev/null
+++ b/docs/source/modules/models_earthfarseer.rst
@@ -0,0 +1,42 @@
+EarthFarseer
+============
+
+Description
+-----------
+
+``earthfarseer`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline.
+
+This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation.
+
+It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``.
+
+Paper / source
+--------------
+
+- Promoted from the wildfire benchmark Track-O model family in PyHazards.
+- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.earthfarseer_track_o``.
+
+Paper parity note
+-----------------
+
+This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer.
+
+Example of how to use it
+------------------------
+
+.. 
code-block:: python
+
+    import torch
+    from pyhazards.models import build_model
+
+    model = build_model(
+        name="earthfarseer",
+        task="segmentation",
+    )
+
+    # build_model(...) above fixes task="segmentation", so use a 4D
+    # raster batch directly; the original classification branch here
+    # compared two string literals and could never run.
+    x = torch.randn(2, 1, 32, 32)
+    out = model(x)
+    print(type(out))
diff --git a/docs/source/modules/models_earthformer.rst b/docs/source/modules/models_earthformer.rst
new file mode 100644
index 00000000..d050fdcc
--- /dev/null
+++ b/docs/source/modules/models_earthformer.rst
@@ -0,0 +1,42 @@
+Earthformer
+===========
+
+Description
+-----------
+
+``earthformer`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline.
+
+This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation.
+
+It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``.
+
+Paper / source
+--------------
+
+- Promoted from the wildfire benchmark Track-O model family in PyHazards.
+- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.earthformer_track_o``.
+
+Paper parity note
+-----------------
+
+This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer.
+
+Example of how to use it
+------------------------
+
+.. code-block:: python
+
+    import torch
+    from pyhazards.models import build_model
+
+    model = build_model(
+        name="earthformer",
+        task="segmentation",
+    )
+
+    # build_model(...) above fixes task="segmentation", so use a 4D
+    # raster batch directly; the original classification branch here
+    # compared two string literals and could never run.
+    x = torch.randn(2, 1, 32, 32)
+    out = model(x)
+    print(type(out))
diff --git a/docs/source/modules/models_eqnet.rst b/docs/source/modules/models_eqnet.rst
new file mode 100644
index 00000000..649727af
--- /dev/null
+++ b/docs/source/modules/models_eqnet.rst
@@ -0,0 +1,107 @@
+.. 
This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +EQNet +===== + +Overview +-------- + +``eqnet`` extends the PyHazards earthquake benchmark stack with a lightweight attention-based picking model. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Phase Picking + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Earthquake Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``eqnet`` extends the PyHazards earthquake benchmark stack with a lightweight attention-based picking model. + +The implementation keeps the shared waveform input and two-pick output contract so it can be evaluated alongside ``phasenet`` and ``eqtransformer``. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Earthquake Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`SeisBench ` + +External References +------------------- + +**Paper:** `An End-To-End Earthquake Detection Method for Joint Phase Picking and Association Using Deep Learning `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``eqnet`` + +Supported Tasks +--------------- + +- Phase Picking + +Programmatic Use +---------------- + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="eqnet", task="regression", in_channels=3) + picks = model(torch.randn(4, 3, 256)) + print(picks.shape) + +Notes +----- + +- Outputs are P- and S-arrival sample indices. diff --git a/docs/source/modules/models_eqtransformer.rst b/docs/source/modules/models_eqtransformer.rst new file mode 100644 index 00000000..001e9916 --- /dev/null +++ b/docs/source/modules/models_eqtransformer.rst @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +EQTransformer +============= + +Overview +-------- + +``eqtransformer`` is the second earthquake picking baseline in the staged roadmap and shares the synthetic waveform contract used by ``phasenet``. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Phase Picking + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Earthquake Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``eqtransformer`` is the second earthquake picking baseline in the staged roadmap and shares the synthetic waveform contract used by ``phasenet``. 
+ +The PyHazards adapter focuses on the shared picking interface rather than a full reproduction of the original multitask training pipeline. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Earthquake Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`pick-benchmark ` + +External References +------------------- + +**Paper:** `Earthquake Transformer-An attentive deep-learning model for simultaneous earthquake detection and phase picking `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``eqtransformer`` + +Supported Tasks +--------------- + +- Phase Picking + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="eqtransformer", task="regression", in_channels=3) + picks = model(torch.randn(4, 3, 256)) + print(picks.shape) + +Notes +----- + +- Outputs are P- and S-arrival sample indices. diff --git a/docs/source/modules/models_firecastnet.rst b/docs/source/modules/models_firecastnet.rst new file mode 100644 index 00000000..381d2bcd --- /dev/null +++ b/docs/source/modules/models_firecastnet.rst @@ -0,0 +1,47 @@ +FireCastNet +=========== + +Description +----------- + +``firecastnet`` is a lightweight PyHazards port of the FireCastNet model family. + +This module keeps the main benchmark-relevant ideas needed for integration: + +- compact wildfire-risk raster encoder +- dense decoding head +- forecasting-oriented wildfire output map + +Paper / source +-------------- + +- `FireCastNet paper `_ + +Paper parity note +----------------- + +This PyHazards implementation is intentionally **not** a full reproduction of +the original FireCastNet seasonal graph pipeline. Instead, it is a clean +benchmark-facing neural port that preserves the forecasting-oriented wildfire +modeling role needed for PyHazards integration. + +It does not claim architecture or preprocessing parity with the original release. 
+ +Example of how to use it +------------------------ + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="firecastnet", + task="segmentation", + in_channels=12, + out_channels=1, + ) + + x = torch.randn(2, 12, 32, 32) + logits = model(x) + print(logits.shape) diff --git a/docs/source/modules/models_firemm_ir.rst b/docs/source/modules/models_firemm_ir.rst new file mode 100644 index 00000000..1370c986 --- /dev/null +++ b/docs/source/modules/models_firemm_ir.rst @@ -0,0 +1,56 @@ +FireMM-IR +========= + +Description +----------- + +``firemm_ir`` is a benchmark-facing PyHazards port inspired by the FireMM-IR +multi-modal large language model for remote-sensing forest fire monitoring. + +This module preserves the main ideas emphasized by the paper: + +- dual-modality optical + infrared fusion +- class-aware memory +- instruction-conditioned segmentation reasoning +- dense wildfire-scene decoding + +Paper / source +-------------- + +- `FireMM-IR: An Infrared-Enhanced Multi-Modal Large Language Model for Comprehensive Scene Understanding in Remote Sensing Forest Fire Monitoring `_ +- `PubMed entry `_ + +Paper parity note +----------------- + +This PyHazards implementation is intentionally **not** the original full MLLM +stack with text generation, external instruction tuning, and dataset-specific +serving pipeline. Instead, it is a benchmark-friendly neural port that +preserves the architectural roles needed for PyHazards integration: + +- optical / infrared dual encoder +- class-aware memory enhancement +- instruction-conditioned feature fusion +- dense segmentation head + +It is suitable for smoke testing and benchmark integration, while remaining +transparent about not reproducing the original external MLLM runtime. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="firemm_ir", + task="segmentation", + in_channels=6, + out_dim=1, + ) + + x = torch.randn(2, 6, 32, 32) + logits = model(x) + print(logits.shape) diff --git a/docs/source/modules/models_firepred.rst b/docs/source/modules/models_firepred.rst new file mode 100644 index 00000000..919b78cc --- /dev/null +++ b/docs/source/modules/models_firepred.rst @@ -0,0 +1,46 @@ +FirePred +======== + +Description +----------- + +``firepred`` is a PyHazards port inspired by the FirePred wildfire spread model. + +This implementation keeps the benchmark-relevant structure of the published method: + +- multi-temporal wildfire raster input +- separate recent, aggregated, and snapshot branches +- fused CNN decoding for next-step wildfire spread prediction + +Paper / source +-------------- + +- `FirePred GitHub repository `_ +- Paper title used by the official repository: ``FirePred: A hybrid multi-temporal convolutional neural network model for wildfire spread prediction`` + +Paper parity note +----------------- + +This PyHazards implementation is intentionally a lightweight benchmark-facing port. +It preserves the multi-temporal hybrid-CNN pattern while avoiding notebook-only or +project-specific training code from the original release. + +Example of how to use it +------------------------ + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="firepred", + task="segmentation", + history=5, + in_channels=8, + out_channels=1, + ) + + x = torch.randn(2, 5, 8, 32, 32) + logits = model(x) + print(logits.shape) diff --git a/docs/source/modules/models_floodcast.rst b/docs/source/modules/models_floodcast.rst new file mode 100644 index 00000000..42decc16 --- /dev/null +++ b/docs/source/modules/models_floodcast.rst @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. 
+ +FloodCast +========= + +Overview +-------- + +``floodcast`` is the first public inundation model in the staged PyHazards flood roadmap. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Inundation + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Flood Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``floodcast`` is the first public inundation model in the staged PyHazards flood roadmap. + +The adapter uses shared raster tensors so it can be benchmarked through the ``flood.inundation`` evaluator without dataset-specific glue code. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Flood Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`FloodCastBench ` + +External References +------------------- + +**Paper:** `Large-scale flood modeling and forecasting with FloodCast `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``floodcast`` + +Supported Tasks +--------------- + +- Inundation + +Programmatic Use +---------------- + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="floodcast", task="regression", in_channels=3, history=4) + preds = model(torch.randn(2, 4, 3, 16, 16)) + print(preds.shape) + +Notes +----- + +- Outputs are next-horizon inundation depth rasters. diff --git a/docs/source/modules/models_forefire.rst b/docs/source/modules/models_forefire.rst new file mode 100644 index 00000000..58d1fdec --- /dev/null +++ b/docs/source/modules/models_forefire.rst @@ -0,0 +1,62 @@ +ForeFire Adapter +================ + +Description +----------- + +``forefire`` is a lightweight PyHazards raster adapter inspired by the +front-propagation behavior of the ForeFire wildfire spread simulator. + +This module is designed as a benchmark-facing canonical model that keeps the +main local spread mechanism simple and reproducible inside the PyHazards +library: + +- ``in_channels=12`` +- ``out_channels=1`` +- ``diffusion_steps=2`` by default +- repeated neighborhood spread updates +- explicit fuel and wind modulation + +Paper / source +-------------- + +- `ForeFire: open source code for wildland fire spread models `_ +- `ForeFire repository `_ + +Paper parity note +----------------- + +This PyHazards implementation is intentionally **not** the full ForeFire +simulation system. Instead, it provides a compact raster adapter that captures +the main deterministic spread intuition needed for registry integration and +smoke testing in the main library. + +The canonical PyHazards version keeps: + +- raster input/output contract +- repeated local front spread updates +- fuel-conditioned spread +- wind-conditioned spread + +It does not attempt to reproduce the full propagation solver, landscape +representation, or operational simulation stack of the original ForeFire +system. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="forefire", + task="segmentation", + in_channels=12, + diffusion_steps=2, + ) + + x = torch.randn(2, 12, 32, 32) + spread = model(x) + print(spread.shape) diff --git a/docs/source/modules/models_fourcastnet_tc.rst b/docs/source/modules/models_fourcastnet_tc.rst new file mode 100644 index 00000000..3515efc7 --- /dev/null +++ b/docs/source/modules/models_fourcastnet_tc.rst @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +FourCastNet TC Adapter +====================== + +Overview +-------- + +``fourcastnet_tc`` completes the first wave of experimental foundation-weather storm adapters in the staged roadmap. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Experimental Adapter + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``fourcastnet_tc`` completes the first wave of experimental foundation-weather storm adapters in the staged roadmap. 
+ +The PyHazards version is intentionally lightweight and uses the same trajectory output contract as the other storm baselines. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`IBTrACS ` + +External References +------------------- + +**Paper:** `FourCastNet: A Global Data-driven High-resolution Weather Model using Adaptive Fourier Neural Operators `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``fourcastnet_tc`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="fourcastnet_tc", task="regression", input_dim=8, history=6, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Experimental adapter: intended for shared-evaluator prototyping rather than exact weather-model parity. diff --git a/docs/source/modules/models_gemini_25_pro_wildfire_prompted.rst b/docs/source/modules/models_gemini_25_pro_wildfire_prompted.rst new file mode 100644 index 00000000..2e08e013 --- /dev/null +++ b/docs/source/modules/models_gemini_25_pro_wildfire_prompted.rst @@ -0,0 +1,47 @@ +Gemini 2.5 Pro Wildfire Prompted +================================ + +Description +----------- + +``gemini_25_pro_wildfire_prompted`` is a benchmark-facing prompt-conditioned VLM port +inspired by Gemini 2.5 Pro. 
+ +This implementation keeps the integration-relevant structure for a generic wildfire +vision-language baseline: + +- raster wildfire/environment input +- prompt-token conditioning +- visual-token and prompt-token fusion +- dense wildfire-risk decoding + +Paper / source +-------------- + +- `Gemini models documentation `_ + +Paper parity note +----------------- + +This PyHazards implementation is intentionally not a checkpoint-level port of +Gemini 2.5 Pro. Instead, it is a compact prompt-conditioned wildfire segmentation +baseline that preserves the benchmark-relevant VLM pattern. + +Example of how to use it +------------------------ + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="gemini_25_pro_wildfire_prompted", + task="segmentation", + in_channels=6, + out_dim=1, + ) + + x = torch.randn(2, 6, 32, 32) + logits = model(x) + print(logits.shape) diff --git a/docs/source/modules/models_google_flood_forecasting.rst b/docs/source/modules/models_google_flood_forecasting.rst new file mode 100644 index 00000000..ea8999be --- /dev/null +++ b/docs/source/modules/models_google_flood_forecasting.rst @@ -0,0 +1,113 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +Google Flood Forecasting +======================== + +Overview +-------- + +``google_flood_forecasting`` is a compact sequence-to-node forecasting baseline for flood streamflow prediction. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. 
grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Streamflow + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Flood Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``google_flood_forecasting`` is a compact sequence-to-node forecasting baseline for flood streamflow prediction. + +The PyHazards implementation uses a transformer encoder over per-node history windows and returns one forecast value per node. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Flood Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`Caravan ` + +External References +------------------- + +**Paper:** `Global Flood Forecasting at a Fine Catchment Resolution using Machine Learning `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``google_flood_forecasting`` + +Supported Tasks +--------------- + +- Streamflow + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="google_flood_forecasting", + task="regression", + input_dim=2, + out_dim=1, + history=4, + ) + preds = model({"x": torch.randn(2, 4, 6, 2)}) + print(preds.shape) + +Notes +----- + +- The smoke path uses the same streamflow-style graph fixture as the other flood baselines. diff --git a/docs/source/modules/models_gpd.rst b/docs/source/modules/models_gpd.rst new file mode 100644 index 00000000..12607c77 --- /dev/null +++ b/docs/source/modules/models_gpd.rst @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. 
+ +GPD +=== + +Overview +-------- + +``gpd`` provides a lightweight earthquake picking adapter with the same waveform-to-pick interface used across the PyHazards earthquake benchmarks. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Phase Picking + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Earthquake Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``gpd`` provides a lightweight earthquake picking adapter with the same waveform-to-pick interface used across the PyHazards earthquake benchmarks. + +This adapter is intended as a reproducible low-cost baseline rather than an exact port of every original training detail. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Earthquake Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`pick-benchmark ` + +External References +------------------- + +**Paper:** `Generalized Seismic Phase Detection with Deep Learning `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``gpd`` + +Supported Tasks +--------------- + +- Phase Picking + +Programmatic Use +---------------- + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="gpd", task="regression", in_channels=3) + picks = model(torch.randn(4, 3, 256)) + print(picks.shape) + +Notes +----- + +- The adapter keeps a simple two-output pick interface for shared evaluation. diff --git a/docs/source/modules/models_graphcast_tc.rst b/docs/source/modules/models_graphcast_tc.rst new file mode 100644 index 00000000..4d267deb --- /dev/null +++ b/docs/source/modules/models_graphcast_tc.rst @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +GraphCast TC Adapter +==================== + +Overview +-------- + +``graphcast_tc`` is an experimental foundation-weather adapter that keeps the shared storm trajectory interface while remaining lightweight enough for CI. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Experimental Adapter + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``graphcast_tc`` is an experimental foundation-weather adapter that keeps the shared storm trajectory interface while remaining lightweight enough for CI. 
+ +The PyHazards version is intentionally wrapper-style and should be treated as an adapter contract rather than a full reproduction of the original weather model. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`IBTrACS ` + +External References +------------------- + +**Paper:** `GraphCast: Learning skillful medium-range global weather forecasting `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``graphcast_tc`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="graphcast_tc", task="regression", input_dim=8, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Experimental adapter: intended for shared-evaluator prototyping rather than exact weather-model parity. diff --git a/docs/source/modules/models_hurricast.rst b/docs/source/modules/models_hurricast.rst new file mode 100644 index 00000000..8fa01259 --- /dev/null +++ b/docs/source/modules/models_hurricast.rst @@ -0,0 +1,114 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +Hurricast +========= + +Overview +-------- + +``hurricast`` is the first basin-specific storm baseline in the staged PyHazards roadmap and operates on storm-history sequences. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. 
container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``hurricast`` is the first basin-specific storm baseline in the staged PyHazards roadmap and operates on storm-history sequences. + +This initial adapter focuses on the shared tropical-cyclone forecasting interface and is intended as a reproducible starting point before broader storm-model breadth. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`IBTrACS ` + +External References +------------------- + +**Paper:** `Hurricane Forecasting: A Novel Multimodal Machine Learning Framework `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``hurricast`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="hurricast", + task="regression", + input_dim=8, + horizon=5, + output_dim=3, + ) + + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Outputs are lead-time sequences of latitude, longitude, and intensity targets. diff --git a/docs/source/modules/models_hydrographnet.rst b/docs/source/modules/models_hydrographnet.rst new file mode 100644 index 00000000..da02a757 --- /dev/null +++ b/docs/source/modules/models_hydrographnet.rst @@ -0,0 +1,119 @@ +.. This file is generated by scripts/render_model_docs.py. 
Do not edit by hand. + +HydroGraphNet +============= + +Overview +-------- + +``hydrographnet`` is the PyHazards entrypoint for flood forecasting on irregular meshes with graph-structured hydrologic state updates. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Streamflow + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Flood Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``hydrographnet`` is the PyHazards entrypoint for flood forecasting on irregular meshes with graph-structured hydrologic state updates. + +In PyHazards, this model is typically paired with the ERA5-based hydrograph adapter ``load_hydrograph_data`` for end-to-end smoke validation. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Flood Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`HydroBench ` + +External References +------------------- + +**Paper:** `Interpretable physics-informed graph neural networks for flood forecasting `_ + +Registry Name +------------- + +Primary entrypoint: ``hydrographnet`` + +Supported Tasks +--------------- + +- Streamflow + +Programmatic Use +---------------- + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="hydrographnet", + task="regression", + node_in_dim=2, + edge_in_dim=3, + out_dim=1, + ) + + batch = { + "x": torch.randn(1, 3, 6, 2), + "adj": torch.eye(6).unsqueeze(0), + "coords": torch.randn(6, 2), + } + preds = model(batch) + print(preds.shape) + +Notes +----- + +- The smoke test uses a synthetic graph batch so it stays CPU-safe in CI. diff --git a/docs/source/modules/models_internvl3_wildfire_prompted.rst b/docs/source/modules/models_internvl3_wildfire_prompted.rst new file mode 100644 index 00000000..22d9cd35 --- /dev/null +++ b/docs/source/modules/models_internvl3_wildfire_prompted.rst @@ -0,0 +1,47 @@ +InternVL3 Wildfire Prompted +=========================== + +Description +----------- + +``internvl3_wildfire_prompted`` is a benchmark-facing prompt-conditioned VLM port +inspired by InternVL3. + +This implementation keeps the integration-relevant structure for a generic wildfire +vision-language baseline: + +- raster wildfire/environment input +- prompt-token conditioning +- visual-token and prompt-token fusion +- dense wildfire-risk decoding + +Paper / source +-------------- + +- `InternVL repository `_ + +Paper parity note +----------------- + +This PyHazards implementation is intentionally not a checkpoint-level port of +InternVL3. Instead, it is a compact prompt-conditioned wildfire segmentation +baseline that preserves the benchmark-relevant VLM pattern. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="internvl3_wildfire_prompted", + task="segmentation", + in_channels=6, + out_dim=1, + ) + + x = torch.randn(2, 6, 32, 32) + logits = model(x) + print(logits.shape) diff --git a/docs/source/modules/models_lightgbm.rst b/docs/source/modules/models_lightgbm.rst new file mode 100644 index 00000000..e151d712 --- /dev/null +++ b/docs/source/modules/models_lightgbm.rst @@ -0,0 +1,42 @@ +LightGBM +======== + +Description +----------- + +``lightgbm`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.lightgbm_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="lightgbm", + task="classification", + ) + + if "classification" == "classification": + x = torch.randn(4, 16) + else: + x = torch.randn(2, 1, 32, 32) + out = model(x) + print(type(out)) diff --git a/docs/source/modules/models_llama4_wildfire_prompted.rst b/docs/source/modules/models_llama4_wildfire_prompted.rst new file mode 100644 index 00000000..dfabc6d1 --- /dev/null +++ b/docs/source/modules/models_llama4_wildfire_prompted.rst @@ -0,0 +1,48 @@ +Llama 4 Wildfire Prompted +========================= + +Description +----------- + +``llama4_wildfire_prompted`` is a benchmark-facing prompt-conditioned multimodal port +inspired by Meta Llama 4. + +This implementation keeps the integration-relevant structure for a generic wildfire +vision-language baseline: + +- raster wildfire/environment input +- prompt-token conditioning +- visual-token and prompt-token fusion +- dense wildfire-risk decoding + +Paper / source +-------------- + +- `Meta Llama organization `_ +- `Llama site `_ + +Paper parity note +----------------- + +This PyHazards implementation is intentionally not a checkpoint-level port of +Llama 4. Instead, it is a compact prompt-conditioned wildfire segmentation +baseline that preserves the benchmark-relevant multimodal reasoning pattern. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="llama4_wildfire_prompted", + task="segmentation", + in_channels=6, + out_dim=1, + ) + + x = torch.randn(2, 6, 32, 32) + logits = model(x) + print(logits.shape) diff --git a/docs/source/modules/models_logistic_regression.rst b/docs/source/modules/models_logistic_regression.rst new file mode 100644 index 00000000..8fde8cac --- /dev/null +++ b/docs/source/modules/models_logistic_regression.rst @@ -0,0 +1,42 @@ +Logistic Regression +=================== + +Description +----------- + +``logistic_regression`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.logistic_regression_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="logistic_regression", + task="classification", + ) + + if "classification" == "classification": + x = torch.randn(4, 16) + else: + x = torch.randn(2, 1, 32, 32) + out = model(x) + print(type(out)) diff --git a/docs/source/modules/models_mau.rst b/docs/source/modules/models_mau.rst new file mode 100644 index 00000000..bb4c78a0 --- /dev/null +++ b/docs/source/modules/models_mau.rst @@ -0,0 +1,42 @@ +MAU +=== + +Description +----------- + +``mau`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.mau_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="mau", + task="segmentation", + ) + + if "segmentation" == "classification": + x = torch.randn(4, 16) + else: + x = torch.randn(2, 1, 32, 32) + out = model(x) + print(type(out)) diff --git a/docs/source/modules/models_modis_active_fire_c61.rst b/docs/source/modules/models_modis_active_fire_c61.rst new file mode 100644 index 00000000..ad65c1fa --- /dev/null +++ b/docs/source/modules/models_modis_active_fire_c61.rst @@ -0,0 +1,48 @@ +MODIS Active Fire C6.1 +====================== + +Description +----------- + +``modis_active_fire_c61`` is a PyHazards operational-detection baseline inspired by +NASA's MODIS Collection 6.1 active-fire algorithm and its FIRMS-facing use in practice. + +This implementation keeps the benchmark-relevant structure of the published method: + +- satellite active-fire detection framing rather than generic segmentation +- contextual thermal anomaly estimation at coarser MODIS-like support +- split-window style evidence between mid-IR and longwave channels +- lightweight learnable calibration head so the method can run under the PyHazards benchmark contract + +Paper / source +-------------- + +- `MODIS Land Team fire page `_ +- `Giglio et al. (2016) `_ + +Paper parity note +----------------- + +This PyHazards implementation is intentionally a benchmark-facing surrogate rather than a byte-for-byte +reproduction of the NASA operational code path. It preserves the operational-detection intuition of +contextual thermal anomaly plus spectral evidence, while adding a compact learnable calibration head so +that smoke runs can generate standard training artifacts. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="modis_active_fire_c61", + task="segmentation", + in_channels=5, + out_dim=1, + ) + + x = torch.randn(2, 5, 32, 32) + logits = model(x) + print(logits.shape) diff --git a/docs/source/modules/models_neuralhydrology_ealstm.rst b/docs/source/modules/models_neuralhydrology_ealstm.rst new file mode 100644 index 00000000..7004c06d --- /dev/null +++ b/docs/source/modules/models_neuralhydrology_ealstm.rst @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +EA-LSTM +======= + +Overview +-------- + +``neuralhydrology_ealstm`` complements the plain LSTM adapter with a lightweight static gating path inspired by EA-LSTM style hydrology models. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Streamflow + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Flood Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``neuralhydrology_ealstm`` complements the plain LSTM adapter with a lightweight static gating path inspired by EA-LSTM style hydrology models. + +It keeps the same graph-temporal input contract as the rest of the flood streamflow roadmap. 
+ +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Flood Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`WaterBench ` + +External References +------------------- + +**Paper:** `Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``neuralhydrology_ealstm`` + +Supported Tasks +--------------- + +- Streamflow + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="neuralhydrology_ealstm", task="regression", input_dim=2, out_dim=1) + preds = model({"x": torch.randn(1, 4, 6, 2)}) + print(preds.shape) + +Notes +----- + +- This adapter focuses on the entity-aware gating contract, not exact repo parity. diff --git a/docs/source/modules/models_neuralhydrology_lstm.rst b/docs/source/modules/models_neuralhydrology_lstm.rst new file mode 100644 index 00000000..8379c935 --- /dev/null +++ b/docs/source/modules/models_neuralhydrology_lstm.rst @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +NeuralHydrology LSTM +==================== + +Overview +-------- + +``neuralhydrology_lstm`` is the first community-style hydrology baseline in the PyHazards flood roadmap. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. 
container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Streamflow + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Flood Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``neuralhydrology_lstm`` is the first community-style hydrology baseline in the PyHazards flood roadmap. + +The adapter consumes the shared graph-temporal streamflow batch format and produces next-step nodewise discharge predictions. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Flood Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`Caravan ` + +External References +------------------- + +**Paper:** `Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``neuralhydrology_lstm`` + +Supported Tasks +--------------- + +- Streamflow + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="neuralhydrology_lstm", task="regression", input_dim=2, out_dim=1) + preds = model({"x": torch.randn(1, 4, 6, 2)}) + print(preds.shape) + +Notes +----- + +- The smoke test uses the shared synthetic streamflow dataset shape. diff --git a/docs/source/modules/models_pangu_tc.rst b/docs/source/modules/models_pangu_tc.rst new file mode 100644 index 00000000..d7ec2240 --- /dev/null +++ b/docs/source/modules/models_pangu_tc.rst @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +Pangu TC Adapter +================ + +Overview +-------- + +``pangu_tc`` adds a second foundation-weather reference path behind the shared tropical-cyclone evaluator. + +At a Glance +----------- + +.. 
grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Experimental Adapter + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``pangu_tc`` adds a second foundation-weather reference path behind the shared tropical-cyclone evaluator. + +The implementation is intentionally lightweight and should be interpreted as an adapter contract for forecast-field driven storm evaluation. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`IBTrACS ` + +External References +------------------- + +**Paper:** `Accurate medium-range global weather forecasting with 3D neural networks `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``pangu_tc`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="pangu_tc", task="regression", input_dim=8, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Experimental adapter: intended for shared-evaluator prototyping rather than exact weather-model parity. diff --git a/docs/source/modules/models_phasenet.rst b/docs/source/modules/models_phasenet.rst new file mode 100644 index 00000000..780512f6 --- /dev/null +++ b/docs/source/modules/models_phasenet.rst @@ -0,0 +1,112 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +PhaseNet +======== + +Overview +-------- + +``phasenet`` is the first earthquake picking baseline in the staged PyHazards roadmap and is paired with the synthetic waveform dataset for smoke validation. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Phase Picking + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Earthquake Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``phasenet`` is the first earthquake picking baseline in the staged PyHazards roadmap and is paired with the synthetic waveform dataset for smoke validation. 
+ +This initial adapter focuses on the shared waveform-to-pick interface and does not claim exact reproduction of the original PhaseNet training stack. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Earthquake Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`SeisBench ` + +External References +------------------- + +**Paper:** `PhaseNet: A Deep-Neural-Network-Based Seismic Arrival Time Picking Method `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``phasenet`` + +Supported Tasks +--------------- + +- Phase Picking + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="phasenet", + task="regression", + in_channels=3, + ) + + picks = model(torch.randn(4, 3, 256)) + print(picks.shape) + +Notes +----- + +- Outputs are P- and S-arrival sample indices in the current smoke-test adapter. diff --git a/docs/source/modules/models_predrnn_v2.rst b/docs/source/modules/models_predrnn_v2.rst new file mode 100644 index 00000000..448d31da --- /dev/null +++ b/docs/source/modules/models_predrnn_v2.rst @@ -0,0 +1,42 @@ +PredRNN-v2 +========== + +Description +----------- + +``predrnn_v2`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.predrnn_v2_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. 
It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. + +Example of how to use it +------------------------ + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="predrnn_v2", + task="segmentation", + ) + + if "segmentation" == "classification": + x = torch.randn(4, 16) + else: + x = torch.randn(2, 1, 32, 32) + out = model(x) + print(type(out)) diff --git a/docs/source/modules/models_prithvi_burnscars.rst b/docs/source/modules/models_prithvi_burnscars.rst new file mode 100644 index 00000000..bbe1b5fe --- /dev/null +++ b/docs/source/modules/models_prithvi_burnscars.rst @@ -0,0 +1,55 @@ +Prithvi BurnScars +================= + +Description +----------- + +``prithvi_burnscars`` is a lightweight PyHazards downstream segmentation model +inspired by the official Prithvi BurnScars release. + +This module keeps the benchmark-relevant ideas from the model card: + +- Prithvi-style EO temporal backbone +- single-timestamp or arbitrary-timestamp fine-tuning support +- burn-scar-style segmentation head +- U-Net-like skip fusion for dense output + +Paper / source +-------------- + +- `Prithvi-EO-2.0: A Versatile Multi-Temporal Foundation Model for Earth Observation Applications `_ +- `Prithvi-EO-2.0-300M-BurnScars model card `_ + +Paper parity note +----------------- + +This PyHazards implementation is intentionally **not** the official released +checkpoint. Instead, it is a benchmark-facing downstream port that preserves the +main architectural story of the official BurnScars release: + +- EO foundation-style encoder +- downstream burn-scar segmentation objective +- dense decoder with skip fusion + +It is suitable for PyHazards integration and smoke testing, while remaining +transparent about not being a weight-identical reproduction. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="prithvi_burnscars", + task="segmentation", + image_size=32, + in_channels=6, + out_dim=1, + ) + + x = torch.randn(2, 1, 6, 32, 32) + logits = model(x) + print(logits.shape) diff --git a/docs/source/modules/models_prithvi_eo_2_tl.rst b/docs/source/modules/models_prithvi_eo_2_tl.rst new file mode 100644 index 00000000..9113b19d --- /dev/null +++ b/docs/source/modules/models_prithvi_eo_2_tl.rst @@ -0,0 +1,57 @@ +Prithvi-EO-2.0-TL +================= + +Description +----------- + +``prithvi_eo_2_tl`` is a lightweight PyHazards port inspired by the +Prithvi-EO-2.0 transfer-learning model family. + +This module keeps the main ideas highlighted in the official paper/model card: + +- multi-temporal EO input sequences +- temporal embeddings +- location embeddings +- transformer-style EO backbone +- segmentation-ready downstream head + +Paper / source +-------------- + +- `Prithvi-EO-2.0: A Versatile Multi-Temporal Foundation Model for Earth Observation Applications `_ +- `Prithvi-EO-2.0-300M-TL model card `_ + +Paper parity note +----------------- + +This PyHazards implementation is intentionally **not** the full official +pretrained foundation model with released checkpoints. Instead, it is a clean +PyTorch port that preserves the benchmark-relevant architectural ideas needed +for PyHazards integration: + +- sequence-based EO input handling +- temporal and location conditioning +- transformer encoder over patch tokens +- downstream segmentation decoding + +It does not claim checkpoint parity with the official IBM-NASA release. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="prithvi_eo_2_tl", + task="segmentation", + image_size=32, + in_channels=6, + out_dim=1, + ) + + x = torch.randn(2, 4, 6, 32, 32) + logits = model(x) + print(logits.shape) diff --git a/docs/source/modules/models_prithvi_wxc.rst b/docs/source/modules/models_prithvi_wxc.rst new file mode 100644 index 00000000..7a470547 --- /dev/null +++ b/docs/source/modules/models_prithvi_wxc.rst @@ -0,0 +1,57 @@ +Prithvi-WxC +=========== + +Description +----------- + +``prithvi_wxc`` is a lightweight PyHazards port inspired by the +Prithvi-WxC weather-climate foundation-model family. + +This module keeps the main ideas highlighted in the official paper/model card: + +- multi-step weather input sequences +- lead-time conditioning +- variable-summary conditioning +- transformer-style weather backbone +- dense downstream head for wildfire-style grid prediction + +Paper / source +-------------- + +- `Prithvi WxC: Foundation Model for Weather and Climate `_ +- `Prithvi-WxC model card `_ + +Paper parity note +----------------- + +This PyHazards implementation is intentionally **not** the full official +pretrained Prithvi-WxC checkpoint stack. Instead, it is a clean PyTorch port +that preserves the benchmark-relevant ideas we need for integration: + +- multi-variable weather sequence handling +- lead-time-aware conditioning +- transformer encoder over weather patch tokens +- dense wildfire-risk decoding + +It does not claim checkpoint parity with the official NASA/IBM release. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="prithvi_wxc", + task="segmentation", + image_size=32, + in_channels=8, + out_dim=1, + ) + + x = torch.randn(2, 5, 8, 32, 32) + lead_time = torch.linspace(6.0, 30.0, 5).repeat(2, 1) + logits = model({"x": x, "lead_time_hours": lead_time}) + print(logits.shape) diff --git a/docs/source/modules/models_qwen25_vl_wildfire_prompted.rst b/docs/source/modules/models_qwen25_vl_wildfire_prompted.rst new file mode 100644 index 00000000..d0a2c7d5 --- /dev/null +++ b/docs/source/modules/models_qwen25_vl_wildfire_prompted.rst @@ -0,0 +1,52 @@ +Qwen2.5-VL Wildfire Prompted +============================ + +Description +----------- + +``qwen25_vl_wildfire_prompted`` is a benchmark-facing prompt-conditioned VLM port +inspired by Qwen2.5-VL. + +This implementation keeps the integration-relevant structure for a generic wildfire +vision-language baseline: + +- raster wildfire/environment input +- prompt-token conditioning +- visual-token and prompt-token fusion +- dense wildfire-risk decoding + +Paper / source +-------------- + +- `QwenLM/Qwen2.5-VL GitHub repository `_ +- `Qwen2.5-VL Technical Report `_ + +Paper parity note +----------------- + +This PyHazards implementation is intentionally not a full parameter-port of the +released Qwen2.5-VL checkpoints. Instead, it is a compact prompt-conditioned +wildfire segmentation baseline that preserves the benchmark-relevant VLM pattern: + +- prompt-conditioned visual reasoning +- image-token and prompt-token fusion +- dense downstream wildfire prediction head + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="qwen25_vl_wildfire_prompted", + task="segmentation", + in_channels=6, + out_dim=1, + ) + + x = torch.randn(2, 6, 32, 32) + logits = model(x) + print(logits.shape) diff --git a/docs/source/modules/models_rainformer.rst b/docs/source/modules/models_rainformer.rst new file mode 100644 index 00000000..b5cf7329 --- /dev/null +++ b/docs/source/modules/models_rainformer.rst @@ -0,0 +1,42 @@ +Rainformer +========== + +Description +----------- + +``rainformer`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.rainformer_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="rainformer", + task="segmentation", + ) + + x = torch.randn(2, 1, 32, 32) + out = model(x) + print(type(out)) diff --git a/docs/source/modules/models_random_forest.rst b/docs/source/modules/models_random_forest.rst new file mode 100644 index 00000000..3cb20ff6 --- /dev/null +++ b/docs/source/modules/models_random_forest.rst @@ -0,0 +1,42 @@ +Random Forest +============= + +Description +----------- + +``random_forest`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.random_forest_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="random_forest", + task="classification", + ) + + x = torch.randn(4, 16) + out = model(x) + print(type(out)) diff --git a/docs/source/modules/models_resnet18_unet.rst b/docs/source/modules/models_resnet18_unet.rst new file mode 100644 index 00000000..d55b5403 --- /dev/null +++ b/docs/source/modules/models_resnet18_unet.rst @@ -0,0 +1,42 @@ +ResNet-18 U-Net +=============== + +Description +----------- + +``resnet18_unet`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.resnet18_unet_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="resnet18_unet", + task="segmentation", + ) + + if "segmentation" == "classification": + x = torch.randn(4, 16) + else: + x = torch.randn(2, 1, 32, 32) + out = model(x) + print(type(out)) diff --git a/docs/source/modules/models_saf_net.rst b/docs/source/modules/models_saf_net.rst new file mode 100644 index 00000000..4d99da2f --- /dev/null +++ b/docs/source/modules/models_saf_net.rst @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +SAF-Net +======= + +Overview +-------- + +``saf_net`` adds an intensity-oriented storm baseline to the shared ``tc.track_intensity`` evaluator. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``saf_net`` adds an intensity-oriented storm baseline to the shared ``tc.track_intensity`` evaluator. + +The adapter keeps full trajectory outputs so it can use the same report format as the other PyHazards storm models. 
+ +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`TCBench Alpha ` + +External References +------------------- + +**Paper:** `SAF-Net: A spatio-temporal deep learning method for typhoon intensity prediction `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``saf_net`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="saf_net", task="regression", input_dim=8, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Track channels are retained so the shared storm evaluator can score all baselines consistently. diff --git a/docs/source/modules/models_segformer.rst b/docs/source/modules/models_segformer.rst new file mode 100644 index 00000000..429688b6 --- /dev/null +++ b/docs/source/modules/models_segformer.rst @@ -0,0 +1,42 @@ +SegFormer +========= + +Description +----------- + +``segformer`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.segformer_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. 
+ +Example of how to use it +------------------------ + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="segformer", + task="segmentation", + ) + + x = torch.randn(2, 1, 32, 32) + out = model(x) + print(type(out)) diff --git a/docs/source/modules/models_swin_unet.rst b/docs/source/modules/models_swin_unet.rst new file mode 100644 index 00000000..32f4cfd5 --- /dev/null +++ b/docs/source/modules/models_swin_unet.rst @@ -0,0 +1,42 @@ +Swin-UNet +========= + +Description +----------- + +``swin_unet`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.swin_unet_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="swin_unet", + task="segmentation", + ) + + if "segmentation" == "classification": + x = torch.randn(4, 16) + else: + x = torch.randn(2, 1, 32, 32) + out = model(x) + print(type(out)) diff --git a/docs/source/modules/models_swinlstm.rst b/docs/source/modules/models_swinlstm.rst new file mode 100644 index 00000000..1a576ebd --- /dev/null +++ b/docs/source/modules/models_swinlstm.rst @@ -0,0 +1,42 @@ +SwinLSTM +======== + +Description +----------- + +``swinlstm`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.swinlstm_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. + +Example of how to use it +------------------------ + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="swinlstm", + task="segmentation", + ) + + if "segmentation" == "classification": + x = torch.randn(4, 16) + else: + x = torch.randn(2, 1, 32, 32) + out = model(x) + print(type(out)) diff --git a/docs/source/modules/models_tcif_fusion.rst b/docs/source/modules/models_tcif_fusion.rst new file mode 100644 index 00000000..f9c46589 --- /dev/null +++ b/docs/source/modules/models_tcif_fusion.rst @@ -0,0 +1,107 @@ +.. 
This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +TCIF-fusion +=========== + +Overview +-------- + +``tcif_fusion`` combines multiple feature streams behind the shared storm forecasting interface used throughout the PyHazards cyclone roadmap. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``tcif_fusion`` combines multiple feature streams behind the shared storm forecasting interface used throughout the PyHazards cyclone roadmap. + +The adapter focuses on the fusion contract and evaluator compatibility rather than full reproduction of the original training stack. 
+ +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`TCBench Alpha ` + +External References +------------------- + +**Paper:** `Tropical cyclone intensity forecasting using model knowledge guided deep learning model `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``tcif_fusion`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="tcif_fusion", task="regression", input_dim=8, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Outputs are shared storm forecast trajectories over the configured horizon. diff --git a/docs/source/modules/models_tcn.rst b/docs/source/modules/models_tcn.rst new file mode 100644 index 00000000..46f7055a --- /dev/null +++ b/docs/source/modules/models_tcn.rst @@ -0,0 +1,42 @@ +TCN +=== + +Description +----------- + +``tcn`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.tcn_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="tcn", + task="segmentation", + ) + + x = torch.randn(2, 1, 32, 32) + out = model(x) + print(type(out)) diff --git a/docs/source/modules/models_tropicalcyclone_mlp.rst b/docs/source/modules/models_tropicalcyclone_mlp.rst new file mode 100644 index 00000000..7c0b4c3c --- /dev/null +++ b/docs/source/modules/models_tropicalcyclone_mlp.rst @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +Tropical Cyclone MLP +==================== + +Overview +-------- + +``tropicalcyclone_mlp`` complements ``hurricast`` with a lighter-weight hurricane baseline that uses the same storm-history input contract. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``tropicalcyclone_mlp`` complements ``hurricast`` with a lighter-weight hurricane baseline that uses the same storm-history input contract. 
+ +The adapter is useful for practical low-cost intensity and trajectory experiments in basin-filtered settings. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`TCBench Alpha ` + +External References +------------------- + +**Paper:** `Deep Learning Experiments for Tropical Cyclone Intensity Forecasts `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``tropicalcyclone_mlp`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="tropicalcyclone_mlp", task="regression", input_dim=8, history=6) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Outputs are lead-time sequences of latitude, longitude, and intensity targets. diff --git a/docs/source/modules/models_tropicyclonenet.rst b/docs/source/modules/models_tropicyclonenet.rst new file mode 100644 index 00000000..dba1fe62 --- /dev/null +++ b/docs/source/modules/models_tropicyclonenet.rst @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +TropiCycloneNet +=============== + +Overview +-------- + +``tropicyclonenet`` extends the shared storm benchmark stack beyond the hurricane-only presets. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Tropical Cyclone + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. 
grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Track + Intensity + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``tropicyclonenet`` extends the shared storm benchmark stack beyond the hurricane-only presets. + +The PyHazards adapter keeps a single storm-history to forecast-trajectory interface so it can share the same evaluator as ``hurricast``. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Tropical Cyclone Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`TropiCycloneNet-Dataset ` + +External References +------------------- + +**Paper:** `Benchmark dataset and deep learning method for global tropical cyclone forecasting `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``tropicyclonenet`` + +Supported Tasks +--------------- + +- Track + Intensity + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="tropicyclonenet", task="regression", input_dim=8, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) + +Notes +----- + +- Outputs are lead-time sequences of latitude, longitude, and intensity targets. diff --git a/docs/source/modules/models_ts_satfire.rst b/docs/source/modules/models_ts_satfire.rst new file mode 100644 index 00000000..01c215da --- /dev/null +++ b/docs/source/modules/models_ts_satfire.rst @@ -0,0 +1,50 @@ +TS-SatFire +========== + +Description +----------- + +``ts_satfire`` is a lightweight PyHazards port inspired by the TS-SatFire +multi-temporal wildfire prediction benchmark family. 
+ +This module keeps the benchmark-relevant ideas we need for integration: + +- multi-temporal satellite image sequences +- auxiliary environmental channels +- spatio-temporal raster encoding +- dense wildfire progression prediction + +Paper / source +-------------- + +- `TS-SatFire paper `_ +- `TS-SatFire official repository `_ + +Paper parity note +----------------- + +This PyHazards implementation is intentionally **not** the entire official +TS-SatFire processing and benchmark stack. Instead, it is a clean spatio-temporal +port that preserves the prediction-task modeling role needed for benchmark integration. + +It does not claim exact architecture, dataset, or training parity with the original release. + +Example of how to use it +------------------------ + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="ts_satfire", + task="segmentation", + history=5, + in_channels=8, + out_channels=1, + ) + + x = torch.randn(2, 5, 8, 32, 32) + logits = model(x) + print(logits.shape) diff --git a/docs/source/modules/models_unet.rst b/docs/source/modules/models_unet.rst new file mode 100644 index 00000000..375a0a2a --- /dev/null +++ b/docs/source/modules/models_unet.rst @@ -0,0 +1,42 @@ +U-Net +===== + +Description +----------- + +``unet`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.unet_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. 
It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. + +Example of how to use it +------------------------ + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="unet", + task="segmentation", + ) + + if "segmentation" == "classification": + x = torch.randn(4, 16) + else: + x = torch.randn(2, 1, 32, 32) + out = model(x) + print(type(out)) diff --git a/docs/source/modules/models_urbanfloodcast.rst b/docs/source/modules/models_urbanfloodcast.rst new file mode 100644 index 00000000..6d52cdb2 --- /dev/null +++ b/docs/source/modules/models_urbanfloodcast.rst @@ -0,0 +1,107 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +UrbanFloodCast +============== + +Overview +-------- + +``urbanfloodcast`` adds an urban-focused raster baseline to the PyHazards inundation benchmark stack. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Flood + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Inundation + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Flood Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. 
+ + +Description +----------- + +``urbanfloodcast`` adds an urban-focused raster baseline to the PyHazards inundation benchmark stack. + +The implementation keeps the shared spatiotemporal tensor contract used by the synthetic inundation smoke dataset. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Flood Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`FloodCastBench ` + +External References +------------------- + +**Paper:** `UrbanFloodCast: WMO Urban Flooding Forecasting Challenge `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``urbanfloodcast`` + +Supported Tasks +--------------- + +- Inundation + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model(name="urbanfloodcast", task="regression", in_channels=3, history=4) + preds = model(torch.randn(2, 4, 3, 16, 16)) + print(preds.shape) + +Notes +----- + +- Outputs are next-horizon inundation depth rasters. diff --git a/docs/source/modules/models_utae.rst b/docs/source/modules/models_utae.rst new file mode 100644 index 00000000..1dcac233 --- /dev/null +++ b/docs/source/modules/models_utae.rst @@ -0,0 +1,42 @@ +UTAE +==== + +Description +----------- + +``utae`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.utae_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. 
It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. + +Example of how to use it +------------------------ + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="utae", + task="segmentation", + ) + + if "segmentation" == "classification": + x = torch.randn(4, 16) + else: + x = torch.randn(2, 1, 32, 32) + out = model(x) + print(type(out)) diff --git a/docs/source/modules/models_viirs_375m_active_fire.rst b/docs/source/modules/models_viirs_375m_active_fire.rst new file mode 100644 index 00000000..9cbfd9f2 --- /dev/null +++ b/docs/source/modules/models_viirs_375m_active_fire.rst @@ -0,0 +1,48 @@ +VIIRS 375 m Active Fire +======================= + +Description +----------- + +``viirs_375m_active_fire`` is a PyHazards operational-detection baseline inspired by +NASA's VIIRS 375 m active-fire algorithm and its FIRMS-facing use in practice. + +This implementation keeps the benchmark-relevant structure of the published method: + +- satellite active-fire detection framing rather than generic segmentation +- contextual thermal anomaly estimation +- split-window style evidence between mid-IR and longwave channels +- lightweight learnable calibration head so the method can run under the PyHazards benchmark contract + +Paper / source +-------------- + +- `NASA Earthdata VIIRS I-Band 375 m Active Fire page `_ +- `Schroeder et al. (2014) `_ + +Paper parity note +----------------- + +This PyHazards implementation is intentionally a benchmark-facing surrogate rather than a byte-for-byte +reproduction of the NASA operational code path. It preserves the operational-detection intuition of +contextual thermal anomaly plus spectral evidence, while adding a compact learnable calibration head so +that smoke runs can generate standard training artifacts. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="viirs_375m_active_fire", + task="segmentation", + in_channels=5, + out_dim=1, + ) + + x = torch.randn(2, 5, 32, 32) + logits = model(x) + print(logits.shape) diff --git a/docs/source/modules/models_vit_segmenter.rst b/docs/source/modules/models_vit_segmenter.rst new file mode 100644 index 00000000..a4c77307 --- /dev/null +++ b/docs/source/modules/models_vit_segmenter.rst @@ -0,0 +1,42 @@ +ViT Segmenter +============= + +Description +----------- + +``vit_segmenter`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.vit_segmenter_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. + +Example of how to use it +------------------------ + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="vit_segmenter", + task="segmentation", + ) + + if "segmentation" == "classification": + x = torch.randn(4, 16) + else: + x = torch.randn(2, 1, 32, 32) + out = model(x) + print(type(out)) diff --git a/docs/source/modules/models_wavecastnet.rst b/docs/source/modules/models_wavecastnet.rst new file mode 100644 index 00000000..51a41fef --- /dev/null +++ b/docs/source/modules/models_wavecastnet.rst @@ -0,0 +1,121 @@ +.. 
This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +WaveCastNet +=========== + +Overview +-------- + +``wavecastnet`` is the PyHazards entrypoint for dense-grid earthquake wavefield forecasting based on the ConvLEM encoder-decoder design described by Lyu et al. (2025). + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Earthquake + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Wavefield Forecasting + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Earthquake Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``wavecastnet`` is the PyHazards entrypoint for dense-grid earthquake wavefield forecasting based on the ConvLEM encoder-decoder design described by Lyu et al. (2025). + +This implementation focuses on the core dense-grid forecasting path and keeps data loading outside the model so users can adapt it to their own simulation or sensor pipelines. 
+ +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Earthquake Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`AEFA `, :doc:`pyCSEP ` + +External References +------------------- + +**Paper:** `Rapid wavefield forecasting for earthquake early warning via deep sequence to sequence learning `_ + +Registry Name +------------- + +Primary entrypoint: ``wavecastnet`` + +Supported Tasks +--------------- + +- Wavefield Forecasting + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="wavecastnet", + task="regression", + in_channels=3, + height=32, + width=24, + temporal_in=6, + temporal_out=4, + hidden_dim=32, + num_layers=1, + dropout=0.0, + ) + + x = torch.randn(2, 3, 6, 32, 24) + y = model(x) + print(y.shape) + +Notes +----- + +- The PyHazards version currently targets dense-grid forecasting rather than the paper's sparse-sensor variants. +- The smoke test uses reduced spatial and temporal sizes so it stays CPU-safe in CI. diff --git a/docs/source/modules/models_wildfire_aspp.rst b/docs/source/modules/models_wildfire_aspp.rst new file mode 100644 index 00000000..1f411824 --- /dev/null +++ b/docs/source/modules/models_wildfire_aspp.rst @@ -0,0 +1,115 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +CNN-ASPP +======== + +Overview +-------- + +``wildfire_aspp`` is the backward-compatible public PyHazards entrypoint for the CNN + ASPP wildfire spread model. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. 
container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Spread + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Wildfire Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``wildfire_aspp`` is the backward-compatible public PyHazards entrypoint for the CNN + ASPP wildfire spread model. + +PyHazards keeps the alias for compatibility while the implementation delegates to the native ``wildfire_cnn_aspp`` builder under the hood. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Wildfire Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`WildfireSpreadTS ` + +External References +------------------- + +**Paper:** `Application of Explainable Artificial Intelligence in Predicting Wildfire Spread `_ + +Registry Name +------------- + +Primary entrypoint: ``wildfire_aspp`` + +Aliases: ``wildfire_cnn_aspp`` + +Supported Tasks +--------------- + +- Spread + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="wildfire_aspp", + task="segmentation", + in_channels=12, + ) + + x = torch.randn(2, 12, 64, 64) + logits = model(x) + print(logits.shape) + +Notes +----- + +- ``wildfire_cnn_aspp`` remains available as an alias for the same public model. diff --git a/docs/source/modules/models_wildfire_fpa.rst b/docs/source/modules/models_wildfire_fpa.rst new file mode 100644 index 00000000..b5890f79 --- /dev/null +++ b/docs/source/modules/models_wildfire_fpa.rst @@ -0,0 +1,117 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. 
+ +DNN-LSTM-AutoEncoder +==================== + +Overview +-------- + +``wildfire_fpa`` is the paper-facing PyHazards entrypoint for the FPA-FOD wildfire framework described by Shen et al. (2023). + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 2 + + .. container:: catalog-stat-note + + Classification, Forecasting + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Wildfire Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``wildfire_fpa`` is the paper-facing PyHazards entrypoint for the FPA-FOD wildfire framework described by Shen et al. (2023). + +PyHazards exposes the combined DNN-LSTM-AutoEncoder workflow through one public registry name while keeping the lower-level components internal. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Wildfire Benchmark ` + +External References +------------------- + +**Paper:** `Developing risk assessment framework for wildfire in the United States `_ + +Registry Name +------------- + +Primary entrypoint: ``wildfire_fpa`` + +Supported Tasks +--------------- + +- Classification +- Forecasting + +Programmatic Use +---------------- + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="wildfire_fpa", + task="classification", + in_dim=8, + out_dim=5, + hidden_dim=64, + depth=2, + ) + + x = torch.randn(4, 8) + logits = model(x) + print(logits.shape) + +Notes +----- + +- This is the only retained public method from Shen et al. (2023) in the PyHazards catalog. +- Use ``task="classification"`` for the DNN stage. +- Use ``task="forecasting"`` or ``task="regression"`` for the sequence stage. diff --git a/docs/source/modules/models_wildfire_mamba.rst b/docs/source/modules/models_wildfire_mamba.rst new file mode 100644 index 00000000..0912fc54 --- /dev/null +++ b/docs/source/modules/models_wildfire_mamba.rst @@ -0,0 +1,115 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +:orphan: + +Wildfire Mamba +============== + +Overview +-------- + +``wildfire_mamba`` models county-day ERA5 sequences by combining selective state-space temporal blocks with a simple spatial graph layer. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Hidden + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Classification + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Unmapped + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. 
+ + +Description +----------- + +``wildfire_mamba`` models county-day ERA5 sequences by combining selective state-space temporal blocks with a simple spatial graph layer. + +The PyHazards implementation targets binary next-day per-county wildfire classification and supports an optional count head for multi-task extensions. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** Not yet mapped. + +External References +------------------- + +**Paper:** `Mamba: Linear-Time Sequence Modeling with Selective State Spaces `_ + +Registry Name +------------- + +Primary entrypoint: ``wildfire_mamba`` + +Supported Tasks +--------------- + +- Classification + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="wildfire_mamba", + task="classification", + in_dim=3, + num_counties=4, + past_days=5, + ) + + x = torch.randn(2, 5, 4, 3) + logits = model(x) + print(logits.shape) + +Notes +----- + +- The CI smoke test validates the default binary-classification path on synthetic data. diff --git a/docs/source/modules/models_wildfiregpt.rst b/docs/source/modules/models_wildfiregpt.rst new file mode 100644 index 00000000..67aabd7e --- /dev/null +++ b/docs/source/modules/models_wildfiregpt.rst @@ -0,0 +1,55 @@ +WildfireGPT +=========== + +Description +----------- + +``wildfiregpt`` is a benchmark-facing PyHazards port inspired by the +WildfireGPT multi-agent retrieval-augmented generation system. 
+ +This module preserves the main ideas emphasized by the official paper/repository: + +- user profile conditioning +- planning / analyst style system-role tokens +- retrieved knowledge conditioning +- decision-support style fusion before producing a wildfire risk map + +Paper / source +-------------- + +- `MARSHA: multi-agent RAG system for hazard adaptation `_ +- `WildfireGPT repository `_ + +Paper parity note +----------------- + +This PyHazards implementation is intentionally **not** the original Streamlit + +OpenAI Assistant API system. Instead, it is a benchmark-friendly neural port +that preserves the architectural roles needed for PyHazards integration: + +- user-profile representation +- retrieved-context representation +- multi-agent style orchestration tokens +- downstream wildfire risk decoding + +It is suitable for smoke testing and benchmark integration, while remaining +transparent about not reproducing the external hosted LLM stack. + +Example of how to use it +------------------------ + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="wildfiregpt", + task="segmentation", + in_channels=12, + out_dim=1, + ) + + x = torch.randn(2, 12, 32, 32) + logits = model(x) + print(logits.shape) diff --git a/docs/source/modules/models_wildfirespreadts.rst b/docs/source/modules/models_wildfirespreadts.rst new file mode 100644 index 00000000..ec85fdfe --- /dev/null +++ b/docs/source/modules/models_wildfirespreadts.rst @@ -0,0 +1,112 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +WildfireSpreadTS +================ + +Overview +-------- + +``wildfirespreadts`` models wildfire spread as a sequence-to-mask prediction task. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Wildfire + + .. 
container:: catalog-stat-note + + Public catalog grouping used for this model. + + .. grid-item-card:: Maturity + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + Implemented + + .. container:: catalog-stat-note + + Catalog maturity label used on the index page. + + .. grid-item-card:: Tasks + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 1 + + .. container:: catalog-stat-note + + Spread + + .. grid-item-card:: Benchmark Family + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + :doc:`Wildfire Benchmark ` + + .. container:: catalog-stat-note + + Primary benchmark-family link used for compatible evaluation coverage. + + +Description +----------- + +``wildfirespreadts`` models wildfire spread as a sequence-to-mask prediction task. + +The PyHazards adapter uses a compact 3D convolution stack that consumes short raster history windows and predicts the next spread mask. + +Benchmark Compatibility +----------------------- + +**Primary benchmark family:** :doc:`Wildfire Benchmark ` + +**Mapped benchmark ecosystems:** :doc:`WildfireSpreadTS ` + +External References +------------------- + +**Paper:** `WildfireSpreadTS: A Dataset of Multi-Modal Time Series for Wildfire Spread Prediction `_ | **Repo:** `Repository `__ + +Registry Name +------------- + +Primary entrypoint: ``wildfirespreadts`` + +Supported Tasks +--------------- + +- Spread + +Programmatic Use +---------------- + +.. code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="wildfirespreadts", + task="segmentation", + history=4, + in_channels=6, + ) + logits = model(torch.randn(2, 4, 6, 16, 16)) + print(logits.shape) + +Notes +----- + +- The smoke dataset uses temporal wildfire spread tensors rather than single-frame rasters. 
diff --git a/docs/source/modules/models_wrf_sfire.rst b/docs/source/modules/models_wrf_sfire.rst new file mode 100644 index 00000000..84caef96 --- /dev/null +++ b/docs/source/modules/models_wrf_sfire.rst @@ -0,0 +1,60 @@ +WRF-SFIRE Adapter +================= + +Description +----------- + +``wrf_sfire`` is a lightweight PyHazards raster adapter inspired by the +transport-and-diffusion behavior of the WRF-SFIRE wildfire spread system. + +This module is designed as a benchmark-facing canonical model that keeps the +main spread intuition simple inside the PyHazards library: + +- ``in_channels=12`` +- ``out_channels=1`` +- ``diffusion_steps=3`` by default +- local transport via a fixed spread kernel +- terrain and moisture modulation during repeated spread steps + +Paper / source +-------------- + +- `Coupled atmosphere-wildland fire modeling with WRF 3.3 and SFIRE 2011 `_ +- `WRF-SFIRE repository `_ + +Paper parity note +----------------- + +This PyHazards implementation is intentionally **not** the full WRF-SFIRE +coupled simulator. Instead, it provides a compact raster adapter that preserves +the main local-spread intuition needed for benchmark integration and smoke +testing inside the main library. + +The canonical PyHazards version keeps: + +- raster input/output contract +- repeated local diffusion +- terrain-aware spread scaling +- moisture damping + +It does not attempt to reproduce the full atmospheric coupling, mesh handling, +or solver stack of the original WRF-SFIRE system. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="wrf_sfire", + task="segmentation", + in_channels=12, + diffusion_steps=3, + ) + + x = torch.randn(2, 12, 32, 32) + spread = model(x) + print(spread.shape) diff --git a/docs/source/modules/models_xgboost.rst b/docs/source/modules/models_xgboost.rst new file mode 100644 index 00000000..b47c7bda --- /dev/null +++ b/docs/source/modules/models_xgboost.rst @@ -0,0 +1,42 @@ +XGBoost +======= + +Description +----------- + +``xgboost`` is the canonical PyHazards promotion of the wildfire benchmark Track-O baseline. + +This module is kept in ``pyhazards.models`` so the main branch can treat the benchmark baseline as a first-class model implementation. + +It is primarily intended for benchmark integration, smoke testing, and registry-based construction through ``build_model(...)``. + +Paper / source +-------------- + +- Promoted from the wildfire benchmark Track-O model family in PyHazards. +- Source implementation lineage: ``pyhazards.pipelines.wildfire_benchmark.models.xgboost_track_o``. + +Paper parity note +----------------- + +This PyHazards implementation is intentionally benchmark-facing. It preserves the modeling role of the Track-O baseline while making the model available from the main ``pyhazards.models`` layer. + +Example of how to use it +------------------------ + +.. 
code-block:: python + + import torch + from pyhazards.models import build_model + + model = build_model( + name="xgboost", + task="classification", + ) + + x = torch.randn(4, 16) + out = model(x) + print(type(out)) diff --git a/docs/source/pygip_datasets.rst b/docs/source/pygip_datasets.rst deleted file mode 100644 index 0585d02a..00000000 --- a/docs/source/pygip_datasets.rst +++ /dev/null @@ -1,24 +0,0 @@ -Datasets -=================== - -Summary ------- - -In this section, we present the datasets currently supported by PyGIP. -For each dataset, we provide both DGL and PyG APIs to ensure flexibility and compatibility with different graph learning frameworks. - -Submodules ---------- - -.. autosummary:: - :toctree: _autosummary/datasets - :recursive: - - pygip.datasets.Cora - pygip.datasets.CiteSeer - pygip.datasets.PubMed - pygip.datasets.Computers - pygip.datasets.Photo - pygip.datasets.CoauthorCS - pygip.datasets.CoauthorPhysics - diff --git a/docs/source/pygip_models_attack.rst b/docs/source/pygip_models_attack.rst deleted file mode 100644 index f63e52ab..00000000 --- a/docs/source/pygip_models_attack.rst +++ /dev/null @@ -1,22 +0,0 @@ -Attack -=================== - -Summary ------- - -In this section, we present all currently supported **Model Extraction Attack** modules in PyGIP. - -Submodules ---------- - -.. 
autosummary:: - :toctree: _autosummary/attack - :template: autosummary/module.rst - :recursive: - - pygip.models.attack.base - pygip.models.attack.mea.MEA - pygip.models.attack.AdvMEA - pygip.models.attack.CEGA - pygip.models.attack.DataFreeMEA - pygip.models.attack.Realistic \ No newline at end of file diff --git a/docs/source/pygip_models_defense.rst b/docs/source/pygip_models_defense.rst deleted file mode 100644 index 453e836c..00000000 --- a/docs/source/pygip_models_defense.rst +++ /dev/null @@ -1,23 +0,0 @@ -Defense -=================== - -Summary -------- - -In this section, we present all currently supported **Model Extraction Defense** modules in PyGIP. - -Submodules ----------- - -.. autosummary:: - :toctree: _autosummary/defense - :template: autosummary/module.rst - :recursive: - - pygip.models.defense.base - pygip.models.defense.RandomWM - pygip.models.defense.BackdoorWM - pygip.models.defense.SurviveWM - pygip.models.defense.ImperceptibleWM - pygip.models.defense.atom.ATOM - pygip.models.defense.Integrity diff --git a/docs/source/pygip_utils.rst b/docs/source/pygip_utils.rst deleted file mode 100644 index 5aa60b54..00000000 --- a/docs/source/pygip_utils.rst +++ /dev/null @@ -1,18 +0,0 @@ -Utils -=================== - -Summary -------- - -In this section, we present the utility modules of PyGIP. - -Submodules ----------- - -.. autosummary:: - :toctree: _autosummary/utils - :recursive: - - pygip.utils.hardware - pygip.utils.dglTopyg - pygip.utils.metrics diff --git a/docs/source/pyhazards_benchmarks.rst b/docs/source/pyhazards_benchmarks.rst new file mode 100644 index 00000000..82ec8635 --- /dev/null +++ b/docs/source/pyhazards_benchmarks.rst @@ -0,0 +1,694 @@ +.. This file is generated by scripts/render_benchmark_docs.py. Do not edit by hand. + +Benchmarks +=================== + +Explore shared benchmark families, aligned external ecosystems, supported +tasks, and model compatibility across PyHazards. + +At a Glance +----------- + +.. 
grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Benchmark Families + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 4 + + .. container:: catalog-stat-note + + Shared evaluator families available through the benchmark runner. + + .. grid-item-card:: Ecosystem Mappings + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 12 + + .. container:: catalog-stat-note + + External benchmark or data ecosystems linked from the public docs. + + .. grid-item-card:: Supported Task Families + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 7 + + .. container:: catalog-stat-note + + Hazard tasks covered across the family-level benchmark contracts. + + .. grid-item-card:: Smoke Configurations + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 27 + + .. container:: catalog-stat-note + + Unique smoke configs referenced by the benchmark family cards. + + +Benchmark Families +------------------ + +These four cards summarize the benchmark families exposed through the +shared runner and compress the core tasks, metrics, support level, and +coverage counts into a scan-friendly catalog. + +.. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Wildfire Benchmark + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Shared PyHazards evaluator family for wildfire danger and wildfire spread experiments. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Danger` :bdg-secondary:`Spread` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Tasks:** Danger, Spread + + .. container:: catalog-meta-row + + **Key Metrics:** Accuracy, Macro F1, AUC, PR-AUC, +5 more + + .. container:: catalog-meta-row + + **Coverage:** 8 smoke configs | 8 models | 1 ecosystem + + .. container:: catalog-link-row + + **View Details:** :doc:`Wildfire Benchmark ` + + .. 
grid-item-card:: Earthquake Benchmark + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Shared PyHazards evaluator family for earthquake phase-picking and wavefield-forecasting runs. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-secondary:`Wavefield Forecasting` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Tasks:** Phase Picking, Wavefield Forecasting + + .. container:: catalog-meta-row + + **Key Metrics:** P-pick MAE, S-pick MAE, Precision, Recall, +3 more + + .. container:: catalog-meta-row + + **Coverage:** 5 smoke configs | 5 models | 4 ecosystems + + .. container:: catalog-link-row + + **View Details:** :doc:`Earthquake Benchmark ` + + .. grid-item-card:: Flood Benchmark + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Shared PyHazards evaluator family for streamflow forecasting and inundation prediction. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-secondary:`Inundation` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Tasks:** Streamflow, Inundation + + .. container:: catalog-meta-row + + **Key Metrics:** MAE, RMSE, NSE, KGE, +3 more + + .. container:: catalog-meta-row + + **Coverage:** 6 smoke configs | 6 models | 4 ecosystems + + .. container:: catalog-link-row + + **View Details:** :doc:`Flood Benchmark ` + + .. grid-item-card:: Tropical Cyclone Benchmark + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Shared PyHazards evaluator family for tropical cyclone and hurricane track-intensity forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Tasks:** Track + Intensity + + .. container:: catalog-meta-row + + **Key Metrics:** Track Error, Intensity MAE + + .. 
container:: catalog-meta-row + + **Coverage:** 8 smoke configs | 8 models | 3 ecosystems + + .. container:: catalog-link-row + + **View Details:** :doc:`Tropical Cyclone Benchmark ` + + +Coverage Matrix +--------------- + +Use the matrix below for side-by-side comparison of hazard coverage, +family-level tasks, primary metrics, linked-model counts, and support +status without opening the detail pages first. + +.. list-table:: + :widths: 14 22 18 20 14 12 + :header-rows: 1 + :class: catalog-matrix + + * - Hazard + - Benchmark Family + - Tasks + - Primary Metrics + - Linked Models + - Support Status + * - Wildfire + - :doc:`Wildfire Benchmark ` + - Danger, Spread + - Accuracy, Macro F1, AUC, PR-AUC, +5 more + - 8 models + - Synthetic-backed + * - Earthquake + - :doc:`Earthquake Benchmark ` + - Phase Picking, Wavefield Forecasting + - P-pick MAE, S-pick MAE, Precision, Recall, +3 more + - 5 models + - Synthetic-backed + * - Flood + - :doc:`Flood Benchmark ` + - Streamflow, Inundation + - MAE, RMSE, NSE, KGE, +3 more + - 6 models + - Synthetic-backed + * - Tropical Cyclone + - :doc:`Tropical Cyclone Benchmark ` + - Track + Intensity + - Track Error, Intensity MAE + - 8 models + - Synthetic-backed + +Benchmark Ecosystems +-------------------- + +Browse the aligned benchmark ecosystems by hazard family. Each card +links to a detail page with the routed benchmark family, source links, +and the models currently mapped to that ecosystem. + +.. tab-set:: + :class: catalog-tabs + + .. tab-item:: Wildfire + + .. container:: catalog-section-note + + Ecosystem cards describe the external benchmark or data protocol + surfaced on this page and show how it maps back to the shared + PyHazards benchmark family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: WildfireSpreadTS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Temporal wildfire spread benchmark coverage for the shared wildfire spread evaluator. 
+ + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** IoU, F1, Burned-area MAE + + .. container:: catalog-meta-row + + **Coverage:** 5 smoke configs | 5 models + + .. container:: catalog-link-row + + **View Details:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `WildfireSpreadTS: A Dataset of Multi-Modal Time Series for Wildfire Spread Prediction `_ | **Repo:** `Repository `__ + + + .. tab-item:: Earthquake + + .. container:: catalog-section-note + + Ecosystem cards describe the external benchmark or data protocol + surfaced on this page and show how it maps back to the shared + PyHazards benchmark family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: AEFA + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + AEFA-style forecasting dataset support for the shared earthquake forecasting path. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Wavefield Forecasting` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** MAE, MSE + + .. container:: catalog-meta-row + + **Coverage:** 1 smoke config | 1 model + + .. container:: catalog-link-row + + **View Details:** :doc:`AEFA ` + + .. container:: catalog-link-row + + **Paper:** `AEFA `_ + + .. grid-item-card:: pick-benchmark + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + pick-benchmark-compatible waveform picking support routed through the shared earthquake evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-info:`Synthetic-backed` + + .. 
container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** P-pick MAE, S-pick MAE, Precision, Recall, +1 more + + .. container:: catalog-meta-row + + **Coverage:** 2 smoke configs | 2 models + + .. container:: catalog-link-row + + **View Details:** :doc:`pick-benchmark ` + + .. container:: catalog-link-row + + **Paper:** `pick-benchmark `_ + + .. grid-item-card:: pyCSEP + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + pyCSEP-style forecasting report export for the earthquake forecasting smoke path. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Wavefield Forecasting` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** MAE, MSE + + .. container:: catalog-meta-row + + **Coverage:** 1 smoke config | 1 model + + .. container:: catalog-link-row + + **View Details:** :doc:`pyCSEP ` + + .. container:: catalog-link-row + + **Paper:** `pyCSEP `_ + + .. grid-item-card:: SeisBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + SeisBench-shaped waveform picking support for the shared earthquake benchmark family. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** P-pick MAE, S-pick MAE, Precision, Recall, +1 more + + .. container:: catalog-meta-row + + **Coverage:** 2 smoke configs | 2 models + + .. container:: catalog-link-row + + **View Details:** :doc:`SeisBench ` + + .. container:: catalog-link-row + + **Paper:** `SeisBench - A Toolbox for Machine Learning in Seismology `_ | **Repo:** `Repository `__ + + + .. tab-item:: Flood + + .. 
container:: catalog-section-note + + Ecosystem cards describe the external benchmark or data protocol + surfaced on this page and show how it maps back to the shared + PyHazards benchmark family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Caravan + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Caravan-style streamflow benchmark coverage for the shared flood streamflow evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** MAE, RMSE, NSE, KGE + + .. container:: catalog-meta-row + + **Coverage:** 2 smoke configs | 2 models + + .. container:: catalog-link-row + + **View Details:** :doc:`Caravan ` + + .. container:: catalog-link-row + + **Paper:** `Caravan - A global community dataset for large-sample hydrology `_ | **Repo:** `Repository `__ + + .. grid-item-card:: FloodCastBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + FloodCastBench-style inundation benchmark coverage for the shared flood inundation evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Inundation` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** Pixel MAE, IoU, F1 + + .. container:: catalog-meta-row + + **Coverage:** 2 smoke configs | 2 models + + .. container:: catalog-link-row + + **View Details:** :doc:`FloodCastBench ` + + .. container:: catalog-link-row + + **Paper:** `FloodCastBench `_ + + .. grid-item-card:: HydroBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + HydroBench-style streamflow diagnostics coverage for the shared flood streamflow evaluator. + + .. 
container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** MAE, RMSE, NSE, KGE + + .. container:: catalog-meta-row + + **Coverage:** 1 smoke config | 1 model + + .. container:: catalog-link-row + + **View Details:** :doc:`HydroBench ` + + .. container:: catalog-link-row + + **Paper:** `HydroBench `_ + + .. grid-item-card:: WaterBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + WaterBench-style streamflow benchmark coverage for the shared flood evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** MAE, RMSE, NSE, KGE + + .. container:: catalog-meta-row + + **Coverage:** 1 smoke config | 1 model + + .. container:: catalog-link-row + + **View Details:** :doc:`WaterBench ` + + .. container:: catalog-link-row + + **Paper:** `WaterBench: A Large-scale Benchmark Dataset for Data-driven Streamflow Forecasting `_ | **Repo:** `Repository `__ + + + .. tab-item:: Tropical Cyclone + + .. container:: catalog-section-note + + Ecosystem cards describe the external benchmark or data protocol + surfaced on this page and show how it maps back to the shared + PyHazards benchmark family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: IBTrACS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + IBTrACS-backed storm benchmark coverage for the shared tropical cyclone evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-info:`Synthetic-backed` + + .. 
container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** Track Error, Intensity MAE + + .. container:: catalog-meta-row + + **Coverage:** 4 smoke configs | 4 models + + .. container:: catalog-link-row + + **View Details:** :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Paper:** `IBTrACS `_ + + .. grid-item-card:: TCBench Alpha + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + TCBench Alpha-style storm benchmark coverage for the shared tropical cyclone evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** Track Error, Intensity MAE + + .. container:: catalog-meta-row + + **Coverage:** 3 smoke configs | 3 models + + .. container:: catalog-link-row + + **View Details:** :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Paper:** `TCBench Alpha `_ + + .. grid-item-card:: TropiCycloneNet-Dataset + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + TropiCycloneNet-Dataset-backed storm benchmark coverage for the shared tropical cyclone evaluator. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-info:`Synthetic-backed` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Key Metrics:** Track Error, Intensity MAE + + .. container:: catalog-meta-row + + **Coverage:** 1 smoke config | 1 model + + .. container:: catalog-link-row + + **View Details:** :doc:`TropiCycloneNet-Dataset ` + + .. container:: catalog-link-row + + **Paper:** `TropiCycloneNet-Dataset `_ + + + +Programmatic Use +---------------- + +.. 
code-block:: python + + from pyhazards.configs import load_experiment_config + from pyhazards.engine import BenchmarkRunner + + config = load_experiment_config("pyhazards/configs/earthquake/phasenet_smoke.yaml") + summary = BenchmarkRunner().run(config) + print(summary.metrics) + +Use ``python scripts/run_benchmark.py --help`` for the CLI entry point, +then pair this page with :doc:`pyhazards_configs` for experiment YAMLs +and :doc:`pyhazards_reports` for comparable benchmark exports. + +.. toctree:: + :maxdepth: 1 + :hidden: + + benchmarks/aefa + benchmarks/caravan + benchmarks/earthquake_benchmark + benchmarks/flood_benchmark + benchmarks/floodcastbench + benchmarks/hydrobench + benchmarks/ibtracs + benchmarks/pick_benchmark + benchmarks/pycsep + benchmarks/seisbench + benchmarks/tcbench_alpha + benchmarks/tropical_cyclone_benchmark + benchmarks/tropicyclonenet_dataset + benchmarks/waterbench + benchmarks/wildfire_benchmark + benchmarks/wildfirespreadts_ecosystem diff --git a/docs/source/pyhazards_configs.rst b/docs/source/pyhazards_configs.rst new file mode 100644 index 00000000..0a09d5cc --- /dev/null +++ b/docs/source/pyhazards_configs.rst @@ -0,0 +1,40 @@ +Configs +=================== + +Overview +-------- + +Use the configs layer when you want reproducible experiment specifications for +benchmark runs, smoke tests, and hazard-specific model comparisons. + +What This Page Covers +--------------------- + +- ``pyhazards.configs`` dataclasses and YAML loading helpers +- hazard-scoped smoke configs under ``pyhazards/configs//`` +- the shared structure for benchmark, dataset, model, and report settings + +Typical Usage +------------- + +.. 
code-block:: python + + from pyhazards.configs import load_experiment_config + + config = load_experiment_config("pyhazards/configs/flood/hydrographnet_smoke.yaml") + print(config.benchmark.hazard_task) + print(config.model.name) + +Config Layout +------------- + +Each experiment config contains four sections: + +- ``benchmark``: which evaluator to run and which hazard task to score +- ``dataset``: which registered dataset to load and with which parameters +- ``model``: which registered model to build and with which parameters +- ``report``: where to write JSON, Markdown, or CSV outputs + +Next step: pair this page with :doc:`pyhazards_benchmarks` when you want to +match configs to implemented evaluation paths, and with +:doc:`pyhazards_reports` when you want to export benchmark outputs. diff --git a/docs/source/pyhazards_datasets.rst b/docs/source/pyhazards_datasets.rst new file mode 100644 index 00000000..2064dd14 --- /dev/null +++ b/docs/source/pyhazards_datasets.rst @@ -0,0 +1,1401 @@ +.. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand. + +Datasets +=================== + +Browse PyHazards datasets across hazard families, compare source roles, +inspection paths, and registry surfaces, and navigate to dataset-specific +detail pages. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Groups + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 5 + + .. container:: catalog-stat-note + + Public dataset tabs grouped by the curated hazard-first taxonomy. + + .. grid-item-card:: Public Datasets + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 34 + + .. container:: catalog-stat-note + + Curated datasets surfaced on the public site. + + .. grid-item-card:: Inspection Entry Points + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 24 + + .. 
container:: catalog-stat-note + + Datasets with an explicit inspection command documented on the site. + + .. grid-item-card:: Registry-loadable Datasets + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 12 + + .. container:: catalog-stat-note + + Datasets with a documented public ``load_dataset(...)`` path. + + +Catalog by Hazard +----------------- + +Use the hazard tabs below to browse the public dataset catalog. Each +card keeps the summary short, then links into the detail page, the +primary source, and the most relevant inspection or registry surface. + +.. tab-set:: + :class: catalog-tabs + + .. tab-item:: Shared Forcing + + .. container:: catalog-section-note + + Cross-hazard meteorology and imagery sources that support multiple PyHazards workflows, inspections, and forcing pipelines. + + .. rubric:: Implemented Datasets + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: ERA5 + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + ECMWF's global reanalysis used as a high-resolution meteorological baseline for hazard experiments. + + .. container:: catalog-chip-row + + :bdg-secondary:`Reanalysis` :bdg-info:`Regular latitude-longitude grid` + + .. container:: catalog-meta-row + + **Coverage:** Global + + .. container:: catalog-meta-row + + **Update Cadence:** Daily ERA5T updates with about 5-day latency, followed by final validated releases after 2-3 months + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10`` + + .. container:: catalog-link-row + + **Details:** :doc:`ERA5 ` + + .. container:: catalog-link-row + + **Primary Source:** `Hersbach et al. (2020). The ERA5 global reanalysis. `_ + + .. grid-item-card:: GOES-R + :class-card: catalog-entry-card + + .. 
container:: catalog-entry-summary + + Rapid-refresh GOES-R satellite imagery used for smoke, fire, and weather monitoring workflows. + + .. container:: catalog-chip-row + + :bdg-secondary:`Geostationary Imagery` :bdg-info:`Raster imagery time series on the ABI fixed grid` + + .. container:: catalog-meta-row + + **Coverage:** Western Hemisphere / Americas geostationary view + + .. container:: catalog-meta-row + + **Update Cadence:** Continuous ingest as new files become available + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.goesr.inspection --path /path/to/goesr_data --max-items 10`` + + .. container:: catalog-link-row + + **Details:** :doc:`GOES-R ` + + .. container:: catalog-link-row + + **Primary Source:** `Schmit et al. (2017). A closer look at the ABI on the GOES-R series. `_ + + .. grid-item-card:: MERRA-2 + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Global atmospheric reanalysis from NASA GMAO used as a shared meteorological backbone for hazard modeling. + + .. container:: catalog-chip-row + + :bdg-secondary:`Reanalysis` :bdg-info:`Regular latitude-longitude grid` + + .. container:: catalog-meta-row + + **Coverage:** Global + + .. container:: catalog-meta-row + + **Update Cadence:** Published monthly with typical 2-3 week latency after month end + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.merra2.inspection 20260101`` + + .. container:: catalog-link-row + + **Details:** :doc:`MERRA-2 ` + + .. container:: catalog-link-row + + **Primary Source:** `Gelaro et al. (2017). The Modern-Era Retrospective Analysis for Research and Applications, Version 2 (MERRA-2). `_ + + .. grid-item-card:: HPWREN Weather + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Public HPWREN station feeds used for local weather-station context in wildfire operations and validation workflows. + + .. 
container:: catalog-chip-row + + :bdg-secondary:`Weather Stations` :bdg-info:`Station points with tabular observations` + + .. container:: catalog-meta-row + + **Coverage:** HPWREN station network footprint + + .. container:: catalog-meta-row + + **Update Cadence:** Real-time operational updates plus archived monthly summaries + + .. container:: catalog-meta-row + + **Inspection:** ``find /home/runyang/ryang/HPWREN_Weather -maxdepth 2 -type f | head`` + + .. container:: catalog-link-row + + **Details:** :doc:`HPWREN Weather ` + + .. container:: catalog-link-row + + **Primary Source:** `HPWREN `_ + + .. grid-item-card:: Spot Forecast + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + NOAA NWS spot forecast products used for incident-specific forecast guidance and fire-weather context. + + .. container:: catalog-chip-row + + :bdg-secondary:`Incident Forecast Guidance` :bdg-info:`Text and bulletin-style products` + + .. container:: catalog-meta-row + + **Coverage:** Incident-specific forecast products + + .. container:: catalog-meta-row + + **Update Cadence:** Generated when requested for active incidents + + .. container:: catalog-meta-row + + **Inspection:** ``find /home/runyang/ryang/Spot_Forecast_Current -maxdepth 2 -type f | head`` + + .. container:: catalog-link-row + + **Details:** :doc:`Spot Forecast ` + + .. container:: catalog-link-row + + **Primary Source:** `NWS Spot Forecast page `_ + + .. grid-item-card:: NOHRSC SNODAS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Daily snow-analysis grids used as snow-state context for mountain wildfire and seasonal fuel workflows. + + .. container:: catalog-chip-row + + :bdg-secondary:`Snow Analysis` :bdg-info:`Gridded raster fields` + + .. container:: catalog-meta-row + + **Coverage:** Continental United States + + .. container:: catalog-meta-row + + **Update Cadence:** Daily + + .. 
container:: catalog-meta-row + + **Inspection:** ``find /home/runyang/ryang/NOHRSC_SNODAS_masked_2024 -maxdepth 2 -type d | head`` + + .. container:: catalog-link-row + + **Details:** :doc:`NOHRSC SNODAS ` + + .. container:: catalog-link-row + + **Primary Source:** `NOHRSC archived data and SNODAS description `_ + + .. grid-item-card:: HRRR + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + NOAA's rapid-refresh forecast system used for short-range wildfire weather features and forecast forcing. + + .. container:: catalog-chip-row + + :bdg-secondary:`Weather Forecast` :bdg-info:`Gridded forecast fields` + + .. container:: catalog-meta-row + + **Coverage:** CONUS-focused forecast domain + + .. container:: catalog-meta-row + + **Update Cadence:** Hourly + + .. container:: catalog-meta-row + + **Inspection:** ``find /home/runyang/ryang/HRRR/2024 -maxdepth 3 -type f | head`` + + .. container:: catalog-link-row + + **Details:** :doc:`HRRR ` + + .. container:: catalog-link-row + + **Primary Source:** `HRRR official page `_ + + .. grid-item-card:: NDFD + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Official NWS forecast grids and warning products used for wildfire-weather context and public hazard overlays. + + .. container:: catalog-chip-row + + :bdg-secondary:`Forecast and Warnings` :bdg-info:`Gridded forecast layers and bulletins` + + .. container:: catalog-meta-row + + **Coverage:** United States public forecast grids + + .. container:: catalog-meta-row + + **Update Cadence:** Issue-based for hazards and routine forecast refresh for grids + + .. container:: catalog-meta-row + + **Inspection:** ``find /home/runyang/ryang/NDFD -maxdepth 2 -type d | head`` + + .. container:: catalog-link-row + + **Details:** :doc:`NDFD ` + + .. container:: catalog-link-row + + **Primary Source:** `NDFD / digital.weather.gov `_ + + .. grid-item-card:: GOES GeoColor + :class-card: catalog-entry-card + + .. 
container:: catalog-entry-summary + + GOES GeoColor imagery used for rapid visual fire-scene context and plume inspection. + + .. container:: catalog-chip-row + + :bdg-secondary:`Satellite Imagery Context` :bdg-info:`Geostationary imagery time series` + + .. container:: catalog-meta-row + + **Coverage:** GOES-East and GOES-West views over the Americas + + .. container:: catalog-meta-row + + **Update Cadence:** Continuous ingest as new imagery becomes available + + .. container:: catalog-meta-row + + **Inspection:** ``find /home/runyang/ryang/GOES_GeoColor_CIRA -maxdepth 3 -type f | head`` + + .. container:: catalog-link-row + + **Details:** :doc:`GOES GeoColor ` + + .. container:: catalog-link-row + + **Primary Source:** `CIRA Slider `_ + + .. grid-item-card:: NASA GIBS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + NASA EOSDIS browse imagery used for daily wildfire scene context and qualitative event inspection. + + .. container:: catalog-chip-row + + :bdg-secondary:`Satellite Imagery Context` :bdg-info:`Tiled imagery and browse layers` + + .. container:: catalog-meta-row + + **Coverage:** Global + + .. container:: catalog-meta-row + + **Update Cadence:** Daily + + .. container:: catalog-meta-row + + **Inspection:** ``find /home/runyang/ryang/NASA_GIBS_2024 -maxdepth 3 -type f | head`` + + .. container:: catalog-link-row + + **Details:** :doc:`NASA GIBS ` + + .. container:: catalog-link-row + + **Primary Source:** `NASA GIBS overview `_ + + .. grid-item-card:: Synoptic Weather + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synoptic weather-station feeds used for local observation context and wildfire weather cross-checks. + + .. container:: catalog-chip-row + + :bdg-secondary:`Weather Stations` :bdg-info:`Station points with tabular observations and metadata` + + .. container:: catalog-meta-row + + **Coverage:** Multi-network station coverage where access is available + + .. 
container:: catalog-meta-row + + **Update Cadence:** Near-real-time for current feeds; historical access depends on plan tier + + .. container:: catalog-meta-row + + **Inspection:** ``find /home/runyang/ryang/Synoptic_Weather_Current -maxdepth 2 -type f | head`` + + .. container:: catalog-link-row + + **Details:** :doc:`Synoptic Weather ` + + .. container:: catalog-link-row + + **Primary Source:** `Synoptic Weather API `_ + + + .. tab-item:: Wildfire + + .. container:: catalog-section-note + + Wildfire datasets span authoritative incident records, active-fire detections, fuels, burn severity, and forecast-ready benchmark adapters. + + .. rubric:: Implemented Datasets + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: FIRMS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + NASA's near-real-time active fire detections used for operational wildfire monitoring and event labeling. + + .. container:: catalog-chip-row + + :bdg-secondary:`Active Fire Detections` :bdg-info:`Event-based point detections` + + .. container:: catalog-meta-row + + **Coverage:** Global + + .. container:: catalog-meta-row + + **Update Cadence:** Fire maps refresh about every 5 minutes and downloadable files refresh about hourly + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.firms.inspection --path /path/to/firms_data --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`FIRMS ` + + .. container:: catalog-link-row + + **Primary Source:** `Schroeder et al. (2014). The New VIIRS 375 m active fire detection data product. `_ + + .. grid-item-card:: FPA-FOD Tabular + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Incident-level FPA-FOD features packaged for wildfire cause and size classification. + + .. 
container:: catalog-chip-row + + :bdg-secondary:`Incident Tabular` :bdg-info:`Tabular feature vectors` + + .. container:: catalog-meta-row + + **Coverage:** User-provided FPA-FOD coverage + + .. container:: catalog-meta-row + + **Update Cadence:** User-managed local inputs or deterministic micro mode + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.fpa_fod_tabular.inspection --task cause --micro`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`FPA-FOD Tabular ` + + .. container:: catalog-link-row + + **Primary Source:** `PyHazards FPA-FOD tabular adaptation for the wildfire incident classification path. `_ + + .. grid-item-card:: FPA-FOD Weekly + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Weekly FPA-FOD aggregates packaged for next-week wildfire count forecasting by size group. + + .. container:: catalog-chip-row + + :bdg-secondary:`Weekly Forecasting` :bdg-info:`Temporal tabular sequences` + + .. container:: catalog-meta-row + + **Coverage:** User-provided FPA-FOD coverage + + .. container:: catalog-meta-row + + **Update Cadence:** User-managed local inputs or deterministic micro mode + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.fpa_fod_weekly.inspection --micro --lookback-weeks 12`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`FPA-FOD Weekly ` + + .. container:: catalog-link-row + + **Primary Source:** `PyHazards FPA-FOD weekly adaptation for the wildfire forecasting path. `_ + + .. grid-item-card:: LANDFIRE + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Nationwide fuels, vegetation, and canopy layers used as static wildfire covariates. + + .. 
container:: catalog-chip-row + + :bdg-secondary:`Fuels and Vegetation` :bdg-info:`Gridded raster layers` + + .. container:: catalog-meta-row + + **Coverage:** United States + + .. container:: catalog-meta-row + + **Update Cadence:** Annual versioned update suites + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.landfire.inspection --path /path/to/landfire_data --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`LANDFIRE ` + + .. container:: catalog-link-row + + **Primary Source:** `Rollins (2009). LANDFIRE: A nationally consistent vegetation, wildland fire, and fuel assessment. `_ + + .. grid-item-card:: MTBS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + U.S. burn severity and fire perimeter products used for post-fire analysis and wildfire evaluation. + + .. container:: catalog-chip-row + + :bdg-secondary:`Burn Severity` :bdg-info:`Per-fire rasters with associated vector perimeters` + + .. container:: catalog-meta-row + + **Coverage:** United States + + .. container:: catalog-meta-row + + **Update Cadence:** Continuous mapping with quarterly releases + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.mtbs.inspection --path /path/to/mtbs_data --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`MTBS ` + + .. container:: catalog-link-row + + **Primary Source:** `Eidenshink et al. (2007). A project for monitoring trends in burn severity. `_ + + .. grid-item-card:: WFIGS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Interagency wildfire incident records used as authoritative wildfire ground truth across the United States. + + .. 
container:: catalog-chip-row + + :bdg-secondary:`Incident Records` :bdg-info:`Incident points and perimeters` + + .. container:: catalog-meta-row + + **Coverage:** United States + + .. container:: catalog-meta-row + + **Update Cadence:** Refreshed from IRWIN roughly every 5 minutes, with perimeter changes often appearing within 15 minutes + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.wfigs.inspection --path /path/to/wfigs_data --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`WFIGS ` + + .. container:: catalog-link-row + + **Primary Source:** `National Interagency Fire Center. Wildland Fire Incident Geospatial Services (WFIGS). `_ + + .. grid-item-card:: FRAP Fire Perimeters + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + California's authoritative historical fire perimeter archive maintained by CAL FIRE FRAP. + + .. container:: catalog-chip-row + + :bdg-secondary:`Historical Perimeters` :bdg-info:`Vector fire perimeter polygons` + + .. container:: catalog-meta-row + + **Coverage:** California + + .. container:: catalog-meta-row + + **Update Cadence:** Annual spring releases with new fire-season perimeters + + .. container:: catalog-meta-row + + **Inspection:** ``ogrinfo -so "/home/runyang/ryang/FRAP_Fire_Perimeters/shapefile/California_Fire_Perimeters_(all).shp" "California_Fire_Perimeters_(all)"`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`FRAP Fire Perimeters ` + + .. container:: catalog-link-row + + **Primary Source:** `CAL FIRE FRAP Fire Perimeters `_ + + .. grid-item-card:: GeoMAC Historical + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Historical GeoMAC wildfire perimeters preserved as a legacy U.S. 
perimeter archive for long-horizon evaluation. + + .. container:: catalog-chip-row + + :bdg-secondary:`Historical Perimeters` :bdg-info:`Archived wildfire perimeter polygons` + + .. container:: catalog-meta-row + + **Coverage:** United States + + .. container:: catalog-meta-row + + **Update Cadence:** Legacy archive; local copy is static + + .. container:: catalog-meta-row + + **Inspection:** ``unzip -l "/home/runyang/ryang/GeoMAC_Historical/Historic_Geomac_Perimeters_All_Years_2000_2018/Historic_Geomac_Perimeters_All_Years_2000_2018.zip" | head`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`GeoMAC Historical ` + + .. container:: catalog-link-row + + **Primary Source:** `USGS GeoMAC historical archive description `_ + + .. grid-item-card:: HMS Smoke + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + NOAA analyst-drawn smoke plume polygons used for smoke tracking, verification, and wildfire smoke exposure analysis. + + .. container:: catalog-chip-row + + :bdg-secondary:`Smoke Plumes` :bdg-info:`Vector smoke polygons` + + .. container:: catalog-meta-row + + **Coverage:** North America, Hawaii, and the Caribbean + + .. container:: catalog-meta-row + + **Update Cadence:** Sub-daily near-real-time analyst updates + + .. container:: catalog-meta-row + + **Inspection:** ``ogrinfo -so "/home/runyang/ryang/HMS_Smoke/2024/shapefile/hms_smoke2024.shp" hms_smoke2024`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`HMS Smoke ` + + .. container:: catalog-link-row + + **Primary Source:** `NOAA HMS Fire and Smoke Analysis `_ + + .. grid-item-card:: GOES-R FDCF + :class-card: catalog-entry-card + + .. 
container:: catalog-entry-summary + + GOES-R ABI Fire/Hot Spot Characterization files used for high-frequency active-fire monitoring across the Americas. + + .. container:: catalog-chip-row + + :bdg-secondary:`Geostationary Active Fire` :bdg-info:`Raster NetCDF time series` + + .. container:: catalog-meta-row + + **Coverage:** GOES-East and GOES-West full-disk views + + .. container:: catalog-meta-row + + **Update Cadence:** About every 10 minutes + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.goesr.inspection --path /home/runyang/ryang/GOES_FDCF_G16/2024 --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`GOES-R FDCF ` + + .. container:: catalog-link-row + + **Primary Source:** `GOES-R Fire/Hot Spot Characterization `_ + + .. grid-item-card:: WRC Housing Density + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Housing-density raster from Wildfire Risk to Communities used for WUI and exposure-aware wildfire analysis. + + .. container:: catalog-chip-row + + :bdg-secondary:`Exposure Context` :bdg-info:`Raster exposure layers` + + .. container:: catalog-meta-row + + **Coverage:** United States + + .. container:: catalog-meta-row + + **Update Cadence:** Release-based + + .. container:: catalog-meta-row + + **Inspection:** ``find /home/runyang/ryang/WRC_Housing_Density -maxdepth 3 -type f | head`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`WRC Housing Density ` + + .. container:: catalog-link-row + + **Primary Source:** `Wildfire Risk to Communities datasets `_ + + .. grid-item-card:: LandScan Population + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Population raster used for population-at-risk and human exposure context in wildfire studies. + + .. 
container:: catalog-chip-row + + :bdg-secondary:`Population Exposure` :bdg-info:`Gridded population rasters` + + .. container:: catalog-meta-row + + **Coverage:** Global + + .. container:: catalog-meta-row + + **Update Cadence:** Release-based / annual + + .. container:: catalog-meta-row + + **Inspection:** ``find /home/runyang/ryang/LandScan_Global_2024 -maxdepth 3 -type f | head`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`LandScan Population ` + + .. container:: catalog-link-row + + **Primary Source:** `LandScan Global 2024 dataset entry `_ + + + .. tab-item:: Flood + + .. container:: catalog-section-note + + Flood datasets combine event records with streamflow and inundation benchmark adapters used by the public flood models. + + .. rubric:: Implemented Datasets + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Caravan + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed streamflow benchmark adapter aligned to the Caravan large-sample hydrology ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Streamflow Benchmark` :bdg-info:`Graph-temporal basin or node sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned streamflow forecasting samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('caravan_streamflow', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Flood Benchmark `, :doc:`Caravan ` + + .. container:: catalog-link-row + + **Details:** :doc:`Caravan ` + + .. container:: catalog-link-row + + **Primary Source:** `Caravan - A global community dataset for large-sample hydrology `_ + + .. grid-item-card:: FloodCastBench + :class-card: catalog-entry-card + + .. 
container:: catalog-entry-summary + + Synthetic-backed inundation benchmark adapter aligned to the FloodCastBench evaluation ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Inundation Benchmark` :bdg-info:`Raster inundation sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned flood inundation samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('floodcastbench_inundation', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Flood Benchmark `, :doc:`FloodCastBench ` + + .. container:: catalog-link-row + + **Details:** :doc:`FloodCastBench ` + + .. container:: catalog-link-row + + **Primary Source:** `FloodCastBench `_ + + .. grid-item-card:: HydroBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed streamflow diagnostics adapter aligned to the HydroBench ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Streamflow Benchmark` :bdg-info:`Graph-temporal basin or node sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned streamflow forecasting samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('hydrobench_streamflow', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Flood Benchmark `, :doc:`HydroBench ` + + .. container:: catalog-link-row + + **Details:** :doc:`HydroBench ` + + .. container:: catalog-link-row + + **Primary Source:** `HydroBench `_ + + .. grid-item-card:: NOAA Flood Events + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Historical NOAA storm-event flood records used as event labels and impact targets for flood studies. + + .. 
container:: catalog-chip-row + + :bdg-secondary:`Event Records` :bdg-info:`Tabular event records with administrative regions and optional point coordinates` + + .. container:: catalog-meta-row + + **Coverage:** United States + + .. container:: catalog-meta-row + + **Update Cadence:** Updated monthly, typically 75-90 days after the end of a data month + + .. container:: catalog-meta-row + + **Inspection:** ``python -m pyhazards.datasets.noaa_flood.inspection --path /path/to/noaa_flood_data --max-items 10`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Flood Benchmark ` + + .. container:: catalog-link-row + + **Details:** :doc:`NOAA Flood Events ` + + .. container:: catalog-link-row + + **Primary Source:** `NOAA National Centers for Environmental Information. Storm Events Database Documentation. `_ + + .. grid-item-card:: WaterBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed streamflow benchmark adapter aligned to the WaterBench ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Streamflow Benchmark` :bdg-info:`Graph-temporal basin or node sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned streamflow forecasting samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('waterbench_streamflow', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Flood Benchmark `, :doc:`WaterBench ` + + .. container:: catalog-link-row + + **Details:** :doc:`WaterBench ` + + .. container:: catalog-link-row + + **Primary Source:** `WaterBench: A Large-scale Benchmark Dataset for Data-driven Streamflow Forecasting `_ + + + .. tab-item:: Earthquake + + .. 
container:: catalog-section-note + + Earthquake datasets cover waveform-picking and forecasting adapters that align the public models with the shared earthquake benchmark. + + .. rubric:: Implemented Datasets + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: AEFA Forecast + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed dense-grid forecasting adapter aligned to the AEFA earthquake forecasting workflow. + + .. container:: catalog-chip-row + + :bdg-secondary:`Forecast Benchmark` :bdg-info:`Dense-grid wavefield tensors` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned earthquake forecasting samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('aefa_forecast', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Earthquake Benchmark `, :doc:`AEFA ` + + .. container:: catalog-link-row + + **Details:** :doc:`AEFA Forecast ` + + .. container:: catalog-link-row + + **Primary Source:** `AEFA `_ + + .. grid-item-card:: pick-benchmark + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed waveform picking adapter aligned to the pick-benchmark evaluation ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Waveform Benchmark` :bdg-info:`Multichannel waveform windows` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned earthquake phase-picking samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('pick_benchmark_waveforms', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Earthquake Benchmark `, :doc:`pick-benchmark ` + + .. 
container:: catalog-link-row + + **Details:** :doc:`pick-benchmark ` + + .. container:: catalog-link-row + + **Primary Source:** `pick-benchmark `_ + + .. grid-item-card:: SeisBench + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed waveform picking adapter aligned to the SeisBench ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Waveform Benchmark` :bdg-info:`Multichannel waveform windows` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned earthquake phase-picking samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('seisbench_waveforms', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Earthquake Benchmark `, :doc:`SeisBench ` + + .. container:: catalog-link-row + + **Details:** :doc:`SeisBench ` + + .. container:: catalog-link-row + + **Primary Source:** `SeisBench - A Toolbox for Machine Learning in Seismology `_ + + + .. tab-item:: Tropical Cyclone + + .. container:: catalog-section-note + + Storm datasets cover best-track archives and benchmark adapters used by the shared tropical cyclone track-intensity workflow. + + .. rubric:: Implemented Datasets + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: IBTrACS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed storm-track adapter aligned to the IBTrACS tropical cyclone archive. + + .. container:: catalog-chip-row + + :bdg-secondary:`Track Archive` :bdg-info:`Storm-track history sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned tropical cyclone track and intensity samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. 
container:: catalog-meta-row + + **Registry:** ``load_dataset('ibtracs_tracks', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Tropical Cyclone Benchmark `, :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Details:** :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Primary Source:** `IBTrACS `_ + + .. grid-item-card:: TCBench Alpha + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed storm-track benchmark adapter aligned to the TCBench Alpha ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Track Benchmark` :bdg-info:`Storm-track history sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned tropical cyclone track and intensity samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('tcbench_alpha', ...)`` + + .. container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Tropical Cyclone Benchmark `, :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Details:** :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Primary Source:** `TCBench Alpha `_ + + .. grid-item-card:: TropiCycloneNet-Dataset + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + Synthetic-backed storm-track benchmark adapter aligned to the TropiCycloneNet-Dataset ecosystem. + + .. container:: catalog-chip-row + + :bdg-secondary:`Track Benchmark` :bdg-info:`Storm-track history sequences` + + .. container:: catalog-meta-row + + **Coverage:** Benchmark-aligned tropical cyclone track and intensity samples + + .. container:: catalog-meta-row + + **Update Cadence:** Generated locally for smoke and benchmark-alignment runs + + .. container:: catalog-meta-row + + **Registry:** ``load_dataset('tropicyclonenet_dataset', ...)`` + + .. 
container:: catalog-meta-row + + **Related Benchmarks:** :doc:`Tropical Cyclone Benchmark `, :doc:`TropiCycloneNet-Dataset ` + + .. container:: catalog-link-row + + **Details:** :doc:`TropiCycloneNet-Dataset ` + + .. container:: catalog-link-row + + **Primary Source:** `TropiCycloneNet-Dataset `_ + + + +Recommended Entry Points +------------------------ + +If you are new to PyHazards, start with one high-signal dataset per +hazard group before branching into the full catalog. + +.. grid:: 1 1 2 4 + :gutter: 2 + :class-container: catalog-recommend-grid + + .. grid-item-card:: Shared Forcing + :class-card: catalog-detail-card + + **Start with:** :doc:`ERA5 ` + + ECMWF's global reanalysis used as a high-resolution meteorological baseline for hazard experiments. + + **Primary Surface:** Inspection: ``python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10`` + + .. grid-item-card:: Wildfire + :class-card: catalog-detail-card + + **Start with:** :doc:`FPA-FOD Weekly ` + + Weekly FPA-FOD aggregates packaged for next-week wildfire count forecasting by size group. + + **Primary Surface:** Inspection: ``python -m pyhazards.datasets.fpa_fod_weekly.inspection --micro --lookback-weeks 12`` + + .. grid-item-card:: Flood + :class-card: catalog-detail-card + + **Start with:** :doc:`Caravan ` + + Synthetic-backed streamflow benchmark adapter aligned to the Caravan large-sample hydrology ecosystem. + + **Primary Surface:** Registry: ``load_dataset('caravan_streamflow', ...)`` + + .. grid-item-card:: Earthquake + :class-card: catalog-detail-card + + **Start with:** :doc:`SeisBench ` + + Synthetic-backed waveform picking adapter aligned to the SeisBench ecosystem. + + **Primary Surface:** Registry: ``load_dataset('seisbench_waveforms', ...)`` + + .. grid-item-card:: Tropical Cyclone + :class-card: catalog-detail-card + + **Start with:** :doc:`IBTrACS ` + + Synthetic-backed storm-track adapter aligned to the IBTrACS tropical cyclone archive. 
+ + **Primary Surface:** Registry: ``load_dataset('ibtracs_tracks', ...)`` + + +Programmatic Use +---------------- + +.. code-block:: bash + + python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10 + +.. code-block:: python + + from pyhazards.datasets import load_dataset + + data = load_dataset( + "fpa_fod_weekly", + micro=True, + lookback_weeks=12, + features="counts+time", + ).load() + print(sorted(data.splits.keys())) + +Use :doc:`api/pyhazards.datasets` for the developer dataset workflow +and package-level API lookup. Pair this page with :doc:`pyhazards_models` +and :doc:`pyhazards_benchmarks` when you need to trace datasets into +model and evaluation coverage. + +.. toctree:: + :maxdepth: 1 + :hidden: + + datasets/era5 + datasets/goesr + datasets/merra2 + datasets/hpwren_weather + datasets/spot_forecast + datasets/nohrsc_snodas + datasets/hrrr + datasets/ndfd + datasets/goes_geocolor + datasets/nasa_gibs + datasets/synoptic_weather + datasets/firms + datasets/fpa_fod_tabular + datasets/fpa_fod_weekly + datasets/landfire + datasets/mtbs + datasets/wfigs + datasets/frap_fire_perimeters + datasets/geomac_historical + datasets/hms_smoke + datasets/goesr_fdcf + datasets/wrc_housing_density + datasets/landscan_population + datasets/caravan_streamflow + datasets/floodcastbench_inundation + datasets/hydrobench_streamflow + datasets/noaa_flood + datasets/waterbench_streamflow + datasets/aefa_forecast + datasets/pick_benchmark_waveforms + datasets/seisbench_waveforms + datasets/ibtracs_tracks + datasets/tcbench_alpha + datasets/tropicyclonenet_dataset diff --git a/docs/source/pyhazards_engine.rst b/docs/source/pyhazards_engine.rst new file mode 100644 index 00000000..8e5e310a --- /dev/null +++ b/docs/source/pyhazards_engine.rst @@ -0,0 +1,47 @@ +Engine +=================== + +Overview +-------- + +Use the engine when you want a shared interface for training, evaluation, and +prediction without rewriting the loop for every hazard 
task. + +Core modules +------------ + +- ``pyhazards.engine.trainer``: the ``Trainer`` class with ``fit``, + ``evaluate``, and ``predict``. +- ``pyhazards.engine.distributed``: distributed-strategy helpers. +- ``pyhazards.engine.inference``: inference utilities for large grids or + sliding-window style workflows. + +Typical Usage +------------- + +.. code-block:: python + + import torch + from pyhazards.engine import Trainer + from pyhazards.metrics import ClassificationMetrics + from pyhazards.models import build_model + + model = build_model(name="mlp", task="classification", in_dim=16, out_dim=2) + trainer = Trainer(model=model, metrics=[ClassificationMetrics()], mixed_precision=True) + + optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) + loss_fn = torch.nn.CrossEntropyLoss() + + trainer.fit(data_bundle, optimizer=optimizer, loss_fn=loss_fn, max_epochs=10) + results = trainer.evaluate(data_bundle, split="test") + preds = trainer.predict(data_bundle, split="test") + +Device and Distributed Notes +---------------------------- + +- ``Trainer(strategy="auto")`` uses DDP when multiple GPUs are available; otherwise runs single-device. +- ``mixed_precision=True`` enables AMP when on CUDA. +- Device selection is handled via ``pyhazards.utils.hardware.auto_device`` by default. + +Next step: pair this page with :doc:`pyhazards_metrics` and +:doc:`pyhazards_utils` when you want to customize evaluation or device behavior. diff --git a/docs/source/pyhazards_metrics.rst b/docs/source/pyhazards_metrics.rst new file mode 100644 index 00000000..128f5896 --- /dev/null +++ b/docs/source/pyhazards_metrics.rst @@ -0,0 +1,29 @@ +Metrics +=================== + +Overview +-------- + +PyHazards includes small, task-oriented metric classes that accumulate +predictions and targets across a full split. + +Core Classes +------------ + +- ``MetricBase``: shared interface with ``update``, ``compute``, and ``reset``. 
+- ``ClassificationMetrics``: basic classification metrics such as accuracy. +- ``RegressionMetrics``: MAE and RMSE style regression summaries. +- ``SegmentationMetrics``: segmentation-oriented aggregation. + +Usage +----- + +.. code-block:: python + + from pyhazards.metrics import ClassificationMetrics + + metrics = [ClassificationMetrics()] + # pass to Trainer or update metrics directly + +Use this page together with :doc:`pyhazards_engine` if you want a consistent +train/evaluate workflow. diff --git a/docs/source/pyhazards_models.rst b/docs/source/pyhazards_models.rst new file mode 100644 index 00000000..ad8d7eb1 --- /dev/null +++ b/docs/source/pyhazards_models.rst @@ -0,0 +1,1769 @@ +.. This file is generated by scripts/render_model_docs.py. Do not edit by hand. + +Models +=================== + +Browse PyHazards model implementations across hazard families, compare +scope and maturity, and navigate to model-specific detail pages. + +At a Glance +----------- + +.. grid:: 1 2 4 4 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hazard Families + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 4 + + .. container:: catalog-stat-note + + Catalog tabs grouped by the normalized public hazard taxonomy. + + .. grid-item-card:: Implemented Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 66 + + .. container:: catalog-stat-note + + Public core baselines plus additional implemented variants. + + .. grid-item-card:: Experimental Adapters + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 3 + + .. container:: catalog-stat-note + + Prototype weather-model integrations kept separate from the stable catalog. + + .. grid-item-card:: Benchmark-linked Models + :class-card: catalog-stat-card + + .. container:: catalog-stat-value + + 61 + + .. container:: catalog-stat-note + + Models with explicit benchmark-family or ecosystem links on this page. 
+ + +Catalog by Hazard +----------------- + +Use the hazard tabs below to browse the public catalog. Each card keeps +the index-page summary short, then links into model-specific detail +pages and compatible benchmark coverage. + +.. tab-set:: + :class: catalog-tabs + + .. tab-item:: Wildfire + + .. container:: catalog-section-note + + Wildfire models now cover forecasting, spread prediction, operational detection, foundation-model transfer, and prompted multimodal reasoning under the shared wildfire benchmark family. + + .. rubric:: Implemented Models + + .. container:: catalog-section-note + + This table includes both core baselines and public variants or additional implementations for the hazard family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: ASUFM + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A temporal convolution baseline for weekly wildfire activity forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Forecasting` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`ASUFM ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Paper:** `Wildfire Spread Prediction in North America Using Satellite Imagery and Vision Transformer `_ | **Repo:** `Repository `__ + + .. grid-item-card:: DNN-LSTM-AutoEncoder + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A two-stage wildfire framework with a DNN stage for incident-level cause and size prediction plus an LSTM + autoencoder stage for weekly forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Classification` :bdg-secondary:`Forecasting` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`DNN-LSTM-AutoEncoder ` + + .. 
container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Paper:** `Developing risk assessment framework for wildfire in the United States `_ + + .. grid-item-card:: FireCastNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact encoder-decoder baseline for wildfire spread mask prediction. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`FireCastNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `FireCastNet: Earth-as-a-Graph for Seasonal Fire Prediction `_ | **Repo:** `Repository `__ + + .. grid-item-card:: ForeFire Adapter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A lightweight simulator-style wildfire spread adapter inspired by front-propagation systems. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`ForeFire Adapter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `ForeFire: A Modular, Scriptable C++ Simulation Engine and Library for Wildland-Fire Spread `_ | **Repo:** `Repository `__ + + .. grid-item-card:: WildfireSpreadTS + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A temporal convolution wildfire spread baseline over short raster history windows. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. 
container:: catalog-meta-row + + **Details:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `WildfireSpreadTS: A Dataset of Multi-Modal Time Series for Wildfire Spread Prediction `_ | **Repo:** `Repository `__ + + .. grid-item-card:: WRF-SFIRE Adapter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A lightweight raster wildfire spread adapter inspired by WRF-SFIRE style transport. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`WRF-SFIRE Adapter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `Coupled atmosphere-wildland fire modeling with WRF 3.3 and SFIRE 2011 `_ | **Repo:** `Repository `__ + + .. grid-item-card:: CNN-ASPP + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An explainable CNN segmentation model with an ASPP mechanism for next-day wildfire spread prediction. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`CNN-ASPP ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WildfireSpreadTS ` + + .. container:: catalog-link-row + + **Paper:** `Application of Explainable Artificial Intelligence in Predicting Wildfire Spread `_ + + .. grid-item-card:: FirePred + :class-card: catalog-entry-card + + .. 
container:: catalog-entry-summary + + A hybrid multi-temporal CNN wildfire spread model over short satellite-history windows. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`FirePred ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Repo:** `Repository `__ + + .. grid-item-card:: FireMM-IR + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A benchmark-facing multi-modal large-model port for infrared-enhanced wildfire scene understanding. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`LLM / MLLM` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`FireMM-IR ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Paper:** `FireMM-IR `_ + + .. grid-item-card:: MODIS Active Fire C6.1 + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An operational-detection baseline inspired by NASA's MODIS Collection 6.1 active-fire algorithm. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Operational Detection` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`MODIS Active Fire C6.1 ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Paper:** `Giglio et al. (2016) `_ + + .. grid-item-card:: Prithvi-EO-2.0-TL + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A transfer-learning earth-observation foundation-model port for dense wildfire prediction. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Foundation Model` :bdg-success:`Implemented` + + .. 
container:: catalog-meta-row + + **Details:** :doc:`Prithvi-EO-2.0-TL ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Model Card:** `IBM-NASA Prithvi-EO-2.0-300M-TL `_ + + .. grid-item-card:: Prithvi BurnScars + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A benchmark-facing burn-scar segmentation downstream model derived from the Prithvi EO family. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Foundation Model` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Prithvi BurnScars ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Model Card:** `Prithvi-EO-2.0-300M-BurnScars `_ + + .. grid-item-card:: Prithvi-WxC + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A weather-climate foundation-model port adapted for dense wildfire-risk prediction. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Foundation Model` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Prithvi-WxC ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Paper:** `Prithvi WxC `_ + + .. grid-item-card:: Gemini 2.5 Pro Wildfire Prompted + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A prompt-conditioned wildfire VLM baseline inspired by Gemini 2.5 Pro. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`LLM / MLLM` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Gemini 2.5 Pro Wildfire Prompted ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. 
container:: catalog-link-row + + **Source:** `Gemini models documentation `_ + + .. grid-item-card:: InternVL3 Wildfire Prompted + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A prompt-conditioned wildfire VLM baseline inspired by InternVL3. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`LLM / MLLM` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`InternVL3 Wildfire Prompted ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Repo:** `Repository `__ + + .. grid-item-card:: Llama 4 Wildfire Prompted + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A prompt-conditioned multimodal wildfire baseline inspired by Meta Llama 4. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`LLM / MLLM` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Llama 4 Wildfire Prompted ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** `Meta Llama `_ + + .. grid-item-card:: Qwen2.5-VL Wildfire Prompted + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A prompt-conditioned wildfire VLM baseline inspired by Qwen2.5-VL. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`LLM / MLLM` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Qwen2.5-VL Wildfire Prompted ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Paper:** `Qwen2.5-VL Technical Report `_ + + .. grid-item-card:: TS-SatFire + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A spatio-temporal satellite wildfire benchmark model over multi-temporal raster sequences. + + .. 
container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spread` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`TS-SatFire ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Paper:** `TS-SatFire `_ + + .. grid-item-card:: VIIRS 375 m Active Fire + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An operational-detection baseline inspired by NASA's VIIRS 375 m active-fire algorithm. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Operational Detection` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`VIIRS 375 m Active Fire ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Paper:** `Schroeder et al. (2014) `_ + + .. grid-item-card:: WildfireGPT + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A benchmark-facing multi-agent wildfire reasoning model inspired by WildfireGPT. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`LLM / MLLM` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`WildfireGPT ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Repo:** `Repository `__ + + +.. grid-item-card:: Logistic Regression + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A classical binary wildfire occurrence baseline over tabular features. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Classification` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Logistic Regression ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. 
container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: Random Forest + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A random-forest wildfire occurrence baseline over tabular features. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Classification` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Random Forest ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: XGBoost + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A boosted-tree wildfire occurrence baseline using a binary logistic objective. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Classification` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`XGBoost ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: LightGBM + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A boosted-tree wildfire occurrence baseline using LightGBM binary classification. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Classification` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`LightGBM ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: U-Net + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact dense-prediction wildfire baseline built on a U-Net style encoder-decoder. + + .. 
container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Segmentation` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`U-Net ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: ResNet-18 U-Net + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A residual encoder-decoder wildfire segmentation baseline. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Segmentation` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`ResNet-18 U-Net ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: Attention U-Net + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An attention-gated U-Net wildfire segmentation baseline. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Segmentation` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Attention U-Net ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: DeepLabv3+ + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A DeepLab-style wildfire segmentation baseline with ASPP-like context aggregation. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Segmentation` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`DeepLabv3+ ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. 
container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: ConvLSTM + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A recurrent spatio-temporal wildfire prediction baseline over raster sequences. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spatiotemporal` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`ConvLSTM ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: MAU + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact memory-augmented recurrent wildfire prediction baseline. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spatiotemporal` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`MAU ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: PredRNN-v2 + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A predictive recurrent wildfire raster baseline. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spatiotemporal` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`PredRNN-v2 ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: Rainformer + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A transformer-style spatio-temporal wildfire raster baseline. + + .. 
container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spatiotemporal` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Rainformer ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: Earthformer + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact Earthformer-style wildfire forecasting baseline. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spatiotemporal` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Earthformer ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: SwinLSTM + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A windowed-attention recurrent wildfire raster baseline. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spatiotemporal` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`SwinLSTM ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: EarthFarseer + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact EarthFarseer-style wildfire forecasting baseline. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spatiotemporal` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`EarthFarseer ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. 
container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: ConvGRU / TrajGRU + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A recurrent wildfire baseline mixing ConvGRU and TrajGRU-style dynamics. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spatiotemporal` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`ConvGRU / TrajGRU ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: TCN + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A temporal convolution wildfire baseline over short raster histories. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spatiotemporal` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`TCN ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: UTAE + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A temporal attention encoder wildfire baseline. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Spatiotemporal` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`UTAE ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: SegFormer + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A transformer-based dense wildfire prediction baseline. + + .. 
container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Transformer` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`SegFormer ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: Swin-UNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A Swin-style encoder-decoder wildfire segmentation baseline. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Transformer` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Swin-UNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: ViT Segmenter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A ViT-style dense wildfire segmentation baseline. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Transformer` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`ViT Segmenter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + +.. grid-item-card:: Deep Ensemble + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An ensemble wildfire segmentation baseline that averages multiple member networks. + + .. container:: catalog-chip-row + + :bdg-primary:`Wildfire` :bdg-secondary:`Uncertainty` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Deep Ensemble ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Wildfire Benchmark ` + + .. 
container:: catalog-link-row + + **Source:** Promoted Track-O baseline implementation. + + + .. tab-item:: Earthquake + + .. container:: catalog-section-note + + Earthquake models span phase picking and dense-grid forecasting, with detail pages linked to the shared earthquake benchmark coverage. + + .. rubric:: Implemented Models + + .. container:: catalog-section-note + + This table includes both core baselines and public variants or additional implementations for the hazard family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: EQNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A transformer-style earthquake phase-picking baseline for modern sequence modeling comparisons. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`EQNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`SeisBench ` + + .. container:: catalog-link-row + + **Paper:** `An End-To-End Earthquake Detection Method for Joint Phase Picking and Association Using Deep Learning `_ | **Repo:** `Repository `__ + + .. grid-item-card:: EQTransformer + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A bidirectional sequence encoder for joint earthquake phase picking with attention pooling over waveform windows. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`EQTransformer ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`pick-benchmark ` + + .. 
container:: catalog-link-row + + **Paper:** `Earthquake Transformer-An attentive deep-learning model for simultaneous earthquake detection and phase picking `_ | **Repo:** `Repository `__ + + .. grid-item-card:: GPD + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact CNN baseline for generalized phase detection and historical earthquake picking comparisons. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`GPD ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`pick-benchmark ` + + .. container:: catalog-link-row + + **Paper:** `Generalized Seismic Phase Detection with Deep Learning `_ | **Repo:** `Repository `__ + + .. grid-item-card:: PhaseNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A lightweight phase-picking baseline that predicts P- and S-arrival indices from multichannel waveform windows. + + .. container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Phase Picking` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`PhaseNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`SeisBench ` + + .. container:: catalog-link-row + + **Paper:** `PhaseNet: A Deep-Neural-Network-Based Seismic Arrival Time Picking Method `_ | **Repo:** `Repository `__ + + .. grid-item-card:: WaveCastNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A ConvLEM-based sequence-to-sequence model for dense-grid earthquake wavefield forecasting and early-warning style rollout experiments. + + .. 
container:: catalog-chip-row + + :bdg-primary:`Earthquake` :bdg-secondary:`Wavefield Forecasting` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`WaveCastNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Earthquake Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`AEFA `, :doc:`pyCSEP ` + + .. container:: catalog-link-row + + **Paper:** `Rapid wavefield forecasting for earthquake early warning via deep sequence to sequence learning `_ + + + .. tab-item:: Flood + + .. container:: catalog-section-note + + Flood models cover streamflow and inundation forecasting, ranging from sequence baselines to dense-grid flood-mapping architectures. + + .. rubric:: Implemented Models + + .. container:: catalog-section-note + + This table includes both core baselines and public variants or additional implementations for the hazard family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: EA-LSTM + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An entity-aware hydrology baseline with static-feature gating over streamflow histories. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`EA-LSTM ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`WaterBench ` + + .. container:: catalog-link-row + + **Paper:** `Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets `_ | **Repo:** `Repository `__ + + .. grid-item-card:: FloodCast + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact spatiotemporal flood-inundation baseline for raster forecast experiments. + + .. 
container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Inundation` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`FloodCast ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`FloodCastBench ` + + .. container:: catalog-link-row + + **Paper:** `Large-scale flood modeling and forecasting with FloodCast `_ | **Repo:** `Repository `__ + + .. grid-item-card:: Google Flood Forecasting + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A transformer-style sequence baseline for nodewise streamflow forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Google Flood Forecasting ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`Caravan ` + + .. container:: catalog-link-row + + **Paper:** `Global Flood Forecasting at a Fine Catchment Resolution using Machine Learning `_ | **Repo:** `Repository `__ + + .. grid-item-card:: NeuralHydrology LSTM + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An adapter-style LSTM baseline for nodewise streamflow forecasting on graph-temporal inputs. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`NeuralHydrology LSTM ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`Caravan ` + + .. 
container:: catalog-link-row + + **Paper:** `Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets `_ | **Repo:** `Repository `__ + + .. grid-item-card:: UrbanFloodCast + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A U-Net style urban inundation baseline for dense-grid flood prediction. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Inundation` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`UrbanFloodCast ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`FloodCastBench ` + + .. container:: catalog-link-row + + **Paper:** `UrbanFloodCast: WMO Urban Flooding Forecasting Challenge `_ | **Repo:** `Repository `__ + + .. grid-item-card:: HydroGraphNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A physics-informed graph neural network for flood forecasting with interpretable KAN-style components, residual message passing, and delta-state decoding. + + .. container:: catalog-chip-row + + :bdg-primary:`Flood` :bdg-secondary:`Streamflow` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`HydroGraphNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Flood Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`HydroBench ` + + .. container:: catalog-link-row + + **Paper:** `Interpretable physics-informed graph neural networks for flood forecasting `_ + + + .. tab-item:: Tropical Cyclone + + .. container:: catalog-section-note + + Storm models are organized under one tropical-cyclone family, including basin-specific hurricane baselines and shared all-basin forecasting models. + + .. rubric:: Implemented Models + + .. 
container:: catalog-section-note + + This table includes both core baselines and public variants or additional implementations for the hazard family. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: Hurricast + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact multimodal storm baseline for hurricane track and intensity forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Hurricast ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Paper:** `Hurricane Forecasting: A Novel Multimodal Machine Learning Framework `_ | **Repo:** `Repository `__ + + .. grid-item-card:: SAF-Net + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A spatiotemporal tropical-cyclone baseline with an intensity-focused head and shared trajectory output. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`SAF-Net ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Paper:** `SAF-Net: A spatio-temporal deep learning method for typhoon intensity prediction `_ | **Repo:** `Repository `__ + + .. grid-item-card:: TCIF-fusion + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A knowledge-guided fusion baseline for tropical cyclone track and intensity forecasting. + + .. 
container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`TCIF-fusion ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Paper:** `Tropical cyclone intensity forecasting using model knowledge guided deep learning model `_ | **Repo:** `Repository `__ + + .. grid-item-card:: Tropical Cyclone MLP + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A compact MLP baseline for hurricane track and intensity forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`Tropical Cyclone MLP ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`TCBench Alpha ` + + .. container:: catalog-link-row + + **Paper:** `Deep Learning Experiments for Tropical Cyclone Intensity Forecasts `_ | **Repo:** `Repository `__ + + .. grid-item-card:: TropiCycloneNet + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + A GRU plus attention baseline for all-basin tropical cyclone forecasting. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-success:`Implemented` + + .. container:: catalog-meta-row + + **Details:** :doc:`TropiCycloneNet ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`TropiCycloneNet-Dataset ` + + .. 
container:: catalog-link-row + + **Paper:** `Benchmark dataset and deep learning method for global tropical cyclone forecasting `_ | **Repo:** `Repository `__ + + .. rubric:: Experimental Adapters + + .. container:: catalog-section-note + + These entries remain public as lightweight wrapper or prototype integrations and should not be counted as stable implemented methods. + + .. grid:: 1 1 2 2 + :gutter: 2 + :class-container: catalog-grid + + .. grid-item-card:: FourCastNet TC Adapter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An experimental wrapper-style storm adapter inspired by FourCastNet forecast fields. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-warning:`Experimental Adapter` + + .. container:: catalog-meta-row + + **Details:** :doc:`FourCastNet TC Adapter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Paper:** `FourCastNet: A Global Data-driven High-resolution Weather Model using Adaptive Fourier Neural Operators `_ | **Repo:** `Repository `__ + + .. grid-item-card:: GraphCast TC Adapter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An experimental wrapper-style storm adapter inspired by GraphCast/GenCast forecast fields. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-warning:`Experimental Adapter` + + .. container:: catalog-meta-row + + **Details:** :doc:`GraphCast TC Adapter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`IBTrACS ` + + .. 
container:: catalog-link-row + + **Paper:** `GraphCast: Learning skillful medium-range global weather forecasting `_ | **Repo:** `Repository `__ + + .. grid-item-card:: Pangu TC Adapter + :class-card: catalog-entry-card + + .. container:: catalog-entry-summary + + An experimental wrapper-style storm adapter inspired by Pangu-Weather forecast fields. + + .. container:: catalog-chip-row + + :bdg-primary:`Tropical Cyclone` :bdg-secondary:`Track + Intensity` :bdg-warning:`Experimental Adapter` + + .. container:: catalog-meta-row + + **Details:** :doc:`Pangu TC Adapter ` + + .. container:: catalog-meta-row + + **Benchmark Family:** :doc:`Tropical Cyclone Benchmark ` + + .. container:: catalog-meta-row + + **Benchmark Ecosystems:** :doc:`IBTrACS ` + + .. container:: catalog-link-row + + **Paper:** `Accurate medium-range global weather forecasting with 3D neural networks `_ | **Repo:** `Repository `__ + + + +Recommended Entry Points +------------------------ + +If you are new to PyHazards, these four models provide the clearest +starting point for each hazard family. + +.. grid:: 1 1 2 4 + :gutter: 2 + :class-container: catalog-recommend-grid + + .. grid-item-card:: Wildfire + :class-card: catalog-detail-card + + **Start with:** :doc:`FireCastNet ` + + A compact encoder-decoder baseline for wildfire spread mask prediction. + + **Benchmark:** :doc:`Wildfire Benchmark ` + + .. grid-item-card:: Earthquake + :class-card: catalog-detail-card + + **Start with:** :doc:`PhaseNet ` + + A lightweight phase-picking baseline that predicts P- and S-arrival indices from multichannel waveform windows. + + **Benchmark:** :doc:`Earthquake Benchmark ` + + .. grid-item-card:: Flood + :class-card: catalog-detail-card + + **Start with:** :doc:`FloodCast ` + + A compact spatiotemporal flood-inundation baseline for raster forecast experiments. + + **Benchmark:** :doc:`Flood Benchmark ` + + .. 
grid-item-card:: Tropical Cyclone + :class-card: catalog-detail-card + + **Start with:** :doc:`Hurricast ` + + A compact multimodal storm baseline for hurricane track and intensity forecasting. + + **Benchmark:** :doc:`Tropical Cyclone Benchmark ` + + +Programmatic Use +---------------- + +Use :doc:`api/pyhazards.models` for the developer registry workflow, +builder examples, and package-level API lookup. Use +:doc:`pyhazards_benchmarks` to compare compatible benchmark families +before selecting a model for evaluation. + +.. toctree:: + :maxdepth: 1 + :hidden: + + modules/models_asufm + modules/models_eqnet + modules/models_eqtransformer + modules/models_firecastnet + modules/models_logistic_regression + modules/models_random_forest + modules/models_xgboost + modules/models_lightgbm + modules/models_unet + modules/models_resnet18_unet + modules/models_attention_unet + modules/models_deeplabv3p + modules/models_convlstm + modules/models_mau + modules/models_predrnn_v2 + modules/models_rainformer + modules/models_earthformer + modules/models_swinlstm + modules/models_earthfarseer + modules/models_convgru_trajgru + modules/models_tcn + modules/models_utae + modules/models_segformer + modules/models_swin_unet + modules/models_vit_segmenter + modules/models_deep_ensemble + modules/models_firemm_ir + modules/models_firepred + modules/models_gemini_25_pro_wildfire_prompted + modules/models_internvl3_wildfire_prompted + modules/models_llama4_wildfire_prompted + modules/models_modis_active_fire_c61 + modules/models_prithvi_burnscars + modules/models_prithvi_eo_2_tl + modules/models_prithvi_wxc + modules/models_qwen25_vl_wildfire_prompted + modules/models_ts_satfire + modules/models_viirs_375m_active_fire + modules/models_floodcast + modules/models_forefire + modules/models_fourcastnet_tc + modules/models_google_flood_forecasting + modules/models_gpd + modules/models_graphcast_tc + modules/models_hurricast + modules/models_hydrographnet + 
modules/models_neuralhydrology_ealstm + modules/models_neuralhydrology_lstm + modules/models_pangu_tc + modules/models_phasenet + modules/models_saf_net + modules/models_tcif_fusion + modules/models_tropicalcyclone_mlp + modules/models_tropicyclonenet + modules/models_urbanfloodcast + modules/models_wavecastnet + modules/models_wildfire_aspp + modules/models_wildfiregpt + modules/models_wildfire_fpa + modules/models_wildfirespreadts + modules/models_wrf_sfire diff --git a/docs/source/pyhazards_reports.rst b/docs/source/pyhazards_reports.rst new file mode 100644 index 00000000..c42ede8e --- /dev/null +++ b/docs/source/pyhazards_reports.rst @@ -0,0 +1,36 @@ +Reports +=================== + +Overview +-------- + +Use the reports layer when you want benchmark outputs exported in structured +formats that are easy to archive, compare, and publish. + +What This Page Covers +--------------------- + +- ``pyhazards.reports`` exporters for JSON, CSV, and Markdown summaries +- how benchmark metrics and metadata are written to disk +- where report paths appear in ``BenchmarkRunSummary`` + +Typical Usage +------------- + +.. code-block:: python + + from pyhazards.configs import load_experiment_config + from pyhazards.engine import BenchmarkRunner + + config = load_experiment_config("pyhazards/configs/tc/hurricast_smoke.yaml") + summary = BenchmarkRunner().run(config, output_dir="reports/tc_demo") + print(summary.report_paths) + +Why It Matters +-------------- + +The reports layer keeps hazard comparisons reproducible by exporting the same +metric and config snapshot structure across benchmark runs. + +Next step: pair this page with :doc:`pyhazards_benchmarks` when you want to +inspect the evaluator contracts behind those report files. 
diff --git a/docs/source/pyhazards_utils.rst b/docs/source/pyhazards_utils.rst new file mode 100644 index 00000000..713beb65 --- /dev/null +++ b/docs/source/pyhazards_utils.rst @@ -0,0 +1,22 @@ +Utils +=================== + +Overview +-------- + +Utility helpers keep the rest of the library concise. Use these modules for +device selection, reproducibility, and small shared helpers. + +Submodules +---------- + +- :mod:`pyhazards.utils.hardware`: device helpers and automatic device selection. +- :mod:`pyhazards.utils.common`: reproducibility, logging, and shared utility + functions. + +Typical Uses +------------ + +- choose CPU or GPU behavior explicitly, +- set deterministic seeds for experiments, +- reuse small helpers instead of copying project-specific boilerplate. diff --git a/docs/source/quick_start.rst b/docs/source/quick_start.rst index 7b1927ed..0a9c6f5c 100644 --- a/docs/source/quick_start.rst +++ b/docs/source/quick_start.rst @@ -1,82 +1,111 @@ Quick Start -================= -This guide will help you get started with PyGIP quickly. +=========== -Attack Examples ---------------- +Use this page after :doc:`installation` to run the first end-to-end PyHazards +workflow: verify the package, inspect example data, build a model, and execute +one short training loop. -Model Extraction Attack -~~~~~~~~~~~~~~~~~~~~~~~ +Step 1: Verify the Package +-------------------------- -.. code-block:: python - - from datasets import Cora - from models.attack import ModelExtractionAttack0 +Confirm that Python can import the package cleanly: - # Load the Cora dataset - dataset = Cora() +.. 
code-block:: bash - # Initialize the attack with a sampling ratio of 0.25 - mea = ModelExtractionAttack0(dataset, 0.25) + python -c "import pyhazards; print(pyhazards.__version__)" - # Execute the attack - mea.attack() +Step 2: Inspect Example Data +---------------------------- -To run the attack example: +Use the ERA5 inspection entrypoint to validate the bundled sample data before +training: .. code-block:: bash - python examples/attack/MEAs.py + python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10 -Defense Examples ----------------- +Step 3: Build a Model +--------------------- -RandomWM Defense -~~~~~~~~~~~~~~~~ +Instantiate ``hydrographnet`` through the unified model registry: .. code-block:: python - from datasets import Cora - from models.defense import RandomWM - - # Load the Cora dataset - dataset = Cora() + from pyhazards.models import build_model - # Initialize the attack with a sampling ratio of 0.25 - med = RandomWM(dataset, 0.25) + model = build_model( + name="hydrographnet", + task="regression", + node_in_dim=2, + edge_in_dim=3, + out_dim=1, + ) + print(type(model).__name__) - # Execute the defense - med.defend() - -To run the defense example: - -.. code-block:: bash +Step 4: Run a Short Train/Evaluate Loop +--------------------------------------- - python examples/defense/RandomWM.py +This example pairs the ERA5 subset with ``hydrographnet`` to confirm that the +dataset, model, and training engine work together in one workflow. -GPU Support ------------ +.. 
code-block:: python -If you want to use cuda, please set environment variable: + import torch + from pyhazards.data.load_hydrograph_data import load_hydrograph_data + from pyhazards.datasets import graph_collate + from pyhazards.engine import Trainer + from pyhazards.models import build_model + + data = load_hydrograph_data("pyhazards/data/era5_subset", max_nodes=50) + + model = build_model( + name="hydrographnet", + task="regression", + node_in_dim=2, + edge_in_dim=3, + out_dim=1, + ) + + trainer = Trainer(model=model, mixed_precision=False) + optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) + loss_fn = torch.nn.MSELoss() + + trainer.fit( + data, + optimizer=optimizer, + loss_fn=loss_fn, + max_epochs=1, + batch_size=1, + collate_fn=graph_collate, + ) + + metrics = trainer.evaluate( + data, + split="train", + batch_size=1, + collate_fn=graph_collate, + ) + print(metrics) + +Step 5: Next Steps +------------------ + +- Go to :doc:`pyhazards_datasets` to browse supported datasets. +- Go to :doc:`pyhazards_models` to compare built-in models. +- Go to :doc:`implementation` to add your own dataset or model. + +Device Notes +------------ + +PyHazards uses CUDA automatically when available. To force a device: .. code-block:: bash - export PYGIP_DEVICE=cuda:0 - -Alternatively, you can explicitly specify the device in your code: + export PYHAZARDS_DEVICE=cuda:0 .. 
code-block:: python - from pygip.utils.hardware import set_device + from pyhazards.utils import set_device set_device("cuda:0") - - -Next Steps ----------- - -For more detailed documentation, please refer to: - -- :doc:`pygip_datasets` - Available datasets -- :doc:`pygip_models_attack` - Detailed attack mechanisms -- :doc:`pygip_models_defense` - Detailed defense mechanisms + set_device("cpu") diff --git a/docs/source/references.rst b/docs/source/references.rst index dbec7516..71a1176b 100644 --- a/docs/source/references.rst +++ b/docs/source/references.rst @@ -1,17 +1,76 @@ References ========== -Academic Publications ---------------------- +This page collects the main dataset and model references cited throughout the +PyHazards docs. It is a project reference list, not an exhaustive bibliography. -Model Extraction Attacks -~~~~~~~~~~~~~~~~~~~~~~~~ +Dataset References +------------------ -Wu, B., Yang, X., Pan, S., & Yuan, X. (2022). Model extraction attacks on graph neural networks: Taxonomy and realisation. In *Proceedings of the 2022 ACM on Asia conference on computer and communications security*, 337-350. +- Gelaro, R., McCarty, W., Suárez, M. J., et al. (2017). *The Modern-Era Retrospective Analysis for Research and Applications, Version 2 (MERRA-2)*. `[link] `__. +- Hersbach, H., Bell, B., Berrisford, P., et al. (2020). *The ERA5 global reanalysis*. `[link] `__. +- NOAA National Centers for Environmental Information (NCEI). *Storm Events Database Documentation*. `[link] `__. +- Schroeder, W., Oliva, P., Giglio, L., and Csiszar, I. (2014). *The New VIIRS 375 m active fire detection data product: Algorithm description and initial assessment*. `[link] `__. +- Eidenshink, J., Schwind, B., Brewer, K., Zhu, Z., Quayle, B., and Howard, S. (2007). *A project for monitoring trends in burn severity*. `[link] `__. +- Rollins, M. G. (2009). *LANDFIRE: A nationally consistent vegetation, wildland fire, and fuel assessment*. `[link] `__. 
+- National Interagency Fire Center (NIFC). *Wildland Fire Incident Geospatial Services (WFIGS)*. `[link] `__. +- Schmit, T. J., Griffith, P., Gunshor, M. M., et al. (2017). *A closer look at the ABI on the GOES-R series*. `[link] `__. +Model References +---------------- -Watermarking Defense -~~~~~~~~~~~~~~~~~~~~ +Wildfire +~~~~~~~~ -Zhao, X., Wu, H., & Zhang, X. (2021). Watermarking graph neural networks by random graphs. In *2021 9th International Symposium on Digital Forensics and Security (ISDFS)*, 1-6. IEEE. +- *Developing risk assessment framework for wildfire in the United States*. `[paper] `__. +- *Application of Explainable Artificial Intelligence in Predicting Wildfire Spread: An ASPP-Enabled CNN Approach*. `[paper] `__. +- *Wildfire Danger Prediction and Understanding with Deep Learning*. `[paper] `__, `[repo] `__. +- *WildfireSpreadTS: A Dataset of Multi-Modal Time Series for Wildfire Spread Prediction*. `[paper] `__, `[repo] `__. +- *Wildfire Spread Prediction in North America Using Satellite Imagery and Vision Transformer*. `[paper] `__, `[repo] `__. +- *ForeFire: A Modular, Scriptable C++ Simulation Engine and Library for Wildland-Fire Spread*. `[paper] `__, `[repo] `__. +- *Coupled atmosphere-wildland fire modeling with WRF 3.3 and SFIRE 2011*. `[paper] `__, `[repo] `__. +- *FireCastNet: Earth-as-a-Graph for Seasonal Fire Prediction*. `[paper] `__, `[repo] `__. +Earthquake +~~~~~~~~~~ + +- *Rapid wavefield forecasting for earthquake early warning via deep sequence to sequence learning*. `[paper] `__. +- *PhaseNet: A Deep-Neural-Network-Based Seismic Arrival Time Picking Method*. `[paper] `__, `[repo] `__. +- *Earthquake Transformer-An attentive deep-learning model for simultaneous earthquake detection and phase picking*. `[paper] `__, `[repo] `__. +- *Generalized Seismic Phase Detection with Deep Learning*. `[paper] `__, `[repo] `__. +- *An End-To-End Earthquake Detection Method for Joint Phase Picking and Association Using Deep Learning*. 
`[paper] `__, `[repo] `__. + +Flood +~~~~~ + +- *Interpretable physics-informed graph neural networks for flood forecasting*. `[paper] `__. +- *Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets*. `[paper] `__, `[repo] `__. +- *Large-scale flood modeling and forecasting with FloodCast*. `[paper] `__, `[repo] `__. +- *UrbanFloodCast: WMO Urban Flooding Forecasting Challenge*. `[paper] `__, `[repo] `__. +- *Global Flood Forecasting at a Fine Catchment Resolution using Machine Learning*. `[paper] `__, `[repo] `__. + +Hurricane and Tropical Cyclone +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- *Hurricane Forecasting: A Novel Multimodal Machine Learning Framework*. `[paper] `__, `[repo] `__. +- *Deep Learning Experiments for Tropical Cyclone Intensity Forecasts*. `[paper] `__, `[repo] `__. +- *Benchmark dataset and deep learning method for global tropical cyclone forecasting*. `[paper] `__, `[repo] `__. +- *SAF-Net: A spatio-temporal deep learning method for typhoon intensity prediction*. `[paper] `__, `[repo] `__. +- *Tropical cyclone intensity forecasting using model knowledge guided deep learning model*. `[paper] `__, `[repo] `__. +- *GraphCast: Learning skillful medium-range global weather forecasting*. `[paper] `__, `[repo] `__. +- *Accurate medium-range global weather forecasting with 3D neural networks*. `[paper] `__, `[repo] `__. +- *FourCastNet: A Global Data-driven High-resolution Weather Model using Adaptive Fourier Neural Operators*. `[paper] `__, `[repo] `__. + +Benchmark and Data Resources +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- *SeisBench - A Toolbox for Machine Learning in Seismology*. `[paper] `__, `[repo] `__. +- *pick-benchmark*. `[repo] `__. +- *pyCSEP*. `[repo] `__. +- *AEFA*. `[repo] `__. +- *Caravan - A global community dataset for large-sample hydrology*. `[paper] `__, `[repo] `__. +- *WaterBench: A Large-scale Benchmark Dataset for Data-driven Streamflow Forecasting*. 
`[paper] `__, `[repo] `__. +- *FloodCastBench*. `[repo] `__. +- *HydroBench*. `[repo] `__. +- *TCBench Alpha*. `[repo] `__. +- *IBTrACS*. `[product page] `__. diff --git a/docs/source/team.rst b/docs/source/team.rst index 2e006fcc..0a031d10 100644 --- a/docs/source/team.rst +++ b/docs/source/team.rst @@ -1,46 +1,39 @@ Core Team ========= -Our team is composed of dedicated researchers and developers who contribute to PyGIP's development and maintenance. +PyHazards is maintained by researchers and engineers working on hazard-focused +machine learning, data systems, and model development. + +Lead Developer +-------------- + +- Xueqi Cheng, Florida State University (xc25@fsu.edu) Founder ------- -* `Yushun Dong `__ - Florida State University -Architects ----------- -* `Bolin Shen `__ - Florida State University -* `Kaixiang Zhao `__ - University of Notre Dame +- Yushun Dong, Florida State University Principal Contributors & Maintainers ------------------------------------ -* `Yuxiang Sun `__ - University of Wisconsin-Madison -* `Chenxi Zhao `__ - Northeastern University -* `Lincan Li `__ - Florida State University + +- Yangshuang Xu, Florida State University +- Runyang Xu, Florida State University +- Hugh Long, Florida State University Core Contributors ----------------- -* `Unique Karki `_ - [Affiliation] -* `Yujing Ju `__ - Heriot-Watt University -* `Zhan Cheng `__ - University of Wisconsin-Madison -* `Zaiyi Zheng `__ - University of Virginia -* `Tyler Blalock `_ - [Affiliation] -* `Sibtain Syed `_ - [Affiliation] -* `Md Ibrahim `_ - Uttara University, Bangladesh -* `Aditya Khanal `_ - Tribhuvan University, Nepal -* `Kedar Satish Awale `_ - Florida State University -* `Hong Iris `_ - Washington University in St. 
Louis -* `Aasman Bashyal `_ - [Affiliation] -* `Cameron Bender `_ - Florida State University -* `Yushi Huang `_ - [Affiliation] -* `Anurag Shukla `_ - [Affiliation] - - ----------------- - -The Core Team is responsible for: - -* Strategic planning and technical direction -* Code review and quality assurance -* Documentation and maintenance -* Community engagement and support + +- Lex Schneier, Florida State University +- Sharan Kumar Reddy Kodudula, Florida State University +- Cristian Victoria, Florida State University +- Deyang Hsu, University of Southern California +- Dacheng Shen, University of Southern California + +What the Team Maintains +----------------------- + +- technical direction for the library, +- code review and quality checks, +- documentation and examples, +- ongoing maintenance of public releases. diff --git a/docs/team.html b/docs/team.html new file mode 100644 index 00000000..c61d2d5f --- /dev/null +++ b/docs/team.html @@ -0,0 +1,446 @@ + + + + + + + + + + + Core Team - PyHazards 1.0.5 documentation + + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ + + + + Back to top + +
+ +
+ +
+ +
+
+
+

Core Team

+

PyHazards is maintained by researchers and engineers working on hazard-focused +machine learning, data systems, and model development.

+
+

Lead Developer

+ +
+
+

Founder

+
    +
  • Yushun Dong, Florida State University

  • +
+
+
+

Principal Contributors & Maintainers

+
    +
  • Yangshuang Xu, Florida State University

  • +
  • Runyang Xu, Florida State University

  • +
  • Hugh Long, Florida State University

  • +
+
+
+

Core Contributors

+
    +
  • Lex Schneier, Florida State University

  • +
  • Sharan Kumar Reddy Kodudula, Florida State University

  • +
  • Cristian Victoria, Florida State University

  • +
  • Deyang Hsu, University of Southern California

  • +
  • Dacheng Shen, University of Southern California

  • +
+
+
+

What the Team Maintains

+
    +
  • technical direction for the library,

  • +
  • code review and quality checks,

  • +
  • documentation and examples,

  • +
  • ongoing maintenance of public releases.

  • +
+
+
+ +
+
+ +
+ +
+
+ + + + + + \ No newline at end of file diff --git a/examples/attack/AdvMEA.py b/examples/attack/AdvMEA.py deleted file mode 100644 index 1a875382..00000000 --- a/examples/attack/AdvMEA.py +++ /dev/null @@ -1,18 +0,0 @@ -from pygip.datasets import * -from pygip.models.attack import AdvMEA -from pygip.utils.hardware import set_device - -# TODO verify performance -# TODO attack after defense - -set_device("cpu") # cpu, cuda:0 - - -def advmea(): - dataset = Cora(api_type='dgl') - mea = AdvMEA(dataset, attack_node_fraction=0.1) - mea.attack() - - -if __name__ == '__main__': - advmea() diff --git a/examples/attack/CEGA.py b/examples/attack/CEGA.py deleted file mode 100644 index 67bf0c5b..00000000 --- a/examples/attack/CEGA.py +++ /dev/null @@ -1,16 +0,0 @@ -from pygip.datasets import * -from pygip.models.attack import CEGA - - -# TODO verify performance -# TODO record metrics (original acc, attack acc, fidelity) - - -def cega(): - dataset = Cora(api_type='dgl') - mea = CEGA(dataset, attack_node_fraction=0.1) - mea.attack() - - -if __name__ == '__main__': - cega() diff --git a/examples/attack/DataFreeMEA.py b/examples/attack/DataFreeMEA.py deleted file mode 100644 index 823ce74f..00000000 --- a/examples/attack/DataFreeMEA.py +++ /dev/null @@ -1,24 +0,0 @@ -from pygip.datasets import Cora -from pygip.models.attack import DFEATypeI, DFEATypeII -from pygip.utils.hardware import set_device - -# TODO verify performance -# TODO record metrics (original acc, attack acc, fidelity) - -set_device("cuda:0") - - -def dfea_type1(): - dataset = Cora(api_type='dgl') - mea = DFEATypeI(dataset, attack_node_fraction=0.1) - mea.attack() - - -def dfea_type2(): - dataset = Cora(api_type='dgl') - mea = DFEATypeII(dataset, attack_node_fraction=0.1) - mea.attack() - - -if __name__ == '__main__': - dfea_type2() diff --git a/examples/attack/MEA.py b/examples/attack/MEA.py deleted file mode 100644 index 609cfe9a..00000000 --- a/examples/attack/MEA.py +++ /dev/null @@ -1,51 +0,0 @@ -from 
pygip.datasets import * -from pygip.models.attack import ModelExtractionAttack0, ModelExtractionAttack1, ModelExtractionAttack2, \ - ModelExtractionAttack3, ModelExtractionAttack4, ModelExtractionAttack5 -from pygip.utils.hardware import set_device - -# TODO verify performance -# TODO generate shadow graph -# TODO record metrics (original acc, attack acc, fidelity) - -set_device("cuda:0") # cpu, cuda:0 - - -def mea0(): - dataset = CiteSeer(api_type='dgl') - print(dataset) - mea = ModelExtractionAttack0(dataset, attack_node_fraction=0.1) - mea.attack() - - -def mea1(): - dataset = Cora(api_type='dgl') - mea = ModelExtractionAttack1(dataset, attack_node_fraction=0.1) - mea.attack() - - -def mea2(): - dataset = Cora(api_type='dgl') - mea = ModelExtractionAttack2(dataset, attack_node_fraction=0.1) - mea.attack() - - -def mea3(): - dataset = Cora(api_type='dgl') - mea = ModelExtractionAttack3(dataset, attack_node_fraction=0.1) - mea.attack() - - -def mea4(): - dataset = Cora(api_type='dgl') - mea = ModelExtractionAttack4(dataset, attack_node_fraction=0.1) - mea.attack() - - -def mea5(): - dataset = Cora(api_type='dgl') - mea = ModelExtractionAttack5(dataset, attack_node_fraction=0.1) - mea.attack() - - -if __name__ == '__main__': - mea0() diff --git a/examples/attack/Realistic.py b/examples/attack/Realistic.py deleted file mode 100644 index 9e537904..00000000 --- a/examples/attack/Realistic.py +++ /dev/null @@ -1,24 +0,0 @@ -from pygip.datasets import Cora -from pygip.models.attack import RealisticAttack -from pygip.utils.hardware import set_device - -# TODO verify performance -# TODO record metrics (original acc, attack acc, fidelity) - -set_device("cuda:0") - - -def realistic(): - dataset = Cora(api_type='dgl') - mea = RealisticAttack( - dataset=dataset, - attack_node_fraction=0.05, - hidden_dim=64, - threshold_s=0.6, # Cosine similarity threshold - threshold_a=0.4 # Edge prediction threshold - ) - mea.attack() - - -if __name__ == '__main__': - realistic() diff --git 
a/examples/defense/ATOM.py b/examples/defense/ATOM.py deleted file mode 100644 index 451b1d3f..00000000 --- a/examples/defense/ATOM.py +++ /dev/null @@ -1,18 +0,0 @@ -from pygip.datasets import Cora -from pygip.models.defense import ATOM - - -# TODO test datasets -# TODO generate query set -# TODO test gpu -# TODO verify performance -# TODO record metrics (original acc, defense acc, fidelity) - -def atom(): - dataset = Cora(api_type='pyg') - med = ATOM(dataset, attack_node_fraction=0.1) - med.defend() - - -if __name__ == '__main__': - atom() diff --git a/examples/defense/BackdoorWM.py b/examples/defense/BackdoorWM.py deleted file mode 100644 index 11a22f4c..00000000 --- a/examples/defense/BackdoorWM.py +++ /dev/null @@ -1,17 +0,0 @@ -from pygip.datasets import * -from pygip.models.defense import BackdoorWM - - -# TODO test datasets -# TODO test gpu -# TODO verify performance -# TODO record metrics (original acc, defense acc, fidelity) - -def backdoorwm(): - dataset = Cora(api_type='dgl') - med = BackdoorWM(dataset, attack_node_fraction=0.1, trigger_rate=0.1, l=20, target_label=0) - med.defend() - - -if __name__ == '__main__': - backdoorwm() diff --git a/examples/defense/GrOVe.py b/examples/defense/GrOVe.py deleted file mode 100644 index 6673c3df..00000000 --- a/examples/defense/GrOVe.py +++ /dev/null @@ -1,20 +0,0 @@ -from pygip.datasets import * -from pygip.models.defense import GroveDefense - - -# TODO test datasets -# TODO test gpu -# TODO verify performance -# TODO record metrics (verification accuracy, defense time, inference time) - -def grovedefense(): - dataset = Cora(api_type='dgl') - med = GroveDefense(dataset, attack_node_fraction=0.1, - hidden_dim=256, - verification_threshold=0.5, - num_surrogate_models=3) - _, res_comp = med.defend() - - -if __name__ == '__main__': - grovedefense() diff --git a/examples/defense/ImperceptibleWM.py b/examples/defense/ImperceptibleWM.py deleted file mode 100644 index 4f7eedba..00000000 --- 
a/examples/defense/ImperceptibleWM.py +++ /dev/null @@ -1,17 +0,0 @@ -from pygip.datasets import Cora -from pygip.models.defense import ImperceptibleWM - - -# TODO test datasets -# TODO test gpu -# TODO verify performance -# TODO record metrics (original acc, defense acc, fidelity) - -def imperceptiblewm(): - dataset = Cora(api_type='pyg') - med = ImperceptibleWM(dataset, defense_ratio=0.1) - med.defend() - - -if __name__ == '__main__': - imperceptiblewm() diff --git a/examples/defense/Integrity.py b/examples/defense/Integrity.py deleted file mode 100644 index 71bb6de4..00000000 --- a/examples/defense/Integrity.py +++ /dev/null @@ -1,17 +0,0 @@ -from pygip.datasets import Cora -from pygip.models.defense import IntegrityVerification - - -# TODO test datasets -# TODO test gpu -# TODO verify performance -# TODO record metrics (original acc, defense acc, fidelity) - -def integrity(): - dataset = Cora(api_type='dgl') - med = IntegrityVerification(dataset, defense_ratio=0.1) - med.defend() - - -if __name__ == '__main__': - integrity() diff --git a/examples/defense/RandomWM.py b/examples/defense/RandomWM.py deleted file mode 100644 index 4fbeb8e8..00000000 --- a/examples/defense/RandomWM.py +++ /dev/null @@ -1,17 +0,0 @@ -from pygip.datasets import Cora -from pygip.models.defense import RandomWM - - -# TODO test datasets -# TODO test gpu -# TODO verify performance -# TODO record metrics (original acc, defense acc, fidelity) - -def randomwm(): - dataset = Cora(api_type='dgl') - med = RandomWM(dataset, defense_ratio=0.1, wm_node=50, pr=0.1, pg=0.1) - med.defend() - - -if __name__ == '__main__': - randomwm() diff --git a/examples/defense/Revisiting.py b/examples/defense/Revisiting.py deleted file mode 100644 index 7783bc6f..00000000 --- a/examples/defense/Revisiting.py +++ /dev/null @@ -1,22 +0,0 @@ -from pygip.datasets import Cora -from pygip.models.defense import Revisiting - - -def main(): - # Load dataset - dataset = Cora() - - # Init defense (tweak params as needed) - 
defense = Revisiting( - dataset, - attack_node_fraction=0.20, # fraction of nodes to mix - alpha=0.80, # neighbor-mixing strength [0,1] - ) - - print("Initialized Revisiting defense; starting defend()...") - results = defense.defend() - print("Defense finished. Results:", results) - - -if __name__ == "__main__": - main() diff --git a/examples/defense/SurviveWM.py b/examples/defense/SurviveWM.py deleted file mode 100644 index 717d0467..00000000 --- a/examples/defense/SurviveWM.py +++ /dev/null @@ -1,17 +0,0 @@ -from pygip.datasets import Cora -from pygip.models.defense import SurviveWM - - -# TODO test datasets -# TODO test gpu -# TODO verify performance -# TODO record metrics (original acc, defense acc, fidelity) - -def survivewm(): - dataset = Cora(api_type='dgl') - med = SurviveWM(dataset, defense_ratio=0.1) - med.defend() - - -if __name__ == '__main__': - survivewm() diff --git a/pygip/__init__.py b/pygip/__init__.py deleted file mode 100644 index 9aed9e10..00000000 --- a/pygip/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from importlib.metadata import version, PackageNotFoundError - -try: - __version__ = version("PyGIP") -except PackageNotFoundError: - __version__ = "0.0.0" # fallback \ No newline at end of file diff --git a/pygip/datasets/__init__.py b/pygip/datasets/__init__.py deleted file mode 100644 index 4decc351..00000000 --- a/pygip/datasets/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -from .datasets import ( - Dataset, - Cora, - CiteSeer, - PubMed, - Computers, - Photo, - CoauthorCS, - CoauthorPhysics, -) - -__all__ = [ - 'Dataset', - 'Cora', - 'CiteSeer', - 'PubMed', - 'Computers', - 'Photo', - 'CoauthorCS', - 'CoauthorPhysics', -] diff --git a/pygip/datasets/datasets.py b/pygip/datasets/datasets.py deleted file mode 100644 index a5776e39..00000000 --- a/pygip/datasets/datasets.py +++ /dev/null @@ -1,567 +0,0 @@ -import dgl -import numpy as np -import torch -from dgl import DGLGraph -from dgl.data import AmazonCoBuyComputerDataset # Amazon-Computer -from 
dgl.data import AmazonCoBuyPhotoDataset # Amazon-Photo -from dgl.data import CoauthorCSDataset, CoauthorPhysicsDataset -from dgl.data import FakeNewsDataset -from dgl.data import FlickrDataset -from dgl.data import GINDataset -from dgl.data import MUTAGDataset -from dgl.data import RedditDataset -from dgl.data import YelpDataset -from dgl.data import citation_graph # Cora, CiteSeer, PubMed -from sklearn.model_selection import StratifiedShuffleSplit -from torch_geometric.data import Data as PyGData -from torch_geometric.datasets import Amazon # Amazon Computers, Photo -from torch_geometric.datasets import Coauthor # cs, physics -from torch_geometric.datasets import FacebookPagePage -from torch_geometric.datasets import Flickr as FlickrPyG -from torch_geometric.datasets import LastFMAsia -from torch_geometric.datasets import Planetoid # Cora, CiteSeer, PubMed -from torch_geometric.datasets import PolBlogs as PolBlogsPyG -from torch_geometric.datasets import Reddit -from torch_geometric.datasets import TUDataset # ENZYMES - - -def dgl_to_tg(dgl_graph): - edge_index = torch.stack(dgl_graph.edges()) - x = dgl_graph.ndata.get('feat') - y = dgl_graph.ndata.get('label') - - train_mask = dgl_graph.ndata.get('train_mask') - val_mask = dgl_graph.ndata.get('val_mask') - test_mask = dgl_graph.ndata.get('test_mask') - - data = PyGData(x=x, edge_index=edge_index, y=y, - train_mask=train_mask, val_mask=val_mask, test_mask=test_mask) - return data - - -def tg_to_dgl(py_g_data): - edge_index = py_g_data.edge_index - dgl_graph = dgl.graph((edge_index[0], edge_index[1])) - - if py_g_data.x is not None: - dgl_graph.ndata['feat'] = py_g_data.x - if py_g_data.y is not None: - dgl_graph.ndata['label'] = py_g_data.y - - if hasattr(py_g_data, 'train_mask') and py_g_data.train_mask is not None: - dgl_graph.ndata['train_mask'] = py_g_data.train_mask - if hasattr(py_g_data, 'val_mask') and py_g_data.val_mask is not None: - dgl_graph.ndata['val_mask'] = py_g_data.val_mask - if 
hasattr(py_g_data, 'test_mask') and py_g_data.test_mask is not None: - dgl_graph.ndata['test_mask'] = py_g_data.test_mask - - return dgl_graph - - -class Dataset(object): - def __init__(self, api_type='dgl', path='./data'): - assert api_type in {'dgl', 'pyg'}, 'API type must be dgl or pyg' - self.api_type = api_type - self.path = path - self.dataset_name = self.get_name() - - # DGLGraph or PyGData - self.graph_dataset = None - self.graph_data = None - - # meta data - self.num_nodes = 0 - self.num_features = 0 - self.num_classes = 0 - - if self.api_type == 'dgl': - self.load_dgl_data() - elif self.api_type == 'pyg': - self.load_pyg_data() - else: - raise ValueError("Unsupported api_type.") - - self._load_meta_data() - - def get_name(self): - return self.__class__.__name__ - - def load_dgl_data(self): - raise NotImplementedError("load_dgl_data not implemented in subclasses.") - - def load_pyg_data(self): - raise NotImplementedError("load_pyg_data not implemented in subclasses.") - - def _load_meta_data(self): - if isinstance(self.graph_data, DGLGraph): - self.num_nodes = self.graph_data.number_of_nodes() - self.num_features = len(self.graph_data.ndata['feat'][0]) - self.num_classes = int(max(self.graph_data.ndata['label']) - min(self.graph_data.ndata['label'])) + 1 - elif isinstance(self.graph_data, PyGData): - self.num_nodes = self.graph_data.num_nodes - self.num_features = self.graph_dataset.num_node_features - self.num_classes = self.graph_dataset.num_classes - else: - raise TypeError("graph_data must be either DGLGraph or torch_geometric.data.Data.") - - def _generate_masks_by_ratio(self, train_ratio=0.8): - if self.graph_data is None: - raise ValueError("graph_data is not loaded.") - - try: - import dgl - except ImportError: - dgl = None - - try: - from torch_geometric.data import Data - except ImportError: - Data = None - - is_dgl = dgl and isinstance(self.graph_data, dgl.DGLGraph) - is_pyg = Data and isinstance(self.graph_data, Data) - - if not (is_dgl or 
is_pyg): - raise TypeError("graph_data must be either DGLGraph or torch_geometric.data.Data.") - - # Check if masks already exist - if is_dgl: - if all(k in self.graph_data.ndata for k in ['train_mask', 'val_mask', 'test_mask']): - print("Masks already exist in DGL graph. Skipping mask generation.") - return - num_nodes = self.graph_data.num_nodes() - else: # PyG - if all(hasattr(self.graph_data, k) for k in ['train_mask', 'val_mask', 'test_mask']): - print("Masks already exist in PyG data. Skipping mask generation.") - return - num_nodes = self.graph_data.num_nodes - - # Generate masks - indices = torch.randperm(num_nodes) - train_size = int(train_ratio * num_nodes) - val_size = (num_nodes - train_size) // 2 - - train_mask = torch.zeros(num_nodes, dtype=torch.bool) - val_mask = torch.zeros(num_nodes, dtype=torch.bool) - test_mask = torch.zeros(num_nodes, dtype=torch.bool) - - train_mask[indices[:train_size]] = True - val_mask[indices[train_size:train_size + val_size]] = True - test_mask[indices[train_size + val_size:]] = True - - # Store masks - if is_dgl: - self.graph_data.ndata['train_mask'] = train_mask - self.graph_data.ndata['val_mask'] = val_mask - self.graph_data.ndata['test_mask'] = test_mask - else: # PyG - self.graph_data.train_mask = train_mask - self.graph_data.val_mask = val_mask - self.graph_data.test_mask = test_mask - - print(f"Masks successfully generated and stored. 
(train_ratio={train_ratio})") - - def _generate_masks_by_classes(self, num_class_samples=100, val_count=500, test_count=1000, seed=42): - """ - For Amazon and Coauthor datasets: - - train: `num_class_samples` per class - - val: `val_count` nodes from remaining - - test: `test_count` nodes from remaining after val - Works for both DGL and PyG graphs via self.graph_data - """ - try: - import dgl - except ImportError: - dgl = None - try: - from torch_geometric.data import Data as PyGData - except ImportError: - PyGData = None - - is_dgl = dgl is not None and isinstance(self.graph_data, dgl.DGLGraph) - is_pyg = PyGData is not None and isinstance(self.graph_data, PyGData) - - if not (is_dgl or is_pyg): - raise TypeError("graph_data must be either DGLGraph or torch_geometric.data.Data.") - - if is_dgl: - if all(k in self.graph_data.ndata for k in ['train_mask', 'val_mask', 'test_mask']): - print("Masks already exist in DGL graph. Skipping mask generation.") - return - num_nodes = self.graph_data.num_nodes() - labels = self.graph_data.ndata['label'] - else: # PyG - if all(hasattr(self.graph_data, k) for k in ['train_mask', 'val_mask', 'test_mask']): - print("Masks already exist in PyG data. 
Skipping mask generation.") - return - num_nodes = self.graph_data.num_nodes - labels = self.graph_data.y - - num_classes = int(labels.max().item()) + 1 - - used_mask = torch.zeros(num_nodes, dtype=torch.bool) - generator = torch.Generator().manual_seed(seed) - train_idx_parts = [] - - # train set - print("Training samples per class:") - for c in range(num_classes): - class_idx = (labels == c).nonzero(as_tuple=True)[0] - if class_idx.numel() == 0: - print(f" class {c}: no available samples") - continue - perm = class_idx[torch.randperm(class_idx.size(0), generator=generator)] - n_select = min(num_class_samples, perm.size(0)) - selected = perm[:n_select] - train_idx_parts.append(selected) - used_mask[selected] = True - print(f" class {c}: select {n_select} samples") - - if len(train_idx_parts) == 0: - raise ValueError("no training samples available.") - - train_idx = torch.cat(train_idx_parts, dim=0) - - # val set - remaining_idx = (~used_mask).nonzero(as_tuple=True)[0] - if remaining_idx.numel() == 0: - raise ValueError("no remaining samples available.") - remaining_perm = remaining_idx[torch.randperm(remaining_idx.size(0), generator=generator)] - - val_take = min(val_count, remaining_perm.size(0)) - val_idx = remaining_perm[:val_take] - used_mask[val_idx] = True - - # test set - remaining_idx = (~used_mask).nonzero(as_tuple=True)[0] - test_take = min(test_count, remaining_idx.size(0)) - test_idx = remaining_idx[:test_take] - - train_mask = self._index_to_mask(train_idx, num_nodes) - val_mask = self._index_to_mask(val_idx, num_nodes) - test_mask = self._index_to_mask(test_idx, num_nodes) - - if is_pyg: - self.graph_data.train_mask = train_mask - self.graph_data.val_mask = val_mask - self.graph_data.test_mask = test_mask - else: - self.graph_data.ndata["train_mask"] = train_mask - self.graph_data.ndata["val_mask"] = val_mask - self.graph_data.ndata["test_mask"] = test_mask - - def _index_to_mask(self, index: torch.Tensor, size: int): - mask = torch.zeros(size, 
dtype=torch.bool, device=index.device if isinstance(index, torch.Tensor) else None) - mask[index] = True - return mask - - def __repr__(self): - return (f"Dataset(name={self.dataset_name}, api_type={self.api_type}, " - f"#Nodes={self.num_nodes}, #Features={self.num_features}, " - f"#Classes={self.num_classes})") - - -class Cora(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = citation_graph.load_cora() - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - def load_pyg_data(self): - dataset = Planetoid(root=self.path, name='Cora') - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - -class CiteSeer(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = citation_graph.load_citeseer() - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - def load_pyg_data(self): - dataset = Planetoid(root=self.path, name='Citeseer') - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - -class PubMed(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = citation_graph.load_pubmed() - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - def load_pyg_data(self): - dataset = Planetoid(root=self.path, name='PubMed') - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - -class Computers(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = AmazonCoBuyComputerDataset(raw_dir=self.path) - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = dgl.add_self_loop(data) - - self._generate_masks_by_classes() - - def load_pyg_data(self): - dataset = Amazon(root=self.path, name='Computers') - 
data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - self._generate_masks_by_classes() - - -class Photo(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = AmazonCoBuyPhotoDataset(raw_dir=self.path) - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = dgl.add_self_loop(data) - - self._generate_masks_by_classes() - - def load_pyg_data(self): - dataset = Amazon(root=self.path, name='Photo') - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - self._generate_masks_by_classes() - - -class CoauthorCS(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = CoauthorCSDataset(raw_dir=self.path) - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - self._generate_masks_by_classes() - - def load_pyg_data(self): - dataset = Coauthor(root=self.path, name='CS') - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - self._generate_masks_by_classes() - - -class CoauthorPhysics(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = CoauthorPhysicsDataset(raw_dir=self.path) - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - self._generate_masks_by_classes() - - def load_pyg_data(self): - dataset = Coauthor(root=self.path, name='Physics') - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - self._generate_masks_by_classes() - - -class ENZYMES(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_pyg_data(self): - dataset = TUDataset(root=self.path, name='ENZYMES') - data_list = [data for data in dataset] - all_x = torch.cat([d.x for d in data_list], dim=0) - mean, std = all_x.mean(0), all_x.std(0) - for 
d in data_list: - d.x = (d.x - mean) / (std + 1e-6) - all_labels = np.array([int(d.y) for d in data_list]) - splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) - train_idx, test_idx = next(splitter.split(np.zeros(len(all_labels)), all_labels)) - self.train_data = [data_list[i] for i in train_idx] - self.test_data = [data_list[i] for i in test_idx] - - -class Facebook(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_pyg_data(self): - dataset = FacebookPagePage(root=self.path) - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - -class Flickr(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = FlickrDataset(raw_dir=self.path) - self.graph_dataset = dataset - self.graph_data = dataset[0] - - def load_pyg_data(self): - dataset = FlickrPyG(root=self.path) - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - -class PolBlogs(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_pyg_data(self): - dataset = PolBlogsPyG(root=self.path) - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - self._generate_masks_by_classes() - - -class LastFM(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_pyg_data(self): - dataset = LastFMAsia(root=self.path) - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - -class Reddit(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = RedditDataset(raw_dir=self.path) - self.graph_dataset = dataset - self.graph_data = dataset[0] - - def load_pyg_data(self): - dataset = Reddit(self.path) - data = dataset[0] - self.graph_dataset = dataset - self.graph_data = data - - -class 
Twitter(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = FakeNewsDataset('gossipcop', 'bert', raw_dir=self.path) - graph, _ = dataset[0] - self.graph_dataset = dataset - self.graph_data = dgl.add_self_loop(graph) - - -class MUTAG(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = MUTAGDataset(raw_dir=self.path) - self.graph_dataset = dataset - self.graph_data = dataset[0] - - -class PTC(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = GINDataset(name='PTC', raw_dir=self.path, self_loop=False) - graph, _ = zip(*[dataset[i] for i in range(16)]) - self.graph_dataset = dataset - self.graph_data = dgl.batch(graph) - - -class NCI1(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = GINDataset(name='NCI1', raw_dir=self.path, self_loop=False) - graph, _ = zip(*[dataset[i] for i in range(16)]) - self.graph_dataset = dataset - self.graph_data = dgl.batch(graph) - - -class PROTEINS(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = GINDataset(name='PROTEINS', raw_dir=self.path, self_loop=False) - graph, _ = zip(*[dataset[i] for i in range(16)]) - self.graph_dataset = dataset - self.graph_data = dgl.batch(graph) - - -class Collab(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = GINDataset(name='COLLAB', raw_dir=self.path, self_loop=False) - graph, _ = zip(*[dataset[i] for i in range(16)]) - self.graph_dataset = dataset - self.graph_data = dgl.batch(graph) - - -class IMDB(Dataset): - def __init__(self, api_type='dgl', path='./data'): - 
super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = GINDataset(name='IMDB-BINARY', raw_dir=self.path, self_loop=False) - graph, _ = zip(*[dataset[i] for i in range(16)]) - self.graph_dataset = dataset - self.graph_data = dgl.batch(graph) - - -class YelpData(Dataset): - def __init__(self, api_type='dgl', path='./data'): - super().__init__(api_type, path) - - def load_dgl_data(self): - dataset = YelpDataset(raw_dir=self.path) - self.graph_dataset = dataset - self.graph_data = dataset[0] diff --git a/pygip/models/attack/AdvMEA.py b/pygip/models/attack/AdvMEA.py deleted file mode 100644 index 3083b838..00000000 --- a/pygip/models/attack/AdvMEA.py +++ /dev/null @@ -1,245 +0,0 @@ -import time - -import dgl -import numpy as np -import torch -import torch.nn.functional as F -from torch_geometric.utils import k_hop_subgraph, dense_to_sparse -from tqdm import tqdm - -from pygip.models.attack.base import BaseAttack -from pygip.models.nn import GCN -from pygip.utils.metrics import AttackMetric, AttackCompMetric - - -class AdvMEA(BaseAttack): - supported_api_types = {"dgl"} - - def __init__(self, dataset, attack_node_fraction, model_path=None): - super().__init__(dataset, attack_node_fraction, model_path) - self.graph = dataset.graph_data.to(self.device) - self.features = self.graph.ndata['feat'] - self.labels = self.graph.ndata['label'] - self.train_mask = self.graph.ndata['train_mask'] - self.test_mask = self.graph.ndata['test_mask'] - - # meta data - self.num_nodes = dataset.num_nodes - self.num_features = dataset.num_features - self.num_classes = dataset.num_classes - - if model_path is None: - self._train_target_model() - else: - self._load_model(model_path) - - def _load_model(self, model_path): - """ - Load a pre-trained model. 
- """ - # Create the model - self.net1 = GCN(self.num_features, self.num_classes).to(self.device) - - # Load the saved state dict - self.net1.load_state_dict(torch.load(model_path, map_location=self.device)) - - # Set to evaluation mode - self.net1.eval() - - def _train_target_model(self): - """ - Train the target model (GCN) on the original graph. - """ - # Initialize GNN model - self.net1 = GCN(self.num_features, self.num_classes).to(self.device) - optimizer = torch.optim.Adam(self.net1.parameters(), lr=0.01, weight_decay=5e-4) - - # Training loop - for epoch in range(200): - self.net1.train() - - # Forward pass - logits = self.net1(self.graph, self.features) - logp = F.log_softmax(logits, dim=1) - loss = F.nll_loss(logp[self.train_mask], self.labels[self.train_mask]) - - # Backward pass - optimizer.zero_grad() - loss.backward() - optimizer.step() - - # Validation (optional) - if epoch % 20 == 0: - self.net1.eval() - with torch.no_grad(): - logits_val = self.net1(self.graph, self.features) - logp_val = F.log_softmax(logits_val, dim=1) - pred = logp_val.argmax(dim=1) - acc_val = (pred[self.test_mask] == self.labels[self.test_mask]).float().mean() - # You could print validation accuracy here - - return self.net1 - - # Define a local to_cpu method to avoid inheritance issues - def _to_cpu(self, tensor): - """ - Safely move tensor to CPU for NumPy operations - """ - if tensor.is_cuda: - return tensor.cpu() - return tensor - - def attack(self): - metric_comp = AttackCompMetric() - metric_comp.start() - g = self.graph.clone() - # Move adjacency matrix to CPU for NumPy operations - g_matrix = np.asmatrix(self._to_cpu(g.adjacency_matrix().to_dense()).numpy()) - edge_index = np.array(np.nonzero(g_matrix)) - edge_index = torch.tensor(edge_index, dtype=torch.long) - - attack_s1 = time.time() - # Select a center node with certain size - while True: - node_index = torch.randint(0, self.num_nodes, (1,)).item() - # print("node_index=",node_index) - sub_node_index, 
sub_edge_index, _, _ = k_hop_subgraph(node_index, 2, edge_index, relabel_nodes=True, - num_nodes=self.num_nodes) - if 45 <= sub_node_index.size(0) <= 50: - As = torch.zeros((sub_node_index.size(0), sub_node_index.size(0))) - As[sub_edge_index[0], sub_edge_index[1]] = 1 - print("sub_node_index=", sub_node_index.size(0)) - # Ensure moved to CPU - Xs = self._to_cpu(self.features[sub_node_index]) - break - - # Construct the prior distribution - Fd = [] - Md = [] - for label in range(self.num_classes): - # Ensure moved to CPU before converting to NumPy - features_cpu = self._to_cpu(self.features) - labels_cpu = self._to_cpu(self.labels) - class_nodes = features_cpu[labels_cpu == label].numpy() - - feature_counts = class_nodes.sum(axis=0) - feature_distribution = feature_counts / feature_counts.sum() - Fd.append(feature_distribution) - - num_features_per_node = class_nodes.sum(axis=1) - feature_count_distribution = np.bincount(num_features_per_node.astype(int), minlength=self.num_features) - Md.append(feature_count_distribution / feature_count_distribution.sum()) - - SA = [As] - SX = [Xs] - attack_e1 = time.time() - - query_s = time.time() - # Query the target model - self.net1.eval() - with torch.no_grad(): - logits_query = self.net1(g, self.features) - _, labels_query = torch.max(logits_query, dim=1) - - query_e = time.time() - - attack_s2 = time.time() - src, dst = As.nonzero(as_tuple=True) - initial_num_nodes = Xs.shape[0] - initial_graph = dgl.graph((src, dst), num_nodes=initial_num_nodes).to(self.device) - initial_graph.ndata['feat'] = Xs.to(self.device) - - self.net1.eval() - with torch.no_grad(): - initial_query = self.net1(initial_graph, initial_graph.ndata['feat']) - _, initial_label = torch.max(initial_query, dim=1) - - SL = self._to_cpu(initial_label).tolist() - samples_per_class = 10 - n = samples_per_class - - for i in range(n): - # For each class, generate and store a new sampled subgraph - for c in range(self.num_classes): - num_nodes = As.shape[0] - Ac = 
torch.ones((num_nodes, num_nodes)) - Xc = torch.zeros(num_nodes, len(Fd[c])) - for j in range(num_nodes): # Use j to avoid conflict with outer loop variable i - m = np.random.choice(np.arange(len(Md[c])), p=Md[c]) - features_idx = np.random.choice(len(Fd[c]), size=int(m), replace=False, p=Fd[c]) - Xc[j, features_idx] = 1 - SA.append(Ac) - SX.append(Xc) - - src, dst = Ac.nonzero(as_tuple=True) - subgraph = dgl.graph((src, dst), num_nodes=num_nodes).to(self.device) - subgraph.ndata['feat'] = Xc.to(self.device) - - self.net1.eval() - with torch.no_grad(): - api_query = self.net1(subgraph, subgraph.ndata['feat']) - _, label_query = torch.max(api_query, dim=1) - - SL.extend(self._to_cpu(label_query).tolist()) - - AG_list = [dense_to_sparse(torch.tensor(a))[0] for a in SA] - XG = torch.vstack([torch.tensor(x) for x in SX]) - - SL = torch.tensor(SL, dtype=torch.long) - - # Filter valid labels and trim - valid_mask = SL >= 0 - SL = SL[valid_mask] - SL = SL[:XG.shape[0]] - - # Calculate nodes per subgraph - num_nodes = XG.shape[0] // len(AG_list) if len(AG_list) > 0 else 0 - - # Combine edge indices from all subgraphs, adjusting node indices to avoid overlap - AG_combined = torch.cat([edge_index + i * num_nodes for i, edge_index in enumerate(AG_list)], dim=1) - - src, dst = AG_combined[0], AG_combined[1] - num_total_nodes = XG.shape[0] - sub_g = dgl.graph((src, dst), num_nodes=num_total_nodes).to(self.device) - sub_g.ndata['feat'] = XG.to(self.device) - - attack_e2 = time.time() - - train_surrogate_s = time.time() - # Create and train the extracted model - net6 = GCN(XG.shape[1], self.num_classes).to(self.device) - optimizer = torch.optim.Adam(net6.parameters(), lr=0.01, weight_decay=5e-4) - - print("=========Model Extracting==========================") - metric = AttackMetric() - - for epoch in tqdm(range(200)): - net6.train() - logits = net6(sub_g, sub_g.ndata['feat']) - out = torch.log_softmax(logits, dim=1) - loss = F.nll_loss(out, SL.to(self.device)) - - 
optimizer.zero_grad() - loss.backward() - optimizer.step() - - # Switch to evaluation mode - t0 = time.time() - net6.eval() - with torch.no_grad(): - logits = net6(g, self.features) - _, preds = torch.max(logits[self.test_mask], dim=1) - metric.update(preds, self.labels[self.test_mask], labels_query[self.test_mask]) - metric_comp.update(inference_surrogate_time=(time.time() - t0)) - - train_surrogate_e = time.time() - - print("========================Final results:=========================================") - metric_comp.end() - metric_comp.update(attack_time=(attack_e1 - attack_s1 + attack_e2 - attack_s2), - query_target_time=(query_e - query_s), - train_surrogate_time=(train_surrogate_e - train_surrogate_s)) - res = metric.compute() - res_comp = metric_comp.compute() - - return res, res_comp diff --git a/pygip/models/attack/CEGA.py b/pygip/models/attack/CEGA.py deleted file mode 100644 index d56c524a..00000000 --- a/pygip/models/attack/CEGA.py +++ /dev/null @@ -1,1860 +0,0 @@ -import copy -import json -import math -import os -import pickle as pkl -import random -import sys -import time - -import dgl -import dgl.function as fn -import networkx as nx -import numpy as np -import numpy.linalg as la -import pandas as pd -import scipy.sparse as sp -import torch -import torch as th -import torch.nn as nn -import torch.nn.functional as F -from dgl.data import AmazonCoBuyComputerDataset, AmazonCoBuyPhotoDataset, CoauthorCSDataset, CoauthorPhysicsDataset, \ - RedditDataset, WikiCSDataset, AmazonRatingsDataset, QuestionsDataset, RomanEmpireDataset, FlickrDataset, \ - CoraFullDataset -from dgl.data import citation_graph as citegrh -from dgl.nn.pytorch import GraphConv -from sklearn.cluster import KMeans -from sklearn.metrics import f1_score -from torch_geometric.datasets import CitationFull -from tqdm import tqdm - -time_limit = 300 - - -def get_receptive_fields_dense(cur_neighbors, selected_node, weighted_score, adj_matrix2): - receptive_vector = ((cur_neighbors + 
adj_matrix2[selected_node]) != 0) + 0 - count = weighted_score.dot(receptive_vector) - return count - - -def get_current_neighbors_dense(cur_nodes, adj_matrix2): - if np.array(cur_nodes).shape[0] == 0: - return 0 - neighbors = (adj_matrix2[list(cur_nodes)].sum(axis=0) != 0) + 0 - return neighbors - - -def get_current_neighbors_1(cur_nodes, adj_matrix): - if np.array(cur_nodes).shape[0] == 0: - return 0 - neighbors = (adj_matrix[list(cur_nodes)].sum(axis=0) != 0) + 0 - return neighbors - - -def get_entropy_contribute(npy_m1, npy_m2): - entro1 = 0 - entro2 = 0 - for i in range(npy_m1.shape[0]): - entro1 -= np.sum(npy_m1[i] * np.log2(npy_m1[i])) - entro2 -= np.sum(npy_m2[i] * np.log2(npy_m2[i])) - return entro1 - entro2 - - -def get_max_info_entropy_node_set(idx_used, - high_score_nodes, - labels, - batch_size, - adj_matrix2, - num_class, - model_prediction): - max_info_node_set = [] - high_score_nodes_ = copy.deepcopy(high_score_nodes) - labels_ = copy.deepcopy(labels) - for k in range(batch_size): - score_list = np.zeros(len(high_score_nodes_)) - for i in range(len(high_score_nodes_)): - labels_tmp = copy.deepcopy(labels_) - node = high_score_nodes_[i] - node_neighbors = get_current_neighbors_dense([node], adj_matrix2) - adj_neigh = adj_matrix2[list(node_neighbors)] - aay = np.matmul(adj_neigh, labels_) - total_score = 0 - for j in range(num_class): - if model_prediction[node][j] != 0: - labels_tmp[node] = 0 - labels_tmp[node][j] = 1 - aay_ = np.matmul(adj_neigh, labels_tmp) - total_score += model_prediction[node][j] * get_entropy_contribute(aay, aay_) - score_list[i] = total_score - idx = np.argmax(score_list) - max_node = high_score_nodes_[idx] - max_info_node_set.append(max_node) - labels_[max_node] = model_prediction[max_node] - high_score_nodes_.remove(max_node) - return max_info_node_set - - -def get_max_nnd_node_dense(idx_used, - high_score_nodes, - min_distance, - distance_aax, - num_ones, - num_node, - adj_matrix2, - gamma=1): - dmax = np.ones(num_node) - - 
max_receptive_node = 0 - max_total_score = 0 - cur_neighbors = get_current_neighbors_dense(idx_used, adj_matrix2) - for node in high_score_nodes: - receptive_field = get_receptive_fields_dense(cur_neighbors, node, num_ones, adj_matrix2) - node_distance = distance_aax[node, :] - node_distance = np.where(node_distance < min_distance, node_distance, min_distance) - node_distance = dmax - node_distance - distance_score = node_distance.dot(num_ones) - total_score = receptive_field / num_node + gamma * distance_score / num_node - if total_score > max_total_score: - max_total_score = total_score - max_receptive_node = node - return max_receptive_node - - -def aug_normalized_adjacency(adj): - adj = adj + sp.eye(adj.shape[0]) - adj = sp.coo_matrix(adj) - row_sum = np.array(adj.sum(1)) - d_inv_sqrt = np.power(row_sum, -0.5).flatten() - d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0. - d_mat_inv_sqrt = sp.diags(d_inv_sqrt) - return d_mat_inv_sqrt.dot(adj).dot(d_mat_inv_sqrt).tocoo() - - -def compute_distance(_i, _j, features_aax): - return la.norm(features_aax[_i, :] - features_aax[_j, :]) - - -def parse_index_file(filename): - index = [] - for line in open(filename): - index.append(int(line.strip())) - return index - - -def normalize(mx): - """Row-normalize sparse matrix""" - rowsum = np.array(mx.sum(1)) - r_inv = np.power(rowsum, -1).flatten() - r_inv[np.isinf(r_inv)] = 0. 
- r_mat_inv = sp.diags(r_inv) - mx = r_mat_inv.dot(mx) - return mx - - -def accuracy(output, labels): - preds = output.max(1)[1].type_as(labels) - correct = preds.eq(labels).double() - correct = correct.sum() - return correct / len(labels) - - -def load_data_from_grain(path="./data", dataset="cora"): - """ - ind.[:dataset].x => the feature vectors of the training instances (scipy.sparse.csr.csr_matrix) - ind.[:dataset].y => the one-hot labels of the labeled training instances (numpy.ndarray) - ind.[:dataset].allx => the feature vectors of both labeled and unlabeled training instances (csr_matrix) - ind.[:dataset].ally => the labels for instances in ind.dataset_str.allx (numpy.ndarray) - ind.[:dataset].graph => the dict in the format {index: [index of neighbor nodes]} (collections.defaultdict) - ind.[:dataset].tx => the feature vectors of the test instances (scipy.sparse.csr.csr_matrix) - ind.[:dataset].ty => the one-hot labels of the test instances (numpy.ndarray) - ind.[:dataset].test.index => indices of test instances in graph, for the inductive setting - """ - print("\n[STEP 1]: Upload {} dataset.".format(dataset)) - - names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph'] - objects = [] - - for i in range(len(names)): - with open("{}/ind.{}.{}".format(path, dataset, names[i]), 'rb') as f: - if sys.version_info > (3, 0): - objects.append(pkl.load(f, encoding='latin1')) - else: - objects.append(pkl.load(f)) - - x, y, tx, ty, allx, ally, graph = tuple(objects) - - test_idx_reorder = parse_index_file("{}/ind.{}.test.index".format(path, dataset)) - test_idx_range = np.sort(test_idx_reorder) - - if dataset == 'citeseer': - # Citeseer dataset contains some isolated nodes in the graph - test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1) - tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1])) - tx_extended[test_idx_range - min(test_idx_range), :] = tx - tx = tx_extended - - ty_extended = np.zeros((len(test_idx_range_full), 
y.shape[1])) - ty_extended[test_idx_range - min(test_idx_range), :] = ty - ty = ty_extended - - features = sp.vstack((allx, tx)).tolil() - features[test_idx_reorder, :] = features[test_idx_range, :] - - adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph)) - - print("| # of nodes : {}".format(adj.shape[0])) - print("| # of edges : {}".format(adj.sum().sum() / 2)) - - features = normalize(features) - print("| # of features : {}".format(features.shape[1])) - print("| # of clases : {}".format(ally.shape[1])) - - features = torch.FloatTensor(np.array(features.todense())) - sparse_mx = adj.tocoo().astype(np.float32) - - labels = np.vstack((ally, ty)) - labels[test_idx_reorder, :] = labels[test_idx_range, :] - - if dataset == 'citeseer': - save_label = np.where(labels)[1] - labels = torch.LongTensor(np.where(labels)[1]) - - idx_train = range(len(y)) - idx_val = range(len(y), len(y) + 500) - idx_test = test_idx_range.tolist() - - print("| # of train set : {}".format(len(idx_train))) - print("| # of val set : {}".format(len(idx_val))) - print("| # of test set : {}".format(len(idx_test))) - - idx_train, idx_val, idx_test = list(map(lambda x: torch.LongTensor(x), [idx_train, idx_val, idx_test])) - - def missing_elements(L): - start, end = L[0], L[-1] - return sorted(set(range(start, end + 1)).difference(L)) - - if dataset == 'citeseer': - L = np.sort(idx_test) - missing = missing_elements(L) - - for element in missing: - save_label = np.insert(save_label, element, 0) - - labels = torch.LongTensor(save_label) - - return adj, features, labels, idx_train, idx_val, idx_test - - -def set_seed(seed): - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - - -def sparse_mx_to_torch_sparse_tensor(sparse_mx): - """Convert a scipy sparse matrix to a torch sparse tensor.""" - sparse_mx = sparse_mx.tocoo().astype(np.float32) - indices = torch.from_numpy( - np.vstack((sparse_mx.row, 
sparse_mx.col)).astype(np.int64)) - values = torch.from_numpy(sparse_mx.data) - shape = torch.Size(sparse_mx.shape) - return torch.sparse.FloatTensor(indices, values, shape) - - -def aug_normalized_adjacency(adj): - adj = adj # + sp.eye(adj.shape[0]) - adj = sp.coo_matrix(adj) - row_sum = np.array(adj.sum(1)) - d_inv_sqrt = np.power(row_sum, -0.5).flatten() - d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0. - d_mat_inv_sqrt = sp.diags(d_inv_sqrt) - return d_mat_inv_sqrt.dot(adj).dot(d_mat_inv_sqrt).tocoo() - - -def aug_random_walk(adj): - adj = adj + sp.eye(adj.shape[0]) - adj = sp.coo_matrix(adj) - row_sum = np.array(adj.sum(1)) - d_inv = np.power(row_sum, -1.0).flatten() - d_mat = sp.diags(d_inv) - return (d_mat.dot(adj)).tocoo() - - -class GCN_drop(nn.Module): - def __init__(self, feature_number, label_number, dropout=0.85, nhid=128): - super(GCN_drop, self).__init__() - - self.gc1 = GraphConv(feature_number, nhid, bias=True) - self.gc2 = GraphConv(nhid, label_number, bias=True) - self.dropout = dropout - - def forward(self, g, features): - x = F.dropout(features, self.dropout, training=self.training) - x = F.relu(self.gc1(g, x)) - x = F.dropout(x, self.dropout, training=self.training) - x = self.gc2(g, x) - return x - - -def convert_pyg_to_dgl(pyg_data): - """ - Converts a PyTorch Geometric Data object into a DGLGraph. - - Args: - pyg_data (torch_geometric.data.Data): PyTorch Geometric Data object. - - Returns: - dgl.DGLGraph: The converted DGL graph. 
- """ - edge_index = pyg_data.edge_index - num_nodes = pyg_data.num_nodes - - g = dgl.graph((edge_index[0], edge_index[1]), num_nodes=num_nodes) - - if hasattr(pyg_data, 'x') and pyg_data.x is not None: - g.ndata['feat'] = pyg_data.x - - if hasattr(pyg_data, 'y') and pyg_data.y is not None: - g.ndata['label'] = pyg_data.y - - for mask_name in ['train_mask', 'val_mask', 'test_mask']: - if hasattr(pyg_data, mask_name) and getattr(pyg_data, mask_name) is not None: - g.ndata[mask_name] = getattr(pyg_data, mask_name) - - return g - - -def load_data(dataset_name): - if dataset_name == 'cora': - data = citegrh.load_cora() - if dataset_name == 'citeseer': - data = citegrh.load_citeseer() - if dataset_name == 'pubmed': - data = citegrh.load_pubmed() - if dataset_name == 'amazoncomputer': - data = AmazonCoBuyComputerDataset() - if dataset_name == 'amazonphoto': - data = AmazonCoBuyPhotoDataset() - if dataset_name == 'coauthorCS': - data = CoauthorCSDataset() - if dataset_name == 'coauthorphysics': - data = CoauthorPhysicsDataset() - if dataset_name == 'reddit': - data = RedditDataset() - if dataset_name == 'wiki': - data = WikiCSDataset() - if dataset_name == 'amazonrating': - data = AmazonRatingsDataset() - if dataset_name == 'question': - data = QuestionsDataset() - if dataset_name == 'roman': - data = RomanEmpireDataset() - if dataset_name == 'flickr': - data = FlickrDataset() - if dataset_name == 'cora_full': - data = CoraFullDataset() - if dataset_name == 'dblp': - data = CitationFull(root='./data/', name='DBLP') - data = data[0] - - if dataset_name == 'dblp': - g = convert_pyg_to_dgl(data) - else: - g = data[0] - - isolated_nodes = ((g.in_degrees() == 0) & (g.out_degrees() == 0)).nonzero().squeeze(1) - g.remove_nodes(isolated_nodes) - - if dataset_name in ['cora', 'citeseer', 'pubmed', 'reddit', 'flickr']: - features = g.ndata['feat'] - labels = g.ndata['label'] - train_mask = g.ndata['train_mask'] - test_mask = g.ndata['test_mask'] - num_nodes = g.num_nodes() - elif 
dataset_name in ['wiki']: - features = g.ndata['feat'] - labels = g.ndata['label'] - test_mask = g.ndata['test_mask'].bool() - train_mask = (1 - g.ndata['test_mask']).bool() # - num_nodes = g.num_nodes() - elif dataset_name in ['amazoncomputer', 'amazonphoto', 'coauthorCS', 'coauthorphysics', 'cora_full', 'dblp']: - features = g.ndata['feat'] - labels = g.ndata['label'] - num_nodes = g.num_nodes() - train_mask = torch.zeros(num_nodes, dtype=torch.bool) - test_mask = torch.zeros(num_nodes, dtype=torch.bool) - - torch.manual_seed(42) - indices = torch.randperm(num_nodes) - num_train = int(num_nodes * 0.6) - train_mask[indices[:num_train]] = True - test_mask[indices[num_train:]] = True - assert train_mask.sum() + test_mask.sum() == num_nodes - elif dataset_name in ['amazonrating', 'question', 'roman']: - features = g.ndata['feat'] - labels = g.ndata['label'] - num_nodes = g.num_nodes() - train_mask = g.ndata['train_mask'][:, 0] - test_mask = g.ndata['test_mask'][:, 0] - return g, features, labels, num_nodes, train_mask, test_mask - - -def evaluate(model, g, features, labels, mask): - model.eval() - with torch.no_grad(): - logits = model(g, features) - logits = logits[mask] - labels = labels[mask] - _, indices = torch.max(logits, dim=1) - correct = torch.sum(indices == labels) - f1score = f1_score(labels.cpu().numpy(), indices.cpu().numpy(), average='macro') - return correct.item() * 1.0 / len(labels), f1score - - -class GcnNet(nn.Module): - def __init__(self, feature_number, label_number): - super(GcnNet, self).__init__() - self.layers = nn.ModuleList() - self.layers.append(GraphConv(feature_number, 16, activation=F.relu)) - self.layers.append(GraphConv(16, label_number)) - self.dropout = nn.Dropout(p=0.5) - - def forward(self, g, features): - x = F.relu(self.layers[0](g, features)) - x = self.layers[1](g, x) - return x - - -# Initialization -def init_mask(C, sub_train_mask, sub_labels): - # print(f"=========Initialization with {2 * C} 
Nodes==========================") - initial_set = [] - for label in range(C): - label_nodes = [] - for i, l in enumerate(sub_labels): - if sub_train_mask[i] == True and l == label: - label_nodes.append(i) - selected_nodes = random.sample(label_nodes, k=2) # initial pool for each class - initial_set.extend(selected_nodes) - - # print(initial_set) - return initial_set - - # node pool - ## center_rank = rank_centrality(sub_g, sub_train_mask, sub_train_init, num_center, return_rank=True) - ## selected_indices_center = center_rank[:num_center] - ## sub_train_init[selected_indices_center] = True - # Randomly select the rest of the initial nodes - ## full_true_indices = th.nonzero(sub_train_mask & ~sub_train_init).squeeze() - ## selected_indices_random = random.sample(full_true_indices.tolist(), num_random) - ## sub_train_init[selected_indices_random] = True - - # Transform the formality and return the outcome; note the output are indicators - # sub_train_init = th.zeros(len(sub_train_mask), dtype=th.bool) - # sub_train_init[initial_set] = True - # print(sub_labels[initial_set]) - # sub_train_init = th.tensor(initial_set) - # return sub_train_init - - -def update_sub_train_mask(num_each, sub_train_mask, sub_train_mask_new): - full_true_indices = th.nonzero(sub_train_mask).squeeze() - current_true_indices = th.nonzero(sub_train_mask_new).squeeze() - missing_indices = set(full_true_indices.tolist()) - set(current_true_indices.tolist()) - if len(missing_indices) >= num_each: - # print(f"=========Update Random Querying Label with {num_each} Nodes==========================") - selected_indices = random.sample(list(missing_indices), num_each) - ## sub_train_mask_new[selected_indices] = True - - return selected_indices - - -# Calculate the entropy -def calculate_entropy(probs): - return -th.sum(probs * th.log(probs + 1e-9), dim=-1) - - -def rank_entropy(net, sub_g, sub_features, sub_train_mask, sub_train_mask_new, - num_each, return_rank=True): - logits = net(sub_g, 
sub_features) - prob = F.softmax(logits, dim=-1) - nodes_interest = th.nonzero(sub_train_mask & ~sub_train_mask_new).squeeze() - probs_interest = prob[nodes_interest] - entropy_interest = calculate_entropy(probs_interest) - nodes_rank = nodes_interest[th.argsort(entropy_interest, descending=True)] - if len(nodes_rank) >= num_each: - if return_rank: - return nodes_rank - else: - print(f"=========Update Entropy Querying Label with {num_each} Nodes==========================") - # selected_indices = random.sample(list(missing_indices), num_each) - selected_indices = nodes_rank[:num_each] - sub_train_mask_new[selected_indices] = True - return sub_train_mask_new - - -def rank_density(net, sub_g, sub_features, sub_train_mask, sub_train_mask_new, - num_each, num_clusters, return_rank=True): - full_true_indices = th.nonzero(sub_train_mask).squeeze() - current_true_indices = th.nonzero(sub_train_mask_new).squeeze() - missing_indices = set(full_true_indices.tolist()) - set(current_true_indices.tolist()) - ## Get the embeddings that we need - ## Under numpy formality - embedding_all = net(sub_g, sub_features, return_hidden=True).detach().numpy() - kmeans = KMeans(n_clusters=num_clusters) - kmeans.fit(embedding_all) - ## Set up cluster_centers - cluster_centers = kmeans.cluster_centers_ - - # Calculate the Euclidean distance - dist = np.linalg.norm(embedding_all - cluster_centers[kmeans.labels_], axis=1) - density_scores = th.from_numpy(1 / (1 + dist)) - - # pull back to the node coefficients - list_missing_indices = torch.tensor(list(missing_indices)) - shuffle_order = th.argsort(density_scores, descending=True) - positions = [th.where(shuffle_order == temp)[0].item() for temp in list_missing_indices] - sorted_positions = th.argsort(th.tensor(positions)) - list_output = list_missing_indices[sorted_positions] - - if len(list_output) >= num_each: - if return_rank: - return list_output - else: - print(f"=========Update Entropy Querying Label with {num_each} 
Nodes==========================") - # selected_indices = random.sample(list(missing_indices), num_each) - selected_indices = list_output[:num_each] - sub_train_mask_new[selected_indices] = True - return sub_train_mask_new - - -def rank_centrality(sub_g, sub_train_mask, - sub_train_mask_new, num_each, return_rank=True): - nodes_interest = th.nonzero(sub_train_mask & ~sub_train_mask_new).squeeze() - page_rank_score = page_rank(sub_g)[nodes_interest] - nodes_centrality = nodes_interest[th.argsort(page_rank_score, descending=True)] - - if len(nodes_centrality) >= num_each: - if return_rank: - return nodes_centrality - else: - print(f"=========Update Entropy Querying Label with {num_each} Nodes==========================") - # selected_indices = random.sample(list(missing_indices), num_each) - selected_indices = nodes_centrality[:num_each] - sub_train_mask_new[selected_indices] = True - return sub_train_mask_new - - -# Hand-written pagerank score -def page_rank(graph, damping_factor=0.85, max_iter=100, tol=1e-8): - num_nodes = graph.number_of_nodes() - - # Initialize the PageRank score for all nodes to be uniform - pagerank_scores = torch.ones(num_nodes) / num_nodes - graph.ndata['pagerank'] = pagerank_scores - - # Degree normalization factor - # with graph.local_scope(): - graph.ndata['deg'] = graph.out_degrees().float().clamp(min=1) # Avoid dividing by 0 - - for _ in range(max_iter): - # Perform message passing (send normalized pagerank score) - # print("Iteration ", _) - prev_scores = pagerank_scores.clone() - graph.ndata['h'] = pagerank_scores / graph.ndata['deg'] - graph.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h_new')) - # Apply PageRank formula - pagerank_scores = damping_factor * graph.ndata['h_new'] + (1 - damping_factor) / num_nodes - # pagerank_scores_new = (1 - damping_factor) / num_nodes + damping_factor * graph.ndata['pagerank_sum'] / \ - # graph.ndata['deg'] - - # Check for convergence - delta = torch.abs(pagerank_scores - prev_scores).sum().item() - 
if delta < tol: - break - # Update pagerank scores - graph.ndata['pagerank'] = pagerank_scores - - return graph.ndata['pagerank'] - - -# ECE and Perturbation -def perturb_features(sub_features, noise_level=0.05): - noise = th.randn_like(sub_features) * noise_level - perturbed_features = sub_features + noise - return perturbed_features - - -# Take the perturbation and count the average -def perturb_avg(net, sub_g, sub_features, num_perturbations, noise_level): - original_logits = net(sub_g, sub_features) - # Number of classes - num_classes = original_logits.size(-1) - # Initialization - cumulative_probs = th.zeros(sub_features.size(0), num_classes, - device=original_logits.device) - # Perturbation - for _ in range(num_perturbations): - features_p = perturb_features(sub_features, noise_level=noise_level) - logits_p = net(sub_g, features_p) - probs_p = F.softmax(logits_p, dim=-1) - cumulative_probs += probs_p - # get a fair estimation for the distribution on existing label - avg_probs = cumulative_probs / num_perturbations - - return avg_probs - - -# Try the traditional way: count the number of perturbed labels for each node -def rank_perturb(net, sub_g, sub_features, num_perturbations, - sub_train_mask, sub_train_mask_new, noise_level, - num_each, return_rank=True): - original_logits = net(sub_g, sub_features) - nodes_interest = th.nonzero(sub_train_mask & ~sub_train_mask_new).squeeze() - original_pred = th.argmax(original_logits[nodes_interest], dim=-1) - ## Store the outcome - # unchanged_counts = th.zeros_like(original_pred, dtype = th.float) - unchanged_counts = th.zeros_like(original_pred) - # Perturbation - for _ in range(num_perturbations): - features_p = perturb_features(sub_features, noise_level=noise_level) - logits_p = net(sub_g, features_p) - labels_p = th.argmax(logits_p[nodes_interest], dim=-1) - unchanged = labels_p.eq(original_pred) - unchanged_counts += unchanged.int() - - # unchanged_counts_float = unchanged_counts.float() - # 
unchanged_counts_float.mean() - _, change_indices = torch.sort(unchanged_counts) - nodes_rank_label = nodes_interest[change_indices] - - if len(nodes_rank_label) >= num_each: - if return_rank: - return nodes_rank_label - else: - print(f"=========Update Perturbation Querying Label with {num_each} Nodes==========================") - # selected_indices = random.sample(list(missing_indices), num_each) - selected_indices = nodes_rank_label[:num_each] - sub_train_mask_new[selected_indices] = True - return sub_train_mask_new - - -# Consider items in the embedding space -def rank_cluster(net, sub_g, sub_features, labels, total_sub_nodes, - sub_train_mask, sub_train_mask_new, num_clusters, - num_each, return_rank=True): - # Work on missing indices - full_true_indices = th.nonzero(sub_train_mask).squeeze() - current_true_indices = th.nonzero(sub_train_mask_new).squeeze() - missing_indices = set(full_true_indices.tolist()) - set(current_true_indices.tolist()) - # Work on prep of embedding - labels_true = labels[total_sub_nodes] - logits = net(sub_g, sub_features) - prob = F.softmax(logits, dim=-1) - labels_pred = th.argmax(prob, dim=-1) - embedding_all = net(sub_g, sub_features, return_hidden=True) - mismatches_queried = (labels_true != labels_pred) & sub_train_mask_new - selected_embeddings = embedding_all[mismatches_queried].detach().numpy() - # Try kmeans - num_clusters_used = min(num_clusters, th.sum(mismatches_queried).item()) - # print(selected_embeddings) - print("mismatches_queried:" + str(th.sum(mismatches_queried).item())) - print("num_clusters_used:" + str(num_clusters_used)) - if num_clusters_used >= 1: - kmeans = KMeans(n_clusters=num_clusters_used, random_state=0) - kmeans.fit(selected_embeddings) - cluster_centers = th.tensor(kmeans.cluster_centers_, dtype=torch.float32) - # Get back to the original field: Try to use a separate function for remaining functions - list_missing_indices = list(missing_indices) - embedding_pool = embedding_all[list_missing_indices] 
- min_distances = find_short_dist(embedding_pool, cluster_centers) - shuffle_order = th.argsort(min_distances) - output_order = [list_missing_indices[i] for i in shuffle_order] - nodes_rank_distance = torch.tensor(output_order) - else: - print("All nodes give the same label.") - nodes_rank_distance = torch.tensor(list(missing_indices)) - - if len(nodes_rank_distance) >= num_each: - if return_rank: - return nodes_rank_distance - else: - print(f"=========Update Cluster Querying Label with {num_each} Nodes==========================") - # selected_indices = random.sample(list(missing_indices), num_each) - selected_indices = nodes_rank_distance[:num_each] - sub_train_mask_new[selected_indices] = True - return sub_train_mask_new - - -# Use a separate function to write out the calculation of distance -def find_short_dist(embedding_pool, cluster_centers): - distances = torch.cdist(embedding_pool, cluster_centers) - min_distances, _ = torch.min(distances, dim=1) - return min_distances - - -# Consider Diversity; see what we can do from here. 
-def rank_diversity(net, sub_g, sub_features, sub_train_mask, sub_train_mask_new, num_each, num_clusters, rho, - return_rank=True): - full_indices = th.nonzero(sub_train_mask).squeeze() - queried_indices = th.nonzero(sub_train_mask_new).squeeze() - candidate_indices = set(full_indices.tolist()) - set(queried_indices.tolist()) - # Get the embeddings - embedding_all = net(sub_g, sub_features, return_hidden=True).detach().numpy() - embedding_queried = embedding_all[queried_indices] - kmeans = KMeans(n_clusters=num_clusters, random_state=42) - kmeans.fit(embedding_queried) - cluster_centers = kmeans.cluster_centers_ - - node_embeddings = th.tensor(embedding_all, dtype=th.float32) - centroids = th.tensor(cluster_centers, dtype=th.float32) - kmeans_labels = th.tensor(kmeans.labels_, dtype=th.int32) - - minimal_distance = th.min(th.cdist(node_embeddings, centroids, p=2), dim=1).values - proposed_labels = th.min(th.cdist(node_embeddings, centroids, p=2), dim=1).indices - - # Closeness Scores (Distance to assigned centroid) - close_temp = 1 / (1 + minimal_distance) - close_normalized = (close_temp - close_temp.min()) / (close_temp.max() - close_temp.min() + 1e-10) - - # Rarity Scores (How rare as shown in ) - queried_bincount = th.bincount(kmeans_labels) - rarity_temp = 1 / (1 + queried_bincount[proposed_labels]) - rarity_normalized = (rarity_temp - rarity_temp.min()) / (rarity_temp.max() - rarity_temp.min() + 1e-10) - - # Assemble the scores; rho is subject to tuning - composite_scores = rho * close_normalized + (1 - rho) * rarity_normalized - composite_scores_candidate = composite_scores[list(candidate_indices)] - candidate_tensor = th.tensor(list(candidate_indices)) - nodes_rank_diversity = candidate_tensor[th.argsort(composite_scores_candidate, descending=True)] - - if len(nodes_rank_diversity) >= num_each: - if return_rank: - return nodes_rank_diversity - else: - print(f"=========Update Cluster Querying Label with {num_each} Nodes==========================") - # 
selected_indices = random.sample(list(missing_indices), num_each) - selected_indices = nodes_rank_diversity[:num_each] - sub_train_mask_new[selected_indices] = True - return sub_train_mask_new - - -def quantile_selection(A, B, C, index_1, index_2, index_3, sub_train_mask, sub_train_mask_new, num_each): - elements = th.nonzero(sub_train_mask & ~sub_train_mask_new).squeeze() - - ranks_A = [compute_rank(A, el) for el in elements] - ranks_B = [compute_rank(B, el) for el in elements] - ranks_C = [compute_rank(C, el) for el in elements] - - weighted_ranks = [] - for i in range(len(elements)): - weighted_rank = index_1 * ranks_A[i] + index_2 * ranks_B[i] + index_3 * ranks_C[i] - weighted_ranks.append(weighted_rank) - - # Sort elements based on weighted ranks - sorted_indices = np.argsort(weighted_ranks) - sorted_elements = th.stack([elements[i] for i in sorted_indices]) - # sorted_weighted_ranks = [weighted_ranks[i] for i in sorted_indices] - - # print(f"=========Update Entropy Querying Label with {num_each} Nodes==========================") - # selected_indices = random.sample(list(missing_indices), num_each) - selected_indices = sorted_elements[:num_each] - # sub_train_mask_new[selected_indices] = True - - return selected_indices - - -def compute_rank(tensor, element): - return np.where(tensor == element)[0][0] - - -class GcnNet(nn.Module): - def __init__(self, feature_number, label_number): - super(GcnNet, self).__init__() - self.layers = nn.ModuleList() - self.layers.append(GraphConv(feature_number, 16, activation=F.relu)) - self.layers.append(GraphConv(16, label_number)) - self.dropout = nn.Dropout(p=0.5) - - def forward(self, g, features, return_hidden=False): - relu = nn.ReLU() - x = F.relu(self.layers[0](g, features)) - if return_hidden: - return x - x = self.layers[1](g, x) - return x - - -## Main Function -def attack0(dataset_name, seed, cuda, attack_node_arg=0.25, file_path='', LR=1e-3, TGT_LR=1e-2, - EVAL_EPOCH=1000, TGT_EPOCH=1000, WARMUP_EPOCH=400, 
dropout=False, model_performance=True, **kwargs): - # Initialization - device = th.device(cuda) - set_seed(seed) - metrics_df = pd.DataFrame(columns=['Num Attack Nodes', 'Method', 'Test Accuracy', 'Test Fidelity']) - - g, features, labels, node_number, train_mask, test_mask = load_data(dataset_name) - attack_node_number = int(node_number * attack_node_arg) - feature_number = features.shape[1] - label_number = len(labels.unique()) - C_var = label_number - - print('The attack node number is: ', attack_node_number) - - g = g.to(device) - degs = g.in_degrees().float() - norm = th.pow(degs, -0.5) - norm[th.isinf(norm)] = 0 - if cuda != None: - norm = norm.cuda() - g.ndata['norm'] = norm.unsqueeze(1) - if dropout == True: - gcn_Net = GCN_drop(feature_number, label_number) - else: - gcn_Net = GcnNet(feature_number, label_number) - optimizer = th.optim.Adam(gcn_Net.parameters(), lr=TGT_LR, weight_decay=5e-4) - dur = [] - - ## Send the training to cuda - features = features.to(device) - gcn_Net = gcn_Net.to(device) - train_mask = train_mask.to(device) - test_mask = test_mask.to(device) - labels = labels.to(device) - target_performance = { - 'acc': 0, - 'f1score': 0 - } - - print("=========Target Model Generating==========================") - for epoch in range(TGT_EPOCH): - if epoch >= 3: - t0 = time.time() - - gcn_Net.train() - logits = gcn_Net(g, features) - logp = F.log_softmax(logits, 1) - loss = F.nll_loss(logp[train_mask], labels[train_mask]) - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - if epoch >= 3: - dur.append(time.time() - t0) - - acc, f1score = evaluate(gcn_Net, g, features, labels, test_mask) - if acc > target_performance['acc']: - target_performance['acc'] = acc - if f1score > target_performance['f1score']: - target_performance['f1score'] = f1score - - print("Epoch {:05d} | Loss {:.4f} | Test Acc {:.4f} | Test F1 macro {:.4f} | Time(s) {:.4f}".format( - epoch, loss.item(), acc, f1score, np.mean(dur))) - - ## Get the cuda-trained data back 
- g = g.cpu() - features = features.cpu() - gcn_Net = gcn_Net.cpu() - train_mask = train_mask.cpu() - test_mask = test_mask.cpu() - labels = labels.cpu() - - # Generate sub-graph index - alpha = 0.8 - sub_graph_node_index = [] - for i in range(attack_node_number): - sub_graph_node_index.append(random.randint(0, node_number - 1)) - - sub_labels = labels[sub_graph_node_index] - - syn_nodes = [] - g_matrix = np.asmatrix(g.adjacency_matrix().to_dense()) - - for node_index in sub_graph_node_index: - # get nodes - one_step_node_index = g_matrix[node_index, :].nonzero()[1].tolist() - two_step_node_index = [] - for first_order_node_index in one_step_node_index: - syn_nodes.append(first_order_node_index) - two_step_node_index = g_matrix[first_order_node_index, :].nonzero()[1].tolist() - - sub_graph_syn_node_index = list(set(syn_nodes) - set(sub_graph_node_index)) - total_sub_nodes = list(set(sub_graph_syn_node_index + sub_graph_node_index)) - - # Generate features for SubGraph attack - np_features_query = features.clone() - - for node_index in sub_graph_syn_node_index: - # initialized as zero - np_features_query[node_index] = np_features_query[node_index] * 0 - # get one step and two steps nodes - one_step_node_index = g_matrix[node_index, :].nonzero()[1].tolist() - one_step_node_index = list(set(one_step_node_index).intersection(set(sub_graph_node_index))) - - total_two_step_node_index = [] - num_one_step = len(one_step_node_index) - for first_order_node_index in one_step_node_index: - # caculate the feature: features = 0.8 * average_one + 0.8^2 * average_two - # new_array = features[first_order_node_index]*0.8/num_one_step - this_node_degree = len(g_matrix[first_order_node_index, :].nonzero()[1].tolist()) - np_features_query[node_index] = torch.from_numpy(np.sum( - [np_features_query[node_index], - features[first_order_node_index] * alpha / math.sqrt(num_one_step * this_node_degree)], - axis=0)) - - two_step_node_index = g_matrix[first_order_node_index, 
:].nonzero()[1].tolist() - total_two_step_node_index = list( - set(total_two_step_node_index + two_step_node_index) - set(one_step_node_index)) - total_two_step_node_index = list(set(total_two_step_node_index).intersection(set(sub_graph_node_index))) - - num_two_step = len(total_two_step_node_index) - for second_order_node_index in total_two_step_node_index: - - # caculate the feature: features = 0.8 * average_one + 0.8^2 * average_two - this_node_second_step_nodes = [] - this_node_first_step_nodes = g_matrix[second_order_node_index, :].nonzero()[1].tolist() - for nodes_in_this_node in this_node_first_step_nodes: - this_node_second_step_nodes = list( - set(this_node_second_step_nodes + g_matrix[nodes_in_this_node, :].nonzero()[1].tolist())) - this_node_second_step_nodes = list(set(this_node_second_step_nodes) - set(this_node_first_step_nodes)) - - this_node_second_degree = len(this_node_second_step_nodes) - np_features_query[node_index] = torch.from_numpy(np.sum( - [np_features_query[node_index], - features[second_order_node_index] * (1 - alpha) / math.sqrt(num_two_step * this_node_second_degree)], - axis=0)) - - features_query = th.FloatTensor(np_features_query) - - # generate sub-graph adj-matrix, features, labels - total_sub_nodes = list(set(sub_graph_syn_node_index + sub_graph_node_index)) - sub_g = np.zeros((len(total_sub_nodes), len(total_sub_nodes))) - for sub_index in range(len(total_sub_nodes)): - sub_g[sub_index] = g_matrix[total_sub_nodes[sub_index], total_sub_nodes] - - for i in range(node_number): - if i in sub_graph_node_index: - test_mask[i] = 0 - train_mask[i] = 1 - continue - if i in sub_graph_syn_node_index: - test_mask[i] = 1 - train_mask[i] = 0 - else: - test_mask[i] = 1 - train_mask[i] = 0 - - sub_train_mask = train_mask[total_sub_nodes] - - sub_features = features_query[total_sub_nodes] - sub_labels = labels[total_sub_nodes] - - sub_features = th.FloatTensor(sub_features) - sub_labels = th.LongTensor(sub_labels) - sub_train_mask = 
sub_train_mask - sub_test_mask = test_mask - # sub_g = DGLGraph(nx.from_numpy_matrix(sub_g)) - - # features = th.FloatTensor(data.features) - # labels = th.LongTensor(data.labels) - # train_mask = th.ByteTensor(data.train_mask) - # test_mask = th.ByteTensor(data.test_mask) - # g = DGLGraph(data.graph) - - gcn_Net.eval() - - # =================Generate Label=================================================== - logits_query = gcn_Net(g, features) - _, labels_query = th.max(logits_query, dim=1) - - sub_labels_query = labels_query[total_sub_nodes] - sub_g = nx.from_numpy_array(sub_g) - - sub_g.remove_edges_from(nx.selfloop_edges(sub_g)) - sub_g.add_edges_from(zip(sub_g.nodes(), sub_g.nodes())) - - sub_g = dgl.from_networkx(sub_g) # sub_g = DGLGraph(sub_g) - n_edges = sub_g.number_of_edges() - # normalization - degs = sub_g.in_degrees().float() - norm = th.pow(degs, -0.5) - norm[th.isinf(norm)] = 0 - - sub_g.ndata['norm'] = norm.unsqueeze(1) - - print("=========Model Extracting==========================") - - # hyperparameters get from kwargs - # no need to change these default for now - num_perturbations = kwargs.get('num_perturbations', 100) - noise_level = kwargs.get('noise_level', 0.05) - rho = kwargs.get('rho', 0.8) - num_each = kwargs.get('num_each', 1) - epochs_per_cycle = kwargs.get('epochs_per_cycle', 1) - setup = kwargs.get('setup', "experiment") - # This need to be relatively bigger to allow for more accurate classification - if_warmup = kwargs.get('if_warmup', False) - LR_CEGA = kwargs.get('LR_CEGA', 1e-2) - # Tuning parameters for adaptive weight in each of the CEGA iteration - # Default works for cora and amazonphoto and coauthorCS - # Need specific modification for citeseer and pubmed - curve = kwargs.get('curve', 0.3) - init_1 = kwargs.get('init_1', 0.2) - init_2 = kwargs.get('init_2', 0.2) - init_3 = kwargs.get('init_3', 0.2) - gap = kwargs.get('gap', 0.6) - - # Derivative parameters - num_node = sub_features.shape[0] - total_epochs = epochs_per_cycle * 
18 * C_var - total_num = 20 * C_var - num_cycles = total_epochs // epochs_per_cycle - - # Set up adaptive weights: set the numbers then reweight them - # For citeseer, try k = 0.5, init_1 = 0.3. The other parameters seem to be working fine - cycles = np.linspace(0, 1, num_cycles) - index_1 = init_1 + gap * np.exp(-1 * curve * cycles) - index_2 = init_2 + gap * (1 - np.exp(-1 * curve * cycles)) - index_3 = init_3 * (1 - np.exp(-1 * cycles)) - total = index_1 + index_2 + index_3 - index_1 /= total - index_2 /= total - index_3 /= total - - # Set up output data formality - # data_output = pd.DataFrame(columns=['Num Attack Nodes', 'Method', 'Test Accuracy', 'Test Fidelity']) - - # create GCN model - max_acc1 = 0 - max_acc2 = 0 - max_f1 = 0 - dur = [] - - if dropout == True: - net = GCN_drop(feature_number, label_number) - else: - net = GcnNet(feature_number, label_number) - optimizer = th.optim.Adam(net.parameters(), lr=LR_CEGA, weight_decay=5e-4) - - ## Set up initial set which is iteratively progressive - train_inits = init_mask(C_var, sub_train_mask, sub_labels) - train_inits_tensor = th.tensor(train_inits) - sub_train_mask_new = th.zeros(len(sub_train_mask), dtype=th.bool) - sub_train_mask_new[train_inits] = True - - ## Record the initial nodes in torch object - nodes_queried = th.tensor([], dtype=th.long) - nodes_queried = th.cat((nodes_queried, train_inits_tensor)) - - ## Do warm up if that is ever an option - if if_warmup == True: - sub_train_mask_warmup = th.zeros(len(sub_train_mask), dtype=th.bool) - sub_train_mask_warmup[train_inits] = True - net.train() - - for epoch in range(WARMUP_EPOCH): - logits = net(sub_g, sub_features) - logp = F.log_softmax(logits, dim=1) - - loss = F.nll_loss(logp[sub_train_mask_warmup], sub_labels_query[sub_train_mask_warmup]) - - optimizer.zero_grad() - loss.backward() - optimizer.step() - acc, f1score = evaluate(net, g, features, labels, test_mask) - print("Epoch {:05d} | Loss {:.4f} | Test Acc {:.4f} | Test F1 score 
{:.4f}".format( - epoch + 1, loss.item(), acc, f1score)) - - net.eval() - - ## Now start timing when the real cycles begin - start_time = time.time() - log_dir = f"{file_path}/timelogs/{dataset_name}/logtime_cega_{seed}" - os.makedirs(os.path.dirname(log_dir), exist_ok=True) - - # Learn a node in each cycle - for cycle in range(10): - # print(f"=========Cycle {cycle + 1}==========================") - # print(f"========={int(sub_train_mask_new.sum())} Selected Nodes==========================") - - # Train some epochs: - net.train() - - for epoch in range(epochs_per_cycle): - logits = net(sub_g, sub_features) - - ## Need to get new sub_train_mask - logp = F.log_softmax(logits, dim=1) - loss = F.nll_loss(logp[sub_train_mask_new], sub_labels_query[sub_train_mask_new]) - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - # if epoch >= 3: - # dur.append(time.time() - t0) - # dur.append(time.time() - t0) - - acc1, _ = evaluate(net, g, features, labels_query, test_mask) - acc2, f1score = evaluate(net, g, features, labels, test_mask) - if acc1 > max_acc1: - max_acc1 = acc1 - if acc2 > max_acc2: - max_acc2 = acc2 - if f1score > max_f1: - max_f1 = f1score - # Add f1 in output - print( - "Cycle {:05d} | Epoch {:05d} | Loss {:.4f} | Test Acc {:.4f} | Test Fid {:.4f} | Test F1score {:.4f} ".format( - cycle + 1, epoch + 1 + cycle * epochs_per_cycle, loss.item(), acc2, acc1, max_f1)) - - net.eval() - - ## Not realized here! 
- # new_row = {"Epoch": epoch + 1 + cycle * epochs_per_cycle, "Loss": loss.item(), "Accuracy": acc2, "Fidelity": acc1} - # data_output = data_output.append(new_row, ignore_index = True) - # data_output.append(new_row) - - # Update the sub_train_mask using your specially-designed algorithm - if sub_train_mask_new.sum() < total_num: - # Random - if setup == "random": - print("Setup: Random") - # Add the entry to the node pool nodes_queried on the supposed order - node_queried = update_sub_train_mask(num_each, sub_train_mask, sub_train_mask_new) - node_queried_tensor = th.tensor(node_queried) - # node_queried_tensor = th.tensor(node_queried, dtype = th.long) - nodes_queried = th.cat((nodes_queried, node_queried_tensor)) - sub_train_mask_new[node_queried] = True - - elif setup == "experiment": - print("Setup: Experiment") - ## First: Representativeness - ## Can be replaced by other centrality measurement - Rank1 = rank_centrality(sub_g, sub_train_mask, sub_train_mask_new, num_each, return_rank=True) - ## Second: Uncertainty - Rank2 = rank_entropy(net, sub_g, sub_features, sub_train_mask, sub_train_mask_new, - num_each, return_rank=True) - ## Third: Diversity - Rank3 = rank_diversity(net, sub_g, sub_features, sub_train_mask, sub_train_mask_new, - num_each, C_var, rho, return_rank=True) - - if Rank1 is None: - print("Completed!") - selected_indices = quantile_selection(Rank1, Rank2, Rank3, index_1[cycle], index_2[cycle], - index_3[cycle], - sub_train_mask, sub_train_mask_new, num_each) - selected_indices_tensor = selected_indices.clone().detach() - # th.tensor(, dtype = th.long) - nodes_queried = th.cat((nodes_queried, selected_indices_tensor)) - sub_train_mask_new[selected_indices] = True - - elif setup == "perturbation": - print("Setup: Experiment with Perturbation") - Rank1 = rank_centrality(sub_g, sub_train_mask, sub_train_mask_new, num_each, return_rank=True) - Rank2 = rank_perturb(net, sub_g, sub_features, num_perturbations, - sub_train_mask, sub_train_mask_new, 
noise_level, - num_each, return_rank=True) - Rank3 = rank_diversity(net, sub_g, sub_features, sub_train_mask, sub_train_mask_new, - num_each, C_var, rho, return_rank=True) - - if Rank1 is None: - print("Completed!") - selected_indices = quantile_selection(Rank1, Rank2, Rank3, index_1[cycle], index_2[cycle], - index_3[cycle], - sub_train_mask, sub_train_mask_new, num_each) - selected_indices_tensor = selected_indices.clone().detach() - nodes_queried = th.cat((nodes_queried, selected_indices_tensor)) - sub_train_mask_new[selected_indices] = True - else: - print("Wrong Setup!") - return 1 - else: - print("Move on with designated nodes!") - sub_train_mask_new = sub_train_mask_new - - ## Record time for all these cycles when the loop is complete - node_selection_time = time.time() - start_time - with open(log_dir, 'a') as log_file: - log_file.write(f"CEGA {dataset_name} {seed} ") - log_file.write(f"{node_selection_time:.4f}s\n") - - idx_train = nodes_queried.tolist() - - output_data = { - 'total_sub_nodes': total_sub_nodes, - 'idx_train': idx_train - } - - ## Assertation and printing - assert len(idx_train) == 20 * C_var - print('node selection finished') - with open(f'./node_selection/CEGA_{setup}_{dataset_name}_selected_nodes_{(20 * label_number)}_{seed}.json', - 'w') as f: - json.dump(output_data, f) - - sub_g = sub_g.to(device) - sub_features = sub_features.to(device) - sub_labels_query = sub_labels_query.to(device) - labels_query = labels_query.to(device) - g = g.to(device) - features = features.to(device) - test_mask = test_mask.to(device) - labels = labels.to(device) - - print('=========Model Evaluation==========================') - if model_performance: - for iter in range(2 * C_var, 21 * C_var, C_var): - set_seed(seed) - - ## Create net from scratch - if dropout == True: - net_scratch = GCN_drop(feature_number, label_number) - else: - net_scratch = GcnNet(feature_number, label_number) - optimizer = th.optim.Adam(net_scratch.parameters(), lr=LR, 
weight_decay=5e-4) - - ## set up training nodes and send them to device - sub_train_scratch = th.zeros(sub_features.size()[0], dtype=th.bool) - sub_train_scratch[idx_train[:iter]] = True - sub_train_scratch = sub_train_scratch.to(device) - net_scratch = net_scratch.to(device) - - ## Reset data - max_acc1 = 0 - max_acc2 = 0 - max_f1 = 0 - dur = [] - - for epoch in range(EVAL_EPOCH): - if epoch >= 3: - t0 = time.time() - - net_scratch.train() - logits = net_scratch(sub_g, sub_features) - logp = F.log_softmax(logits, dim=1) - loss = F.nll_loss(logp[sub_train_scratch], sub_labels_query[sub_train_scratch]) - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - if epoch >= 3: - dur.append(time.time() - t0) - - acc1, _ = evaluate(net_scratch, g, features, labels_query, test_mask) - acc2, f1score = evaluate(net_scratch, g, features, labels, test_mask) - if acc1 > max_acc1: - max_acc1 = acc1 - if acc2 > max_acc2: - max_acc2 = acc2 - if f1score > max_f1: - max_f1 = f1score - - # Output Epoch Scores - epoch_metrics = pd.DataFrame({ - 'Num Attack Nodes': [iter], - 'Method': ['CEGA'], - 'Test Accuracy': [max_acc2], - 'Test Fidelity': [max_acc1], - 'Test F1score': [max_f1], - }) - metrics_df = pd.concat([metrics_df, epoch_metrics], ignore_index=True) - - print("Test Acc {:.4f} | Test Fid {:.4f} | Test F1score {:.4f} | Time(s) {:.4f}".format( - acc2, acc1, max_f1, np.mean(dur))) - - ## Should this be 'f1score'? 
- epoch_metrics = pd.DataFrame({ - 'Num Attack Nodes': [int(th.sum(train_mask))], - 'Method': ['CEGA'], - 'Test Accuracy': [target_performance['acc']], - 'Test Fidelity': [1], - 'Test F1score': [target_performance['f1score']], - }) - metrics_df = pd.concat([metrics_df, epoch_metrics], ignore_index=True) - - log_file_path = f"{file_path}/{dataset_name}/log_cega_{seed}.csv" - metrics_df.to_csv(log_file_path, mode='w', header=False, index=False) - - # Set net_full for the next graph to be taken care of, which is expected to include all nodes - if True: - set_seed(seed) - log_file_path = f"{file_path}/{dataset_name}/log_cega_{seed}.csv" - if dropout == True: - net_full = GCN_drop(feature_number, label_number) - else: - net_full = GcnNet(feature_number, label_number) - optimizer_full = th.optim.Adam(net_full.parameters(), lr=LR, weight_decay=5e-4) - - net_full = net_full.to(device) - net = net.to(device) - - perfm_attack = { - 'acc': 0, - 'fid': 0, - 'f1score': 0 - } - - print('========================== Model Evaluation ==========================') - progress_bar = tqdm(range(EVAL_EPOCH), desc="Generating model with ALL attack nodes", ncols=100) - for epoch in progress_bar: - if epoch >= 3: - t0 = time.time() - - net_full.train() - logits = net_full(sub_g, sub_features) - logp = F.log_softmax(logits, 1) - loss = F.nll_loss(logp, sub_labels_query) # [sub_train_mask] - - optimizer_full.zero_grad() - loss.backward() - optimizer_full.step() - - if epoch >= 3: - dur.append(time.time() - t0) - - acc, f1score = evaluate(net_full, g, features, labels, test_mask) - fid, _ = evaluate(net_full, g, features, labels_query, test_mask) - if acc > perfm_attack['acc']: - perfm_attack['acc'] = acc - if fid > perfm_attack['fid']: - perfm_attack['fid'] = fid - if f1score > perfm_attack['f1score']: - perfm_attack['f1score'] = f1score - - progress_bar.set_postfix({ - "Loss": f"{loss.item():.4f}", - "Test Acc": f"{acc:.4f}", - "Test F1": f"{f1score:.4f}", - # "Processed %": f"{(epoch + 1) / 
TGT_EPOCH * 100:.2f}", - # "Time(s)": f"{np.mean(dur) if dur else 0:.4f}" - }) - epoch_metrics = pd.DataFrame({ - 'Num Attack Nodes': [sub_train_mask.sum().item()], - 'Method': ['cega'], - 'Test Accuracy': [perfm_attack['acc']], - 'Test Fidelity': [perfm_attack['fid']], - 'Test F1score': [perfm_attack['f1score']], - }) - metrics_df = pd.concat([metrics_df, epoch_metrics], ignore_index=True) - log_file_path = f"{file_path}/{dataset_name}/log_cega_{seed}.csv" - metrics_df.to_csv(log_file_path, mode='w', header=False, index=False) - - -from pygip.models.attack.base import BaseAttack -from pygip.datasets import Dataset -from pygip.utils.metrics import AttackMetric, AttackCompMetric - -class CEGA(BaseAttack): - supported_api_types = {"dgl"} - - # ====== only signature and stored params are changed here ====== - def __init__( - self, - dataset: Dataset, - attack_node_fraction: float, - model_path: str = None, - attack_x_ratio: float = 1.0, - attack_a_ratio: float = 1.0, - ): - super(CEGA, self).__init__(dataset, attack_node_fraction, model_path) - # graph data - self.dataset = dataset - self.graph = dataset.graph_data.to(self.device) - self.features = dataset.graph_data.ndata['feat'] - self.labels = dataset.graph_data.ndata['label'] - self.train_mask = dataset.graph_data.ndata['train_mask'] - self.test_mask = dataset.graph_data.ndata['test_mask'] - # meta data - self.node_number = dataset.num_nodes - self.feature_number = dataset.num_features - self.label_number = dataset.num_classes - self.attack_node_number = int(dataset.num_nodes * attack_node_fraction) - self.attack_node_fraction = attack_node_fraction - # new visibility knobs for inputs (kept for consistency across attacks) - self.attack_x_ratio = float(attack_x_ratio) - self.attack_a_ratio = float(attack_a_ratio) - - def attack( - self, - seed=1, - cuda=None, - LR=1e-3, - TGT_LR=1e-2, - EVAL_EPOCH=10, - TGT_EPOCH=10, - WARMUP_EPOCH=4, - dropout=False, - model_performance=True, - **kwargs - ): - """ - Returns - 
------- - perf_json : dict - Performance metrics (JSON-serialisable): accuracy/fidelity/F1 of the surrogate, - and optionally target accuracy/F1 for reference. - comp_json : dict - Computation metrics (JSON-serialisable): attack_time, query_target_time, train_surrogate_time, etc. - """ - # ===== metrics collection (computation) ===== - attack_time_start = time.time() - query_target_time = 0.0 - train_surrogate_time = 0.0 - - # Initialization - set_seed(seed) - metrics_df = pd.DataFrame(columns=['Num Attack Nodes', 'Method', 'Test Accuracy', 'Test Fidelity']) - - # data handles - g = self.graph - features = self.features - labels = self.labels - node_number = self.node_number - train_mask = self.train_mask - test_mask = self.test_mask - - attack_node_arg = self.attack_node_fraction - attack_node_number = int(node_number * attack_node_arg) - feature_number = features.shape[1] - label_number = len(labels.unique()) - C_var = label_number - - print('The attack node number is: ', attack_node_number) - - g = g.to(self.device) - degs = g.in_degrees().float() - norm = th.pow(degs, -0.5) - norm[th.isinf(norm)] = 0 - if cuda is not None: - norm = norm.cuda() - g.ndata['norm'] = norm.unsqueeze(1) - if dropout: - gcn_Net = GCN_drop(feature_number, label_number) - else: - gcn_Net = GcnNet(feature_number, label_number) - optimizer = th.optim.Adam(gcn_Net.parameters(), lr=TGT_LR, weight_decay=5e-4) - dur = [] - - # Send the training to device - features = features.to(self.device) - gcn_Net = gcn_Net.to(self.device) - train_mask = train_mask.to(self.device) - test_mask = test_mask.to(self.device) - labels = labels.to(self.device) - target_performance = {'acc': 0, 'f1score': 0} - - print("=========Target Model Generating==========================") - # train target model - tgt_train_t0 = time.time() - for epoch in range(TGT_EPOCH): - if epoch >= 3: - t0 = time.time() - gcn_Net.train() - logits = gcn_Net(g, features) - logp = F.log_softmax(logits, 1) - loss = 
F.nll_loss(logp[train_mask], labels[train_mask]) - optimizer.zero_grad() - loss.backward() - optimizer.step() - if epoch >= 3: - dur.append(time.time() - t0) - acc, f1score = evaluate(gcn_Net, g, features, labels, test_mask) - target_performance['acc'] = max(target_performance['acc'], acc) - target_performance['f1score'] = max(target_performance['f1score'], f1score) - print("Epoch {:05d} | Loss {:.4f} | Test Acc {:.4f} | Test F1 macro {:.4f} | Time(s) {:.4f}".format( - epoch, loss.item(), acc, f1score, np.mean(dur))) - train_surrogate_time += (time.time() - tgt_train_t0) - - # move tensors back to cpu for subgraph building - g = g.cpu() - features = features.cpu() - gcn_Net = gcn_Net.cpu() - train_mask = train_mask.cpu() - test_mask = test_mask.cpu() - labels = labels.cpu() - - # Generate sub-graph index - alpha = 0.8 - sub_graph_node_index = [random.randint(0, node_number - 1) for _ in range(attack_node_number)] - sub_labels = labels[sub_graph_node_index] - - syn_nodes = [] - g_matrix = np.asmatrix(g.adjacency_matrix().to_dense()) - for node_index in sub_graph_node_index: - # get nodes - one_step_node_index = g_matrix[node_index, :].nonzero()[1].tolist() - two_step_node_index = [] - for first_order_node_index in one_step_node_index: - syn_nodes.append(first_order_node_index) - two_step_node_index = g_matrix[first_order_node_index, :].nonzero()[1].tolist() - - sub_graph_syn_node_index = list(set(syn_nodes) - set(sub_graph_node_index)) - total_sub_nodes = list(set(sub_graph_syn_node_index + sub_graph_node_index)) - - # Generate features for SubGraph attack - np_features_query = features.clone() - for node_index in sub_graph_syn_node_index: - # initialized as zero - np_features_query[node_index] = np_features_query[node_index] * 0 - # get one step and two steps nodes - one_step_node_index = g_matrix[node_index, :].nonzero()[1].tolist() - one_step_node_index = list(set(one_step_node_index).intersection(set(sub_graph_node_index))) - - total_two_step_node_index = [] - 
num_one_step = len(one_step_node_index) - for first_order_node_index in one_step_node_index: - # features = 0.8 * average_one / sqrt(num_one_step * deg) - this_node_degree = len(g_matrix[first_order_node_index, :].nonzero()[1].tolist()) - x1 = np_features_query[node_index] - x2 = features[first_order_node_index] * alpha / math.sqrt(max(1, num_one_step * this_node_degree)) - np_features_query[node_index] = x1 + x2 - - two_step_node_index = g_matrix[first_order_node_index, :].nonzero()[1].tolist() - total_two_step_node_index = list( - set(total_two_step_node_index + two_step_node_index) - set(one_step_node_index) - ) - total_two_step_node_index = list(set(total_two_step_node_index).intersection(set(sub_graph_node_index))) - - num_two_step = len(total_two_step_node_index) - for second_order_node_index in total_two_step_node_index: - this_node_second_step_nodes = [] - this_node_first_step_nodes = g_matrix[second_order_node_index, :].nonzero()[1].tolist() - for nodes_in_this_node in this_node_first_step_nodes: - this_node_second_step_nodes = list( - set(this_node_second_step_nodes + g_matrix[nodes_in_this_node, :].nonzero()[1].tolist()) - ) - this_node_second_step_nodes = list(set(this_node_second_step_nodes) - set(this_node_first_step_nodes)) - this_node_second_degree = len(this_node_second_step_nodes) - x1 = np_features_query[node_index] - x2 = features[first_order_node_index] * alpha / math.sqrt(max(1, num_one_step * this_node_degree)) - np_features_query[node_index] = x1 + x2 - - features_query = th.FloatTensor(np_features_query) - - # generate sub-graph adj-matrix, features, labels - total_sub_nodes = list(set(sub_graph_syn_node_index + sub_graph_node_index)) - sub_g = np.zeros((len(total_sub_nodes), len(total_sub_nodes))) - for sub_index in range(len(total_sub_nodes)): - sub_g[sub_index] = g_matrix[total_sub_nodes[sub_index], total_sub_nodes] - - for i in range(node_number): - if i in sub_graph_node_index: - test_mask[i] = 0 - train_mask[i] = 1 - continue - if i 
in sub_graph_syn_node_index: - test_mask[i] = 1 - train_mask[i] = 0 - else: - test_mask[i] = 1 - train_mask[i] = 0 - - sub_train_mask = train_mask[total_sub_nodes] - sub_features = features_query[total_sub_nodes] - sub_labels = labels[total_sub_nodes] - - sub_features = th.FloatTensor(sub_features) - sub_labels = th.LongTensor(sub_labels) - sub_train_mask = sub_train_mask - sub_test_mask = test_mask - - gcn_Net.eval() - - # =================Generate Label======================== - # timing: query the target once for labels on query set - qt0 = time.time() - logits_query = gcn_Net(g, features) - _, labels_query = th.max(logits_query, dim=1) - query_target_time += (time.time() - qt0) - - sub_labels_query = labels_query[total_sub_nodes] - sub_g = nx.from_numpy_array(sub_g) - sub_g.remove_edges_from(nx.selfloop_edges(sub_g)) - sub_g.add_edges_from(zip(sub_g.nodes(), sub_g.nodes())) - sub_g = dgl.from_networkx(sub_g) - # normalization - degs = sub_g.in_degrees().float() - norm = th.pow(degs, -0.5) - norm[th.isinf(norm)] = 0 - sub_g.ndata['norm'] = norm.unsqueeze(1) - - print("=========Model Extracting==========================") - - # hyperparameters from kwargs - num_perturbations = kwargs.get('num_perturbations', 100) - noise_level = kwargs.get('noise_level', 0.05) - rho = kwargs.get('rho', 0.8) - num_each = kwargs.get('num_each', 1) - epochs_per_cycle = kwargs.get('epochs_per_cycle', 1) - setup = kwargs.get('setup', "experiment") - if_warmup = kwargs.get('if_warmup', False) - LR_CEGA = kwargs.get('LR_CEGA', 1e-2) - curve = kwargs.get('curve', 0.3) - init_1 = kwargs.get('init_1', 0.2) - init_2 = kwargs.get('init_2', 0.2) - init_3 = kwargs.get('init_3', 0.2) - gap = kwargs.get('gap', 0.6) - - # derivative params - num_node = sub_features.shape[0] - total_epochs = epochs_per_cycle * 18 * C_var - total_num = 20 * C_var - num_cycles = total_epochs // epochs_per_cycle - - cycles = np.linspace(0, 1, num_cycles) - index_1 = init_1 + gap * np.exp(-1 * curve * cycles) - 
index_2 = init_2 + gap * (1 - np.exp(-1 * curve * cycles)) - index_3 = init_3 * (1 - np.exp(-1 * cycles)) - total = index_1 + index_2 + index_3 - index_1 /= total - index_2 /= total - index_3 /= total - - # create surrogate model - max_acc1 = 0 - max_acc2 = 0 - max_f1 = 0 - dur = [] - - net = GCN_drop(feature_number, label_number) if dropout else GcnNet(feature_number, label_number) - optimizer = th.optim.Adam(net.parameters(), lr=LR_CEGA, weight_decay=5e-4) - - # initial training set - train_inits = init_mask(C_var, sub_train_mask, sub_labels) - train_inits_tensor = th.tensor(train_inits) - sub_train_mask_new = th.zeros(len(sub_train_mask), dtype=th.bool) - sub_train_mask_new[train_inits] = True - nodes_queried = th.tensor([], dtype=th.long) - nodes_queried = th.cat((nodes_queried, train_inits_tensor)) - - # warmup - if if_warmup: - sub_train_mask_warmup = th.zeros(len(sub_train_mask), dtype=th.bool) - sub_train_mask_warmup[train_inits] = True - net.train() - warm_s = time.time() - for epoch in range(WARMUP_EPOCH): - logits = net(sub_g, sub_features) - logp = F.log_softmax(logits, dim=1) - loss = F.nll_loss(logp[sub_train_mask_warmup], sub_labels_query[sub_train_mask_warmup]) - optimizer.zero_grad() - loss.backward() - optimizer.step() - acc, f1score = evaluate(net, g, features, labels, test_mask) - print("Epoch {:05d} | Loss {:.4f} | Test Acc {:.4f} | Test F1 score {:.4f}".format( - epoch + 1, loss.item(), acc, f1score)) - train_surrogate_time += (time.time() - warm_s) - net.eval() - - # cycles - print("=========Learn a node in each cycle==========================") - cycle_train_s = time.time() - for cycle in range(num_cycles): - net.train() - for epoch in range(epochs_per_cycle): - logits = net(sub_g, sub_features) - logp = F.log_softmax(logits, dim=1) - loss = F.nll_loss(logp[sub_train_mask_new], sub_labels_query[sub_train_mask_new]) - optimizer.zero_grad() - loss.backward() - optimizer.step() - - acc1, _ = evaluate(net, g, features, labels_query, test_mask) # 
fidelity proxy - acc2, f1score = evaluate(net, g, features, labels, test_mask) - max_acc1 = max(max_acc1, acc1) - max_acc2 = max(max_acc2, acc2) - max_f1 = max(max_f1, f1score) - print( - "Cycle {:05d} | Epoch {:05d} | Loss {:.4f} | Test Acc {:.4f} | Test Fid {:.4f} | Test F1score {:.4f} ".format( - cycle + 1, epoch + 1 + cycle * epochs_per_cycle, loss.item(), acc2, acc1, max_f1)) - - net.eval() - if sub_train_mask_new.sum() < total_num: - if setup == "random": - print("Setup: Random") - node_queried = update_sub_train_mask(num_each, sub_train_mask, sub_train_mask_new) - node_queried_tensor = th.tensor(node_queried) - nodes_queried = th.cat((nodes_queried, node_queried_tensor)) - sub_train_mask_new[node_queried] = True - elif setup == "experiment": - print("Setup: Experiment") - Rank1 = rank_centrality(sub_g, sub_train_mask, sub_train_mask_new, num_each, return_rank=True) - Rank2 = rank_entropy(net, sub_g, sub_features, sub_train_mask, sub_train_mask_new, - num_each, return_rank=True) - Rank3 = rank_diversity(net, sub_g, sub_features, sub_train_mask, sub_train_mask_new, - num_each, C_var, rho, return_rank=True) - if Rank1 is None: - print("Completed!") - selected_indices = quantile_selection( - Rank1, Rank2, Rank3, index_1[cycle], index_2[cycle], index_3[cycle], - sub_train_mask, sub_train_mask_new, num_each - ) - selected_indices_tensor = selected_indices.clone().detach() - nodes_queried = th.cat((nodes_queried, selected_indices_tensor)) - sub_train_mask_new[selected_indices] = True - elif setup == "perturbation": - print("Setup: Experiment with Perturbation") - Rank1 = rank_centrality(sub_g, sub_train_mask, sub_train_mask_new, num_each, return_rank=True) - Rank2 = rank_perturb(net, sub_g, sub_features, num_perturbations, - sub_train_mask, sub_train_mask_new, noise_level, - num_each, return_rank=True) - Rank3 = rank_diversity(net, sub_g, sub_features, sub_train_mask, sub_train_mask_new, - num_each, C_var, rho, return_rank=True) - if Rank1 is None: - 
print("Completed!") - selected_indices = quantile_selection( - Rank1, Rank2, Rank3, index_1[cycle], index_2[cycle], index_3[cycle], - sub_train_mask, sub_train_mask_new, num_each - ) - selected_indices_tensor = selected_indices.clone().detach() - nodes_queried = th.cat((nodes_queried, selected_indices_tensor)) - sub_train_mask_new[selected_indices] = True - else: - print("Wrong Setup!") - return 1 - else: - print("Move on with designated nodes!") - train_surrogate_time += (time.time() - cycle_train_s) - - idx_train = nodes_queried.tolist() - output_data = {'total_sub_nodes': total_sub_nodes, 'idx_train': idx_train} - print('node selection finished') - - # move to device for evaluation/training-from-scratch - sub_g = sub_g.to(self.device) - sub_features = sub_features.to(self.device) - sub_labels_query = sub_labels_query.to(self.device) - labels_query = labels_query.to(self.device) - g = g.to(self.device) - features = features.to(self.device) - test_mask = test_mask.to(self.device) - labels = labels.to(self.device) - - print('=========Model Evaluation==========================') - if model_performance: - for iter in range(2 * C_var, 21 * C_var, C_var): - set_seed(seed) - net_scratch = GCN_drop(feature_number, label_number) if dropout else GcnNet(feature_number, label_number) - optimizer = th.optim.Adam(net_scratch.parameters(), lr=LR, weight_decay=5e-4) - sub_train_scratch = th.zeros(sub_features.size()[0], dtype=th.bool) - sub_train_scratch[idx_train[:iter]] = True - sub_train_scratch = sub_train_scratch.to(self.device) - net_scratch = net_scratch.to(self.device) - max_acc1 = 0 - max_acc2 = 0 - max_f1 = 0 - dur = [] - eval_train_s = time.time() - for epoch in range(EVAL_EPOCH): - if epoch >= 3: - t0 = time.time() - net_scratch.train() - logits = net_scratch(sub_g, sub_features) - logp = F.log_softmax(logits, dim=1) - loss = F.nll_loss(logp[sub_train_scratch], sub_labels_query[sub_train_scratch]) - optimizer.zero_grad() - loss.backward() - optimizer.step() - if 
epoch >= 3: - dur.append(time.time() - t0) - acc1, _ = evaluate(net_scratch, g, features, labels_query, test_mask) - acc2, f1score = evaluate(net_scratch, g, features, labels, test_mask) - max_acc1 = max(max_acc1, acc1) - max_acc2 = max(max_acc2, acc2) - max_f1 = max(max_f1, f1score) - train_surrogate_time += (time.time() - eval_train_s) - epoch_metrics = pd.DataFrame({ - 'Num Attack Nodes': [iter], - 'Method': ['CEGA'], - 'Test Accuracy': [max_acc2], - 'Test Fidelity': [max_acc1], - 'Test F1score': [max_f1], - }) - metrics_df = pd.concat([metrics_df, epoch_metrics], ignore_index=True) - print("Test Acc {:.4f} | Test Fid {:.4f} | Test F1score {:.4f} | Time(s) {:.4f}".format( - acc2, acc1, max_f1, np.mean(dur))) - epoch_metrics = pd.DataFrame({ - 'Num Attack Nodes': [int(th.sum(train_mask))], - 'Method': ['CEGA'], - 'Test Accuracy': [target_performance['acc']], - 'Test Fidelity': [1], - 'Test F1score': [target_performance['f1score']], - }) - metrics_df = pd.concat([metrics_df, epoch_metrics], ignore_index=True) - - # train a surrogate on all selected nodes for final report - set_seed(seed) - net_full = GCN_drop(feature_number, label_number) if dropout else GcnNet(feature_number, label_number) - optimizer_full = th.optim.Adam(net_full.parameters(), lr=LR, weight_decay=5e-4) - net_full = net_full.to(self.device) - net = net.to(self.device) - perfm_attack = {'acc': 0, 'fid': 0, 'f1score': 0} - - print('========================== Model Evaluation ==========================') - final_train_s = time.time() - progress_bar = tqdm(range(EVAL_EPOCH), desc="Generating model with ALL attack nodes", ncols=100) - for epoch in progress_bar: - if epoch >= 3: - t0 = time.time() - net_full.train() - logits = net_full(sub_g, sub_features) - logp = F.log_softmax(logits, 1) - loss = F.nll_loss(logp, sub_labels_query) - optimizer_full.zero_grad() - loss.backward() - optimizer_full.step() - if epoch >= 3: - dur.append(time.time() - t0) - - acc, f1score = evaluate(net_full, g, features, 
labels, test_mask) - fid, _ = evaluate(net_full, g, features, labels_query, test_mask) - perfm_attack['acc'] = max(perfm_attack['acc'], acc) - perfm_attack['fid'] = max(perfm_attack['fid'], fid) - perfm_attack['f1score'] = max(perfm_attack['f1score'], f1score) - progress_bar.set_postfix({ - "Loss": f"{loss.item():.4f}", - "Test Acc": f"{acc:.4f}", - "Test F1": f"{f1score:.4f}", - }) - train_surrogate_time += (time.time() - final_train_s) - - print("Test Acc {:.4f} | Test Fid {:.4f} | Test F1score {:.4f} | Time(s) {:.4f}".format( - perfm_attack['acc'], perfm_attack['fid'], perfm_attack['f1score'], np.mean(dur))) - - epoch_metrics = pd.DataFrame({ - 'Num Attack Nodes': [sub_train_mask.sum().item()], - 'Method': ['cega'], - 'Test Accuracy': [perfm_attack['acc']], - 'Test Fidelity': [perfm_attack['fid']], - 'Test F1score': [perfm_attack['f1score']], - }) - metrics_df = pd.concat([metrics_df, epoch_metrics], ignore_index=True) - - # ===== assemble JSON outputs required by the new metric API ===== - attack_total_time = time.time() - attack_time_start - - perf_json = { - "attack": "CEGA", - "num_attack_nodes": int(sub_train_mask.sum().item()), - "acc": float(perfm_attack['acc']), - "fid": float(perfm_attack['fid']), - "f1": float(perfm_attack['f1score']), - "target_acc": float(target_performance['acc']), - "target_f1": float(target_performance['f1score']), - } - comp_json = { - "device": str(self.device), - "attack_time": float(attack_total_time), - "query_target_time": float(query_target_time), - "train_surrogate_time": float(train_surrogate_time), - # optional placeholders to align with AdvMEA if present there - "inference_surrogate_time": None, - "peak_memory": None, - } - return perf_json, comp_json - diff --git a/pygip/models/attack/DataFreeMEA.py b/pygip/models/attack/DataFreeMEA.py deleted file mode 100644 index 7669d2e0..00000000 --- a/pygip/models/attack/DataFreeMEA.py +++ /dev/null @@ -1,310 +0,0 @@ -from abc import abstractmethod -import time - -import dgl 
-import networkx as nx -import torch -import torch.nn.functional as F -from tqdm import tqdm - -from pygip.models.attack.base import BaseAttack -from pygip.models.nn import GCN, GraphSAGE # Backbone architectures -from pygip.utils.metrics import AttackMetric, AttackCompMetric # Consistent with AdvMEA - - -class GraphGenerator: - def __init__(self, node_number, feature_number, label_number): - self.node_number = node_number - self.feature_number = feature_number - self.label_number = label_number - - def generate(self): - # Generate a random Erdős–Rényi graph and convert it to DGL - g_nx = nx.erdos_renyi_graph(n=self.node_number, p=0.05) - g_dgl = dgl.from_networkx(g_nx) - # Random node features - features = torch.randn((self.node_number, self.feature_number)) - return g_dgl, features - - -class DFEAAttack(BaseAttack): - supported_api_types = {"dgl"} - - # Use unified attack_x_ratio and attack_a_ratio - def __init__(self, dataset, attack_x_ratio, attack_a_ratio, model_path=None): - super().__init__(dataset, attack_x_ratio, model_path) - # Load graph data - self.graph = dataset.graph_data.to(self.device) - self.features = self.graph.ndata['feat'] - self.labels = self.graph.ndata['label'] - self.train_mask = self.graph.ndata['train_mask'] - self.val_mask = self.graph.ndata.get('val_mask', None) - self.test_mask = self.graph.ndata['test_mask'] - # Meta data - self.feature_number = dataset.num_features - self.label_number = dataset.num_classes - - # Use the maximum of the two visibility ratios as the budget; - # if both are 0, fallback to a small default value to avoid zero-size graph - ratio_budget = max(float(attack_x_ratio), float(attack_a_ratio)) - if ratio_budget <= 0.0: - ratio_budget = 0.05 - self.attack_node_number = max(1, int(dataset.num_nodes * ratio_budget)) - - # Generate synthetic graph and features for surrogate training (data-free) - self.generator = GraphGenerator( - node_number=self.attack_node_number, - feature_number=self.feature_number, - 
label_number=self.label_number - ) - self.synthetic_graph, self.synthetic_features = self.generator.generate() - self.synthetic_graph = self.synthetic_graph.to(self.device) - self.synthetic_features = self.synthetic_features.to(self.device) - - if model_path is None: - self._train_target_model() - else: - self._load_model(model_path) - - def _train_target_model(self): - # Train the victim GCN model on real data - model = GCN(self.feature_number, self.label_number).to(self.device) - optimizer = torch.optim.Adam( - model.parameters(), lr=0.01, weight_decay=5e-4 - ) - model.train() - # Dataset-specific label shaping - name = getattr(self.dataset, 'dataset_name', None) or getattr(self.dataset, 'name', None) - epochs = 200 - for _ in range(epochs): - optimizer.zero_grad() - logits = model(self.graph, self.features) - labels = self.labels.squeeze() if name == 'ogb-arxiv' else self.labels - loss = F.nll_loss( - F.log_softmax(logits[self.train_mask], dim=1), - labels[self.train_mask] - ) - loss.backward() - optimizer.step() - - model.eval() - self.model = model - - def _load_model(self, model_path): - # Load a pretrained victim model - model = GCN(self.feature_number, self.label_number) - state = torch.load(model_path, map_location=self.device) - model.load_state_dict(state) - model.eval() - self.model = model - - def _forward(self, model, graph, features): - # Forward wrapper for GCN and GraphSAGE - if isinstance(model, GraphSAGE): - # GraphSAGE expects a two-block input list - return model([graph, graph], features) - return model(graph, features) - - def _evaluate_on_real_test(self, surrogate, metric: AttackMetric, metric_comp: AttackCompMetric): - """Evaluate the surrogate on the real test set and update metrics""" - g = self.graph - x = self.features - y = self.labels - mask = self.test_mask - - # Victim inference time - t0 = time.time() - with torch.no_grad(): - logits_v = self._forward(self.model, g, x) - metric_comp.update(inference_target_time=(time.time() - t0)) - 
labels_query = logits_v.argmax(dim=1) - - # Surrogate inference time - t0 = time.time() - with torch.no_grad(): - logits_s = self._forward(surrogate, g, x) - metric_comp.update(inference_surrogate_time=(time.time() - t0)) - preds_s = logits_s.argmax(dim=1) - - # Update performance metrics: accuracy and fidelity - metric.update(preds_s[mask], y[mask], labels_query[mask]) - - @abstractmethod - def attack(self): - pass - - -class DFEATypeI(DFEAAttack): - """ - Type I: Uses victim outputs + gradients for surrogate training. - """ - - def attack(self): - metric = AttackMetric() - metric_comp = AttackCompMetric() - - attack_start = time.time() - surrogate = GCN(self.feature_number, self.label_number).to(self.device) - optimizer = torch.optim.Adam(surrogate.parameters(), lr=0.01) - - # Surrogate training time - train_surrogate_start = time.time() - # Victim query time - total_query_time = 0.0 - - for _ in tqdm(range(200)): - surrogate.train() - optimizer.zero_grad() - # Victim logits (no gradient), count query time - t_q = time.time() - with torch.no_grad(): - logits_v = self._forward( - self.model, self.synthetic_graph, self.synthetic_features - ) - total_query_time += (time.time() - t_q) - - logits_s = self._forward( - surrogate, self.synthetic_graph, self.synthetic_features - ) - loss = F.kl_div( - F.log_softmax(logits_s, dim=1), - F.softmax(logits_v, dim=1), - reduction='batchmean' - ) - loss.backward() - optimizer.step() - - train_surrogate_end = time.time() - - surrogate.eval() - self._evaluate_on_real_test(surrogate, metric, metric_comp) - - metric_comp.end() - metric_comp.update( - attack_time=(time.time() - attack_start), - query_target_time=total_query_time, - train_surrogate_time=(train_surrogate_end - train_surrogate_start), - ) - res = metric.compute() - res_comp = metric_comp.compute() - return res, res_comp - - -class DFEATypeII(DFEAAttack): - """ - Type II: Uses victim outputs only (hard labels). 
- """ - - def attack(self): - metric = AttackMetric() - metric_comp = AttackCompMetric() - - attack_start = time.time() - surrogate = GraphSAGE(self.feature_number, 16, self.label_number).to(self.device) - optimizer = torch.optim.Adam(surrogate.parameters(), lr=0.01) - - train_surrogate_start = time.time() - total_query_time = 0.0 - - for _ in tqdm(range(200)): - surrogate.train() - optimizer.zero_grad() - # Victim pseudo labels - t_q = time.time() - with torch.no_grad(): - logits_v = self._forward( - self.model, self.synthetic_graph, self.synthetic_features - ) - total_query_time += (time.time() - t_q) - pseudo = logits_v.argmax(dim=1) - - logits_s = self._forward( - surrogate, self.synthetic_graph, self.synthetic_features - ) - loss = F.cross_entropy(logits_s, pseudo) - loss.backward() - optimizer.step() - - train_surrogate_end = time.time() - - surrogate.eval() - self._evaluate_on_real_test(surrogate, metric, metric_comp) - - metric_comp.end() - metric_comp.update( - attack_time=(time.time() - attack_start), - query_target_time=total_query_time, - train_surrogate_time=(train_surrogate_end - train_surrogate_start), - ) - res = metric.compute() - res_comp = metric_comp.compute() - return res, res_comp - - -class DFEATypeIII(DFEAAttack): - """ - Type III: Two surrogates with victim supervision + consistency. 
- """ - - def attack(self): - metric = AttackMetric() - metric_comp = AttackCompMetric() - - attack_start = time.time() - s1 = GCN(self.feature_number, self.label_number).to(self.device) - s2 = GraphSAGE(self.feature_number, 16, self.label_number).to(self.device) - opt1 = torch.optim.Adam(s1.parameters(), lr=0.01) - opt2 = torch.optim.Adam(s2.parameters(), lr=0.01) - - train_surrogate_start = time.time() - total_query_time = 0.0 - - for _ in tqdm(range(200)): - s1.train() - s2.train() - opt1.zero_grad() - opt2.zero_grad() - # Victim pseudo-labels - t_q = time.time() - with torch.no_grad(): - logits_v = self._forward( - self.model, self.synthetic_graph, self.synthetic_features - ) - total_query_time += (time.time() - t_q) - pseudo_v = logits_v.argmax(dim=1) - # Surrogate predictions - l1 = self._forward(s1, self.synthetic_graph, self.synthetic_features) - l2 = self._forward(s2, self.synthetic_graph, self.synthetic_features) - # Loss: supervised + consistency - loss1 = F.cross_entropy(l1, pseudo_v) - loss2 = F.cross_entropy(l2, pseudo_v) - cons = F.mse_loss(l1, l2) - total = loss1 + loss2 + 0.5 * cons - total.backward() - opt1.step() - opt2.step() - - train_surrogate_end = time.time() - - # Use s1 as the final surrogate for evaluation - s1.eval() - self._evaluate_on_real_test(s1, metric, metric_comp) - - metric_comp.end() - metric_comp.update( - attack_time=(time.time() - attack_start), - query_target_time=total_query_time, - train_surrogate_time=(train_surrogate_end - train_surrogate_start), - ) - res = metric.compute() - res_comp = metric_comp.compute() - return res, res_comp - - -# Factory mapping of attack names to classes -ATTACK_FACTORY = { - "ModelExtractionAttack0": DFEATypeI, - "ModelExtractionAttack1": DFEATypeI, - "ModelExtractionAttack2": DFEATypeII, - "ModelExtractionAttack3": DFEATypeIII -} diff --git a/pygip/models/attack/Realistic.py b/pygip/models/attack/Realistic.py deleted file mode 100644 index e80cb6da..00000000 --- 
a/pygip/models/attack/Realistic.py +++ /dev/null @@ -1,333 +0,0 @@ -import random -import warnings -import time - -import dgl -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -from sklearn.metrics.pairwise import cosine_similarity - -from pygip.models.nn.backbones import GCN -from .base import BaseAttack -from pygip.utils.metrics import AttackMetric, AttackCompMetric # align with AdvMEA - -warnings.filterwarnings('ignore') - - -class DGLEdgePredictor(nn.Module): - """DGL version of edge prediction module.""" - - def __init__(self, input_dim, hidden_dim, num_classes, device): - super(DGLEdgePredictor, self).__init__() - self.input_dim = input_dim - self.hidden_dim = hidden_dim - self.num_classes = num_classes - self.device = device - - # Use the same GCN backbone as the target model to obtain node embeddings. - self.gnn = GCN(input_dim, hidden_dim) - self.node_classifier = nn.Linear(hidden_dim, num_classes) - - # Edge prediction head. - self.edge_predictor = nn.Sequential( - nn.Linear(hidden_dim * 2, hidden_dim), - nn.ReLU(), - nn.Linear(hidden_dim, 1), - nn.Sigmoid() - ) - - def forward(self, graph, features): - # Compute node embeddings then logits for node classification. 
- node_embeddings = self.gnn(graph, features) - node_logits = self.node_classifier(node_embeddings) - return node_embeddings, node_logits - - def predict_edges(self, node_embeddings, node_pairs): - """Predict edge existence probability for a list of node index pairs.""" - if len(node_pairs) == 0: - return torch.tensor([], device=self.device) - - node_pairs = torch.tensor(node_pairs, device=self.device) - src_embeddings = node_embeddings[node_pairs[:, 0]] - dst_embeddings = node_embeddings[node_pairs[:, 1]] - - edge_features = torch.cat([src_embeddings, dst_embeddings], dim=1) - edge_probs = self.edge_predictor(edge_features).squeeze() - return edge_probs - - -class DGLSurrogateModel(nn.Module): - """DGL version of surrogate model.""" - - def __init__(self, input_dim, num_classes, model_type='GCN'): - super(DGLSurrogateModel, self).__init__() - self.model_type = model_type - if model_type == 'GCN': - self.gnn = GCN(input_dim, num_classes) - else: - # Default to GCN; can be extended to other backbones. - self.gnn = GCN(input_dim, num_classes) - - def forward(self, graph, features): - return self.gnn(graph, features) - - -class RealisticAttack(BaseAttack): - """DGL-based GNN model extraction attack with updated metrics API.""" - supported_api_types = {"dgl"} - supported_datasets = {} - - def __init__(self, dataset, attack_x_ratio: float, attack_a_ratio: float, model_path: str = None, - hidden_dim: int = 64, threshold_s: float = 0.7, threshold_a: float = 0.5): - # Keep BaseAttack initialization contract; store ratios for this attack. - super().__init__(dataset, attack_x_ratio, model_path) - - self.attack_x_ratio = float(attack_x_ratio) - self.attack_a_ratio = float(attack_a_ratio) - - self.hidden_dim = hidden_dim - self.threshold_s = threshold_s # Cosine similarity threshold - self.threshold_a = threshold_a # Edge prediction threshold - - # Determine the number of queried nodes by the availability ratios. 
- ratio_budget = max(self.attack_x_ratio, self.attack_a_ratio) - if ratio_budget <= 0.0: - ratio_budget = 0.05 # small default to avoid zero queries - self.attack_node_number = max(1, int(self.num_nodes * ratio_budget)) - - # Graph tensors - self.graph_data = self.graph_data.to(self.device) - self.graph = self.graph_data - self.features = self.graph.ndata['feat'] - - # Initialize edge predictor and surrogate model. - self.edge_predictor = DGLEdgePredictor( - self.num_features, hidden_dim, self.num_classes, self.device - ).to(self.device) - self.surrogate_model = DGLSurrogateModel( - self.num_features, self.num_classes - ).to(self.device) - - # Target model used to simulate black-box responses. - self.net1 = GCN(self.num_features, self.num_classes).to(self.device) - - # Optimizers - self.optimizer_edge = optim.Adam(self.edge_predictor.parameters(), lr=0.01, weight_decay=5e-4) - self.optimizer_surrogate = optim.Adam(self.surrogate_model.parameters(), lr=0.01, weight_decay=5e-4) - - print(f"Initialized attack on {dataset.dataset_name} dataset") - print(f"Nodes: {self.num_nodes}, Features: {self.num_features}, Classes: {self.num_classes}") - print(f"Attack nodes: {self.attack_node_number} (x_ratio={self.attack_x_ratio:.2f}, a_ratio={self.attack_a_ratio:.2f})") - - def simulate_target_model_queries(self, query_nodes, error_rate=0.15): - """Query the target model for labels on query_nodes and introduce a small error rate.""" - self.net1.eval() - with torch.no_grad(): - logits = self.net1(self.graph, self.features) - predictions = F.log_softmax(logits, dim=1).argmax(dim=1) - - predicted_labels = predictions[query_nodes].clone() - - # Flip a portion of labels to simulate noise in responses. 
- num_errors = int(len(predicted_labels) * error_rate) - if num_errors > 0: - error_indices = random.sample(range(len(predicted_labels)), num_errors) - for idx in error_indices: - wrong_label = random.randint(0, self.num_classes - 1) - predicted_labels[idx] = wrong_label - return predicted_labels - - def compute_cosine_similarity(self, features): - """Compute cosine similarity of node features.""" - features_np = features.cpu().detach().numpy() - similarity_matrix = cosine_similarity(features_np) - return torch.tensor(similarity_matrix, dtype=torch.float32, device=self.device) - - def generate_candidate_edges(self, labeled_nodes, unlabeled_nodes): - """Generate candidate edges based on feature cosine similarity threshold.""" - similarity_matrix = self.compute_cosine_similarity(self.features) - candidate_edges = [] - for u_node in unlabeled_nodes: - for l_node in labeled_nodes: - if similarity_matrix[u_node, l_node] > self.threshold_s: - candidate_edges.append([u_node, l_node]) - print(f"Generated {len(candidate_edges)} candidate edges based on cosine similarity") - return candidate_edges - - def train_edge_predictor(self, labeled_nodes, predicted_labels, epochs=100): - """Train the auxiliary edge prediction model.""" - print("Training edge predictor...") - self.edge_predictor.train() - - # Create node labels tensor; only queried nodes are labeled. 
- train_labels = torch.full((self.num_nodes,), -1, dtype=torch.long, device=self.device) - train_labels[labeled_nodes] = predicted_labels - - for epoch in range(epochs): - self.optimizer_edge.zero_grad() - - # Forward pass through edge predictor - node_embeddings, node_logits = self.edge_predictor(self.graph, self.features) - - # Node classification loss (supervised on labeled nodes) - labeled_mask = train_labels != -1 - if labeled_mask.sum() > 0: - node_loss = F.cross_entropy(node_logits[labeled_mask], train_labels[labeled_mask]) - else: - node_loss = torch.tensor(0.0, device=self.device) - - # Positive and negative edge samples - src_nodes, dst_nodes = self.graph.edges() - positive_pairs = list(zip(src_nodes.cpu().numpy(), dst_nodes.cpu().numpy())) - - pos_edge_probs = self.edge_predictor.predict_edges(node_embeddings, positive_pairs) - pos_loss = -torch.log(pos_edge_probs + 1e-15).mean() - - negative_pairs = [] - num_neg_samples = min(len(positive_pairs), 1000) - for _ in range(num_neg_samples): - src = random.randint(0, self.num_nodes - 1) - dst = random.randint(0, self.num_nodes - 1) - if src != dst and not self.graph_data.has_edges_between(src, dst): - negative_pairs.append([src, dst]) - - if negative_pairs: - neg_edge_probs = self.edge_predictor.predict_edges(node_embeddings, negative_pairs) - neg_loss = -torch.log(1 - neg_edge_probs + 1e-15).mean() - else: - neg_loss = torch.tensor(0.0, device=self.device) - - total_loss = node_loss + 0.5 * (pos_loss + neg_loss) - total_loss.backward() - self.optimizer_edge.step() - - if epoch % 20 == 0: - print(f"Epoch {epoch:3d}: total={total_loss.item():.4f}, " - f"node={node_loss.item():.4f}, edge={(pos_loss + neg_loss).item():.4f}") - - def add_potential_edges(self, candidate_edges, labeled_nodes): - """Add potential edges whose predicted probability exceeds the threshold.""" - if not candidate_edges: - return self.graph - - print("Predicting edge weights and adding potential edges...") - self.edge_predictor.eval() - 
with torch.no_grad(): - node_embeddings, _ = self.edge_predictor(self.graph, self.features) - edge_probs = self.edge_predictor.predict_edges(node_embeddings, candidate_edges) - - selected_edges = [] - for i, (src, dst) in enumerate(candidate_edges): - if edge_probs[i] > self.threshold_a: - selected_edges.extend([(src, dst), (dst, src)]) # undirected - - print(f"Selected {len(selected_edges) // 2} potential edges to add") - if selected_edges: - enhanced_graph = dgl.add_edges( - self.graph, - [e[0] for e in selected_edges], - [e[1] for e in selected_edges] - ) - return enhanced_graph - else: - return self.graph - - def train_surrogate_model(self, enhanced_graph, labeled_nodes, predicted_labels, epochs=200): - """Train the surrogate model on queried nodes and pseudo labels.""" - print("Training surrogate model...") - self.surrogate_model.train() - - # Build training labels for queried nodes. - train_labels = torch.full((self.num_nodes,), -1, dtype=torch.long, device=self.device) - train_labels[labeled_nodes] = predicted_labels - labeled_mask = train_labels != -1 - - for epoch in range(epochs): - self.optimizer_surrogate.zero_grad() - logits = self.surrogate_model(enhanced_graph, self.features) - - if labeled_mask.sum() > 0: - logp = F.log_softmax(logits, dim=1) - loss = F.nll_loss(logp[labeled_mask], train_labels[labeled_mask]) - loss.backward() - self.optimizer_surrogate.step() - - if epoch % 50 == 0: - print(f"Surrogate epoch {epoch:3d}, loss={loss.item():.4f}") - - def _evaluate_and_update_metrics(self, enhanced_graph, metric: AttackMetric, metric_comp: AttackCompMetric): - """Evaluate surrogate against target on the real test set and update metric containers.""" - # Target inference - t0 = time.time() - with torch.no_grad(): - logits_target = self.net1(self.graph, self.features) - metric_comp.update(inference_target_time=(time.time() - t0)) - target_preds = F.log_softmax(logits_target, dim=1).argmax(dim=1) - - # Surrogate inference - t0 = time.time() - with 
torch.no_grad(): - logits_surrogate = self.surrogate_model(enhanced_graph, self.features) - metric_comp.update(inference_surrogate_time=(time.time() - t0)) - surrogate_preds = F.log_softmax(logits_surrogate, dim=1).argmax(dim=1) - - # Update performance metrics with ground truth and target predictions on test split. - mask = self.graph_data.ndata['test_mask'] - labels = self.graph_data.ndata['label'] - metric.update(surrogate_preds[mask], labels[mask], target_preds[mask]) - - def attack(self): - """Execute the attack and return two JSON-like dicts: performance and computation metrics.""" - metric = AttackMetric() - metric_comp = AttackCompMetric() - - print("=" * 60) - print("Starting GNN Model Extraction Attack (Realistic)") - print("=" * 60) - - attack_start = time.time() - - # Step 1: Randomly select query nodes according to the budget. - all_nodes = list(range(self.num_nodes)) - labeled_nodes = random.sample(all_nodes, self.attack_node_number) - unlabeled_nodes = [n for n in all_nodes if n not in labeled_nodes] - print(f"Selected {len(labeled_nodes)} nodes for querying") - - # Step 2: Query the target model once for pseudo labels. - t_q = time.time() - predicted_labels = self.simulate_target_model_queries(labeled_nodes) - query_time = time.time() - t_q - metric_comp.update(query_target_time=query_time) - print("Finished querying the target model") - - # Step 3: Generate candidate edges (feature similarity). - candidate_edges = self.generate_candidate_edges(labeled_nodes, unlabeled_nodes) - - # Step 4: Train the auxiliary edge predictor (included in total attack time). - self.train_edge_predictor(labeled_nodes, predicted_labels) - - # Step 5: Add potential edges to obtain an enhanced graph. 
- enhanced_graph = self.add_potential_edges(candidate_edges, labeled_nodes) - original_edges = self.graph_data.num_edges() - enhanced_edges = enhanced_graph.num_edges() - print(f"Enhanced graph: {original_edges} -> {enhanced_edges} edges (+{enhanced_edges - original_edges})") - - # Step 6: Train the surrogate model and record its training time. - t_train_surr = time.time() - self.train_surrogate_model(enhanced_graph, labeled_nodes, predicted_labels) - train_surrogate_time = time.time() - t_train_surr - metric_comp.update(train_surrogate_time=train_surrogate_time) - - # Step 7: One-shot evaluation and metrics update. - self._evaluate_and_update_metrics(enhanced_graph, metric, metric_comp) - - # Finalize computation stats. - metric_comp.end() - metric_comp.update(attack_time=(time.time() - attack_start)) - - # Return two JSON-like dicts as required by the new API. - res = metric.compute() - res_comp = metric_comp.compute() - return res, res_comp diff --git a/pygip/models/attack/__init__.py b/pygip/models/attack/__init__.py deleted file mode 100644 index 8cbeb84b..00000000 --- a/pygip/models/attack/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -from .AdvMEA import AdvMEA -from .CEGA import CEGA -from .DataFreeMEA import ( - DFEATypeI, - DFEATypeII, - DFEATypeIII -) -from .mea.MEA import ( - ModelExtractionAttack0, - ModelExtractionAttack1, - ModelExtractionAttack2, - ModelExtractionAttack3, - ModelExtractionAttack4, - ModelExtractionAttack5 -) -from .Realistic import RealisticAttack - -__all__ = [ - 'AdvMEA', - 'CEGA', - 'RealisticAttack', - 'DFEATypeI', - 'DFEATypeII', - 'DFEATypeIII', - 'ModelExtractionAttack0', - 'ModelExtractionAttack1', - 'ModelExtractionAttack2', - 'ModelExtractionAttack3', - 'ModelExtractionAttack4', - 'ModelExtractionAttack5', -] diff --git a/pygip/models/attack/base.py b/pygip/models/attack/base.py deleted file mode 100644 index 33186e44..00000000 --- a/pygip/models/attack/base.py +++ /dev/null @@ -1,110 +0,0 @@ -from abc import ABC, 
abstractmethod -from typing import Union, Optional - -import torch - -from pygip.datasets import Dataset -from pygip.utils.hardware import get_device - - -class BaseAttack(ABC): - """Abstract base class for attack models. - - This class provides a common interface for various attack strategies on graph-based - machine learning models. It handles device management, dataset loading, and - compatibility checks to ensure that the attack can be executed on the given - dataset and model API type. - - Attributes: - supported_api_types (set): A set of strings representing the supported API - types (e.g., 'pyg', 'dgl'). - supported_datasets (set): A set of strings representing the names of - supported dataset classes. - device (torch.device): The computing device (CPU or GPU) to be used for - the attack. - dataset (Dataset): The dataset object containing graph data and metadata. - graph_dataset: The raw graph dataset from the underlying library. - graph_data: The primary graph data structure. - num_nodes (int): The number of nodes in the graph. - num_features (int): The number of features per node. - num_classes (int): The number of classes for node classification. - attack_node_fraction (float, optional): The fraction of nodes to be - targeted by the attack. - model_path (str, optional): The path to a pre-trained target model. - """ - supported_api_types = set() - supported_datasets = set() - - def __init__(self, dataset: Dataset, attack_node_fraction: float = None, model_path: str = None, - device: Optional[Union[str, torch.device]] = None): - """Initializes the BaseAttack. - - Args: - dataset (Dataset): The dataset to be attacked. - attack_node_fraction (float, optional): The fraction of nodes to - target in the attack. Defaults to None. - model_path (str, optional): The path to a pre-trained model file. - Defaults to None. - device (Union[str, torch.device], optional): The device to run the - attack on. If None, it will be automatically selected. - Defaults to None. 
- """ - self.device = torch.device(device) if device else get_device() - print(f"Using device: {self.device}") - - # graph data - self.dataset = dataset - self.graph_dataset = dataset.graph_dataset - self.graph_data = dataset.graph_data - - # meta data - self.num_nodes = dataset.num_nodes - self.num_features = dataset.num_features - self.num_classes = dataset.num_classes - - # params - self.attack_node_fraction = attack_node_fraction - self.model_path = model_path - - self._check_dataset_compatibility() - - def _check_dataset_compatibility(self): - """Checks if the dataset is compatible with the attack. - - Raises: - ValueError: If the dataset's API type or class name is not in the - list of supported types. - """ - cls_name = self.dataset.__class__.__name__ - - if self.supported_api_types and self.dataset.api_type not in self.supported_api_types: - raise ValueError( - f"API type '{self.dataset.api_type}' is not supported. Supported: {self.supported_api_types}") - - if self.supported_datasets and cls_name not in self.supported_datasets: - raise ValueError(f"Dataset '{cls_name}' is not supported. Supported: {self.supported_datasets}") - - @abstractmethod - def attack(self): - """ - Execute the attack. - """ - raise NotImplementedError - - def _load_model(self, model_path): - """ - Load a pre-trained model. - """ - raise NotImplementedError - - def _train_target_model(self): - """ - Train the target model if not provided. - """ - raise NotImplementedError - - def _train_attack_model(self): - """ - Train the attack model. 
- """ - raise NotImplementedError diff --git a/pygip/models/attack/mea/MEA.py b/pygip/models/attack/mea/MEA.py deleted file mode 100644 index c2009e70..00000000 --- a/pygip/models/attack/mea/MEA.py +++ /dev/null @@ -1,480 +0,0 @@ -import os -import random -import time -from typing import List, Tuple, Optional - -import dgl -import torch -import torch.nn.functional as F -from torch import nn - -from pygip.models.attack.base import BaseAttack -from pygip.models.nn.backbones import GCN -from pygip.utils.metrics import AttackMetric, AttackCompMetric - - -def _as_tensor(x, device): - if isinstance(x, torch.Tensor): - return x.to(device) - return torch.tensor(x, device=device) - - -def add_self_loops(g: dgl.DGLGraph) -> dgl.DGLGraph: - """Return a copy of g with self-loops added to every node.""" - num_nodes = g.number_of_nodes() - src = torch.arange(num_nodes) - dst = src.clone() - return dgl.add_edges(g, src, dst) - - -def subgraph_from_nodes(g: dgl.DGLGraph, node_idx: List[int]) -> dgl.DGLGraph: - """Induce a subgraph that contains only edges whose endpoints are both in node_idx.""" - sg = dgl.node_subgraph(g, node_idx) - sg = dgl.remove_self_loop(sg) - sg = dgl.add_self_loop(sg) - return sg - - -def erdos_renyi_graph(num_nodes: int, p: float = 0.05) -> dgl.DGLGraph: - import networkx as nx - g_nx = nx.erdos_renyi_graph(num_nodes, p) - g = dgl.from_networkx(g_nx) - g = add_self_loops(g) - return g - - -def random_shadow_indices(g: dgl.DGLGraph, k: int, extra: int = 2) -> Tuple[List[int], List[int]]: - """ - Heuristic shadow graph index generator. - Returns two sets: target_nodes (size k) and potential_nodes (neighbors around target nodes). 
- """ - num_nodes = g.number_of_nodes() - k = max(1, min(k, num_nodes)) - target_nodes = random.sample(range(num_nodes), k) - # collect neighbors up to 2 hops around the target nodes - neigh = set(target_nodes) - src, dst = g.edges() - src = src.tolist() - dst = dst.tolist() - adj = [[] for _ in range(num_nodes)] - for s, d in zip(src, dst): - adj[s].append(d) - adj[d].append(s) - for u in list(target_nodes): - for v in adj[u]: - neigh.add(v) - for w in adj[v]: - neigh.add(w) - # potential nodes are neighbors that are not target nodes - potential_nodes = list(sorted(set(neigh) - set(target_nodes))) - # if too many, sample a multiple of k - max_size = min(num_nodes, extra * k if extra * k > k else k) - if len(potential_nodes) > max_size: - potential_nodes = random.sample(potential_nodes, max_size) - return list(target_nodes), potential_nodes - - -def _safe_dir() -> str: - return os.path.dirname(os.path.abspath(__file__)) - - -def load_attack2_generated_graph(dataset_name: str, default_nodes: int) -> Tuple[ - dgl.DGLGraph, torch.Tensor, Optional[List[int]]]: - """ - Try to load an attack-2 pre-generated graph. If files are missing, fall back to - an on-the-fly Erdos–Rényi graph with random features. Returns (graph, features, selected_indices). 
- """ - base = os.path.join(_safe_dir(), "data", "attack2_generated_graph", dataset_name) - graph_label = os.path.join(base, "graph_label.txt") - selected_idx = os.path.join(base, "selected_index.txt") - if os.path.exists(graph_label): - # we only need the number of nodes; reconstruct a random graph and random features - try: - with open(graph_label, "r") as f: - n = sum(1 for _ in f) - num_nodes = max(1, n) - except Exception: - num_nodes = max(1, default_nodes) - g = erdos_renyi_graph(num_nodes, p=0.05) - return g, None, None - else: - g = erdos_renyi_graph(default_nodes, p=0.05) - return g, None, None - - -def load_attack3_shadow_indices(dataset_name: str, g: dgl.DGLGraph, k: int) -> Tuple[List[int], List[int]]: - """ - Try to load shadow graph indices from disk; if not found, generate heuristically. - """ - base = os.path.join(_safe_dir(), "data", "attack3_shadow_graph", dataset_name) - target_path = os.path.join(base, "target_graph_index.txt") - if os.path.exists(target_path): - try: - with open(target_path, "r") as f: - target_nodes = [int(x.strip()) for x in f if len(x.strip()) > 0] - except Exception: - target_nodes = None - else: - target_nodes = None - - potential_nodes = None - if os.path.isdir(base): - for fn in os.listdir(base): - if fn.startswith("protential") and fn.endswith(".txt"): - try: - with open(os.path.join(base, fn), "r") as f: - potential_nodes = [int(x.strip()) for x in f if len(x.strip()) > 0] - except Exception: - potential_nodes = None - break - - if target_nodes is None or potential_nodes is None: - t, p = random_shadow_indices(g, k) - return t, p - return target_nodes, potential_nodes - - -class _MEABase(BaseAttack): - """ - Base class for MEA family attacks. This class handles the target model training, - metric bookkeeping, and utility helpers. Subclasses only need to decide which - training indices and which graph to use for the surrogate. 
- """ - supported_api_types = {"dgl"} - - def __init__(self, dataset, attack_x_ratio: float, attack_a_ratio: float, model_path: Optional[str] = None): - super().__init__(dataset, attack_x_ratio, model_path) - - self.dataset = dataset - self.graph: dgl.DGLGraph = dataset.graph_data.to(self.device) - self.features: torch.Tensor = self.graph.ndata['feat'] - self.labels: torch.Tensor = dataset.graph_data.ndata['label'] - self.train_mask: torch.Tensor = dataset.graph_data.ndata['train_mask'] - self.test_mask: torch.Tensor = dataset.graph_data.ndata['test_mask'] - - self.num_nodes = dataset.num_nodes - self.num_features = dataset.num_features - self.num_classes = dataset.num_classes - - # budget based on the availability of features X and adjacency A - self.attack_x_ratio = float(attack_x_ratio) - self.attack_a_ratio = float(attack_a_ratio) - self.attack_node_num = max(1, int(self.num_nodes * max(self.attack_x_ratio, self.attack_a_ratio))) - - # target model - if model_path is None: - self._train_target_model() - else: - self._load_model(model_path) - - def _train_target_model(self): - self.net1 = GCN(self.num_features, self.num_classes).to(self.device) - opt = torch.optim.Adam(self.net1.parameters(), lr=0.01, weight_decay=5e-4) - - self.net1.train() - for _ in range(200): - opt.zero_grad() - logits = self.net1(self.graph, self.features) - logp = F.log_softmax(logits, dim=1) - loss = F.nll_loss(logp[self.train_mask], self.labels[self.train_mask]) - loss.backward() - opt.step() - self.net1.eval() - - def _load_model(self, model_path: str): - self.net1 = GCN(self.num_features, self.num_classes).to(self.device) - state = torch.load(model_path, map_location=self.device) - self.net1.load_state_dict(state) - self.net1.eval() - - # ---------- core utilities ---------- - def _query_target(self, g: dgl.DGLGraph, x: torch.Tensor) -> Tuple[torch.Tensor, float]: - start = time.time() - with torch.no_grad(): - logits = self.net1(g, x) - return logits, time.time() - start - - def 
_train_surrogate(self, g: dgl.DGLGraph, x: torch.Tensor, train_idx: torch.Tensor, y_train: torch.Tensor, - epochs: int = 200, lr: float = 0.01) -> Tuple[nn.Module, float]: - model = GCN(self.num_features, self.num_classes).to(self.device) - opt = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=5e-4) - - # boolean mask - if y_train.dim() > 1: - y_train = y_train.argmax(dim=1) - mask = torch.zeros(g.number_of_nodes(), dtype=torch.bool, device=self.device) - mask[train_idx] = True - - start = time.time() - for _ in range(epochs): - model.train() - opt.zero_grad() - logits = model(g, x) - logp = F.log_softmax(logits, dim=1) - loss = F.nll_loss(logp[mask], y_train[mask]) - loss.backward() - opt.step() - train_time = time.time() - start - model.eval() - return model, train_time - - def _compute_metrics(self, surrogate: nn.Module, metric: AttackMetric, metric_comp: AttackCompMetric): - # target inference - g = self.graph - x = self.features - y = self.labels - mask = self.test_mask - - t0 = time.time() - with torch.no_grad(): - logits_v = self.net1(g, x) - metric_comp.update(inference_target_time=time.time() - t0) - y_target = logits_v.argmax(dim=1) - - # surrogate inference - t0 = time.time() - with torch.no_grad(): - logits_s = surrogate(g, x) - metric_comp.update(inference_surrogate_time=time.time() - t0) - y_pred = logits_s.argmax(dim=1) - - metric.update(y_pred[mask], y[mask], y_target[mask]) - - # ---------- template method ---------- - def _attack_impl(self) -> Tuple[AttackMetric, AttackCompMetric]: - """ - Subclasses must implement this method to - 1) build a graph g_att and features x_att for training, - 2) pick a list of training indices idx_train of length attack_node_num, - 3) query the target for labels on idx_train and train a surrogate, - and then return filled metrics objects. 
- """ - raise NotImplementedError - - def attack(self, *args, **kwargs): - metric = AttackMetric() - metric_comp = AttackCompMetric() - start_all = time.time() - - # delegate to subclass implementation - metric, metric_comp = self._attack_impl() - - # finalize - metric_comp.update(attack_time=time.time() - start_all) - metric_comp.end() - - return metric.compute(), metric_comp.compute() - - -# ----------------------- concrete attacks ----------------------- - -class ModelExtractionAttack0(_MEABase): - """ - Attack-0: Random-node label-only extraction on the original graph. - """ - - def _attack_impl(self) -> Tuple[AttackMetric, AttackCompMetric]: - metric = AttackMetric() - metric_comp = AttackCompMetric() - - # sample nodes to query - idx_train = random.sample(range(self.num_nodes), self.attack_node_num) - idx_train_t = torch.tensor(idx_train, device=self.device) - - # query target - logits_v, q_time = self._query_target(self.graph, self.features) - y_pseudo = logits_v.argmax(dim=1) - metric_comp.update(query_target_time=q_time) - - # train surrogate on original graph but only using queried nodes - surrogate, t_train = self._train_surrogate(self.graph, self.features, idx_train_t, y_pseudo) - metric_comp.update(train_surrogate_time=t_train) - - # evaluate on real test set - self._compute_metrics(surrogate, metric, metric_comp) - return metric, metric_comp - - -class ModelExtractionAttack1(_MEABase): - """ - Attack-1: Degree-based sampling of query nodes on the original graph. 
- """ - - def _attack_impl(self) -> Tuple[AttackMetric, AttackCompMetric]: - metric = AttackMetric() - metric_comp = AttackCompMetric() - - g = self.graph - deg = g.in_degrees().cpu().tolist() - order = sorted(range(self.num_nodes), key=lambda i: deg[i], reverse=True) - idx_train = order[:self.attack_node_num] - idx_train_t = torch.tensor(idx_train, device=self.device) - - logits_v, q_time = self._query_target(self.graph, self.features) - y_pseudo = logits_v.argmax(dim=1) - metric_comp.update(query_target_time=q_time) - - surrogate, t_train = self._train_surrogate(self.graph, self.features, idx_train_t, y_pseudo) - metric_comp.update(train_surrogate_time=t_train) - - self._compute_metrics(surrogate, metric, metric_comp) - return metric, metric_comp - - -class ModelExtractionAttack2(_MEABase): - """ - Attack-2: Data-free extraction on a synthetic graph with random features. - """ - - def _attack_impl(self) -> Tuple[AttackMetric, AttackCompMetric]: - metric = AttackMetric() - metric_comp = AttackCompMetric() - - # build synthetic graph - syn_g, _, _ = load_attack2_generated_graph( - getattr(self.dataset, 'dataset_name', getattr(self.dataset, 'name', 'default')), - default_nodes=self.attack_node_num - ) - syn_g = syn_g.to(self.device) - syn_x = torch.randn(syn_g.number_of_nodes(), self.num_features, device=self.device) - - # query target on synthetic inputs - logits_v, q_time = self._query_target(syn_g, syn_x) - y_pseudo = logits_v.argmax(dim=1) - metric_comp.update(query_target_time=q_time) - - # use all nodes in synthetic graph for training - idx_train_t = torch.arange(syn_g.number_of_nodes(), device=self.device) - surrogate, t_train = self._train_surrogate(syn_g, syn_x, idx_train_t, y_pseudo) - metric_comp.update(train_surrogate_time=t_train) - - # evaluate on real test set - self._compute_metrics(surrogate, metric, metric_comp) - return metric, metric_comp - - -class ModelExtractionAttack3(_MEABase): - """ - Attack-3: Shadow-graph extraction. 
Train on a subgraph induced by a - set of target nodes and their neighbors (potential nodes). - """ - - def _attack_impl(self) -> Tuple[AttackMetric, AttackCompMetric]: - metric = AttackMetric() - metric_comp = AttackCompMetric() - - dataset_name = getattr(self.dataset, 'dataset_name', getattr(self.dataset, 'name', 'default')) - target_nodes, potential_nodes = load_attack3_shadow_indices(dataset_name, self.graph, self.attack_node_num) - - # training nodes are the union - idx_train = list(sorted(set(target_nodes) | set(potential_nodes))) - idx_train_t = torch.tensor(idx_train, device=self.device) - - sg = subgraph_from_nodes(self.graph, idx_train) - x_sg = self.features[idx_train_t] - - # map back to subgraph index for labels - logits_v_full, q_time = self._query_target(self.graph, self.features) - metric_comp.update(query_target_time=q_time) - y_pseudo_full = logits_v_full.argmax(dim=1) - y_pseudo = y_pseudo_full[idx_train_t] - - # train on the shadow subgraph - surrogate, t_train = self._train_surrogate(sg, x_sg, torch.arange(sg.number_of_nodes(), device=self.device), - y_pseudo) - metric_comp.update(train_surrogate_time=t_train) - - self._compute_metrics(surrogate, metric, metric_comp) - return metric, metric_comp - - -class ModelExtractionAttack4(_MEABase): - """ - Attack-4: Cosine-similarity neighbor expansion. Start from random seeds and - expand candidates by feature similarity to form the training subgraph. 
- """ - - def _attack_impl(self) -> Tuple[AttackMetric, AttackCompMetric]: - metric = AttackMetric() - metric_comp = AttackCompMetric() - - seeds = random.sample(range(self.num_nodes), max(1, self.attack_node_num // 4)) - # compute cosine similarity on CPU to save GPU memory - x = self.features.detach().cpu() - norm = x.norm(dim=1, keepdim=True) + 1e-12 - x_n = x / norm - sims = torch.mm(x_n, x_n.t()) - # choose top-k neighbors for each seed - cand = set(seeds) - for s in seeds: - topk = torch.topk(sims[s], k=min(self.num_nodes, self.attack_node_num)).indices.tolist() - cand.update(topk) - idx_train = list(sorted(cand))[:self.attack_node_num] - idx_train_t = torch.tensor(idx_train, device=self.device) - - # query target on original graph to get labels for these nodes - logits_v, q_time = self._query_target(self.graph, self.features) - metric_comp.update(query_target_time=q_time) - y_pseudo = logits_v.argmax(dim=1) - - sg = subgraph_from_nodes(self.graph, idx_train) - x_sg = self.features[idx_train_t] - surrogate, t_train = self._train_surrogate(sg, x_sg, torch.arange(sg.number_of_nodes(), device=self.device), - y_pseudo[idx_train_t]) - metric_comp.update(train_surrogate_time=t_train) - - self._compute_metrics(surrogate, metric, metric_comp) - return metric, metric_comp - - -class ModelExtractionAttack5(_MEABase): - """ - Attack-5: Variant of the shadow-graph attack that samples two candidate lists and - trains on their union. If attack_6 index files are present (historical name), - they will be used; otherwise we fall back to generated indices. 
- """ - - def _attack_impl(self) -> Tuple[AttackMetric, AttackCompMetric]: - metric = AttackMetric() - metric_comp = AttackCompMetric() - - dataset_name = getattr(self.dataset, 'dataset_name', getattr(self.dataset, 'name', 'default')) - base = os.path.join(_safe_dir(), "data", "attack3_shadow_graph", dataset_name) - f_a = os.path.join(base, "attack_6_sub_shadow_graph_index_attack_2.txt") - f_b = os.path.join(base, "attack_6_sub_shadow_graph_index_attack_3.txt") - - a_idx, b_idx = None, None - if os.path.exists(f_a): - try: - with open(f_a, "r") as f: - a_idx = [int(x.strip()) for x in f if len(x.strip()) > 0] - except Exception: - a_idx = None - if os.path.exists(f_b): - try: - with open(f_b, "r") as f: - b_idx = [int(x.strip()) for x in f if len(x.strip()) > 0] - except Exception: - b_idx = None - - if a_idx is None or b_idx is None: - t, p = random_shadow_indices(self.graph, self.attack_node_num, extra=3) - a_idx = t - b_idx = p - - idx_train = list(sorted(set(a_idx) | set(b_idx))) - idx_train = idx_train[:max(self.attack_node_num, len(idx_train))] - idx_train_t = torch.tensor(idx_train, device=self.device) - - logits_v, q_time = self._query_target(self.graph, self.features) - metric_comp.update(query_target_time=q_time) - y_pseudo = logits_v.argmax(dim=1) - - sg = subgraph_from_nodes(self.graph, idx_train) - x_sg = self.features[idx_train_t] - surrogate, t_train = self._train_surrogate(sg, x_sg, torch.arange(sg.number_of_nodes(), device=self.device), - y_pseudo[idx_train_t]) - metric_comp.update(train_surrogate_time=t_train) - - self._compute_metrics(surrogate, metric, metric_comp) - return metric, metric_comp diff --git a/pygip/models/attack/mea/__init__.py b/pygip/models/attack/mea/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/pygip/models/attack/mea/data/attack2_generated_graph/citeseer/graph_label.txt b/pygip/models/attack/mea/data/attack2_generated_graph/citeseer/graph_label.txt deleted file mode 100644 index 416254fb..00000000 
--- a/pygip/models/attack/mea/data/attack2_generated_graph/citeseer/graph_label.txt +++ /dev/null @@ -1,4460 +0,0 @@ -0 5 -0 13 -0 18 -0 38 -0 40 -0 43 -0 60 -0 65 -0 83 -0 89 -0 90 -0 115 -0 121 -0 137 -0 166 -0 171 -0 175 -0 187 -0 199 -0 243 -0 245 -0 257 -0 261 -0 269 -0 272 -0 286 -0 288 -0 295 -0 310 -0 349 -0 379 -0 396 -0 397 -0 414 -0 426 -0 457 -0 462 -0 463 -0 466 -0 471 -0 477 -0 478 -0 496 -0 513 -0 516 -0 522 -0 552 -0 562 -0 564 -0 588 -0 595 -0 619 -0 620 -0 621 -0 659 -0 663 -0 666 -0 669 -0 671 -0 690 -1 49 -1 172 -1 238 -1 258 -1 316 -1 318 -1 325 -1 331 -1 412 -1 423 -1 525 -1 563 -1 594 -1 627 -1 637 -1 665 -2 214 -2 225 -2 239 -2 316 -2 331 -2 364 -2 549 -2 594 -2 618 -2 649 -2 676 -3 15 -3 18 -3 91 -3 92 -3 104 -3 194 -3 273 -3 283 -3 369 -3 372 -3 377 -3 419 -3 438 -3 449 -3 499 -3 519 -3 578 -3 596 -3 603 -3 616 -3 638 -4 22 -4 130 -4 184 -4 189 -4 238 -4 239 -4 271 -4 286 -4 317 -4 407 -4 596 -4 598 -4 652 -4 665 -4 668 -4 679 -5 0 -5 36 -5 65 -5 126 -5 129 -5 191 -5 206 -5 275 -5 333 -5 362 -5 384 -5 488 -5 536 -5 540 -5 567 -5 585 -5 604 -5 619 -5 640 -5 662 -5 673 -6 49 -6 95 -6 107 -6 130 -6 232 -6 245 -6 299 -6 361 -6 381 -6 418 -6 421 -6 434 -6 485 -6 552 -6 553 -6 654 -6 696 -7 11 -7 13 -7 47 -7 48 -7 50 -7 57 -7 61 -7 69 -7 95 -7 104 -7 106 -7 113 -7 142 -7 171 -7 196 -7 226 -7 229 -7 233 -7 267 -7 292 -7 294 -7 297 -7 316 -7 344 -7 358 -7 366 -7 368 -7 383 -7 390 -7 402 -7 417 -7 428 -7 469 -7 479 -7 482 -7 498 -7 522 -7 539 -7 590 -7 633 -7 637 -7 651 -7 661 -7 672 -7 677 -7 687 -8 11 -8 26 -8 42 -8 45 -8 49 -8 57 -8 78 -8 81 -8 82 -8 93 -8 98 -8 113 -8 126 -8 127 -8 141 -8 142 -8 145 -8 163 -8 232 -8 236 -8 248 -8 255 -8 264 -8 330 -8 348 -8 360 -8 368 -8 397 -8 398 -8 400 -8 428 -8 430 -8 459 -8 467 -8 533 -8 577 -8 624 -8 629 -8 635 -8 654 -8 683 -8 685 -9 71 -9 123 -9 125 -9 190 -9 202 -9 242 -9 334 -9 361 -9 362 -9 416 -9 455 -9 483 -9 493 -9 550 -9 661 -10 73 -10 77 -10 104 -10 139 -10 155 -10 216 -10 344 -10 372 -10 403 
-10 441 -10 462 -10 469 -10 480 -10 518 -10 519 -10 524 -10 579 -10 583 -10 619 -10 693 -11 7 -11 8 -11 250 -11 270 -11 407 -11 500 -11 551 -11 604 -11 621 -11 627 -12 13 -12 41 -12 49 -12 70 -12 79 -12 80 -12 82 -12 88 -12 91 -12 102 -12 116 -12 117 -12 123 -12 138 -12 152 -12 156 -12 162 -12 207 -12 221 -12 226 -12 261 -12 287 -12 288 -12 303 -12 341 -12 385 -12 390 -12 395 -12 396 -12 409 -12 415 -12 422 -12 430 -12 447 -12 449 -12 457 -12 466 -12 490 -12 497 -12 499 -12 509 -12 529 -12 570 -12 577 -12 592 -12 612 -12 614 -12 628 -12 630 -12 638 -12 639 -12 644 -12 648 -12 649 -12 653 -12 658 -12 667 -12 673 -13 0 -13 7 -13 12 -13 17 -13 47 -13 85 -13 112 -13 125 -13 151 -13 163 -13 235 -13 255 -13 281 -13 333 -13 341 -13 361 -13 367 -13 379 -13 401 -13 439 -13 448 -13 500 -13 522 -13 527 -13 621 -13 672 -13 673 -13 685 -14 91 -14 104 -14 139 -14 184 -14 208 -14 244 -14 247 -14 344 -14 369 -14 377 -14 403 -14 408 -14 436 -14 440 -14 524 -14 591 -14 615 -14 623 -14 645 -14 658 -14 668 -15 3 -15 103 -15 119 -15 137 -15 141 -15 191 -15 249 -15 255 -15 276 -15 285 -15 341 -15 346 -15 435 -15 440 -15 505 -15 522 -15 528 -15 552 -15 599 -15 621 -16 27 -16 55 -16 114 -16 128 -16 161 -16 182 -16 190 -16 266 -16 268 -16 324 -16 399 -16 400 -16 421 -16 466 -16 592 -16 596 -16 622 -16 658 -16 659 -16 664 -16 665 -17 13 -17 74 -17 103 -17 112 -17 192 -17 239 -17 262 -17 271 -17 327 -17 589 -17 591 -17 650 -18 0 -18 3 -18 91 -18 252 -18 283 -18 284 -18 292 -18 342 -18 344 -18 435 -18 438 -18 486 -18 512 -18 519 -18 542 -18 592 -18 663 -18 670 -18 671 -18 678 -19 40 -19 55 -19 68 -19 69 -19 97 -19 161 -19 202 -19 252 -19 324 -19 347 -19 359 -19 371 -19 375 -19 414 -19 438 -19 466 -19 513 -19 557 -19 600 -19 665 -20 41 -20 77 -20 78 -20 88 -20 103 -20 115 -20 133 -20 135 -20 182 -20 216 -20 228 -20 238 -20 244 -20 247 -20 255 -20 259 -20 280 -20 288 -20 322 -20 326 -20 330 -20 335 -20 341 -20 385 -20 407 -20 410 -20 412 -20 416 -20 425 -20 441 -20 470 -20 489 -20 492 -20 518 
-20 527 -20 555 -20 597 -20 608 -20 609 -20 618 -20 624 -20 639 -20 673 -20 682 -20 685 -20 688 -21 73 -21 87 -21 112 -21 125 -21 143 -21 207 -21 244 -21 283 -21 361 -21 367 -21 393 -21 560 -21 589 -21 661 -21 672 -21 685 -22 4 -22 27 -22 36 -22 65 -22 99 -22 112 -22 118 -22 132 -22 164 -22 225 -22 317 -22 390 -22 430 -22 590 -22 637 -23 50 -23 137 -23 215 -23 234 -23 247 -23 255 -23 398 -23 665 -23 679 -24 143 -24 186 -24 188 -24 309 -24 322 -24 408 -24 474 -24 494 -24 664 -25 84 -25 107 -25 190 -25 218 -25 238 -25 276 -25 296 -25 503 -25 665 -26 8 -26 33 -26 93 -26 98 -26 101 -26 183 -26 305 -26 306 -26 317 -26 409 -26 436 -26 509 -26 586 -26 647 -26 655 -27 16 -27 22 -27 45 -27 62 -27 75 -27 80 -27 144 -27 244 -27 255 -27 257 -27 281 -27 305 -27 322 -27 364 -27 520 -27 577 -27 603 -27 696 -28 98 -28 108 -28 115 -28 117 -28 161 -28 176 -28 281 -28 322 -28 351 -28 412 -28 448 -28 455 -28 527 -28 577 -28 641 -28 674 -28 686 -29 93 -29 159 -29 191 -29 322 -29 379 -29 496 -29 501 -29 530 -29 577 -29 686 -29 689 -30 164 -30 221 -30 405 -30 498 -31 54 -31 81 -31 82 -31 144 -31 168 -31 181 -31 210 -31 226 -31 294 -31 337 -31 368 -31 370 -31 378 -31 404 -31 422 -31 456 -31 476 -31 521 -31 548 -31 590 -31 630 -31 631 -31 680 -32 184 -32 359 -33 26 -33 55 -33 87 -33 111 -33 159 -34 173 -34 210 -35 245 -35 357 -35 379 -35 494 -35 578 -35 659 -35 669 -36 5 -36 22 -36 65 -36 112 -36 179 -36 255 -36 267 -36 332 -36 401 -36 552 -36 567 -36 627 -37 255 -37 279 -37 530 -38 0 -38 209 -38 411 -39 147 -39 287 -39 297 -39 328 -39 335 -39 360 -39 370 -39 417 -39 470 -39 515 -39 587 -39 629 -39 634 -40 0 -40 19 -40 244 -40 493 -41 12 -41 20 -41 57 -41 64 -41 82 -41 88 -41 118 -41 120 -41 148 -41 221 -41 241 -41 294 -41 312 -41 337 -41 383 -41 394 -41 413 -41 422 -41 470 -41 601 -41 610 -41 639 -41 643 -41 653 -41 677 -41 678 -41 697 -42 8 -42 133 -42 543 -42 569 -42 590 -42 633 -43 0 -43 95 -43 107 -43 119 -43 256 -43 258 -43 282 -43 466 -43 506 -43 597 -43 667 -44 84 -44 194 -44 238 
-44 331 -44 418 -45 8 -45 27 -45 51 -45 115 -45 187 -45 340 -45 589 -46 247 -46 282 -47 7 -47 13 -47 70 -47 103 -47 112 -47 239 -47 346 -47 401 -47 427 -47 430 -47 496 -47 529 -47 563 -47 585 -48 7 -48 330 -48 505 -49 1 -49 6 -49 8 -49 12 -49 266 -49 681 -50 7 -50 23 -50 95 -50 293 -50 472 -50 506 -50 544 -50 637 -50 645 -50 690 -51 45 -51 75 -51 217 -51 452 -52 62 -52 73 -52 77 -52 91 -52 97 -52 139 -52 209 -52 252 -52 253 -52 268 -52 283 -52 344 -52 372 -52 375 -52 617 -52 691 -53 60 -53 193 -53 325 -54 31 -54 78 -54 272 -55 16 -55 19 -55 33 -55 288 -55 300 -55 399 -55 609 -56 244 -56 250 -56 348 -56 398 -56 458 -56 623 -57 7 -57 8 -57 41 -57 88 -57 101 -57 110 -57 168 -57 201 -57 241 -57 254 -57 287 -57 294 -57 328 -57 356 -57 416 -57 533 -57 629 -58 634 -59 230 -59 233 -60 0 -60 53 -60 66 -60 89 -60 140 -60 154 -60 243 -60 272 -60 396 -60 450 -60 516 -60 666 -60 671 -61 7 -61 540 -62 27 -62 52 -62 166 -62 263 -62 582 -62 632 -63 218 -63 238 -63 245 -63 258 -63 303 -63 375 -63 421 -63 491 -63 498 -63 506 -63 549 -63 619 -63 637 -63 679 -64 41 -64 145 -64 153 -64 227 -64 229 -64 279 -64 291 -64 328 -64 417 -64 422 -64 443 -64 449 -64 541 -64 543 -64 547 -64 569 -64 601 -64 631 -64 639 -64 650 -64 653 -64 677 -64 680 -64 687 -64 688 -65 0 -65 5 -65 22 -65 36 -65 71 -65 99 -65 103 -65 239 -65 281 -65 520 -66 60 -66 89 -66 272 -66 396 -66 477 -67 203 -67 402 -67 631 -67 678 -68 19 -68 69 -68 73 -68 91 -68 104 -68 136 -68 169 -68 208 -68 216 -68 262 -68 342 -68 344 -68 372 -68 438 -68 480 -68 578 -68 582 -68 691 -69 7 -69 19 -69 68 -69 348 -69 434 -70 12 -70 47 -70 71 -70 99 -70 103 -70 179 -70 239 -70 346 -70 551 -70 567 -71 9 -71 65 -71 70 -72 249 -72 316 -72 568 -72 674 -73 10 -73 21 -73 52 -73 68 -73 252 -73 372 -73 375 -73 473 -73 513 -73 582 -73 616 -74 17 -74 125 -74 163 -74 239 -74 248 -74 275 -74 307 -74 362 -74 430 -74 455 -74 481 -74 522 -74 529 -74 551 -74 571 -74 585 -74 621 -74 659 -74 685 -75 27 -75 51 -75 161 -75 187 -75 242 -75 322 -76 116 -76 147 
-76 168 -76 205 -76 320 -76 353 -76 356 -76 404 -76 413 -76 416 -76 429 -76 437 -76 547 -76 631 -76 633 -76 688 -76 697 -77 10 -77 20 -77 52 -77 252 -77 344 -77 403 -77 449 -77 473 -77 513 -77 542 -77 605 -78 8 -78 20 -78 54 -78 241 -78 269 -78 289 -78 302 -78 356 -78 388 -78 416 -78 510 -78 531 -78 577 -79 12 -79 372 -80 12 -80 27 -80 89 -80 439 -80 671 -81 8 -81 31 -81 82 -81 134 -81 145 -81 181 -81 279 -81 360 -81 417 -81 470 -81 532 -81 648 -82 8 -82 12 -82 31 -82 41 -82 81 -82 88 -82 118 -82 134 -82 145 -82 153 -82 287 -82 303 -82 335 -82 353 -82 356 -82 417 -82 630 -83 0 -83 89 -83 243 -83 272 -83 396 -83 450 -83 516 -83 669 -83 690 -84 25 -84 44 -84 95 -84 119 -84 137 -84 247 -84 266 -84 282 -84 293 -84 299 -84 331 -84 463 -84 598 -84 618 -85 13 -85 153 -85 181 -85 192 -85 201 -85 227 -85 279 -85 294 -85 302 -85 321 -85 337 -85 533 -85 576 -85 653 -85 677 -86 150 -86 354 -86 580 -86 673 -87 21 -87 33 -87 349 -87 537 -87 609 -88 12 -88 20 -88 41 -88 57 -88 82 -88 92 -88 165 -88 329 -88 515 -88 548 -88 569 -88 601 -88 630 -88 648 -88 653 -89 0 -89 60 -89 66 -89 80 -89 83 -89 272 -89 295 -89 300 -89 516 -90 0 -90 268 -90 280 -90 403 -90 462 -90 575 -90 615 -91 3 -91 12 -91 14 -91 18 -91 52 -91 68 -91 252 -91 344 -91 554 -92 3 -92 88 -92 337 -93 8 -93 26 -93 29 -93 101 -93 161 -93 284 -93 322 -93 376 -93 409 -93 577 -94 166 -94 477 -95 6 -95 7 -95 43 -95 50 -95 84 -95 119 -95 137 -95 256 -95 271 -95 286 -95 331 -95 421 -95 466 -95 597 -95 622 -95 665 -95 667 -95 668 -96 275 -96 276 -96 338 -96 389 -97 19 -97 52 -97 449 -97 632 -98 8 -98 26 -98 28 -98 101 -98 509 -99 22 -99 65 -99 70 -99 103 -99 112 -99 170 -99 276 -99 361 -99 390 -99 523 -100 240 -100 247 -100 371 -100 577 -100 594 -101 26 -101 57 -101 93 -101 98 -101 186 -101 191 -101 305 -101 376 -101 408 -102 12 -102 123 -102 137 -102 182 -102 188 -102 238 -102 245 -102 299 -102 364 -102 381 -102 412 -102 432 -102 506 -102 525 -102 544 -102 570 -102 597 -102 622 -102 637 -102 665 -102 676 -102 690 -103 15 
-103 17 -103 20 -103 47 -103 65 -103 70 -103 99 -103 346 -103 401 -103 529 -103 567 -103 585 -103 640 -104 3 -104 7 -104 10 -104 14 -104 68 -104 169 -104 184 -104 342 -104 344 -104 369 -104 377 -104 438 -104 473 -104 474 -104 513 -104 578 -105 172 -105 350 -105 381 -105 471 -105 495 -105 522 -105 690 -106 7 -106 242 -106 284 -106 292 -106 305 -106 322 -106 649 -107 6 -107 25 -107 43 -107 331 -107 632 -108 28 -108 167 -108 564 -109 367 -109 378 -110 57 -110 378 -110 401 -111 33 -111 182 -111 238 -111 247 -111 331 -111 381 -111 418 -111 491 -111 527 -112 13 -112 17 -112 21 -112 22 -112 36 -112 47 -112 99 -112 361 -112 529 -113 7 -113 8 -113 364 -113 423 -113 460 -113 505 -114 16 -114 299 -114 352 -114 491 -115 0 -115 20 -115 28 -115 45 -115 263 -115 507 -116 12 -116 76 -116 120 -116 133 -116 147 -116 153 -116 174 -116 269 -116 312 -116 335 -116 352 -116 470 -116 576 -116 643 -116 677 -117 12 -117 28 -117 126 -117 202 -117 350 -117 440 -117 461 -117 471 -117 483 -118 22 -118 41 -118 82 -118 236 -118 287 -118 312 -118 328 -118 335 -118 370 -118 383 -118 417 -118 648 -118 650 -118 677 -118 688 -119 15 -119 43 -119 84 -119 95 -119 381 -119 598 -120 41 -120 116 -120 123 -120 144 -120 230 -120 269 -120 294 -120 303 -120 335 -120 629 -120 653 -120 698 -121 0 -121 154 -121 243 -121 272 -121 300 -121 311 -121 450 -121 462 -121 520 -121 564 -121 669 -122 250 -122 511 -122 658 -123 9 -123 12 -123 102 -123 120 -123 134 -123 226 -123 269 -123 291 -123 417 -123 533 -123 539 -123 548 -123 601 -123 610 -123 629 -124 193 -124 230 -124 606 -125 9 -125 13 -125 21 -125 74 -125 129 -125 151 -125 163 -125 362 -125 522 -125 523 -125 551 -125 585 -125 621 -125 659 -125 662 -125 672 -125 693 -126 5 -126 8 -126 117 -126 333 -126 664 -127 8 -127 148 -127 367 -127 404 -127 462 -128 16 -128 232 -128 645 -128 685 -129 5 -129 125 -129 586 -129 673 -130 4 -130 6 -130 247 -130 256 -130 258 -130 266 -130 282 -130 296 -130 391 -130 412 -130 466 -130 506 -130 525 -130 597 -130 607 -130 618 -130 619 
-130 667 -131 147 -131 148 -131 153 -131 180 -131 200 -131 294 -131 353 -131 415 -131 437 -131 464 -131 514 -131 569 -131 653 -131 688 -132 22 -132 250 -132 490 -132 493 -132 672 -132 673 -133 20 -133 42 -133 116 -133 135 -133 153 -133 210 -133 227 -133 289 -133 315 -133 353 -133 368 -133 429 -133 521 -133 533 -133 631 -133 639 -133 694 -134 81 -134 82 -134 123 -134 429 -135 20 -135 133 -135 259 -135 533 -136 68 -136 164 -136 350 -136 579 -137 0 -137 15 -137 23 -137 84 -137 95 -137 102 -137 203 -137 258 -137 331 -137 491 -137 525 -137 594 -137 612 -138 12 -138 183 -138 222 -138 274 -138 324 -138 416 -138 485 -138 678 -138 698 -139 10 -139 14 -139 52 -139 184 -139 208 -139 209 -139 253 -139 342 -139 369 -139 372 -139 438 -139 512 -139 579 -139 582 -139 616 -140 60 -140 150 -140 340 -141 8 -141 15 -141 202 -141 308 -141 339 -141 352 -141 440 -141 475 -142 7 -142 8 -142 181 -142 507 -143 21 -143 24 -143 332 -143 577 -143 659 -144 27 -144 31 -144 120 -144 389 -144 408 -145 8 -145 64 -145 81 -145 82 -145 413 -146 243 -146 428 -146 564 -147 39 -147 76 -147 116 -147 131 -147 476 -147 492 -148 41 -148 127 -148 131 -148 165 -148 210 -148 274 -148 584 -149 320 -149 321 -150 86 -150 140 -150 237 -150 423 -150 588 -151 13 -151 125 -151 529 -151 662 -152 12 -152 226 -152 332 -152 543 -152 552 -152 671 -153 64 -153 82 -153 85 -153 116 -153 131 -153 133 -153 337 -153 545 -154 60 -154 121 -154 295 -154 534 -155 10 -155 177 -155 487 -155 549 -155 561 -156 12 -156 219 -156 229 -156 233 -156 404 -156 616 -156 681 -156 694 -157 433 -157 483 -157 507 -157 574 -158 520 -158 537 -158 660 -159 29 -159 33 -159 179 -159 209 -159 216 -159 219 -159 258 -159 439 -159 552 -159 560 -160 213 -160 261 -160 391 -160 503 -160 669 -161 16 -161 19 -161 28 -161 75 -161 93 -161 258 -161 409 -162 12 -162 233 -162 323 -163 8 -163 13 -163 74 -163 125 -163 455 -164 22 -164 30 -164 136 -164 184 -165 88 -165 148 -165 484 -166 0 -166 62 -166 94 -166 299 -166 441 -166 477 -166 546 -166 625 -167 108 -167 180 
-167 453 -167 636 -167 694 -168 31 -168 57 -168 76 -168 261 -168 521 -168 559 -169 68 -169 104 -169 348 -170 99 -170 517 -171 0 -171 7 -171 471 -171 522 -171 566 -172 1 -172 105 -172 258 -172 525 -173 34 -173 287 -173 498 -174 116 -174 315 -174 504 -175 0 -175 318 -175 379 -175 406 -175 606 -176 28 -176 216 -176 534 -176 609 -176 663 -177 155 -177 493 -177 687 -178 221 -178 357 -178 481 -179 36 -179 70 -179 159 -179 308 -179 522 -180 131 -180 167 -180 301 -180 437 -181 31 -181 81 -181 85 -181 142 -181 289 -181 630 -182 16 -182 20 -182 102 -182 111 -182 274 -182 277 -183 26 -183 138 -183 294 -183 389 -183 407 -183 454 -183 473 -183 574 -183 658 -184 4 -184 14 -184 32 -184 104 -184 139 -184 164 -184 344 -185 189 -185 245 -185 271 -185 439 -185 520 -186 24 -186 101 -186 376 -187 0 -187 45 -187 75 -187 477 -187 478 -188 24 -188 102 -188 412 -188 421 -188 629 -189 4 -189 185 -189 501 -190 9 -190 16 -190 25 -190 198 -190 455 -190 630 -191 5 -191 15 -191 29 -191 101 -191 284 -191 292 -191 577 -191 606 -191 664 -192 17 -192 85 -192 667 -193 53 -193 124 -193 332 -193 393 -193 573 -194 3 -194 44 -194 268 -195 246 -195 477 -195 573 -196 7 -196 396 -196 424 -196 458 -197 257 -197 574 -197 608 -198 190 -198 244 -198 296 -198 497 -198 576 -199 0 -199 253 -199 487 -199 604 -200 131 -200 287 -200 398 -200 484 -200 543 -201 57 -201 85 -201 267 -201 543 -201 680 -202 9 -202 19 -202 117 -202 141 -202 225 -202 355 -202 448 -202 647 -203 67 -203 137 -203 331 -204 336 -204 466 -204 636 -205 76 -205 422 -205 633 -205 654 -206 5 -206 242 -206 426 -206 430 -206 488 -206 596 -207 12 -207 21 -207 358 -207 667 -208 14 -208 68 -208 139 -208 229 -208 449 -209 38 -209 52 -209 139 -209 159 -209 366 -209 411 -210 31 -210 34 -210 133 -210 148 -210 510 -210 629 -211 268 -211 439 -212 253 -212 690 -213 160 -213 225 -213 248 -213 338 -213 501 -214 2 -214 433 -214 647 -215 23 -215 344 -215 519 -215 526 -215 632 -216 10 -216 20 -216 68 -216 159 -216 176 -216 283 -216 473 -217 51 -217 364 -217 412 -217 
466 -217 520 -217 525 -217 668 -218 25 -218 63 -218 418 -219 156 -219 159 -219 426 -219 673 -220 494 -220 576 -221 12 -221 30 -221 41 -221 178 -221 360 -221 413 -222 138 -222 441 -222 559 -222 636 -222 640 -223 256 -223 684 -224 226 -224 235 -224 237 -224 307 -224 395 -224 410 -224 445 -225 2 -225 22 -225 202 -225 213 -225 471 -225 492 -226 7 -226 12 -226 31 -226 123 -226 152 -226 224 -226 335 -226 601 -227 64 -227 85 -227 133 -227 230 -227 254 -228 20 -228 327 -228 392 -229 7 -229 64 -229 156 -229 208 -229 449 -229 601 -230 59 -230 120 -230 124 -230 227 -230 233 -231 350 -231 422 -231 437 -232 6 -232 8 -232 128 -232 436 -232 475 -233 7 -233 59 -233 156 -233 162 -233 230 -233 318 -234 23 -234 391 -234 618 -235 13 -235 224 -235 390 -235 693 -236 8 -236 118 -236 302 -236 576 -236 651 -237 150 -237 224 -237 340 -237 507 -238 1 -238 4 -238 20 -238 25 -238 44 -238 63 -238 102 -238 111 -238 331 -239 2 -239 4 -239 17 -239 47 -239 65 -239 70 -239 74 -239 276 -239 679 -240 100 -240 304 -240 563 -240 625 -241 41 -241 57 -241 78 -241 653 -241 681 -242 9 -242 75 -242 106 -242 206 -242 292 -243 0 -243 60 -243 83 -243 121 -243 146 -243 272 -244 14 -244 20 -244 21 -244 27 -244 40 -244 56 -244 198 -244 288 -244 348 -244 364 -244 540 -244 558 -245 0 -245 6 -245 35 -245 63 -245 102 -245 185 -246 195 -246 314 -247 14 -247 20 -247 23 -247 46 -247 84 -247 100 -247 111 -247 130 -247 249 -247 364 -247 472 -248 8 -248 74 -248 213 -248 563 -248 673 -249 15 -249 72 -249 247 -249 466 -249 617 -250 11 -250 56 -250 122 -250 132 -250 355 -250 361 -250 583 -250 658 -251 414 -251 671 -251 680 -252 18 -252 19 -252 52 -252 73 -252 77 -252 91 -252 344 -253 52 -253 139 -253 199 -253 212 -253 519 -254 57 -254 227 -254 610 -255 8 -255 13 -255 15 -255 20 -255 23 -255 27 -255 36 -255 37 -255 313 -255 522 -256 43 -256 95 -256 130 -256 223 -256 676 -257 0 -257 27 -257 197 -257 529 -258 1 -258 43 -258 63 -258 130 -258 137 -258 159 -258 161 -258 172 -258 463 -259 20 -259 135 -259 494 -259 620 -259 640 -259 
647 -260 528 -260 575 -260 654 -260 692 -261 0 -261 12 -261 160 -261 168 -261 457 -261 594 -261 622 -261 637 -262 17 -262 68 -262 462 -262 562 -263 62 -263 115 -263 477 -263 580 -264 8 -264 548 -265 411 -265 420 -265 433 -265 579 -265 584 -266 16 -266 49 -266 84 -266 130 -266 331 -267 7 -267 36 -267 201 -267 475 -267 481 -267 543 -267 551 -268 16 -268 52 -268 90 -268 194 -268 211 -268 417 -268 458 -268 470 -268 489 -268 561 -268 578 -268 586 -268 619 -269 0 -269 78 -269 116 -269 120 -269 123 -269 294 -269 456 -270 11 -270 361 -270 455 -270 488 -270 647 -271 4 -271 17 -271 95 -271 185 -271 290 -272 0 -272 54 -272 60 -272 66 -272 83 -272 89 -272 121 -272 243 -273 3 -273 403 -273 480 -274 138 -274 148 -274 182 -274 548 -275 5 -275 74 -275 96 -275 390 -275 471 -276 15 -276 25 -276 96 -276 99 -276 239 -277 182 -277 463 -277 506 -278 285 -278 293 -278 458 -278 644 -279 37 -279 64 -279 81 -279 85 -279 388 -279 521 -280 20 -280 90 -280 422 -280 556 -281 13 -281 27 -281 28 -281 65 -281 475 -282 43 -282 46 -282 84 -282 130 -283 3 -283 18 -283 21 -283 52 -283 216 -284 18 -284 93 -284 106 -284 191 -284 641 -285 15 -285 278 -285 478 -285 560 -286 0 -286 4 -286 95 -286 463 -286 491 -287 12 -287 39 -287 57 -287 82 -287 118 -287 173 -287 200 -287 294 -287 335 -287 413 -288 0 -288 12 -288 20 -288 55 -288 244 -288 449 -288 609 -288 615 -289 78 -289 133 -289 181 -289 630 -289 633 -290 271 -290 358 -291 64 -291 123 -291 368 -291 437 -291 559 -291 687 -292 7 -292 18 -292 106 -292 191 -292 242 -293 50 -293 84 -293 278 -294 7 -294 31 -294 41 -294 57 -294 85 -294 120 -294 131 -294 183 -294 269 -294 287 -294 629 -295 0 -295 89 -295 154 -295 477 -296 25 -296 130 -296 198 -296 491 -297 7 -297 39 -297 330 -297 539 -298 370 -298 410 -298 443 -299 6 -299 84 -299 102 -299 114 -299 166 -299 300 -300 55 -300 89 -300 121 -300 299 -301 180 -301 306 -301 361 -301 435 -301 496 -302 78 -302 85 -302 236 -303 12 -303 63 -303 82 -303 120 -303 548 -303 569 -304 240 -304 563 -304 606 -304 690 -304 692 -305 
26 -305 27 -305 101 -305 106 -305 509 -306 26 -306 301 -306 527 -306 666 -307 74 -307 224 -307 390 -307 503 -308 141 -308 179 -308 390 -308 462 -308 658 -309 24 -309 496 -309 632 -309 692 -310 0 -310 409 -310 590 -310 658 -311 121 -311 564 -312 41 -312 116 -312 118 -312 360 -312 539 -313 255 -313 662 -314 246 -314 327 -314 350 -314 640 -314 661 -315 133 -315 174 -315 387 -315 422 -315 535 -316 1 -316 2 -316 7 -316 72 -316 568 -317 4 -317 22 -317 26 -317 471 -317 488 -318 1 -318 175 -318 233 -318 332 -318 461 -318 489 -319 328 -319 454 -320 76 -320 149 -320 321 -321 85 -321 149 -321 320 -321 515 -322 20 -322 24 -322 27 -322 28 -322 29 -322 75 -322 93 -322 106 -322 577 -322 661 -323 162 -323 530 -323 649 -324 16 -324 19 -324 138 -324 400 -325 1 -325 53 -325 580 -326 20 -326 339 -326 425 -326 523 -326 570 -327 17 -327 228 -327 314 -328 39 -328 57 -328 64 -328 118 -328 319 -328 378 -329 88 -329 444 -330 8 -330 20 -330 48 -330 297 -331 1 -331 2 -331 44 -331 84 -331 95 -331 107 -331 111 -331 137 -331 203 -331 238 -331 266 -332 36 -332 143 -332 152 -332 193 -332 318 -332 552 -332 672 -333 5 -333 13 -333 126 -333 649 -334 9 -334 363 -334 457 -334 488 -335 20 -335 39 -335 82 -335 116 -335 118 -335 120 -335 226 -335 287 -336 204 -336 432 -336 518 -336 525 -336 654 -337 31 -337 41 -337 85 -337 92 -337 153 -338 96 -338 213 -338 390 -338 492 -338 524 -339 141 -339 326 -339 461 -339 465 -340 45 -340 140 -340 237 -340 396 -340 477 -341 12 -341 13 -341 15 -341 20 -341 555 -341 647 -341 659 -342 18 -342 68 -342 104 -342 139 -343 406 -343 434 -343 540 -343 666 -343 694 -344 7 -344 10 -344 14 -344 18 -344 52 -344 68 -344 77 -344 91 -344 104 -344 184 -344 215 -344 252 -345 367 -345 384 -345 418 -345 667 -346 15 -346 47 -346 70 -346 103 -346 361 -347 19 -347 419 -348 8 -348 56 -348 69 -348 169 -348 244 -348 449 -348 558 -349 0 -349 87 -349 372 -349 580 -349 591 -350 105 -350 117 -350 136 -350 231 -350 314 -350 512 -350 516 -351 28 -351 508 -351 591 -352 114 -352 116 -352 141 -352 383 
-352 450 -352 587 -352 601 -352 642 -353 76 -353 82 -353 131 -353 133 -353 383 -353 653 -354 86 -354 509 -354 568 -354 660 -355 202 -355 250 -355 640 -355 650 -355 696 -356 57 -356 76 -356 78 -356 82 -356 547 -357 35 -357 178 -358 7 -358 207 -358 290 -358 377 -358 453 -358 578 -359 19 -359 32 -359 499 -359 686 -360 8 -360 39 -360 81 -360 221 -360 312 -361 6 -361 9 -361 13 -361 21 -361 99 -361 112 -361 250 -361 270 -361 301 -361 346 -361 621 -362 5 -362 9 -362 74 -362 125 -362 419 -362 453 -363 334 -363 452 -363 485 -363 589 -364 2 -364 27 -364 102 -364 113 -364 217 -364 244 -364 247 -365 462 -366 7 -366 209 -366 393 -366 411 -367 13 -367 21 -367 109 -367 127 -367 345 -367 376 -368 7 -368 8 -368 31 -368 133 -368 291 -368 533 -368 633 -369 3 -369 14 -369 104 -369 139 -369 372 -369 623 -370 31 -370 39 -370 118 -370 298 -370 633 -371 19 -371 100 -371 375 -372 3 -372 10 -372 52 -372 68 -372 73 -372 79 -372 139 -372 349 -372 369 -372 542 -372 593 -373 385 -373 415 -374 565 -374 566 -375 19 -375 52 -375 63 -375 73 -375 371 -376 93 -376 101 -376 186 -376 367 -377 3 -377 14 -377 104 -377 358 -377 449 -378 31 -378 109 -378 110 -378 328 -378 629 -379 0 -379 13 -379 29 -379 35 -379 175 -379 490 -379 613 -379 692 -380 415 -380 680 -380 698 -381 6 -381 102 -381 105 -381 111 -381 119 -381 598 -382 428 -382 518 -382 522 -382 658 -383 7 -383 41 -383 118 -383 352 -383 353 -383 653 -384 5 -384 345 -384 640 -385 12 -385 20 -385 373 -385 455 -385 560 -386 565 -386 685 -387 315 -387 535 -388 78 -388 279 -388 655 -389 96 -389 144 -389 183 -389 454 -390 7 -390 12 -390 22 -390 99 -390 235 -390 275 -390 307 -390 308 -390 338 -390 560 -391 130 -391 160 -391 234 -391 538 -392 228 -392 433 -392 552 -393 21 -393 193 -393 366 -393 488 -393 574 -394 41 -394 532 -394 680 -395 12 -395 224 -395 653 -396 0 -396 12 -396 60 -396 66 -396 83 -396 196 -396 340 -397 0 -397 8 -397 419 -397 479 -397 536 -397 657 -397 663 -397 671 -398 8 -398 23 -398 56 -398 200 -398 527 -398 546 -399 16 -399 55 -399 401 -399 
474 -399 543 -400 8 -400 16 -400 324 -401 13 -401 36 -401 47 -401 103 -401 110 -401 399 -401 662 -402 7 -402 67 -402 610 -402 643 -403 10 -403 14 -403 77 -403 90 -403 273 -403 480 -404 31 -404 76 -404 127 -404 156 -404 569 -404 577 -405 30 -405 489 -405 516 -405 639 -406 175 -406 343 -406 474 -406 690 -407 4 -407 11 -407 20 -407 183 -407 444 -407 652 -408 14 -408 24 -408 101 -408 144 -408 577 -408 649 -409 12 -409 26 -409 93 -409 161 -409 310 -409 444 -409 699 -410 20 -410 224 -410 298 -410 411 -410 450 -410 642 -411 38 -411 209 -411 265 -411 366 -411 410 -412 1 -412 20 -412 28 -412 102 -412 130 -412 188 -412 217 -412 520 -413 41 -413 76 -413 145 -413 221 -413 287 -413 628 -414 0 -414 19 -414 251 -414 438 -414 646 -415 12 -415 131 -415 373 -415 380 -416 9 -416 20 -416 57 -416 76 -416 78 -416 138 -417 7 -417 39 -417 64 -417 81 -417 82 -417 118 -417 123 -417 268 -417 576 -417 633 -418 6 -418 44 -418 111 -418 218 -418 345 -418 676 -419 3 -419 347 -419 362 -419 397 -420 265 -420 522 -420 565 -421 6 -421 16 -421 63 -421 95 -421 188 -421 571 -422 12 -422 31 -422 41 -422 64 -422 205 -422 231 -422 280 -422 315 -422 556 -423 1 -423 113 -423 150 -423 528 -423 566 -423 622 -424 196 -425 20 -425 326 -425 581 -426 0 -426 206 -426 219 -426 488 -426 546 -426 640 -427 47 -427 439 -427 488 -428 7 -428 8 -428 146 -428 382 -428 462 -428 548 -428 618 -429 76 -429 133 -429 134 -429 541 -429 651 -430 8 -430 12 -430 22 -430 47 -430 74 -430 206 -430 586 -431 482 -431 635 -432 102 -432 336 -432 469 -432 498 -432 615 -432 654 -433 157 -433 214 -433 265 -433 392 -433 647 -434 6 -434 69 -434 343 -434 560 -435 15 -435 18 -435 301 -435 552 -435 565 -435 566 -435 608 -436 14 -436 26 -436 232 -436 483 -437 76 -437 131 -437 180 -437 231 -437 291 -437 533 -437 610 -438 3 -438 18 -438 19 -438 68 -438 104 -438 139 -438 414 -439 13 -439 80 -439 159 -439 185 -439 211 -439 427 -439 466 -439 507 -440 14 -440 15 -440 117 -440 141 -440 446 -440 503 -440 598 -441 10 -441 20 -441 166 -441 222 -441 508 -441 
565 -442 562 -443 64 -443 298 -443 672 -444 329 -444 407 -444 409 -444 575 -444 699 -445 224 -445 451 -445 467 -445 484 -445 502 -445 624 -445 663 -446 440 -446 681 -447 12 -447 484 -447 559 -447 639 -448 13 -448 28 -448 202 -448 455 -448 554 -448 647 -449 3 -449 12 -449 64 -449 77 -449 97 -449 208 -449 229 -449 288 -449 348 -449 377 -450 60 -450 83 -450 121 -450 352 -450 410 -451 445 -451 657 -451 683 -452 51 -452 363 -453 167 -453 358 -453 362 -454 183 -454 319 -454 389 -455 9 -455 28 -455 74 -455 163 -455 190 -455 270 -455 385 -455 448 -455 560 -456 31 -456 269 -457 0 -457 12 -457 261 -457 334 -457 492 -458 56 -458 196 -458 268 -458 278 -458 611 -459 8 -459 520 -459 540 -459 568 -460 113 -461 117 -461 318 -461 339 -461 490 -461 536 -462 0 -462 10 -462 90 -462 121 -462 127 -462 262 -462 308 -462 365 -462 428 -463 0 -463 84 -463 258 -463 277 -463 286 -463 666 -464 131 -464 514 -465 339 -465 523 -465 622 -465 657 -466 0 -466 12 -466 16 -466 19 -466 43 -466 95 -466 130 -466 204 -466 217 -466 249 -466 439 -467 8 -467 445 -467 567 -467 571 -468 493 -468 571 -468 603 -468 635 -468 644 -469 7 -469 10 -469 432 -469 491 -470 20 -470 39 -470 41 -470 81 -470 116 -470 268 -470 515 -470 548 -471 0 -471 105 -471 117 -471 171 -471 225 -471 275 -471 317 -471 522 -471 606 -472 50 -472 247 -472 491 -473 73 -473 77 -473 104 -473 183 -473 216 -473 579 -473 582 -474 24 -474 104 -474 399 -474 406 -474 578 -474 582 -475 141 -475 232 -475 267 -475 281 -475 567 -476 31 -476 147 -476 521 -477 0 -477 66 -477 94 -477 166 -477 187 -477 195 -477 263 -477 295 -477 340 -478 0 -478 187 -478 285 -478 522 -478 638 -479 7 -479 397 -479 556 -479 651 -480 10 -480 68 -480 273 -480 403 -480 578 -480 616 -481 74 -481 178 -481 267 -481 661 -482 7 -482 431 -482 560 -482 602 -483 9 -483 117 -483 157 -483 436 -483 615 -484 165 -484 200 -484 445 -484 447 -484 502 -485 6 -485 138 -485 363 -485 589 -486 18 -486 557 -486 619 -486 649 -486 670 -487 155 -487 199 -487 561 -488 5 -488 206 -488 270 -488 317 -488 334 
-488 393 -488 426 -488 427 -488 492 -488 523 -489 20 -489 268 -489 318 -489 405 -489 528 -489 538 -489 582 -489 676 -490 12 -490 132 -490 379 -490 461 -490 535 -490 604 -491 63 -491 111 -491 114 -491 137 -491 286 -491 296 -491 469 -491 472 -491 637 -492 20 -492 147 -492 225 -492 338 -492 457 -492 488 -493 9 -493 40 -493 132 -493 177 -493 468 -493 602 -493 689 -494 24 -494 35 -494 220 -494 259 -494 662 -495 105 -495 531 -496 0 -496 29 -496 47 -496 301 -496 309 -496 682 -497 12 -497 198 -497 601 -498 7 -498 30 -498 63 -498 173 -498 432 -498 651 -499 3 -499 12 -499 359 -499 519 -500 11 -500 13 -500 501 -501 29 -501 189 -501 213 -501 500 -502 445 -502 484 -502 594 -503 25 -503 160 -503 307 -503 440 -503 509 -504 174 -504 505 -504 646 -505 15 -505 48 -505 113 -505 504 -505 620 -505 689 -506 43 -506 50 -506 63 -506 102 -506 130 -506 277 -507 115 -507 142 -507 157 -507 237 -507 439 -508 351 -508 441 -508 591 -508 619 -509 12 -509 26 -509 98 -509 305 -509 354 -509 503 -510 78 -510 210 -510 677 -511 122 -511 560 -511 647 -511 662 -512 18 -512 139 -512 350 -512 554 -512 562 -513 0 -513 19 -513 73 -513 77 -513 104 -513 542 -514 131 -514 464 -514 573 -514 677 -515 39 -515 88 -515 321 -515 470 -515 548 -516 0 -516 60 -516 83 -516 89 -516 350 -516 405 -517 170 -517 672 -518 10 -518 20 -518 336 -518 382 -519 3 -519 10 -519 18 -519 215 -519 253 -519 499 -520 27 -520 65 -520 121 -520 158 -520 185 -520 217 -520 412 -520 459 -521 31 -521 133 -521 168 -521 279 -521 476 -522 0 -522 7 -522 13 -522 15 -522 74 -522 105 -522 125 -522 171 -522 179 -522 255 -522 382 -522 420 -522 471 -522 478 -523 99 -523 125 -523 326 -523 465 -523 488 -524 10 -524 14 -524 338 -525 1 -525 102 -525 130 -525 137 -525 172 -525 217 -525 336 -526 215 -526 570 -526 591 -526 649 -526 652 -527 13 -527 20 -527 28 -527 111 -527 306 -527 398 -527 666 -528 15 -528 260 -528 423 -528 489 -528 606 -528 640 -529 12 -529 47 -529 74 -529 103 -529 112 -529 151 -529 257 -529 621 -530 29 -530 37 -530 323 -531 78 -531 495 -531 
680 -532 81 -532 394 -532 680 -533 8 -533 57 -533 85 -533 123 -533 133 -533 135 -533 368 -533 437 -533 677 -534 154 -534 176 -534 573 -534 638 -535 315 -535 387 -535 490 -536 5 -536 397 -536 461 -537 87 -537 158 -538 391 -538 489 -538 553 -539 7 -539 123 -539 297 -539 312 -539 543 -540 5 -540 61 -540 244 -540 343 -540 459 -540 550 -541 64 -541 429 -541 556 -542 18 -542 77 -542 372 -542 513 -543 42 -543 64 -543 152 -543 200 -543 201 -543 267 -543 399 -543 539 -543 641 -544 50 -544 102 -544 615 -544 622 -544 638 -545 153 -545 620 -546 166 -546 398 -546 426 -546 553 -546 574 -546 590 -547 64 -547 76 -547 356 -548 31 -548 88 -548 123 -548 264 -548 274 -548 303 -548 428 -548 470 -548 515 -549 2 -549 63 -549 155 -549 606 -549 645 -550 9 -550 540 -550 563 -551 11 -551 70 -551 74 -551 125 -551 267 -551 627 -552 0 -552 6 -552 15 -552 36 -552 152 -552 159 -552 332 -552 392 -552 435 -553 6 -553 538 -553 546 -553 622 -554 91 -554 448 -554 512 -555 20 -555 341 -555 686 -556 280 -556 422 -556 479 -556 541 -557 19 -557 486 -558 244 -558 348 -558 667 -559 168 -559 222 -559 291 -559 447 -559 601 -560 21 -560 159 -560 285 -560 385 -560 390 -560 434 -560 455 -560 482 -560 511 -560 566 -561 155 -561 268 -561 487 -562 0 -562 262 -562 442 -562 512 -563 1 -563 47 -563 240 -563 248 -563 304 -563 550 -563 585 -564 0 -564 108 -564 121 -564 146 -564 311 -565 374 -565 386 -565 420 -565 435 -565 441 -565 566 -566 171 -566 374 -566 423 -566 435 -566 560 -566 565 -567 5 -567 36 -567 70 -567 103 -567 467 -567 475 -567 640 -568 72 -568 316 -568 354 -568 459 -568 579 -569 42 -569 64 -569 88 -569 131 -569 303 -569 404 -570 12 -570 102 -570 326 -570 526 -570 612 -571 74 -571 421 -571 467 -571 468 -572 642 -573 193 -573 195 -573 514 -573 534 -574 157 -574 183 -574 197 -574 393 -574 546 -574 575 -574 640 -575 90 -575 260 -575 444 -575 574 -575 672 -576 85 -576 116 -576 198 -576 220 -576 236 -576 417 -576 631 -576 653 -577 8 -577 12 -577 27 -577 28 -577 29 -577 78 -577 93 -577 100 -577 143 -577 191 -577 
322 -577 404 -577 408 -578 3 -578 35 -578 68 -578 104 -578 268 -578 358 -578 474 -578 480 -579 10 -579 136 -579 139 -579 265 -579 473 -579 568 -580 86 -580 263 -580 325 -580 349 -581 425 -581 600 -581 661 -581 699 -582 62 -582 68 -582 73 -582 139 -582 473 -582 474 -582 489 -582 596 -583 10 -583 250 -583 602 -583 645 -583 650 -584 148 -584 265 -584 628 -585 5 -585 47 -585 74 -585 103 -585 125 -585 563 -586 26 -586 129 -586 268 -586 430 -587 39 -587 352 -588 0 -588 150 -589 17 -589 21 -589 45 -589 363 -589 485 -589 623 -589 696 -590 7 -590 22 -590 31 -590 42 -590 310 -590 546 -590 633 -591 14 -591 17 -591 349 -591 351 -591 508 -591 526 -592 12 -592 16 -592 18 -593 372 -594 1 -594 2 -594 100 -594 137 -594 261 -594 502 -595 0 -595 620 -595 680 -595 698 -596 3 -596 4 -596 16 -596 206 -596 582 -597 20 -597 43 -597 95 -597 102 -597 130 -598 4 -598 84 -598 119 -598 381 -598 440 -599 15 -599 654 -600 19 -600 581 -600 652 -600 662 -601 41 -601 64 -601 88 -601 123 -601 226 -601 229 -601 352 -601 497 -601 559 -602 482 -602 493 -602 583 -602 603 -603 3 -603 27 -603 468 -603 602 -604 5 -604 11 -604 199 -604 490 -605 77 -606 124 -606 175 -606 191 -606 304 -606 471 -606 528 -606 549 -607 130 -608 20 -608 197 -608 435 -608 681 -608 686 -609 20 -609 55 -609 87 -609 176 -609 288 -610 41 -610 123 -610 254 -610 402 -610 437 -611 458 -612 12 -612 137 -612 570 -613 379 -613 664 -613 681 -614 12 -615 14 -615 90 -615 288 -615 432 -615 483 -615 544 -616 3 -616 73 -616 139 -616 156 -616 480 -617 52 -617 249 -618 2 -618 20 -618 84 -618 130 -618 234 -618 428 -619 0 -619 5 -619 10 -619 63 -619 130 -619 268 -619 486 -619 508 -620 0 -620 259 -620 505 -620 545 -620 595 -621 0 -621 11 -621 13 -621 15 -621 74 -621 125 -621 361 -621 529 -622 16 -622 95 -622 102 -622 261 -622 423 -622 465 -622 544 -622 553 -623 14 -623 56 -623 369 -623 589 -624 8 -624 20 -624 445 -624 690 -625 166 -625 240 -625 662 -626 660 -627 1 -627 11 -627 36 -627 551 -628 12 -628 413 -628 584 -629 8 -629 39 -629 57 -629 120 -629 
123 -629 188 -629 210 -629 294 -629 378 -630 12 -630 31 -630 82 -630 88 -630 181 -630 190 -630 289 -631 31 -631 64 -631 67 -631 76 -631 133 -631 576 -632 62 -632 97 -632 107 -632 215 -632 309 -633 7 -633 42 -633 76 -633 205 -633 289 -633 368 -633 370 -633 417 -633 590 -633 653 -634 39 -634 58 -635 8 -635 431 -635 468 -636 167 -636 204 -636 222 -636 683 -637 1 -637 7 -637 22 -637 50 -637 63 -637 102 -637 261 -637 491 -638 3 -638 12 -638 478 -638 534 -638 544 -638 656 -639 12 -639 20 -639 41 -639 64 -639 133 -639 405 -639 447 -640 5 -640 103 -640 222 -640 259 -640 314 -640 355 -640 384 -640 426 -640 528 -640 567 -640 574 -641 28 -641 284 -641 543 -642 352 -642 410 -642 572 -643 41 -643 116 -643 402 -643 697 -644 12 -644 278 -644 468 -644 667 -645 14 -645 50 -645 128 -645 549 -645 583 -645 679 -646 414 -646 504 -647 26 -647 202 -647 214 -647 259 -647 270 -647 341 -647 433 -647 448 -647 511 -648 12 -648 81 -648 88 -648 118 -649 2 -649 12 -649 106 -649 323 -649 333 -649 408 -649 486 -649 526 -650 17 -650 64 -650 118 -650 355 -650 583 -650 651 -651 7 -651 236 -651 429 -651 479 -651 498 -651 650 -652 4 -652 407 -652 526 -652 600 -653 12 -653 41 -653 64 -653 85 -653 88 -653 120 -653 131 -653 241 -653 353 -653 383 -653 395 -653 576 -653 633 -654 6 -654 8 -654 205 -654 260 -654 336 -654 432 -654 599 -655 26 -655 388 -656 638 -657 397 -657 451 -657 465 -658 12 -658 14 -658 16 -658 122 -658 183 -658 250 -658 308 -658 310 -658 382 -659 0 -659 16 -659 35 -659 74 -659 125 -659 143 -659 341 -660 158 -660 354 -660 626 -660 670 -661 7 -661 9 -661 21 -661 314 -661 322 -661 481 -661 581 -661 662 -662 5 -662 125 -662 151 -662 313 -662 401 -662 494 -662 511 -662 600 -662 625 -662 661 -663 0 -663 18 -663 176 -663 397 -663 445 -664 16 -664 24 -664 126 -664 191 -664 613 -665 1 -665 4 -665 16 -665 19 -665 23 -665 25 -665 95 -665 102 -665 666 -666 0 -666 60 -666 306 -666 343 -666 463 -666 527 -666 665 -667 12 -667 43 -667 95 -667 130 -667 192 -667 207 -667 345 -667 558 -667 644 -668 4 -668 
14 -668 95 -668 217 -669 0 -669 35 -669 83 -669 121 -669 160 -670 18 -670 486 -670 660 -670 674 -671 0 -671 18 -671 60 -671 80 -671 152 -671 251 -671 397 -672 7 -672 13 -672 21 -672 125 -672 132 -672 332 -672 443 -672 517 -672 575 -673 5 -673 12 -673 13 -673 20 -673 86 -673 129 -673 132 -673 219 -673 248 -674 28 -674 72 -674 670 -676 2 -676 102 -676 256 -676 418 -676 489 -677 7 -677 41 -677 64 -677 85 -677 116 -677 118 -677 510 -677 514 -677 533 -678 18 -678 41 -678 67 -678 138 -679 4 -679 23 -679 63 -679 239 -679 645 -680 31 -680 64 -680 201 -680 251 -680 380 -680 394 -680 531 -680 532 -680 595 -681 49 -681 156 -681 241 -681 446 -681 608 -681 613 -682 20 -682 496 -683 8 -683 451 -683 636 -684 223 -685 8 -685 13 -685 20 -685 21 -685 74 -685 128 -685 386 -686 28 -686 29 -686 359 -686 555 -686 608 -686 690 -687 7 -687 64 -687 177 -687 291 -688 20 -688 64 -688 76 -688 118 -688 131 -689 29 -689 493 -689 505 -690 0 -690 50 -690 83 -690 102 -690 105 -690 212 -690 304 -690 406 -690 624 -690 686 -691 52 -691 68 -692 260 -692 304 -692 309 -692 379 -693 10 -693 125 -693 235 -694 133 -694 156 -694 167 -694 343 -696 6 -696 27 -696 355 -696 589 -697 41 -697 76 -697 643 -698 120 -698 138 -698 380 -698 595 -699 409 -699 444 -699 581 diff --git a/pygip/models/attack/mea/data/attack2_generated_graph/citeseer/query_labels.txt b/pygip/models/attack/mea/data/attack2_generated_graph/citeseer/query_labels.txt deleted file mode 100644 index 59badaf2..00000000 --- a/pygip/models/attack/mea/data/attack2_generated_graph/citeseer/query_labels.txt +++ /dev/null @@ -1,3327 +0,0 @@ -0 3 -1 1 -2 5 -3 5 -4 3 -5 1 -6 3 -7 0 -8 3 -9 5 -10 2 -11 4 -12 2 -13 1 -14 2 -15 3 -16 2 -17 4 -18 4 -19 0 -20 1 -21 5 -22 5 -23 3 -24 5 -25 2 -26 5 -27 2 -28 4 -29 2 -30 2 -31 2 -32 4 -33 5 -34 2 -35 3 -36 4 -37 5 -38 3 -39 3 -40 2 -41 1 -42 2 -43 2 -44 5 -45 1 -46 1 -47 4 -48 2 -49 3 -50 3 -51 2 -52 5 -53 2 -54 5 -55 1 -56 4 -57 1 -58 4 -59 2 -60 2 -61 3 -62 4 -63 5 -64 5 -65 1 -66 3 -67 3 -68 4 -69 2 -70 4 -71 
1 -72 1 -73 5 -74 0 -75 2 -76 0 -77 3 -78 5 -79 2 -80 4 -81 1 -82 1 -83 4 -84 4 -85 0 -86 4 -87 4 -88 5 -89 3 -90 5 -91 5 -92 4 -93 5 -94 3 -95 1 -96 4 -97 4 -98 3 -99 1 -100 3 -101 0 -102 1 -103 1 -104 1 -105 3 -106 0 -107 5 -108 0 -109 0 -110 0 -111 0 -112 0 -113 2 -114 0 -115 0 -116 0 -117 0 -118 0 -119 0 -120 3 -121 0 -122 1 -123 5 -124 2 -125 2 -126 0 -127 2 -128 2 -129 5 -130 4 -131 2 -132 4 -133 4 -134 2 -135 4 -136 1 -137 2 -138 2 -139 5 -140 1 -141 2 -142 4 -143 2 -144 1 -145 0 -146 2 -147 2 -148 3 -149 1 -150 2 -151 2 -152 1 -153 2 -154 2 -155 0 -156 5 -157 2 -158 1 -159 4 -160 0 -161 0 -162 2 -163 3 -164 0 -165 3 -166 2 -167 1 -168 5 -169 0 -170 3 -171 4 -172 3 -173 4 -174 3 -175 5 -176 1 -177 3 -178 3 -179 3 -180 2 -181 2 -182 4 -183 0 -184 2 -185 5 -186 1 -187 5 -188 2 -189 3 -190 5 -191 5 -192 1 -193 2 -194 4 -195 3 -196 3 -197 2 -198 5 -199 1 -200 2 -201 4 -202 3 -203 4 -204 0 -205 4 -206 2 -207 1 -208 1 -209 4 -210 1 -211 1 -212 4 -213 2 -214 5 -215 4 -216 1 -217 3 -218 1 -219 2 -220 2 -221 2 -222 5 -223 3 -224 2 -225 3 -226 5 -227 5 -228 4 -229 2 -230 5 -231 2 -232 4 -233 2 -234 4 -235 2 -236 2 -237 2 -238 1 -239 1 -240 3 -241 3 -242 2 -243 2 -244 5 -245 3 -246 2 -247 0 -248 0 -249 5 -250 3 -251 0 -252 5 -253 5 -254 5 -255 2 -256 2 -257 1 -258 2 -259 5 -260 3 -261 3 -262 0 -263 2 -264 0 -265 4 -266 5 -267 2 -268 3 -269 0 -270 2 -271 4 -272 5 -273 2 -274 5 -275 2 -276 1 -277 3 -278 3 -279 3 -280 3 -281 0 -282 1 -283 4 -284 4 -285 4 -286 3 -287 1 -288 4 -289 2 -290 2 -291 5 -292 1 -293 2 -294 4 -295 2 -296 2 -297 0 -298 2 -299 5 -300 4 -301 4 -302 2 -303 3 -304 4 -305 5 -306 3 -307 2 -308 5 -309 0 -310 3 -311 2 -312 0 -313 3 -314 2 -315 1 -316 4 -317 4 -318 1 -319 4 -320 1 -321 1 -322 2 -323 5 -324 2 -325 5 -326 0 -327 3 -328 1 -329 1 -330 5 -331 5 -332 4 -333 4 -334 0 -335 1 -336 0 -337 3 -338 5 -339 0 -340 2 -341 1 -342 4 -343 3 -344 3 -345 1 -346 2 -347 1 -348 1 -349 3 -350 2 -351 2 -352 5 -353 3 -354 5 -355 0 -356 5 -357 3 -358 3 -359 4 -360 5 
-361 2 -362 0 -363 2 -364 1 -365 2 -366 3 -367 0 -368 5 -369 1 -370 0 -371 4 -372 5 -373 2 -374 5 -375 0 -376 4 -377 2 -378 5 -379 4 -380 5 -381 0 -382 2 -383 3 -384 5 -385 2 -386 2 -387 5 -388 2 -389 0 -390 3 -391 5 -392 4 -393 4 -394 2 -395 2 -396 2 -397 4 -398 2 -399 1 -400 3 -401 4 -402 3 -403 2 -404 1 -405 4 -406 2 -407 4 -408 4 -409 1 -410 5 -411 4 -412 4 -413 3 -414 2 -415 1 -416 2 -417 4 -418 0 -419 4 -420 2 -421 4 -422 3 -423 2 -424 0 -425 5 -426 3 -427 0 -428 2 -429 4 -430 4 -431 3 -432 5 -433 2 -434 2 -435 2 -436 0 -437 1 -438 1 -439 3 -440 4 -441 0 -442 4 -443 2 -444 2 -445 1 -446 3 -447 5 -448 3 -449 5 -450 3 -451 2 -452 4 -453 2 -454 2 -455 4 -456 1 -457 2 -458 3 -459 3 -460 2 -461 1 -462 4 -463 3 -464 2 -465 4 -466 3 -467 3 -468 2 -469 3 -470 5 -471 0 -472 0 -473 5 -474 0 -475 4 -476 4 -477 5 -478 2 -479 0 -480 1 -481 1 -482 5 -483 3 -484 4 -485 4 -486 1 -487 1 -488 2 -489 4 -490 0 -491 3 -492 2 -493 2 -494 4 -495 2 -496 5 -497 5 -498 3 -499 2 -500 5 -501 1 -502 1 -503 1 -504 4 -505 5 -506 1 -507 1 -508 5 -509 1 -510 2 -511 5 -512 2 -513 0 -514 0 -515 4 -516 4 -517 3 -518 1 -519 4 -520 1 -521 3 -522 3 -523 1 -524 0 -525 1 -526 4 -527 2 -528 2 -529 2 -530 3 -531 4 -532 2 -533 1 -534 5 -535 1 -536 2 -537 1 -538 5 -539 5 -540 4 -541 0 -542 4 -543 2 -544 2 -545 4 -546 3 -547 4 -548 2 -549 2 -550 4 -551 2 -552 0 -553 0 -554 2 -555 4 -556 1 -557 2 -558 1 -559 1 -560 5 -561 0 -562 4 -563 4 -564 4 -565 3 -566 4 -567 4 -568 3 -569 5 -570 0 -571 0 -572 3 -573 4 -574 3 -575 2 -576 2 -577 4 -578 5 -579 4 -580 1 -581 0 -582 4 -583 1 -584 4 -585 2 -586 2 -587 4 -588 1 -589 1 -590 2 -591 1 -592 3 -593 3 -594 2 -595 4 -596 5 -597 5 -598 2 -599 2 -600 3 -601 0 -602 5 -603 3 -604 1 -605 4 -606 1 -607 2 -608 3 -609 2 -610 4 -611 3 -612 5 -613 5 -614 1 -615 4 -616 2 -617 2 -618 3 -619 2 -620 3 -621 2 -622 2 -623 5 -624 4 -625 3 -626 0 -627 2 -628 3 -629 3 -630 2 -631 2 -632 5 -633 2 -634 4 -635 1 -636 3 -637 4 -638 4 -639 5 -640 5 -641 0 -642 4 -643 5 -644 2 -645 3 -646 
1 -647 4 -648 1 -649 4 -650 1 -651 1 -652 4 -653 4 -654 4 -655 5 -656 5 -657 3 -658 3 -659 0 -660 1 -661 2 -662 5 -663 2 -664 3 -665 3 -666 2 -667 3 -668 1 -669 5 -670 4 -671 3 -672 0 -673 0 -674 4 -675 3 -676 4 -677 1 -678 4 -679 1 -680 2 -681 1 -682 3 -683 2 -684 5 -685 2 -686 2 -687 5 -688 3 -689 5 -690 5 -691 2 -692 2 -693 2 -694 4 -695 1 -696 3 -697 5 -698 4 -699 1 -700 4 -701 3 -702 5 -703 0 -704 3 -705 1 -706 2 -707 4 -708 5 -709 3 -710 3 -711 5 -712 1 -713 0 -714 5 -715 2 -716 0 -717 1 -718 5 -719 2 -720 1 -721 4 -722 5 -723 1 -724 1 -725 2 -726 5 -727 1 -728 5 -729 4 -730 1 -731 5 -732 5 -733 2 -734 1 -735 2 -736 4 -737 2 -738 4 -739 5 -740 4 -741 3 -742 4 -743 5 -744 3 -745 2 -746 1 -747 0 -748 2 -749 3 -750 3 -751 4 -752 4 -753 3 -754 2 -755 3 -756 2 -757 3 -758 1 -759 3 -760 2 -761 2 -762 4 -763 4 -764 2 -765 2 -766 4 -767 1 -768 2 -769 1 -770 2 -771 4 -772 3 -773 2 -774 4 -775 2 -776 2 -777 4 -778 4 -779 0 -780 0 -781 3 -782 4 -783 4 -784 5 -785 0 -786 3 -787 4 -788 1 -789 0 -790 4 -791 2 -792 5 -793 2 -794 2 -795 1 -796 2 -797 5 -798 5 -799 2 -800 3 -801 1 -802 2 -803 0 -804 3 -805 3 -806 0 -807 2 -808 4 -809 4 -810 5 -811 4 -812 4 -813 3 -814 0 -815 4 -816 1 -817 0 -818 2 -819 4 -820 4 -821 1 -822 0 -823 1 -824 5 -825 0 -826 1 -827 1 -828 1 -829 4 -830 5 -831 2 -832 5 -833 5 -834 3 -835 2 -836 1 -837 1 -838 3 -839 2 -840 0 -841 3 -842 5 -843 5 -844 5 -845 3 -846 5 -847 1 -848 0 -849 1 -850 2 -851 2 -852 1 -853 4 -854 4 -855 1 -856 4 -857 2 -858 5 -859 0 -860 4 -861 3 -862 3 -863 0 -864 3 -865 4 -866 5 -867 1 -868 1 -869 3 -870 5 -871 3 -872 2 -873 5 -874 3 -875 2 -876 0 -877 3 -878 5 -879 4 -880 0 -881 3 -882 2 -883 5 -884 5 -885 2 -886 3 -887 2 -888 5 -889 3 -890 0 -891 0 -892 0 -893 4 -894 2 -895 2 -896 0 -897 5 -898 5 -899 3 -900 3 -901 3 -902 4 -903 5 -904 3 -905 2 -906 5 -907 2 -908 1 -909 3 -910 3 -911 4 -912 2 -913 5 -914 3 -915 4 -916 2 -917 3 -918 1 -919 0 -920 4 -921 4 -922 0 -923 4 -924 4 -925 2 -926 0 -927 4 -928 0 -929 2 -930 2 -931 2 
-932 0 -933 5 -934 0 -935 3 -936 3 -937 2 -938 5 -939 1 -940 2 -941 2 -942 5 -943 5 -944 2 -945 3 -946 1 -947 4 -948 2 -949 5 -950 4 -951 4 -952 5 -953 2 -954 5 -955 3 -956 4 -957 1 -958 5 -959 4 -960 1 -961 3 -962 2 -963 3 -964 3 -965 2 -966 2 -967 2 -968 2 -969 5 -970 4 -971 3 -972 2 -973 3 -974 4 -975 1 -976 3 -977 5 -978 2 -979 1 -980 0 -981 2 -982 2 -983 3 -984 3 -985 2 -986 3 -987 5 -988 1 -989 3 -990 0 -991 4 -992 2 -993 2 -994 2 -995 2 -996 2 -997 1 -998 1 -999 3 -1000 4 -1001 3 -1002 4 -1003 1 -1004 4 -1005 5 -1006 5 -1007 5 -1008 2 -1009 2 -1010 2 -1011 1 -1012 2 -1013 2 -1014 4 -1015 2 -1016 1 -1017 3 -1018 4 -1019 2 -1020 2 -1021 1 -1022 4 -1023 1 -1024 2 -1025 2 -1026 3 -1027 4 -1028 3 -1029 1 -1030 3 -1031 0 -1032 1 -1033 3 -1034 2 -1035 1 -1036 2 -1037 4 -1038 5 -1039 4 -1040 2 -1041 0 -1042 3 -1043 0 -1044 3 -1045 4 -1046 4 -1047 1 -1048 5 -1049 4 -1050 2 -1051 1 -1052 4 -1053 5 -1054 2 -1055 5 -1056 1 -1057 2 -1058 2 -1059 1 -1060 1 -1061 1 -1062 3 -1063 5 -1064 2 -1065 3 -1066 3 -1067 2 -1068 4 -1069 3 -1070 0 -1071 2 -1072 4 -1073 5 -1074 5 -1075 3 -1076 5 -1077 4 -1078 3 -1079 3 -1080 0 -1081 5 -1082 4 -1083 5 -1084 5 -1085 4 -1086 3 -1087 4 -1088 1 -1089 2 -1090 5 -1091 2 -1092 3 -1093 1 -1094 4 -1095 5 -1096 2 -1097 1 -1098 4 -1099 3 -1100 3 -1101 4 -1102 4 -1103 4 -1104 1 -1105 1 -1106 0 -1107 4 -1108 2 -1109 2 -1110 4 -1111 5 -1112 2 -1113 4 -1114 5 -1115 1 -1116 4 -1117 2 -1118 2 -1119 3 -1120 3 -1121 0 -1122 2 -1123 5 -1124 2 -1125 0 -1126 2 -1127 0 -1128 2 -1129 0 -1130 2 -1131 2 -1132 0 -1133 3 -1134 2 -1135 4 -1136 2 -1137 4 -1138 5 -1139 3 -1140 1 -1141 5 -1142 0 -1143 5 -1144 0 -1145 1 -1146 1 -1147 4 -1148 5 -1149 2 -1150 5 -1151 4 -1152 1 -1153 1 -1154 1 -1155 3 -1156 0 -1157 2 -1158 1 -1159 3 -1160 1 -1161 2 -1162 4 -1163 1 -1164 4 -1165 3 -1166 4 -1167 1 -1168 2 -1169 4 -1170 4 -1171 3 -1172 5 -1173 0 -1174 2 -1175 0 -1176 2 -1177 1 -1178 2 -1179 2 -1180 0 -1181 2 -1182 1 -1183 0 -1184 5 -1185 0 -1186 5 -1187 2 -1188 1 -1189 2 
-1190 2 -1191 5 -1192 3 -1193 0 -1194 5 -1195 2 -1196 4 -1197 1 -1198 2 -1199 5 -1200 4 -1201 4 -1202 1 -1203 3 -1204 5 -1205 2 -1206 1 -1207 5 -1208 5 -1209 5 -1210 4 -1211 3 -1212 2 -1213 3 -1214 2 -1215 2 -1216 1 -1217 0 -1218 5 -1219 2 -1220 0 -1221 4 -1222 3 -1223 0 -1224 2 -1225 4 -1226 2 -1227 5 -1228 3 -1229 2 -1230 2 -1231 4 -1232 2 -1233 4 -1234 2 -1235 4 -1236 2 -1237 5 -1238 2 -1239 4 -1240 3 -1241 5 -1242 2 -1243 2 -1244 5 -1245 5 -1246 0 -1247 4 -1248 2 -1249 0 -1250 5 -1251 2 -1252 4 -1253 3 -1254 3 -1255 1 -1256 4 -1257 2 -1258 4 -1259 3 -1260 0 -1261 2 -1262 3 -1263 3 -1264 3 -1265 4 -1266 4 -1267 0 -1268 2 -1269 3 -1270 4 -1271 4 -1272 2 -1273 2 -1274 5 -1275 1 -1276 3 -1277 5 -1278 5 -1279 5 -1280 4 -1281 2 -1282 1 -1283 4 -1284 2 -1285 2 -1286 5 -1287 5 -1288 5 -1289 3 -1290 2 -1291 0 -1292 2 -1293 0 -1294 1 -1295 2 -1296 3 -1297 3 -1298 3 -1299 2 -1300 2 -1301 2 -1302 4 -1303 4 -1304 1 -1305 2 -1306 3 -1307 4 -1308 5 -1309 3 -1310 5 -1311 4 -1312 2 -1313 1 -1314 3 -1315 4 -1316 3 -1317 2 -1318 2 -1319 3 -1320 3 -1321 1 -1322 0 -1323 3 -1324 4 -1325 0 -1326 4 -1327 5 -1328 3 -1329 4 -1330 3 -1331 2 -1332 5 -1333 2 -1334 5 -1335 4 -1336 2 -1337 1 -1338 0 -1339 1 -1340 0 -1341 1 -1342 2 -1343 2 -1344 1 -1345 5 -1346 2 -1347 1 -1348 0 -1349 3 -1350 2 -1351 2 -1352 0 -1353 4 -1354 1 -1355 2 -1356 5 -1357 2 -1358 2 -1359 2 -1360 4 -1361 3 -1362 1 -1363 5 -1364 4 -1365 3 -1366 0 -1367 1 -1368 3 -1369 4 -1370 5 -1371 2 -1372 0 -1373 3 -1374 0 -1375 3 -1376 2 -1377 2 -1378 1 -1379 3 -1380 4 -1381 3 -1382 3 -1383 4 -1384 1 -1385 4 -1386 2 -1387 4 -1388 4 -1389 0 -1390 4 -1391 5 -1392 5 -1393 2 -1394 1 -1395 3 -1396 1 -1397 4 -1398 1 -1399 2 -1400 1 -1401 5 -1402 1 -1403 5 -1404 3 -1405 3 -1406 3 -1407 3 -1408 2 -1409 2 -1410 4 -1411 5 -1412 3 -1413 1 -1414 2 -1415 2 -1416 4 -1417 2 -1418 4 -1419 0 -1420 2 -1421 2 -1422 3 -1423 4 -1424 2 -1425 5 -1426 4 -1427 2 -1428 3 -1429 1 -1430 2 -1431 5 -1432 2 -1433 5 -1434 0 -1435 5 -1436 3 -1437 4 -1438 3 -1439 2 
-1440 2 -1441 5 -1442 4 -1443 2 -1444 1 -1445 0 -1446 0 -1447 4 -1448 3 -1449 4 -1450 4 -1451 0 -1452 2 -1453 2 -1454 0 -1455 4 -1456 1 -1457 0 -1458 0 -1459 3 -1460 3 -1461 4 -1462 1 -1463 1 -1464 0 -1465 5 -1466 2 -1467 2 -1468 3 -1469 4 -1470 2 -1471 3 -1472 4 -1473 2 -1474 4 -1475 4 -1476 2 -1477 4 -1478 4 -1479 5 -1480 2 -1481 1 -1482 1 -1483 2 -1484 3 -1485 4 -1486 2 -1487 2 -1488 4 -1489 2 -1490 2 -1491 2 -1492 2 -1493 1 -1494 2 -1495 0 -1496 5 -1497 2 -1498 2 -1499 2 -1500 3 -1501 3 -1502 1 -1503 3 -1504 3 -1505 4 -1506 5 -1507 0 -1508 0 -1509 2 -1510 3 -1511 2 -1512 4 -1513 4 -1514 2 -1515 1 -1516 4 -1517 2 -1518 1 -1519 3 -1520 0 -1521 4 -1522 4 -1523 3 -1524 5 -1525 4 -1526 3 -1527 1 -1528 2 -1529 3 -1530 3 -1531 4 -1532 2 -1533 2 -1534 1 -1535 2 -1536 4 -1537 3 -1538 0 -1539 0 -1540 2 -1541 3 -1542 2 -1543 5 -1544 2 -1545 4 -1546 2 -1547 0 -1548 1 -1549 0 -1550 3 -1551 0 -1552 1 -1553 5 -1554 5 -1555 3 -1556 1 -1557 3 -1558 2 -1559 1 -1560 0 -1561 2 -1562 4 -1563 3 -1564 5 -1565 2 -1566 2 -1567 2 -1568 5 -1569 0 -1570 1 -1571 4 -1572 5 -1573 2 -1574 4 -1575 1 -1576 2 -1577 3 -1578 4 -1579 2 -1580 2 -1581 2 -1582 3 -1583 1 -1584 1 -1585 3 -1586 4 -1587 1 -1588 5 -1589 5 -1590 2 -1591 1 -1592 5 -1593 2 -1594 3 -1595 4 -1596 4 -1597 1 -1598 1 -1599 2 -1600 0 -1601 3 -1602 5 -1603 3 -1604 5 -1605 2 -1606 1 -1607 2 -1608 3 -1609 2 -1610 5 -1611 5 -1612 2 -1613 5 -1614 3 -1615 4 -1616 0 -1617 2 -1618 2 -1619 2 -1620 4 -1621 3 -1622 2 -1623 1 -1624 3 -1625 1 -1626 4 -1627 1 -1628 5 -1629 2 -1630 2 -1631 4 -1632 5 -1633 2 -1634 3 -1635 2 -1636 0 -1637 5 -1638 4 -1639 1 -1640 2 -1641 3 -1642 2 -1643 1 -1644 2 -1645 0 -1646 2 -1647 2 -1648 2 -1649 2 -1650 3 -1651 1 -1652 2 -1653 1 -1654 2 -1655 1 -1656 4 -1657 2 -1658 2 -1659 3 -1660 4 -1661 5 -1662 3 -1663 2 -1664 3 -1665 2 -1666 1 -1667 5 -1668 3 -1669 0 -1670 2 -1671 5 -1672 3 -1673 3 -1674 3 -1675 5 -1676 0 -1677 5 -1678 4 -1679 4 -1680 1 -1681 5 -1682 1 -1683 4 -1684 4 -1685 0 -1686 0 -1687 5 -1688 1 -1689 0 
-1690 2 -1691 4 -1692 4 -1693 4 -1694 5 -1695 1 -1696 2 -1697 2 -1698 3 -1699 2 -1700 1 -1701 4 -1702 0 -1703 2 -1704 0 -1705 0 -1706 3 -1707 3 -1708 2 -1709 5 -1710 4 -1711 0 -1712 0 -1713 1 -1714 0 -1715 5 -1716 4 -1717 3 -1718 1 -1719 1 -1720 3 -1721 4 -1722 1 -1723 1 -1724 2 -1725 2 -1726 5 -1727 4 -1728 5 -1729 5 -1730 1 -1731 2 -1732 2 -1733 5 -1734 2 -1735 4 -1736 1 -1737 1 -1738 0 -1739 5 -1740 2 -1741 5 -1742 1 -1743 2 -1744 5 -1745 5 -1746 4 -1747 1 -1748 1 -1749 2 -1750 1 -1751 2 -1752 1 -1753 4 -1754 2 -1755 3 -1756 4 -1757 2 -1758 1 -1759 2 -1760 2 -1761 2 -1762 5 -1763 5 -1764 1 -1765 0 -1766 0 -1767 2 -1768 1 -1769 5 -1770 3 -1771 4 -1772 5 -1773 3 -1774 0 -1775 4 -1776 3 -1777 2 -1778 4 -1779 5 -1780 4 -1781 0 -1782 0 -1783 5 -1784 5 -1785 4 -1786 2 -1787 2 -1788 2 -1789 4 -1790 5 -1791 4 -1792 4 -1793 1 -1794 1 -1795 1 -1796 2 -1797 4 -1798 0 -1799 2 -1800 3 -1801 1 -1802 3 -1803 5 -1804 1 -1805 5 -1806 3 -1807 3 -1808 5 -1809 5 -1810 3 -1811 3 -1812 2 -1813 4 -1814 1 -1815 3 -1816 1 -1817 4 -1818 3 -1819 0 -1820 5 -1821 2 -1822 5 -1823 3 -1824 2 -1825 2 -1826 4 -1827 2 -1828 2 -1829 0 -1830 3 -1831 2 -1832 4 -1833 0 -1834 2 -1835 0 -1836 4 -1837 1 -1838 1 -1839 3 -1840 0 -1841 3 -1842 4 -1843 1 -1844 2 -1845 5 -1846 3 -1847 5 -1848 2 -1849 1 -1850 2 -1851 5 -1852 0 -1853 1 -1854 2 -1855 5 -1856 5 -1857 4 -1858 5 -1859 2 -1860 3 -1861 0 -1862 3 -1863 5 -1864 3 -1865 4 -1866 1 -1867 3 -1868 5 -1869 1 -1870 4 -1871 1 -1872 2 -1873 4 -1874 3 -1875 1 -1876 2 -1877 3 -1878 3 -1879 2 -1880 2 -1881 0 -1882 3 -1883 1 -1884 2 -1885 3 -1886 1 -1887 3 -1888 3 -1889 1 -1890 5 -1891 4 -1892 0 -1893 5 -1894 1 -1895 2 -1896 1 -1897 1 -1898 2 -1899 4 -1900 1 -1901 1 -1902 1 -1903 5 -1904 4 -1905 5 -1906 5 -1907 4 -1908 3 -1909 1 -1910 2 -1911 5 -1912 4 -1913 5 -1914 1 -1915 3 -1916 3 -1917 4 -1918 2 -1919 4 -1920 2 -1921 4 -1922 2 -1923 2 -1924 4 -1925 1 -1926 4 -1927 5 -1928 3 -1929 3 -1930 5 -1931 0 -1932 4 -1933 3 -1934 0 -1935 5 -1936 5 -1937 4 -1938 1 -1939 4 
-1940 2 -1941 3 -1942 3 -1943 2 -1944 1 -1945 4 -1946 4 -1947 3 -1948 3 -1949 3 -1950 2 -1951 1 -1952 4 -1953 2 -1954 5 -1955 1 -1956 2 -1957 5 -1958 2 -1959 2 -1960 3 -1961 2 -1962 5 -1963 5 -1964 1 -1965 2 -1966 3 -1967 4 -1968 5 -1969 5 -1970 4 -1971 3 -1972 1 -1973 3 -1974 2 -1975 1 -1976 2 -1977 5 -1978 0 -1979 4 -1980 2 -1981 3 -1982 0 -1983 5 -1984 0 -1985 1 -1986 2 -1987 2 -1988 0 -1989 1 -1990 5 -1991 1 -1992 4 -1993 5 -1994 2 -1995 2 -1996 2 -1997 2 -1998 2 -1999 4 -2000 1 -2001 0 -2002 2 -2003 2 -2004 3 -2005 4 -2006 4 -2007 3 -2008 5 -2009 4 -2010 5 -2011 2 -2012 5 -2013 3 -2014 1 -2015 3 -2016 2 -2017 3 -2018 1 -2019 3 -2020 3 -2021 2 -2022 3 -2023 2 -2024 2 -2025 4 -2026 1 -2027 5 -2028 4 -2029 5 -2030 4 -2031 5 -2032 2 -2033 3 -2034 4 -2035 0 -2036 3 -2037 1 -2038 1 -2039 1 -2040 0 -2041 3 -2042 5 -2043 5 -2044 2 -2045 1 -2046 1 -2047 2 -2048 1 -2049 2 -2050 4 -2051 2 -2052 4 -2053 2 -2054 4 -2055 4 -2056 0 -2057 2 -2058 5 -2059 4 -2060 2 -2061 3 -2062 3 -2063 3 -2064 2 -2065 5 -2066 4 -2067 5 -2068 3 -2069 4 -2070 2 -2071 5 -2072 5 -2073 1 -2074 0 -2075 1 -2076 3 -2077 2 -2078 4 -2079 2 -2080 4 -2081 1 -2082 0 -2083 3 -2084 5 -2085 4 -2086 2 -2087 2 -2088 0 -2089 2 -2090 5 -2091 2 -2092 3 -2093 1 -2094 5 -2095 5 -2096 0 -2097 2 -2098 0 -2099 3 -2100 4 -2101 1 -2102 2 -2103 3 -2104 5 -2105 3 -2106 3 -2107 2 -2108 1 -2109 1 -2110 1 -2111 0 -2112 4 -2113 2 -2114 0 -2115 2 -2116 2 -2117 4 -2118 1 -2119 1 -2120 4 -2121 2 -2122 2 -2123 3 -2124 2 -2125 2 -2126 0 -2127 5 -2128 2 -2129 3 -2130 1 -2131 3 -2132 4 -2133 0 -2134 4 -2135 5 -2136 1 -2137 0 -2138 2 -2139 2 -2140 5 -2141 1 -2142 3 -2143 2 -2144 2 -2145 2 -2146 4 -2147 1 -2148 1 -2149 4 -2150 4 -2151 2 -2152 1 -2153 4 -2154 4 -2155 5 -2156 0 -2157 0 -2158 3 -2159 3 -2160 5 -2161 1 -2162 2 -2163 4 -2164 1 -2165 2 -2166 2 -2167 4 -2168 2 -2169 5 -2170 1 -2171 5 -2172 1 -2173 2 -2174 3 -2175 2 -2176 2 -2177 4 -2178 3 -2179 4 -2180 5 -2181 2 -2182 2 -2183 2 -2184 4 -2185 2 -2186 2 -2187 1 -2188 1 -2189 4 
-2190 4 -2191 5 -2192 3 -2193 1 -2194 1 -2195 5 -2196 3 -2197 2 -2198 1 -2199 2 -2200 2 -2201 1 -2202 5 -2203 4 -2204 2 -2205 2 -2206 2 -2207 4 -2208 3 -2209 3 -2210 4 -2211 2 -2212 4 -2213 1 -2214 5 -2215 4 -2216 2 -2217 2 -2218 4 -2219 1 -2220 4 -2221 2 -2222 5 -2223 3 -2224 1 -2225 5 -2226 2 -2227 1 -2228 3 -2229 0 -2230 2 -2231 5 -2232 3 -2233 3 -2234 2 -2235 2 -2236 4 -2237 3 -2238 5 -2239 2 -2240 4 -2241 0 -2242 2 -2243 0 -2244 2 -2245 4 -2246 2 -2247 2 -2248 1 -2249 4 -2250 3 -2251 4 -2252 4 -2253 2 -2254 2 -2255 5 -2256 3 -2257 2 -2258 4 -2259 4 -2260 3 -2261 4 -2262 4 -2263 0 -2264 2 -2265 0 -2266 5 -2267 2 -2268 0 -2269 0 -2270 2 -2271 1 -2272 1 -2273 3 -2274 2 -2275 4 -2276 2 -2277 2 -2278 4 -2279 2 -2280 0 -2281 2 -2282 2 -2283 4 -2284 3 -2285 3 -2286 4 -2287 0 -2288 5 -2289 1 -2290 0 -2291 2 -2292 4 -2293 2 -2294 3 -2295 2 -2296 4 -2297 3 -2298 2 -2299 2 -2300 4 -2301 4 -2302 3 -2303 2 -2304 3 -2305 5 -2306 5 -2307 3 -2308 3 -2309 3 -2310 5 -2311 3 -2312 4 -2313 2 -2314 4 -2315 2 -2316 4 -2317 0 -2318 4 -2319 2 -2320 3 -2321 3 -2322 3 -2323 3 -2324 1 -2325 3 -2326 3 -2327 0 -2328 2 -2329 0 -2330 1 -2331 1 -2332 0 -2333 2 -2334 5 -2335 4 -2336 1 -2337 0 -2338 0 -2339 4 -2340 0 -2341 3 -2342 3 -2343 5 -2344 4 -2345 4 -2346 4 -2347 5 -2348 3 -2349 0 -2350 3 -2351 3 -2352 3 -2353 3 -2354 3 -2355 1 -2356 1 -2357 3 -2358 3 -2359 3 -2360 2 -2361 2 -2362 1 -2363 3 -2364 3 -2365 0 -2366 5 -2367 5 -2368 5 -2369 3 -2370 2 -2371 3 -2372 3 -2373 2 -2374 2 -2375 3 -2376 5 -2377 2 -2378 5 -2379 1 -2380 1 -2381 0 -2382 1 -2383 4 -2384 1 -2385 0 -2386 2 -2387 1 -2388 3 -2389 3 -2390 2 -2391 4 -2392 3 -2393 3 -2394 2 -2395 1 -2396 2 -2397 2 -2398 2 -2399 0 -2400 0 -2401 3 -2402 0 -2403 3 -2404 2 -2405 2 -2406 2 -2407 2 -2408 0 -2409 3 -2410 3 -2411 4 -2412 4 -2413 2 -2414 1 -2415 2 -2416 2 -2417 2 -2418 2 -2419 3 -2420 1 -2421 1 -2422 2 -2423 5 -2424 5 -2425 5 -2426 1 -2427 1 -2428 0 -2429 2 -2430 4 -2431 3 -2432 0 -2433 1 -2434 3 -2435 2 -2436 3 -2437 4 -2438 3 -2439 2 
-2440 4 -2441 4 -2442 3 -2443 1 -2444 2 -2445 0 -2446 2 -2447 4 -2448 4 -2449 4 -2450 4 -2451 4 -2452 4 -2453 5 -2454 0 -2455 3 -2456 0 -2457 1 -2458 3 -2459 1 -2460 3 -2461 1 -2462 3 -2463 4 -2464 4 -2465 3 -2466 3 -2467 2 -2468 5 -2469 3 -2470 2 -2471 2 -2472 2 -2473 0 -2474 2 -2475 2 -2476 5 -2477 0 -2478 1 -2479 2 -2480 2 -2481 2 -2482 5 -2483 1 -2484 2 -2485 5 -2486 5 -2487 2 -2488 2 -2489 2 -2490 2 -2491 5 -2492 5 -2493 5 -2494 3 -2495 4 -2496 4 -2497 4 -2498 1 -2499 0 -2500 3 -2501 0 -2502 0 -2503 5 -2504 5 -2505 1 -2506 1 -2507 1 -2508 3 -2509 3 -2510 3 -2511 3 -2512 5 -2513 1 -2514 4 -2515 4 -2516 1 -2517 1 -2518 1 -2519 3 -2520 2 -2521 3 -2522 2 -2523 2 -2524 5 -2525 2 -2526 0 -2527 1 -2528 3 -2529 3 -2530 0 -2531 4 -2532 4 -2533 1 -2534 1 -2535 1 -2536 1 -2537 1 -2538 5 -2539 3 -2540 3 -2541 5 -2542 2 -2543 5 -2544 5 -2545 2 -2546 2 -2547 4 -2548 5 -2549 5 -2550 5 -2551 5 -2552 3 -2553 2 -2554 0 -2555 0 -2556 2 -2557 4 -2558 0 -2559 0 -2560 4 -2561 5 -2562 4 -2563 3 -2564 2 -2565 2 -2566 1 -2567 5 -2568 5 -2569 2 -2570 1 -2571 2 -2572 4 -2573 1 -2574 1 -2575 1 -2576 3 -2577 1 -2578 4 -2579 5 -2580 0 -2581 0 -2582 1 -2583 3 -2584 1 -2585 2 -2586 3 -2587 1 -2588 0 -2589 1 -2590 2 -2591 0 -2592 1 -2593 3 -2594 2 -2595 0 -2596 2 -2597 2 -2598 2 -2599 2 -2600 3 -2601 4 -2602 4 -2603 5 -2604 0 -2605 3 -2606 4 -2607 2 -2608 4 -2609 4 -2610 4 -2611 4 -2612 4 -2613 0 -2614 0 -2615 1 -2616 4 -2617 1 -2618 1 -2619 5 -2620 4 -2621 2 -2622 3 -2623 0 -2624 3 -2625 2 -2626 2 -2627 5 -2628 5 -2629 3 -2630 3 -2631 4 -2632 2 -2633 1 -2634 5 -2635 0 -2636 4 -2637 2 -2638 2 -2639 2 -2640 2 -2641 2 -2642 5 -2643 0 -2644 0 -2645 2 -2646 4 -2647 1 -2648 1 -2649 2 -2650 2 -2651 2 -2652 3 -2653 1 -2654 3 -2655 3 -2656 4 -2657 4 -2658 0 -2659 0 -2660 2 -2661 4 -2662 4 -2663 2 -2664 2 -2665 2 -2666 2 -2667 2 -2668 2 -2669 5 -2670 4 -2671 2 -2672 4 -2673 4 -2674 2 -2675 1 -2676 2 -2677 0 -2678 2 -2679 4 -2680 2 -2681 2 -2682 2 -2683 4 -2684 3 -2685 5 -2686 5 -2687 3 -2688 4 -2689 3 
-2690 1 -2691 1 -2692 2 -2693 5 -2694 1 -2695 1 -2696 3 -2697 3 -2698 3 -2699 0 -2700 0 -2701 3 -2702 1 -2703 1 -2704 1 -2705 1 -2706 1 -2707 1 -2708 1 -2709 1 -2710 1 -2711 1 -2712 1 -2713 4 -2714 4 -2715 2 -2716 2 -2717 2 -2718 2 -2719 2 -2720 2 -2721 4 -2722 3 -2723 3 -2724 3 -2725 5 -2726 5 -2727 5 -2728 2 -2729 5 -2730 5 -2731 4 -2732 5 -2733 5 -2734 4 -2735 2 -2736 2 -2737 2 -2738 3 -2739 3 -2740 4 -2741 4 -2742 5 -2743 2 -2744 5 -2745 1 -2746 5 -2747 5 -2748 5 -2749 3 -2750 4 -2751 1 -2752 4 -2753 5 -2754 2 -2755 2 -2756 1 -2757 3 -2758 1 -2759 2 -2760 3 -2761 5 -2762 5 -2763 5 -2764 5 -2765 5 -2766 5 -2767 5 -2768 2 -2769 2 -2770 2 -2771 4 -2772 5 -2773 5 -2774 2 -2775 2 -2776 0 -2777 5 -2778 3 -2779 3 -2780 3 -2781 5 -2782 3 -2783 2 -2784 2 -2785 2 -2786 2 -2787 0 -2788 0 -2789 4 -2790 5 -2791 1 -2792 1 -2793 3 -2794 1 -2795 1 -2796 1 -2797 2 -2798 4 -2799 3 -2800 1 -2801 3 -2802 3 -2803 3 -2804 3 -2805 3 -2806 1 -2807 2 -2808 2 -2809 1 -2810 1 -2811 5 -2812 0 -2813 3 -2814 4 -2815 4 -2816 4 -2817 4 -2818 5 -2819 4 -2820 2 -2821 2 -2822 2 -2823 2 -2824 2 -2825 2 -2826 2 -2827 3 -2828 3 -2829 2 -2830 2 -2831 1 -2832 0 -2833 5 -2834 0 -2835 4 -2836 4 -2837 1 -2838 2 -2839 2 -2840 4 -2841 5 -2842 2 -2843 2 -2844 4 -2845 4 -2846 2 -2847 3 -2848 0 -2849 1 -2850 1 -2851 1 -2852 1 -2853 3 -2854 1 -2855 0 -2856 1 -2857 5 -2858 1 -2859 2 -2860 2 -2861 3 -2862 2 -2863 0 -2864 2 -2865 2 -2866 3 -2867 3 -2868 3 -2869 2 -2870 2 -2871 0 -2872 5 -2873 0 -2874 3 -2875 3 -2876 3 -2877 2 -2878 3 -2879 5 -2880 2 -2881 3 -2882 2 -2883 2 -2884 0 -2885 5 -2886 2 -2887 2 -2888 3 -2889 3 -2890 3 -2891 1 -2892 3 -2893 0 -2894 3 -2895 3 -2896 2 -2897 3 -2898 4 -2899 4 -2900 4 -2901 4 -2902 4 -2903 4 -2904 3 -2905 3 -2906 1 -2907 0 -2908 3 -2909 4 -2910 4 -2911 4 -2912 2 -2913 4 -2914 2 -2915 1 -2916 1 -2917 1 -2918 4 -2919 1 -2920 4 -2921 4 -2922 1 -2923 5 -2924 2 -2925 2 -2926 2 -2927 2 -2928 5 -2929 5 -2930 1 -2931 4 -2932 2 -2933 1 -2934 1 -2935 5 -2936 3 -2937 3 -2938 0 -2939 5 
-2940 5 -2941 5 -2942 2 -2943 5 -2944 5 -2945 4 -2946 0 -2947 1 -2948 5 -2949 5 -2950 4 -2951 4 -2952 4 -2953 1 -2954 2 -2955 0 -2956 0 -2957 0 -2958 0 -2959 0 -2960 1 -2961 3 -2962 1 -2963 4 -2964 2 -2965 5 -2966 1 -2967 2 -2968 2 -2969 2 -2970 1 -2971 2 -2972 0 -2973 4 -2974 5 -2975 5 -2976 1 -2977 4 -2978 5 -2979 5 -2980 1 -2981 1 -2982 5 -2983 0 -2984 3 -2985 1 -2986 1 -2987 1 -2988 1 -2989 0 -2990 0 -2991 2 -2992 2 -2993 4 -2994 3 -2995 5 -2996 1 -2997 1 -2998 3 -2999 5 -3000 2 -3001 1 -3002 2 -3003 5 -3004 5 -3005 5 -3006 5 -3007 5 -3008 2 -3009 4 -3010 4 -3011 5 -3012 4 -3013 2 -3014 2 -3015 5 -3016 1 -3017 4 -3018 4 -3019 1 -3020 2 -3021 3 -3022 5 -3023 3 -3024 1 -3025 0 -3026 4 -3027 1 -3028 1 -3029 4 -3030 2 -3031 2 -3032 5 -3033 5 -3034 2 -3035 5 -3036 3 -3037 3 -3038 3 -3039 4 -3040 2 -3041 2 -3042 1 -3043 3 -3044 5 -3045 3 -3046 4 -3047 4 -3048 4 -3049 5 -3050 4 -3051 4 -3052 5 -3053 5 -3054 2 -3055 2 -3056 0 -3057 0 -3058 1 -3059 1 -3060 4 -3061 5 -3062 2 -3063 2 -3064 5 -3065 4 -3066 1 -3067 1 -3068 5 -3069 5 -3070 2 -3071 4 -3072 1 -3073 2 -3074 2 -3075 4 -3076 4 -3077 4 -3078 1 -3079 1 -3080 1 -3081 1 -3082 5 -3083 1 -3084 5 -3085 3 -3086 2 -3087 0 -3088 1 -3089 1 -3090 1 -3091 1 -3092 2 -3093 2 -3094 3 -3095 3 -3096 1 -3097 1 -3098 2 -3099 4 -3100 0 -3101 2 -3102 5 -3103 5 -3104 3 -3105 5 -3106 5 -3107 5 -3108 5 -3109 5 -3110 5 -3111 3 -3112 1 -3113 5 -3114 2 -3115 2 -3116 3 -3117 4 -3118 4 -3119 4 -3120 4 -3121 0 -3122 3 -3123 5 -3124 3 -3125 5 -3126 1 -3127 0 -3128 3 -3129 3 -3130 4 -3131 4 -3132 5 -3133 0 -3134 0 -3135 3 -3136 2 -3137 5 -3138 3 -3139 5 -3140 2 -3141 1 -3142 2 -3143 5 -3144 3 -3145 3 -3146 4 -3147 4 -3148 4 -3149 2 -3150 1 -3151 1 -3152 5 -3153 4 -3154 4 -3155 5 -3156 5 -3157 3 -3158 2 -3159 5 -3160 2 -3161 0 -3162 3 -3163 0 -3164 2 -3165 4 -3166 4 -3167 2 -3168 2 -3169 3 -3170 2 -3171 3 -3172 3 -3173 2 -3174 5 -3175 3 -3176 0 -3177 0 -3178 1 -3179 1 -3180 3 -3181 3 -3182 5 -3183 1 -3184 5 -3185 3 -3186 2 -3187 3 -3188 5 -3189 3 
-3190 3 -3191 3 -3192 1 -3193 0 -3194 5 -3195 5 -3196 5 -3197 5 -3198 2 -3199 1 -3200 5 -3201 5 -3202 4 -3203 3 -3204 3 -3205 5 -3206 5 -3207 4 -3208 4 -3209 2 -3210 5 -3211 0 -3212 5 -3213 1 -3214 5 -3215 4 -3216 4 -3217 4 -3218 4 -3219 5 -3220 5 -3221 4 -3222 2 -3223 4 -3224 3 -3225 5 -3226 3 -3227 3 -3228 1 -3229 1 -3230 1 -3231 4 -3232 4 -3233 2 -3234 2 -3235 2 -3236 0 -3237 1 -3238 5 -3239 5 -3240 3 -3241 4 -3242 3 -3243 3 -3244 4 -3245 0 -3246 1 -3247 2 -3248 5 -3249 5 -3250 2 -3251 5 -3252 4 -3253 4 -3254 1 -3255 2 -3256 2 -3257 4 -3258 2 -3259 3 -3260 3 -3261 0 -3262 3 -3263 2 -3264 0 -3265 4 -3266 2 -3267 5 -3268 1 -3269 3 -3270 3 -3271 5 -3272 5 -3273 4 -3274 0 -3275 2 -3276 5 -3277 5 -3278 5 -3279 1 -3280 2 -3281 2 -3282 4 -3283 1 -3284 4 -3285 5 -3286 5 -3287 5 -3288 4 -3289 5 -3290 4 -3291 0 -3292 2 -3293 5 -3294 4 -3295 3 -3296 4 -3297 2 -3298 2 -3299 4 -3300 2 -3301 3 -3302 3 -3303 5 -3304 1 -3305 5 -3306 5 -3307 2 -3308 0 -3309 1 -3310 0 -3311 1 -3312 3 -3313 5 -3314 4 -3315 3 -3316 2 -3317 4 -3318 4 -3319 1 -3320 1 -3321 0 -3322 1 -3323 2 -3324 3 -3325 0 -3326 5 diff --git a/pygip/models/attack/mea/data/attack2_generated_graph/citeseer/selected_index.txt b/pygip/models/attack/mea/data/attack2_generated_graph/citeseer/selected_index.txt deleted file mode 100644 index d194cde9..00000000 --- a/pygip/models/attack/mea/data/attack2_generated_graph/citeseer/selected_index.txt +++ /dev/null @@ -1,700 +0,0 @@ -4 -15 -17 -22 -23 -24 -25 -31 -33 -40 -56 -73 -77 -80 -82 -96 -100 -111 -112 -113 -116 -117 -129 -136 -137 -143 -147 -155 -158 -160 -161 -168 -174 -179 -181 -191 -198 -200 -220 -243 -249 -250 -251 -253 -258 -263 -277 -278 -282 -284 -285 -290 -294 -301 -302 -312 -313 -314 -317 -322 -324 -325 -327 -330 -343 -351 -354 -363 -367 -370 -375 -388 -397 -400 -402 -404 -405 -409 -410 -412 -413 -415 -417 -419 -421 -429 -430 -431 -432 -435 -440 -445 -446 -448 -461 -469 -472 -475 -489 -495 -502 -507 -508 -510 -512 -519 -521 -525 -528 -546 -549 -564 -567 -571 -572 
-575 -580 -583 -586 -595 -599 -600 -603 -611 -612 -613 -615 -640 -647 -651 -652 -660 -668 -673 -678 -679 -686 -696 -702 -706 -707 -709 -711 -713 -722 -725 -726 -745 -752 -762 -763 -764 -765 -766 -781 -786 -787 -794 -798 -799 -810 -814 -815 -819 -822 -835 -841 -850 -851 -853 -854 -861 -862 -863 -867 -876 -884 -898 -902 -904 -906 -907 -910 -914 -916 -921 -923 -932 -935 -937 -947 -949 -950 -951 -957 -959 -990 -991 -994 -1003 -1004 -1007 -1008 -1009 -1011 -1015 -1025 -1029 -1031 -1032 -1039 -1040 -1046 -1048 -1051 -1059 -1060 -1064 -1069 -1085 -1095 -1097 -1100 -1105 -1109 -1111 -1112 -1115 -1125 -1126 -1136 -1140 -1143 -1147 -1148 -1150 -1153 -1154 -1156 -1157 -1159 -1162 -1167 -1169 -1175 -1178 -1180 -1182 -1191 -1194 -1196 -1223 -1238 -1241 -1244 -1254 -1255 -1262 -1266 -1270 -1273 -1281 -1284 -1286 -1290 -1302 -1305 -1306 -1307 -1310 -1314 -1317 -1323 -1324 -1325 -1327 -1329 -1350 -1358 -1359 -1362 -1375 -1377 -1380 -1382 -1383 -1385 -1386 -1387 -1389 -1391 -1394 -1398 -1403 -1406 -1407 -1413 -1419 -1435 -1437 -1442 -1446 -1450 -1455 -1457 -1459 -1464 -1472 -1479 -1480 -1491 -1492 -1495 -1502 -1503 -1504 -1510 -1512 -1515 -1517 -1518 -1520 -1521 -1529 -1542 -1547 -1549 -1558 -1560 -1564 -1568 -1580 -1582 -1583 -1585 -1586 -1587 -1589 -1592 -1597 -1600 -1602 -1607 -1613 -1616 -1629 -1630 -1637 -1645 -1646 -1648 -1652 -1657 -1674 -1675 -1679 -1682 -1686 -1687 -1692 -1697 -1706 -1707 -1715 -1735 -1737 -1745 -1750 -1753 -1768 -1773 -1787 -1791 -1799 -1800 -1802 -1812 -1821 -1823 -1825 -1836 -1843 -1848 -1858 -1866 -1867 -1868 -1870 -1882 -1884 -1885 -1888 -1891 -1900 -1901 -1907 -1908 -1911 -1913 -1921 -1924 -1932 -1933 -1934 -1938 -1941 -1945 -1950 -1961 -1976 -1980 -1988 -1991 -2001 -2014 -2017 -2022 -2023 -2025 -2029 -2037 -2038 -2042 -2045 -2048 -2050 -2054 -2055 -2061 -2062 -2070 -2072 -2079 -2094 -2103 -2104 -2106 -2114 -2121 -2136 -2138 -2151 -2155 -2156 -2163 -2177 -2185 -2192 -2204 -2205 -2214 -2215 -2217 -2218 -2221 -2225 -2228 -2234 -2235 -2243 -2250 -2254 
-2256 -2257 -2261 -2264 -2270 -2274 -2300 -2301 -2303 -2308 -2309 -2310 -2313 -2314 -2319 -2338 -2340 -2342 -2355 -2356 -2362 -2365 -2370 -2372 -2382 -2387 -2388 -2395 -2398 -2400 -2401 -2405 -2415 -2419 -2433 -2436 -2447 -2452 -2454 -2459 -2464 -2469 -2473 -2479 -2484 -2492 -2494 -2496 -2498 -2502 -2510 -2516 -2517 -2519 -2520 -2521 -2523 -2537 -2543 -2544 -2545 -2547 -2551 -2552 -2558 -2565 -2571 -2572 -2580 -2585 -2593 -2596 -2598 -2603 -2619 -2620 -2623 -2624 -2632 -2635 -2639 -2651 -2656 -2658 -2660 -2661 -2667 -2672 -2675 -2676 -2680 -2683 -2684 -2687 -2692 -2694 -2704 -2706 -2721 -2725 -2738 -2741 -2744 -2745 -2748 -2749 -2756 -2757 -2760 -2770 -2776 -2789 -2792 -2795 -2796 -2798 -2799 -2800 -2801 -2810 -2813 -2818 -2821 -2822 -2829 -2831 -2836 -2837 -2838 -2846 -2849 -2858 -2859 -2863 -2864 -2870 -2873 -2874 -2875 -2877 -2879 -2887 -2890 -2895 -2899 -2903 -2908 -2911 -2912 -2913 -2915 -2916 -2917 -2936 -2941 -2942 -2943 -2945 -2946 -2947 -2949 -2952 -2955 -2957 -2962 -2963 -2967 -2982 -2991 -2993 -2996 -2997 -2998 -3005 -3014 -3015 -3017 -3019 -3029 -3030 -3038 -3041 -3051 -3052 -3055 -3064 -3070 -3072 -3078 -3086 -3087 -3101 -3105 -3107 -3113 -3116 -3123 -3130 -3136 -3138 -3141 -3149 -3152 -3155 -3156 -3169 -3178 -3179 -3185 -3186 -3195 -3197 -3205 -3206 -3210 -3212 -3215 -3216 -3217 -3219 -3221 -3228 -3230 -3233 -3242 -3244 -3248 -3258 -3271 -3275 -3284 -3286 -3287 -3300 -3308 -3319 -3325 -3326 diff --git a/pygip/models/attack/mea/data/attack2_generated_graph/cora/graph_label.txt b/pygip/models/attack/mea/data/attack2_generated_graph/cora/graph_label.txt deleted file mode 100644 index 4b30ec80..00000000 --- a/pygip/models/attack/mea/data/attack2_generated_graph/cora/graph_label.txt +++ /dev/null @@ -1,1908 +0,0 @@ -0 68 -0 334 -0 391 -0 469 -0 472 -0 480 -0 490 -0 497 -0 567 -0 606 -1 66 -1 199 -1 417 -1 568 -1 674 -2 9 -2 20 -2 103 -2 349 -2 634 -2 659 -3 7 -3 19 -3 90 -3 108 -3 129 -3 236 -3 326 -3 502 -3 506 -3 544 -3 554 -4 154 -4 322 -4 332 -4 395 -4 
434 -4 530 -4 685 -5 147 -5 344 -5 580 -5 623 -6 29 -6 189 -6 529 -7 3 -7 70 -7 81 -7 125 -7 143 -7 210 -7 285 -7 352 -7 473 -7 559 -7 600 -8 207 -8 372 -8 441 -8 473 -9 2 -9 12 -9 130 -9 349 -9 357 -9 575 -10 43 -10 170 -10 200 -10 341 -10 343 -10 623 -11 18 -11 207 -11 331 -11 354 -11 462 -11 663 -12 9 -12 123 -12 168 -12 219 -12 454 -13 134 -13 183 -13 232 -13 235 -13 485 -13 575 -13 625 -14 80 -14 166 -14 207 -14 226 -14 333 -14 380 -14 647 -15 174 -15 425 -15 536 -15 637 -16 266 -16 431 -16 640 -17 43 -17 45 -17 50 -17 63 -17 262 -17 433 -17 595 -17 597 -17 654 -18 11 -18 70 -18 134 -18 143 -18 241 -18 540 -18 615 -19 3 -19 31 -19 176 -19 220 -19 507 -19 508 -20 2 -20 103 -20 118 -20 311 -20 689 -21 57 -21 118 -21 268 -21 486 -22 49 -22 120 -22 123 -22 232 -22 247 -22 398 -22 521 -23 29 -23 96 -23 170 -23 262 -24 79 -24 236 -24 261 -24 341 -24 438 -24 512 -24 516 -25 403 -25 410 -25 469 -25 545 -25 546 -25 583 -26 45 -26 262 -27 207 -27 418 -27 464 -28 45 -28 111 -28 351 -28 474 -28 569 -29 6 -29 23 -29 119 -29 373 -29 640 -30 170 -30 571 -30 607 -30 651 -31 19 -31 91 -31 198 -31 229 -31 364 -31 415 -32 408 -32 521 -32 620 -33 69 -33 198 -33 250 -33 361 -33 503 -33 582 -33 636 -34 44 -34 141 -35 460 -36 496 -36 559 -37 54 -37 86 -37 232 -37 316 -37 546 -37 637 -37 654 -38 72 -38 401 -39 248 -39 353 -39 487 -39 556 -39 615 -39 685 -39 693 -40 663 -40 675 -41 342 -41 380 -42 425 -42 651 -43 10 -43 17 -43 265 -43 328 -43 388 -43 489 -43 527 -43 530 -43 542 -43 609 -43 614 -44 34 -44 179 -44 329 -45 17 -45 26 -45 28 -45 349 -46 262 -47 469 -47 490 -47 623 -47 630 -47 686 -48 147 -48 207 -48 327 -49 22 -49 200 -49 654 -50 17 -50 318 -50 567 -51 280 -51 432 -52 139 -52 225 -52 270 -52 310 -52 404 -52 423 -53 55 -53 146 -53 307 -53 494 -53 608 -53 630 -53 636 -54 37 -54 157 -54 654 -55 53 -55 519 -56 94 -56 184 -56 396 -56 658 -57 21 -57 183 -57 508 -57 625 -57 658 -58 63 -58 388 -59 94 -59 216 -59 353 -59 440 -59 500 -59 501 -60 184 -60 347 -61 111 -61 470 -61 608 
-62 64 -62 268 -63 17 -63 58 -63 388 -63 489 -64 62 -64 184 -65 562 -65 563 -66 1 -66 428 -67 359 -67 425 -68 0 -68 170 -68 247 -68 326 -68 487 -68 653 -69 33 -69 170 -69 205 -69 243 -69 447 -69 615 -70 7 -70 18 -70 199 -70 295 -70 314 -70 537 -70 687 -71 480 -71 500 -72 38 -72 115 -72 428 -73 198 -73 507 -74 236 -74 316 -74 424 -74 567 -74 590 -74 610 -75 250 -75 462 -75 474 -75 536 -75 622 -76 289 -77 393 -77 607 -77 694 -78 202 -78 510 -78 671 -79 24 -79 107 -79 175 -80 14 -80 161 -80 222 -81 7 -81 85 -81 562 -82 375 -82 456 -83 150 -83 193 -84 489 -85 81 -85 562 -86 37 -86 364 -86 527 -87 130 -87 424 -87 602 -88 169 -88 170 -89 200 -89 344 -89 399 -89 559 -89 594 -90 3 -90 173 -90 331 -90 593 -91 31 -91 445 -91 519 -91 547 -92 96 -93 308 -93 348 -94 56 -94 59 -94 275 -94 405 -94 669 -95 654 -96 23 -96 92 -96 103 -96 202 -96 588 -97 103 -97 125 -98 193 -98 221 -98 252 -98 259 -99 143 -99 473 -99 563 -99 603 -99 687 -100 489 -101 220 -102 123 -102 410 -103 2 -103 20 -103 96 -103 97 -103 152 -103 592 -104 170 -104 454 -105 354 -106 170 -106 285 -107 79 -107 373 -107 470 -108 3 -108 278 -108 537 -109 504 -110 198 -110 447 -110 560 -110 580 -110 630 -110 636 -110 668 -110 693 -111 28 -111 61 -111 229 -111 339 -111 349 -111 654 -112 129 -112 160 -112 409 -112 627 -112 649 -112 672 -113 327 -113 569 -114 245 -114 654 -115 72 -115 253 -115 576 -116 473 -116 595 -116 640 -117 259 -117 288 -118 20 -118 21 -118 134 -118 136 -118 529 -119 29 -119 338 -119 475 -119 507 -120 22 -120 179 -120 299 -120 362 -120 367 -120 465 -120 473 -120 511 -120 669 -120 685 -121 201 -121 405 -122 288 -122 359 -122 424 -122 622 -122 663 -123 12 -123 22 -123 102 -123 183 -123 327 -123 380 -124 170 -124 327 -124 574 -125 7 -125 97 -125 176 -125 606 -126 143 -126 228 -126 543 -127 350 -128 161 -128 609 -129 3 -129 112 -129 194 -129 235 -129 315 -129 640 -130 9 -130 87 -130 288 -131 139 -131 184 -132 224 -132 654 -133 174 -133 260 -133 567 -133 568 -134 13 -134 18 -134 118 -134 207 -134 246 -135 
136 -135 540 -135 567 -135 615 -135 622 -136 118 -136 135 -136 340 -136 401 -136 527 -137 349 -137 607 -138 654 -139 52 -139 131 -139 637 -140 187 -141 34 -141 198 -141 249 -142 571 -142 617 -143 7 -143 18 -143 99 -143 126 -143 172 -143 288 -143 323 -143 334 -143 372 -143 385 -143 442 -143 473 -143 481 -143 507 -143 540 -143 564 -143 631 -143 647 -143 667 -144 454 -145 298 -145 334 -146 53 -146 273 -147 5 -147 48 -147 180 -147 384 -147 561 -147 572 -147 667 -148 426 -149 184 -150 83 -150 437 -150 632 -151 298 -152 103 -152 609 -154 4 -154 430 -154 438 -154 543 -154 551 -155 308 -155 341 -156 501 -156 567 -157 54 -157 287 -157 603 -158 617 -159 418 -159 548 -160 112 -161 80 -161 128 -161 246 -161 609 -162 271 -163 543 -163 564 -164 169 -165 193 -165 504 -166 14 -166 308 -166 547 -167 280 -167 393 -168 12 -168 408 -168 445 -169 88 -169 164 -169 454 -170 10 -170 23 -170 30 -170 68 -170 69 -170 88 -170 104 -170 106 -170 124 -170 202 -170 651 -171 249 -171 531 -172 143 -172 340 -173 90 -173 636 -174 15 -174 133 -174 517 -175 79 -175 349 -176 19 -176 125 -176 431 -176 478 -177 202 -177 592 -178 635 -179 44 -179 120 -180 147 -180 246 -180 681 -181 565 -181 679 -183 13 -183 57 -183 123 -183 428 -184 56 -184 60 -184 64 -184 131 -184 149 -184 630 -184 668 -184 669 -185 575 -185 611 -186 334 -186 525 -187 140 -187 189 -188 247 -188 606 -189 6 -189 187 -189 428 -190 614 -191 268 -192 395 -192 543 -193 83 -193 98 -193 165 -193 198 -193 221 -194 129 -194 428 -195 470 -195 555 -196 676 -196 694 -197 418 -198 31 -198 33 -198 73 -198 110 -198 141 -198 193 -198 605 -199 1 -199 70 -199 246 -200 10 -200 49 -200 89 -200 262 -200 523 -201 121 -201 349 -202 78 -202 96 -202 170 -202 177 -202 580 -203 228 -204 349 -205 69 -205 426 -205 473 -206 238 -206 675 -207 8 -207 11 -207 14 -207 27 -207 48 -207 134 -208 251 -208 486 -209 574 -209 585 -210 7 -210 486 -211 402 -211 441 -212 254 -212 663 -213 508 -214 428 -215 256 -215 651 -216 59 -216 519 -216 543 -217 427 -217 504 -218 303 -218 646 
-219 12 -220 19 -220 101 -220 540 -220 567 -221 98 -221 193 -222 80 -223 405 -223 592 -224 132 -224 527 -225 52 -226 14 -226 540 -226 626 -227 288 -227 325 -228 126 -228 203 -229 31 -229 111 -230 373 -230 462 -231 423 -231 546 -232 13 -232 22 -232 37 -232 521 -232 542 -233 291 -233 393 -235 13 -235 129 -235 431 -236 3 -236 24 -236 74 -236 443 -236 486 -237 503 -238 206 -238 396 -240 608 -240 651 -241 18 -241 284 -241 289 -242 500 -242 580 -243 69 -243 454 -244 279 -244 408 -245 114 -245 312 -245 550 -246 134 -246 161 -246 180 -246 199 -246 681 -247 22 -247 68 -247 188 -247 273 -247 561 -248 39 -248 392 -248 478 -249 141 -249 171 -249 327 -249 531 -250 33 -250 75 -250 340 -251 208 -252 98 -253 115 -253 544 -254 212 -254 431 -254 663 -255 354 -256 215 -257 511 -259 98 -259 117 -259 402 -260 133 -261 24 -261 377 -262 17 -262 23 -262 26 -262 46 -262 200 -262 651 -262 654 -263 298 -263 324 -264 358 -265 43 -265 576 -266 16 -266 423 -266 473 -267 298 -268 21 -268 62 -268 191 -269 428 -269 511 -270 52 -270 599 -271 162 -271 525 -272 562 -273 146 -273 247 -273 676 -274 410 -275 94 -276 351 -276 527 -277 389 -278 108 -278 291 -279 244 -280 51 -280 167 -280 432 -281 595 -282 651 -283 655 -284 241 -285 7 -285 106 -285 431 -285 663 -286 527 -286 654 -287 157 -287 603 -288 117 -288 122 -288 130 -288 143 -288 227 -288 582 -289 76 -289 241 -291 233 -291 278 -291 565 -292 507 -292 527 -294 404 -295 70 -295 335 -295 552 -296 358 -296 663 -297 428 -298 145 -298 151 -298 263 -298 267 -298 619 -299 120 -299 608 -300 408 -301 528 -302 393 -303 218 -304 562 -305 332 -305 395 -306 324 -307 53 -307 622 -308 93 -308 155 -308 166 -308 408 -309 400 -309 537 -310 52 -310 432 -310 692 -311 20 -311 351 -312 245 -312 550 -313 507 -314 70 -314 534 -314 573 -315 129 -315 410 -315 527 -316 37 -316 74 -317 342 -318 50 -319 547 -320 493 -320 654 -322 4 -323 143 -323 428 -324 263 -324 306 -324 676 -325 227 -326 3 -326 68 -326 663 -327 48 -327 113 -327 123 -327 124 -327 249 -327 687 -328 43 -328 460 
-329 44 -329 454 -330 529 -331 11 -331 90 -331 456 -332 4 -332 305 -332 425 -333 14 -333 384 -334 0 -334 143 -334 145 -334 186 -334 525 -335 295 -335 498 -335 669 -336 654 -338 119 -338 353 -339 111 -340 136 -340 172 -340 250 -340 532 -341 10 -341 24 -341 155 -341 510 -342 41 -342 317 -342 461 -343 10 -343 360 -343 623 -344 5 -344 89 -344 405 -344 497 -346 454 -347 60 -347 651 -348 93 -348 608 -349 2 -349 9 -349 45 -349 111 -349 137 -349 175 -349 201 -349 204 -349 350 -350 127 -350 349 -350 462 -351 28 -351 276 -351 311 -351 414 -352 7 -353 39 -353 59 -353 338 -353 394 -353 473 -353 615 -354 11 -354 105 -354 255 -354 408 -354 454 -355 637 -356 580 -356 665 -357 9 -358 264 -358 296 -358 677 -359 67 -359 122 -359 425 -360 343 -361 33 -361 567 -362 120 -362 588 -364 31 -364 86 -364 476 -364 654 -365 375 -366 507 -367 120 -368 473 -368 592 -369 425 -370 669 -371 473 -372 8 -372 143 -373 29 -373 107 -373 230 -374 423 -375 82 -375 365 -376 396 -376 536 -377 261 -377 567 -377 623 -378 644 -379 637 -380 14 -380 41 -380 123 -381 606 -382 431 -383 682 -384 147 -384 333 -385 143 -385 676 -386 654 -387 472 -388 43 -388 58 -388 63 -388 489 -389 277 -390 512 -391 0 -392 248 -392 431 -392 461 -393 77 -393 167 -393 233 -393 302 -393 398 -394 353 -394 663 -395 4 -395 192 -395 305 -395 435 -395 507 -396 56 -396 238 -396 376 -396 405 -396 663 -397 553 -397 616 -398 22 -398 393 -399 89 -399 645 -400 309 -400 537 -400 654 -401 38 -401 136 -401 428 -401 478 -402 211 -402 259 -402 425 -402 588 -403 25 -403 479 -404 52 -404 294 -404 680 -405 94 -405 121 -405 223 -405 344 -405 396 -405 669 -406 637 -406 663 -407 584 -408 32 -408 168 -408 244 -408 300 -408 308 -408 354 -409 112 -409 663 -410 25 -410 102 -410 274 -410 315 -410 452 -411 523 -411 588 -412 418 -413 610 -413 619 -413 641 -414 351 -415 31 -415 480 -416 609 -417 1 -417 527 -418 27 -418 159 -418 197 -418 412 -418 475 -418 548 -419 527 -420 654 -422 482 -422 589 -423 52 -423 231 -423 266 -423 374 -423 428 -424 74 -424 87 -424 122 
-425 15 -425 42 -425 67 -425 332 -425 359 -425 369 -425 402 -426 148 -426 205 -426 683 -427 217 -427 543 -427 663 -428 66 -428 72 -428 183 -428 189 -428 194 -428 214 -428 269 -428 297 -428 323 -428 401 -428 423 -430 154 -431 16 -431 176 -431 235 -431 254 -431 285 -431 382 -431 392 -432 51 -432 280 -432 310 -433 17 -434 4 -434 486 -435 395 -435 507 -435 680 -436 650 -437 150 -438 24 -438 154 -438 622 -440 59 -440 527 -440 677 -441 8 -441 211 -441 589 -442 143 -442 523 -442 665 -443 236 -444 519 -444 521 -445 91 -445 168 -445 690 -446 521 -446 589 -447 69 -447 110 -448 669 -450 613 -450 683 -451 495 -452 410 -454 12 -454 104 -454 144 -454 169 -454 243 -454 329 -454 346 -454 354 -454 527 -456 82 -456 331 -457 610 -457 669 -459 687 -460 35 -460 328 -461 342 -461 392 -462 11 -462 75 -462 230 -462 350 -463 655 -464 27 -464 624 -465 120 -467 489 -467 602 -467 609 -467 627 -469 0 -469 25 -469 47 -470 61 -470 107 -470 195 -471 654 -472 0 -472 387 -473 7 -473 8 -473 99 -473 116 -473 120 -473 143 -473 205 -473 266 -473 353 -473 368 -473 371 -473 554 -474 28 -474 75 -475 119 -475 418 -475 654 -476 364 -477 507 -478 176 -478 248 -478 401 -478 606 -479 403 -479 523 -480 0 -480 71 -480 415 -481 143 -481 580 -482 422 -482 623 -483 502 -483 512 -485 13 -485 675 -486 21 -486 208 -486 210 -486 236 -486 434 -486 527 -487 39 -487 68 -488 694 -489 43 -489 63 -489 84 -489 100 -489 388 -489 467 -490 0 -490 47 -491 651 -493 320 -494 53 -494 511 -495 451 -495 527 -496 36 -496 654 -497 0 -497 344 -498 335 -500 59 -500 71 -500 242 -500 502 -500 504 -501 59 -501 156 -502 3 -502 483 -502 500 -502 610 -503 33 -503 237 -503 620 -504 109 -504 165 -504 217 -504 500 -506 3 -507 19 -507 73 -507 119 -507 143 -507 292 -507 313 -507 366 -507 395 -507 435 -507 477 -507 529 -507 651 -508 19 -508 57 -508 213 -509 527 -510 78 -510 341 -510 523 -511 120 -511 257 -511 269 -511 494 -511 512 -512 24 -512 390 -512 483 -512 511 -514 548 -515 625 -516 24 -517 174 -518 547 -519 55 -519 91 -519 216 -519 444 -519 521 
-521 22 -521 32 -521 232 -521 444 -521 446 -521 519 -521 526 -522 654 -523 200 -523 411 -523 442 -523 479 -523 510 -525 186 -525 271 -525 334 -526 521 -526 538 -527 43 -527 86 -527 136 -527 224 -527 276 -527 286 -527 292 -527 315 -527 417 -527 419 -527 440 -527 454 -527 486 -527 495 -527 509 -528 301 -528 661 -529 6 -529 118 -529 330 -529 507 -529 530 -530 4 -530 43 -530 529 -531 171 -531 249 -532 340 -533 654 -534 314 -535 640 -535 666 -536 15 -536 75 -536 376 -537 70 -537 108 -537 309 -537 400 -538 526 -540 18 -540 135 -540 143 -540 220 -540 226 -541 626 -542 43 -542 232 -542 544 -543 126 -543 154 -543 163 -543 192 -543 216 -543 427 -543 663 -543 683 -544 3 -544 253 -544 542 -545 25 -546 25 -546 37 -546 231 -546 569 -546 623 -546 631 -547 91 -547 166 -547 319 -547 518 -547 575 -547 647 -547 664 -548 159 -548 418 -548 514 -550 245 -550 312 -551 154 -552 295 -552 669 -553 397 -553 616 -554 3 -554 473 -555 195 -555 651 -556 39 -557 654 -559 7 -559 36 -559 89 -559 695 -560 110 -561 147 -561 247 -562 65 -562 81 -562 85 -562 272 -562 304 -563 65 -563 99 -564 143 -564 163 -564 565 -565 181 -565 291 -565 564 -566 571 -567 0 -567 50 -567 74 -567 133 -567 135 -567 156 -567 220 -567 361 -567 377 -568 1 -568 133 -569 28 -569 113 -569 546 -571 30 -571 142 -571 566 -571 637 -572 147 -573 314 -573 637 -574 124 -574 209 -574 651 -575 9 -575 13 -575 185 -575 547 -576 115 -576 265 -577 677 -579 651 -580 5 -580 110 -580 202 -580 242 -580 356 -580 481 -580 609 -582 33 -582 288 -582 605 -583 25 -584 407 -584 620 -585 209 -587 651 -588 96 -588 362 -588 402 -588 411 -588 665 -589 422 -589 441 -589 446 -590 74 -592 103 -592 177 -592 223 -592 368 -592 609 -593 90 -594 89 -595 17 -595 116 -595 281 -595 681 -596 610 -597 17 -599 270 -600 7 -602 87 -602 467 -603 99 -603 157 -603 287 -604 605 -604 699 -605 198 -605 582 -605 604 -606 0 -606 125 -606 188 -606 381 -606 478 -607 30 -607 77 -607 137 -608 53 -608 61 -608 240 -608 299 -608 348 -609 43 -609 128 -609 152 -609 161 -609 416 -609 467 
-609 580 -609 592 -610 74 -610 413 -610 457 -610 502 -610 596 -611 185 -612 663 -613 450 -613 683 -614 43 -614 190 -615 18 -615 39 -615 69 -615 135 -615 353 -616 397 -616 553 -617 142 -617 158 -617 651 -619 298 -619 413 -620 32 -620 503 -620 584 -622 75 -622 122 -622 135 -622 307 -622 438 -622 655 -623 5 -623 10 -623 47 -623 343 -623 377 -623 482 -623 546 -623 640 -624 464 -625 13 -625 57 -625 515 -625 684 -626 226 -626 541 -627 112 -627 467 -630 47 -630 53 -630 110 -630 184 -631 143 -631 546 -631 644 -632 150 -634 2 -635 178 -636 33 -636 53 -636 110 -636 173 -636 637 -637 15 -637 37 -637 139 -637 355 -637 379 -637 406 -637 571 -637 573 -637 636 -638 647 -640 16 -640 29 -640 116 -640 129 -640 535 -640 623 -641 413 -642 682 -644 378 -644 631 -645 399 -646 218 -647 14 -647 143 -647 547 -647 638 -649 112 -650 436 -651 30 -651 42 -651 170 -651 215 -651 240 -651 262 -651 282 -651 347 -651 491 -651 507 -651 555 -651 574 -651 579 -651 587 -651 617 -652 663 -653 68 -653 669 -654 17 -654 37 -654 49 -654 54 -654 95 -654 111 -654 114 -654 132 -654 138 -654 262 -654 286 -654 320 -654 336 -654 364 -654 386 -654 400 -654 420 -654 471 -654 475 -654 496 -654 522 -654 533 -654 557 -655 283 -655 463 -655 622 -658 56 -658 57 -659 2 -661 528 -663 11 -663 40 -663 122 -663 212 -663 254 -663 285 -663 296 -663 326 -663 394 -663 396 -663 406 -663 409 -663 427 -663 543 -663 612 -663 652 -664 547 -665 356 -665 442 -665 588 -666 535 -667 143 -667 147 -668 110 -668 184 -669 94 -669 120 -669 184 -669 335 -669 370 -669 405 -669 448 -669 457 -669 552 -669 653 -671 78 -672 112 -674 1 -674 696 -675 40 -675 206 -675 485 -676 196 -676 273 -676 324 -676 385 -677 358 -677 440 -677 577 -677 678 -678 677 -679 181 -680 404 -680 435 -681 180 -681 246 -681 595 -682 383 -682 642 -683 426 -683 450 -683 543 -683 613 -684 625 -685 4 -685 39 -685 120 -686 47 -687 70 -687 99 -687 327 -687 459 -689 20 -690 445 -692 310 -693 39 -693 110 -694 77 -694 196 -694 488 -695 559 -696 674 -699 604 diff --git 
a/pygip/models/attack/mea/data/attack2_generated_graph/cora/query_labels.txt b/pygip/models/attack/mea/data/attack2_generated_graph/cora/query_labels.txt deleted file mode 100644 index 6f43ee22..00000000 --- a/pygip/models/attack/mea/data/attack2_generated_graph/cora/query_labels.txt +++ /dev/null @@ -1,2708 +0,0 @@ -0 2 -1 5 -2 4 -3 4 -4 3 -5 3 -6 6 -7 2 -8 2 -9 6 -10 2 -11 1 -12 3 -13 0 -14 2 -15 2 -16 4 -17 2 -18 2 -19 2 -20 6 -21 4 -22 1 -23 3 -24 4 -25 6 -26 2 -27 3 -28 2 -29 5 -30 0 -31 2 -32 0 -33 2 -34 4 -35 4 -36 2 -37 4 -38 1 -39 4 -40 6 -41 3 -42 1 -43 2 -44 0 -45 4 -46 2 -47 0 -48 3 -49 0 -50 0 -51 3 -52 6 -53 1 -54 6 -55 1 -56 6 -57 4 -58 2 -59 2 -60 4 -61 0 -62 3 -63 4 -64 0 -65 1 -66 0 -67 3 -68 2 -69 0 -70 6 -71 3 -72 2 -73 2 -74 3 -75 0 -76 3 -77 2 -78 6 -79 6 -80 2 -81 3 -82 4 -83 2 -84 3 -85 4 -86 5 -87 5 -88 6 -89 6 -90 2 -91 2 -92 4 -93 3 -94 3 -95 3 -96 0 -97 2 -98 6 -99 3 -100 1 -101 2 -102 2 -103 4 -104 2 -105 3 -106 5 -107 3 -108 3 -109 6 -110 2 -111 2 -112 2 -113 2 -114 6 -115 3 -116 0 -117 3 -118 4 -119 5 -120 1 -121 2 -122 2 -123 2 -124 2 -125 2 -126 3 -127 2 -128 4 -129 1 -130 6 -131 3 -132 4 -133 2 -134 2 -135 5 -136 5 -137 3 -138 3 -139 1 -140 2 -141 1 -142 2 -143 2 -144 1 -145 1 -146 2 -147 6 -148 4 -149 0 -150 0 -151 5 -152 2 -153 2 -154 2 -155 3 -156 5 -157 2 -158 5 -159 5 -160 2 -161 3 -162 4 -163 1 -164 6 -165 5 -166 0 -167 4 -168 1 -169 2 -170 3 -171 6 -172 6 -173 4 -174 1 -175 2 -176 2 -177 6 -178 6 -179 2 -180 2 -181 3 -182 3 -183 0 -184 5 -185 3 -186 4 -187 2 -188 1 -189 1 -190 2 -191 1 -192 2 -193 2 -194 2 -195 5 -196 2 -197 4 -198 2 -199 6 -200 2 -201 2 -202 2 -203 2 -204 2 -205 2 -206 2 -207 4 -208 6 -209 1 -210 4 -211 3 -212 2 -213 3 -214 2 -215 0 -216 5 -217 2 -218 6 -219 1 -220 4 -221 0 -222 0 -223 5 -224 5 -225 0 -226 3 -227 6 -228 5 -229 2 -230 2 -231 0 -232 2 -233 2 -234 2 -235 2 -236 6 -237 1 -238 2 -239 4 -240 0 -241 2 -242 2 -243 1 -244 4 -245 0 -246 3 -247 1 -248 3 -249 5 -250 3 -251 0 -252 2 -253 2 -254 2 -255 6 
-256 6 -257 2 -258 2 -259 5 -260 3 -261 3 -262 1 -263 2 -264 6 -265 1 -266 1 -267 6 -268 2 -269 4 -270 3 -271 3 -272 2 -273 1 -274 4 -275 3 -276 3 -277 4 -278 3 -279 3 -280 2 -281 4 -282 2 -283 4 -284 0 -285 0 -286 6 -287 6 -288 0 -289 0 -290 1 -291 2 -292 3 -293 2 -294 4 -295 4 -296 6 -297 2 -298 4 -299 3 -300 0 -301 2 -302 5 -303 5 -304 2 -305 4 -306 4 -307 5 -308 6 -309 1 -310 2 -311 4 -312 2 -313 0 -314 5 -315 2 -316 5 -317 5 -318 0 -319 6 -320 2 -321 4 -322 2 -323 6 -324 3 -325 3 -326 3 -327 3 -328 2 -329 1 -330 2 -331 2 -332 3 -333 4 -334 6 -335 5 -336 2 -337 4 -338 2 -339 3 -340 2 -341 2 -342 3 -343 1 -344 5 -345 3 -346 1 -347 3 -348 5 -349 2 -350 2 -351 2 -352 3 -353 2 -354 2 -355 2 -356 2 -357 1 -358 3 -359 2 -360 2 -361 1 -362 2 -363 2 -364 2 -365 2 -366 6 -367 4 -368 2 -369 2 -370 4 -371 6 -372 1 -373 5 -374 4 -375 6 -376 3 -377 2 -378 5 -379 2 -380 1 -381 5 -382 6 -383 6 -384 2 -385 4 -386 2 -387 2 -388 2 -389 3 -390 1 -391 2 -392 3 -393 2 -394 2 -395 1 -396 6 -397 2 -398 3 -399 2 -400 2 -401 3 -402 1 -403 1 -404 2 -405 2 -406 2 -407 2 -408 2 -409 2 -410 4 -411 2 -412 4 -413 2 -414 5 -415 1 -416 3 -417 5 -418 6 -419 3 -420 2 -421 6 -422 1 -423 3 -424 6 -425 4 -426 5 -427 2 -428 5 -429 6 -430 5 -431 6 -432 5 -433 5 -434 5 -435 2 -436 6 -437 2 -438 6 -439 2 -440 4 -441 6 -442 6 -443 2 -444 2 -445 2 -446 4 -447 2 -448 1 -449 6 -450 2 -451 4 -452 3 -453 6 -454 0 -455 4 -456 6 -457 0 -458 2 -459 6 -460 4 -461 2 -462 5 -463 4 -464 3 -465 4 -466 4 -467 3 -468 3 -469 2 -470 6 -471 4 -472 4 -473 4 -474 6 -475 2 -476 2 -477 2 -478 4 -479 0 -480 2 -481 6 -482 6 -483 4 -484 3 -485 0 -486 0 -487 2 -488 6 -489 2 -490 4 -491 3 -492 4 -493 2 -494 2 -495 3 -496 2 -497 6 -498 3 -499 3 -500 4 -501 2 -502 4 -503 2 -504 2 -505 2 -506 6 -507 6 -508 6 -509 5 -510 2 -511 2 -512 2 -513 3 -514 6 -515 3 -516 2 -517 6 -518 3 -519 2 -520 6 -521 5 -522 3 -523 1 -524 6 -525 3 -526 2 -527 2 -528 2 -529 2 -530 1 -531 2 -532 3 -533 3 -534 0 -535 6 -536 2 -537 1 -538 2 -539 0 -540 0 -541 
0 -542 2 -543 0 -544 2 -545 2 -546 1 -547 2 -548 4 -549 6 -550 6 -551 6 -552 4 -553 2 -554 4 -555 6 -556 6 -557 3 -558 3 -559 4 -560 6 -561 2 -562 4 -563 1 -564 4 -565 4 -566 4 -567 4 -568 0 -569 6 -570 2 -571 6 -572 2 -573 3 -574 6 -575 4 -576 2 -577 3 -578 0 -579 2 -580 0 -581 0 -582 0 -583 0 -584 2 -585 5 -586 2 -587 2 -588 4 -589 2 -590 2 -591 6 -592 2 -593 0 -594 0 -595 2 -596 2 -597 5 -598 5 -599 2 -600 2 -601 4 -602 1 -603 0 -604 2 -605 2 -606 1 -607 6 -608 2 -609 2 -610 2 -611 2 -612 0 -613 2 -614 6 -615 6 -616 2 -617 2 -618 6 -619 3 -620 2 -621 4 -622 2 -623 3 -624 1 -625 1 -626 2 -627 5 -628 2 -629 2 -630 2 -631 5 -632 6 -633 4 -634 5 -635 6 -636 4 -637 3 -638 5 -639 2 -640 6 -641 3 -642 3 -643 0 -644 4 -645 3 -646 3 -647 3 -648 3 -649 0 -650 4 -651 6 -652 5 -653 4 -654 5 -655 2 -656 4 -657 2 -658 1 -659 1 -660 6 -661 3 -662 3 -663 2 -664 2 -665 2 -666 2 -667 2 -668 2 -669 2 -670 0 -671 4 -672 6 -673 2 -674 2 -675 5 -676 2 -677 3 -678 2 -679 0 -680 2 -681 1 -682 0 -683 5 -684 3 -685 2 -686 6 -687 2 -688 4 -689 1 -690 0 -691 4 -692 6 -693 2 -694 3 -695 2 -696 2 -697 3 -698 2 -699 4 -700 2 -701 5 -702 2 -703 2 -704 3 -705 2 -706 6 -707 2 -708 2 -709 2 -710 6 -711 0 -712 1 -713 3 -714 1 -715 3 -716 2 -717 1 -718 2 -719 2 -720 6 -721 1 -722 6 -723 4 -724 2 -725 3 -726 2 -727 1 -728 1 -729 3 -730 2 -731 2 -732 6 -733 4 -734 3 -735 5 -736 2 -737 2 -738 4 -739 2 -740 6 -741 2 -742 3 -743 1 -744 1 -745 2 -746 2 -747 2 -748 3 -749 4 -750 4 -751 2 -752 0 -753 2 -754 6 -755 1 -756 2 -757 1 -758 1 -759 3 -760 0 -761 6 -762 0 -763 2 -764 1 -765 1 -766 6 -767 6 -768 3 -769 1 -770 2 -771 2 -772 3 -773 4 -774 2 -775 4 -776 3 -777 3 -778 6 -779 2 -780 1 -781 1 -782 3 -783 0 -784 3 -785 0 -786 6 -787 2 -788 3 -789 1 -790 0 -791 6 -792 6 -793 1 -794 6 -795 6 -796 3 -797 2 -798 6 -799 0 -800 1 -801 0 -802 3 -803 1 -804 4 -805 2 -806 0 -807 2 -808 2 -809 2 -810 2 -811 1 -812 2 -813 1 -814 2 -815 1 -816 2 -817 0 -818 0 -819 0 -820 2 -821 2 -822 5 -823 2 -824 0 -825 0 -826 6 
-827 3 -828 2 -829 2 -830 2 -831 0 -832 2 -833 1 -834 2 -835 4 -836 5 -837 5 -838 3 -839 2 -840 2 -841 3 -842 4 -843 2 -844 1 -845 3 -846 1 -847 6 -848 6 -849 2 -850 3 -851 3 -852 3 -853 5 -854 1 -855 3 -856 1 -857 0 -858 4 -859 6 -860 1 -861 6 -862 5 -863 2 -864 2 -865 6 -866 2 -867 2 -868 4 -869 3 -870 6 -871 2 -872 2 -873 2 -874 2 -875 4 -876 6 -877 5 -878 2 -879 2 -880 1 -881 2 -882 4 -883 0 -884 6 -885 6 -886 2 -887 0 -888 3 -889 3 -890 2 -891 3 -892 3 -893 5 -894 3 -895 3 -896 3 -897 3 -898 3 -899 2 -900 1 -901 0 -902 1 -903 2 -904 3 -905 3 -906 2 -907 2 -908 5 -909 3 -910 1 -911 0 -912 3 -913 3 -914 2 -915 3 -916 2 -917 3 -918 2 -919 2 -920 2 -921 2 -922 0 -923 0 -924 6 -925 3 -926 2 -927 0 -928 0 -929 3 -930 2 -931 6 -932 6 -933 4 -934 1 -935 1 -936 4 -937 5 -938 2 -939 2 -940 1 -941 1 -942 1 -943 1 -944 6 -945 3 -946 2 -947 0 -948 2 -949 2 -950 3 -951 0 -952 3 -953 2 -954 2 -955 6 -956 1 -957 6 -958 1 -959 1 -960 5 -961 1 -962 2 -963 5 -964 1 -965 1 -966 1 -967 6 -968 2 -969 2 -970 4 -971 3 -972 2 -973 3 -974 2 -975 2 -976 3 -977 0 -978 2 -979 6 -980 2 -981 1 -982 1 -983 0 -984 3 -985 5 -986 0 -987 3 -988 4 -989 2 -990 2 -991 0 -992 0 -993 3 -994 0 -995 1 -996 3 -997 3 -998 3 -999 3 -1000 6 -1001 0 -1002 0 -1003 6 -1004 6 -1005 2 -1006 6 -1007 5 -1008 5 -1009 1 -1010 5 -1011 5 -1012 4 -1013 3 -1014 2 -1015 3 -1016 1 -1017 2 -1018 1 -1019 5 -1020 6 -1021 2 -1022 2 -1023 2 -1024 2 -1025 6 -1026 6 -1027 6 -1028 2 -1029 3 -1030 1 -1031 2 -1032 6 -1033 5 -1034 1 -1035 1 -1036 4 -1037 2 -1038 2 -1039 0 -1040 0 -1041 2 -1042 2 -1043 2 -1044 2 -1045 4 -1046 0 -1047 0 -1048 2 -1049 1 -1050 2 -1051 2 -1052 2 -1053 6 -1054 1 -1055 2 -1056 2 -1057 2 -1058 2 -1059 6 -1060 1 -1061 5 -1062 2 -1063 1 -1064 3 -1065 1 -1066 2 -1067 2 -1068 2 -1069 1 -1070 1 -1071 1 -1072 2 -1073 5 -1074 6 -1075 2 -1076 3 -1077 1 -1078 3 -1079 2 -1080 2 -1081 5 -1082 5 -1083 3 -1084 0 -1085 0 -1086 3 -1087 2 -1088 2 -1089 0 -1090 3 -1091 3 -1092 6 -1093 2 -1094 2 -1095 6 -1096 1 -1097 3 
-1098 1 -1099 1 -1100 2 -1101 0 -1102 5 -1103 2 -1104 5 -1105 6 -1106 1 -1107 0 -1108 3 -1109 0 -1110 1 -1111 2 -1112 5 -1113 1 -1114 1 -1115 2 -1116 2 -1117 1 -1118 4 -1119 2 -1120 2 -1121 0 -1122 0 -1123 0 -1124 6 -1125 0 -1126 0 -1127 1 -1128 3 -1129 2 -1130 1 -1131 1 -1132 3 -1133 5 -1134 2 -1135 5 -1136 1 -1137 4 -1138 0 -1139 1 -1140 1 -1141 3 -1142 1 -1143 2 -1144 2 -1145 2 -1146 2 -1147 3 -1148 0 -1149 2 -1150 2 -1151 2 -1152 0 -1153 1 -1154 0 -1155 0 -1156 3 -1157 2 -1158 3 -1159 1 -1160 3 -1161 6 -1162 3 -1163 0 -1164 0 -1165 6 -1166 3 -1167 2 -1168 3 -1169 2 -1170 6 -1171 2 -1172 3 -1173 5 -1174 6 -1175 2 -1176 0 -1177 5 -1178 1 -1179 2 -1180 5 -1181 6 -1182 5 -1183 4 -1184 1 -1185 6 -1186 6 -1187 5 -1188 0 -1189 6 -1190 2 -1191 6 -1192 4 -1193 4 -1194 0 -1195 2 -1196 2 -1197 6 -1198 5 -1199 3 -1200 2 -1201 3 -1202 4 -1203 2 -1204 1 -1205 1 -1206 1 -1207 1 -1208 5 -1209 0 -1210 0 -1211 2 -1212 0 -1213 4 -1214 0 -1215 0 -1216 1 -1217 3 -1218 1 -1219 1 -1220 2 -1221 1 -1222 2 -1223 6 -1224 1 -1225 1 -1226 5 -1227 6 -1228 6 -1229 2 -1230 1 -1231 1 -1232 1 -1233 0 -1234 5 -1235 2 -1236 5 -1237 6 -1238 6 -1239 2 -1240 3 -1241 3 -1242 2 -1243 2 -1244 5 -1245 2 -1246 2 -1247 2 -1248 0 -1249 1 -1250 2 -1251 2 -1252 2 -1253 1 -1254 2 -1255 3 -1256 2 -1257 1 -1258 2 -1259 5 -1260 5 -1261 1 -1262 2 -1263 0 -1264 2 -1265 2 -1266 2 -1267 5 -1268 3 -1269 6 -1270 6 -1271 5 -1272 2 -1273 5 -1274 1 -1275 5 -1276 3 -1277 1 -1278 0 -1279 1 -1280 6 -1281 2 -1282 2 -1283 2 -1284 2 -1285 2 -1286 1 -1287 3 -1288 3 -1289 6 -1290 5 -1291 2 -1292 5 -1293 6 -1294 3 -1295 3 -1296 6 -1297 5 -1298 2 -1299 1 -1300 2 -1301 5 -1302 0 -1303 1 -1304 0 -1305 1 -1306 2 -1307 2 -1308 3 -1309 6 -1310 2 -1311 1 -1312 5 -1313 2 -1314 4 -1315 2 -1316 4 -1317 0 -1318 2 -1319 2 -1320 2 -1321 2 -1322 5 -1323 2 -1324 3 -1325 1 -1326 3 -1327 6 -1328 5 -1329 5 -1330 1 -1331 2 -1332 2 -1333 1 -1334 1 -1335 6 -1336 6 -1337 5 -1338 3 -1339 6 -1340 2 -1341 2 -1342 2 -1343 6 -1344 0 -1345 0 -1346 2 -1347 3 
-1348 0 -1349 3 -1350 2 -1351 1 -1352 2 -1353 2 -1354 0 -1355 0 -1356 2 -1357 3 -1358 6 -1359 2 -1360 6 -1361 2 -1362 1 -1363 6 -1364 6 -1365 2 -1366 2 -1367 0 -1368 0 -1369 1 -1370 5 -1371 1 -1372 3 -1373 4 -1374 2 -1375 6 -1376 4 -1377 0 -1378 4 -1379 1 -1380 1 -1381 2 -1382 2 -1383 2 -1384 2 -1385 0 -1386 6 -1387 3 -1388 6 -1389 2 -1390 3 -1391 2 -1392 1 -1393 3 -1394 6 -1395 2 -1396 2 -1397 3 -1398 2 -1399 2 -1400 6 -1401 4 -1402 6 -1403 2 -1404 1 -1405 2 -1406 2 -1407 0 -1408 1 -1409 1 -1410 1 -1411 2 -1412 0 -1413 5 -1414 3 -1415 3 -1416 0 -1417 2 -1418 2 -1419 3 -1420 0 -1421 3 -1422 6 -1423 0 -1424 1 -1425 3 -1426 0 -1427 3 -1428 5 -1429 5 -1430 6 -1431 2 -1432 1 -1433 6 -1434 2 -1435 2 -1436 3 -1437 2 -1438 4 -1439 1 -1440 4 -1441 1 -1442 5 -1443 4 -1444 5 -1445 5 -1446 2 -1447 4 -1448 0 -1449 6 -1450 2 -1451 2 -1452 6 -1453 0 -1454 6 -1455 1 -1456 6 -1457 1 -1458 3 -1459 4 -1460 6 -1461 2 -1462 2 -1463 2 -1464 2 -1465 0 -1466 1 -1467 1 -1468 6 -1469 6 -1470 2 -1471 2 -1472 2 -1473 1 -1474 1 -1475 0 -1476 1 -1477 2 -1478 6 -1479 5 -1480 3 -1481 1 -1482 6 -1483 0 -1484 6 -1485 2 -1486 5 -1487 2 -1488 2 -1489 5 -1490 2 -1491 2 -1492 2 -1493 2 -1494 2 -1495 1 -1496 0 -1497 0 -1498 1 -1499 2 -1500 6 -1501 3 -1502 3 -1503 3 -1504 3 -1505 6 -1506 3 -1507 3 -1508 2 -1509 2 -1510 0 -1511 2 -1512 6 -1513 0 -1514 6 -1515 1 -1516 1 -1517 2 -1518 1 -1519 5 -1520 2 -1521 6 -1522 6 -1523 1 -1524 2 -1525 3 -1526 2 -1527 3 -1528 5 -1529 4 -1530 1 -1531 0 -1532 3 -1533 1 -1534 0 -1535 0 -1536 1 -1537 0 -1538 2 -1539 5 -1540 4 -1541 3 -1542 0 -1543 3 -1544 1 -1545 3 -1546 4 -1547 2 -1548 3 -1549 3 -1550 3 -1551 2 -1552 0 -1553 4 -1554 0 -1555 3 -1556 2 -1557 6 -1558 1 -1559 2 -1560 6 -1561 6 -1562 2 -1563 1 -1564 2 -1565 2 -1566 6 -1567 6 -1568 6 -1569 5 -1570 5 -1571 1 -1572 1 -1573 1 -1574 3 -1575 1 -1576 5 -1577 2 -1578 5 -1579 2 -1580 2 -1581 4 -1582 3 -1583 1 -1584 0 -1585 4 -1586 6 -1587 5 -1588 1 -1589 1 -1590 1 -1591 2 -1592 2 -1593 3 -1594 1 -1595 2 -1596 5 -1597 0 
-1598 1 -1599 4 -1600 2 -1601 5 -1602 3 -1603 1 -1604 2 -1605 3 -1606 2 -1607 6 -1608 2 -1609 5 -1610 2 -1611 1 -1612 2 -1613 3 -1614 3 -1615 2 -1616 4 -1617 5 -1618 2 -1619 3 -1620 5 -1621 0 -1622 5 -1623 2 -1624 3 -1625 3 -1626 0 -1627 5 -1628 1 -1629 2 -1630 2 -1631 1 -1632 4 -1633 2 -1634 2 -1635 0 -1636 6 -1637 6 -1638 6 -1639 2 -1640 0 -1641 3 -1642 0 -1643 4 -1644 5 -1645 2 -1646 1 -1647 6 -1648 0 -1649 4 -1650 1 -1651 5 -1652 2 -1653 2 -1654 3 -1655 2 -1656 2 -1657 5 -1658 6 -1659 2 -1660 3 -1661 6 -1662 2 -1663 2 -1664 3 -1665 0 -1666 5 -1667 3 -1668 2 -1669 6 -1670 1 -1671 5 -1672 2 -1673 1 -1674 0 -1675 1 -1676 4 -1677 0 -1678 5 -1679 5 -1680 0 -1681 3 -1682 2 -1683 3 -1684 5 -1685 1 -1686 5 -1687 4 -1688 1 -1689 1 -1690 1 -1691 1 -1692 6 -1693 2 -1694 1 -1695 0 -1696 1 -1697 3 -1698 0 -1699 0 -1700 2 -1701 2 -1702 2 -1703 3 -1704 2 -1705 6 -1706 0 -1707 0 -1708 1 -1709 2 -1710 2 -1711 2 -1712 6 -1713 3 -1714 3 -1715 1 -1716 3 -1717 1 -1718 2 -1719 1 -1720 3 -1721 5 -1722 3 -1723 3 -1724 2 -1725 2 -1726 5 -1727 4 -1728 1 -1729 1 -1730 4 -1731 3 -1732 2 -1733 3 -1734 1 -1735 0 -1736 1 -1737 1 -1738 1 -1739 6 -1740 1 -1741 4 -1742 3 -1743 2 -1744 2 -1745 1 -1746 3 -1747 3 -1748 6 -1749 3 -1750 3 -1751 2 -1752 0 -1753 2 -1754 2 -1755 2 -1756 2 -1757 3 -1758 2 -1759 3 -1760 2 -1761 3 -1762 6 -1763 2 -1764 3 -1765 3 -1766 2 -1767 2 -1768 2 -1769 1 -1770 3 -1771 1 -1772 3 -1773 2 -1774 2 -1775 1 -1776 5 -1777 3 -1778 2 -1779 2 -1780 2 -1781 5 -1782 4 -1783 3 -1784 1 -1785 1 -1786 3 -1787 2 -1788 2 -1789 2 -1790 4 -1791 6 -1792 1 -1793 2 -1794 2 -1795 2 -1796 1 -1797 1 -1798 2 -1799 1 -1800 3 -1801 2 -1802 3 -1803 3 -1804 2 -1805 2 -1806 2 -1807 1 -1808 6 -1809 5 -1810 2 -1811 2 -1812 0 -1813 2 -1814 2 -1815 2 -1816 4 -1817 2 -1818 2 -1819 5 -1820 2 -1821 2 -1822 2 -1823 6 -1824 2 -1825 2 -1826 3 -1827 3 -1828 4 -1829 3 -1830 0 -1831 1 -1832 1 -1833 2 -1834 1 -1835 6 -1836 1 -1837 5 -1838 0 -1839 1 -1840 1 -1841 4 -1842 3 -1843 0 -1844 2 -1845 2 -1846 2 -1847 2 
-1848 6 -1849 3 -1850 1 -1851 2 -1852 3 -1853 2 -1854 0 -1855 0 -1856 3 -1857 2 -1858 6 -1859 4 -1860 2 -1861 2 -1862 5 -1863 5 -1864 0 -1865 2 -1866 2 -1867 2 -1868 3 -1869 2 -1870 3 -1871 2 -1872 1 -1873 2 -1874 2 -1875 2 -1876 2 -1877 6 -1878 2 -1879 2 -1880 1 -1881 5 -1882 3 -1883 1 -1884 2 -1885 2 -1886 2 -1887 2 -1888 6 -1889 2 -1890 1 -1891 2 -1892 2 -1893 2 -1894 3 -1895 6 -1896 2 -1897 6 -1898 3 -1899 2 -1900 3 -1901 3 -1902 2 -1903 2 -1904 0 -1905 1 -1906 3 -1907 3 -1908 5 -1909 5 -1910 2 -1911 1 -1912 2 -1913 3 -1914 2 -1915 2 -1916 2 -1917 1 -1918 6 -1919 5 -1920 3 -1921 1 -1922 2 -1923 3 -1924 3 -1925 1 -1926 3 -1927 2 -1928 2 -1929 2 -1930 3 -1931 1 -1932 2 -1933 2 -1934 3 -1935 2 -1936 1 -1937 3 -1938 3 -1939 2 -1940 1 -1941 3 -1942 2 -1943 1 -1944 5 -1945 3 -1946 1 -1947 1 -1948 2 -1949 2 -1950 4 -1951 0 -1952 2 -1953 2 -1954 4 -1955 5 -1956 5 -1957 4 -1958 4 -1959 1 -1960 3 -1961 5 -1962 6 -1963 6 -1964 5 -1965 1 -1966 2 -1967 2 -1968 2 -1969 2 -1970 3 -1971 1 -1972 3 -1973 1 -1974 2 -1975 4 -1976 0 -1977 3 -1978 3 -1979 2 -1980 1 -1981 2 -1982 3 -1983 4 -1984 1 -1985 1 -1986 6 -1987 2 -1988 3 -1989 2 -1990 2 -1991 3 -1992 3 -1993 3 -1994 3 -1995 3 -1996 2 -1997 2 -1998 2 -1999 2 -2000 3 -2001 1 -2002 3 -2003 2 -2004 6 -2005 3 -2006 2 -2007 3 -2008 3 -2009 3 -2010 2 -2011 6 -2012 2 -2013 2 -2014 4 -2015 3 -2016 3 -2017 5 -2018 4 -2019 6 -2020 2 -2021 0 -2022 3 -2023 2 -2024 2 -2025 3 -2026 2 -2027 4 -2028 6 -2029 2 -2030 1 -2031 5 -2032 5 -2033 6 -2034 6 -2035 0 -2036 6 -2037 0 -2038 2 -2039 1 -2040 6 -2041 2 -2042 5 -2043 2 -2044 2 -2045 6 -2046 2 -2047 3 -2048 0 -2049 2 -2050 2 -2051 2 -2052 0 -2053 6 -2054 6 -2055 3 -2056 2 -2057 6 -2058 1 -2059 6 -2060 0 -2061 0 -2062 0 -2063 3 -2064 3 -2065 2 -2066 5 -2067 1 -2068 2 -2069 3 -2070 2 -2071 0 -2072 2 -2073 2 -2074 5 -2075 0 -2076 3 -2077 1 -2078 4 -2079 5 -2080 6 -2081 3 -2082 2 -2083 3 -2084 3 -2085 2 -2086 2 -2087 1 -2088 3 -2089 1 -2090 5 -2091 5 -2092 6 -2093 2 -2094 3 -2095 6 -2096 3 -2097 3 
-2098 6 -2099 6 -2100 5 -2101 2 -2102 1 -2103 1 -2104 1 -2105 2 -2106 5 -2107 2 -2108 2 -2109 3 -2110 6 -2111 1 -2112 2 -2113 3 -2114 3 -2115 3 -2116 1 -2117 2 -2118 4 -2119 2 -2120 2 -2121 2 -2122 0 -2123 0 -2124 2 -2125 1 -2126 5 -2127 6 -2128 1 -2129 2 -2130 1 -2131 0 -2132 5 -2133 1 -2134 3 -2135 2 -2136 3 -2137 6 -2138 2 -2139 1 -2140 2 -2141 3 -2142 2 -2143 2 -2144 3 -2145 0 -2146 6 -2147 2 -2148 3 -2149 5 -2150 4 -2151 5 -2152 2 -2153 3 -2154 2 -2155 6 -2156 6 -2157 0 -2158 5 -2159 3 -2160 3 -2161 3 -2162 2 -2163 3 -2164 3 -2165 2 -2166 3 -2167 3 -2168 6 -2169 6 -2170 2 -2171 5 -2172 5 -2173 1 -2174 4 -2175 1 -2176 2 -2177 1 -2178 2 -2179 4 -2180 6 -2181 2 -2182 6 -2183 4 -2184 0 -2185 3 -2186 2 -2187 2 -2188 5 -2189 5 -2190 5 -2191 2 -2192 5 -2193 2 -2194 5 -2195 6 -2196 1 -2197 6 -2198 6 -2199 1 -2200 1 -2201 2 -2202 1 -2203 2 -2204 1 -2205 1 -2206 1 -2207 3 -2208 3 -2209 2 -2210 6 -2211 1 -2212 2 -2213 5 -2214 2 -2215 5 -2216 1 -2217 5 -2218 2 -2219 3 -2220 1 -2221 4 -2222 4 -2223 2 -2224 6 -2225 5 -2226 4 -2227 2 -2228 2 -2229 5 -2230 2 -2231 5 -2232 1 -2233 2 -2234 3 -2235 3 -2236 3 -2237 1 -2238 2 -2239 2 -2240 0 -2241 3 -2242 3 -2243 5 -2244 0 -2245 0 -2246 6 -2247 2 -2248 1 -2249 1 -2250 1 -2251 1 -2252 1 -2253 5 -2254 1 -2255 2 -2256 2 -2257 1 -2258 2 -2259 1 -2260 2 -2261 1 -2262 3 -2263 2 -2264 3 -2265 1 -2266 3 -2267 1 -2268 2 -2269 2 -2270 2 -2271 1 -2272 2 -2273 6 -2274 1 -2275 2 -2276 0 -2277 1 -2278 2 -2279 2 -2280 1 -2281 2 -2282 5 -2283 2 -2284 2 -2285 3 -2286 1 -2287 6 -2288 3 -2289 0 -2290 5 -2291 5 -2292 3 -2293 3 -2294 0 -2295 1 -2296 1 -2297 0 -2298 1 -2299 0 -2300 5 -2301 3 -2302 2 -2303 4 -2304 2 -2305 4 -2306 2 -2307 2 -2308 0 -2309 2 -2310 0 -2311 3 -2312 5 -2313 3 -2314 2 -2315 6 -2316 1 -2317 2 -2318 3 -2319 6 -2320 3 -2321 2 -2322 2 -2323 3 -2324 5 -2325 2 -2326 5 -2327 3 -2328 4 -2329 6 -2330 1 -2331 6 -2332 1 -2333 2 -2334 6 -2335 2 -2336 2 -2337 2 -2338 6 -2339 5 -2340 2 -2341 3 -2342 5 -2343 6 -2344 2 -2345 3 -2346 2 -2347 1 
-2348 3 -2349 2 -2350 6 -2351 3 -2352 2 -2353 1 -2354 6 -2355 2 -2356 6 -2357 2 -2358 2 -2359 0 -2360 6 -2361 1 -2362 3 -2363 1 -2364 0 -2365 5 -2366 3 -2367 3 -2368 4 -2369 5 -2370 2 -2371 0 -2372 0 -2373 2 -2374 2 -2375 3 -2376 2 -2377 3 -2378 2 -2379 1 -2380 0 -2381 2 -2382 3 -2383 2 -2384 3 -2385 2 -2386 2 -2387 2 -2388 2 -2389 3 -2390 2 -2391 6 -2392 2 -2393 2 -2394 0 -2395 2 -2396 1 -2397 2 -2398 0 -2399 0 -2400 2 -2401 3 -2402 6 -2403 0 -2404 4 -2405 2 -2406 4 -2407 1 -2408 4 -2409 5 -2410 2 -2411 4 -2412 2 -2413 2 -2414 2 -2415 2 -2416 3 -2417 6 -2418 3 -2419 3 -2420 3 -2421 3 -2422 2 -2423 5 -2424 5 -2425 3 -2426 5 -2427 6 -2428 6 -2429 2 -2430 2 -2431 5 -2432 0 -2433 2 -2434 3 -2435 3 -2436 5 -2437 2 -2438 5 -2439 2 -2440 2 -2441 5 -2442 4 -2443 6 -2444 2 -2445 2 -2446 1 -2447 6 -2448 2 -2449 5 -2450 5 -2451 1 -2452 3 -2453 2 -2454 2 -2455 6 -2456 3 -2457 2 -2458 3 -2459 2 -2460 3 -2461 3 -2462 6 -2463 3 -2464 3 -2465 6 -2466 0 -2467 2 -2468 3 -2469 2 -2470 1 -2471 5 -2472 4 -2473 6 -2474 3 -2475 6 -2476 2 -2477 2 -2478 4 -2479 0 -2480 4 -2481 2 -2482 0 -2483 6 -2484 0 -2485 4 -2486 3 -2487 5 -2488 3 -2489 0 -2490 1 -2491 2 -2492 3 -2493 5 -2494 3 -2495 6 -2496 2 -2497 2 -2498 2 -2499 2 -2500 2 -2501 2 -2502 1 -2503 2 -2504 1 -2505 2 -2506 3 -2507 2 -2508 6 -2509 6 -2510 3 -2511 3 -2512 3 -2513 2 -2514 0 -2515 2 -2516 2 -2517 2 -2518 1 -2519 3 -2520 0 -2521 1 -2522 2 -2523 2 -2524 3 -2525 3 -2526 2 -2527 5 -2528 2 -2529 0 -2530 2 -2531 3 -2532 3 -2533 2 -2534 2 -2535 2 -2536 4 -2537 2 -2538 2 -2539 5 -2540 2 -2541 5 -2542 3 -2543 5 -2544 0 -2545 2 -2546 3 -2547 3 -2548 6 -2549 2 -2550 6 -2551 4 -2552 2 -2553 2 -2554 1 -2555 6 -2556 0 -2557 1 -2558 3 -2559 3 -2560 0 -2561 3 -2562 2 -2563 1 -2564 1 -2565 0 -2566 3 -2567 2 -2568 0 -2569 2 -2570 2 -2571 2 -2572 2 -2573 2 -2574 2 -2575 3 -2576 2 -2577 6 -2578 3 -2579 2 -2580 5 -2581 0 -2582 2 -2583 2 -2584 2 -2585 5 -2586 6 -2587 5 -2588 3 -2589 5 -2590 2 -2591 5 -2592 3 -2593 6 -2594 2 -2595 3 -2596 2 -2597 0 
-2598 1 -2599 2 -2600 1 -2601 3 -2602 2 -2603 2 -2604 1 -2605 0 -2606 2 -2607 2 -2608 2 -2609 5 -2610 2 -2611 2 -2612 2 -2613 2 -2614 0 -2615 2 -2616 0 -2617 5 -2618 6 -2619 2 -2620 3 -2621 2 -2622 2 -2623 3 -2624 2 -2625 2 -2626 3 -2627 6 -2628 4 -2629 4 -2630 3 -2631 2 -2632 3 -2633 6 -2634 6 -2635 5 -2636 5 -2637 0 -2638 1 -2639 2 -2640 2 -2641 1 -2642 4 -2643 2 -2644 3 -2645 0 -2646 2 -2647 2 -2648 4 -2649 5 -2650 5 -2651 2 -2652 0 -2653 6 -2654 2 -2655 2 -2656 0 -2657 6 -2658 2 -2659 1 -2660 3 -2661 3 -2662 2 -2663 5 -2664 1 -2665 3 -2666 2 -2667 1 -2668 6 -2669 2 -2670 2 -2671 2 -2672 4 -2673 1 -2674 5 -2675 5 -2676 2 -2677 6 -2678 2 -2679 3 -2680 6 -2681 2 -2682 3 -2683 2 -2684 2 -2685 6 -2686 2 -2687 4 -2688 1 -2689 2 -2690 6 -2691 2 -2692 3 -2693 3 -2694 6 -2695 6 -2696 6 -2697 3 -2698 3 -2699 3 -2700 3 -2701 2 -2702 2 -2703 1 -2704 3 -2705 1 -2706 0 -2707 2 diff --git a/pygip/models/attack/mea/data/attack2_generated_graph/cora/selected_index.txt b/pygip/models/attack/mea/data/attack2_generated_graph/cora/selected_index.txt deleted file mode 100644 index 93ecc28f..00000000 --- a/pygip/models/attack/mea/data/attack2_generated_graph/cora/selected_index.txt +++ /dev/null @@ -1,700 +0,0 @@ -4 -22 -23 -27 -28 -30 -31 -33 -38 -50 -51 -57 -60 -61 -65 -71 -73 -84 -86 -88 -89 -98 -103 -108 -111 -112 -113 -118 -119 -130 -132 -133 -139 -141 -143 -144 -151 -153 -154 -172 -185 -186 -189 -196 -201 -203 -204 -205 -210 -212 -218 -223 -227 -231 -234 -235 -237 -242 -248 -256 -259 -265 -269 -275 -281 -283 -285 -289 -297 -298 -299 -304 -323 -326 -329 -335 -336 -340 -341 -344 -347 -348 -352 -358 -362 -368 -372 -391 -392 -394 -395 -406 -408 -409 -411 -413 -414 -416 -425 -429 -430 -434 -436 -442 -444 -445 -449 -451 -456 -460 -477 -479 -482 -483 -484 -497 -498 -501 -502 -504 -507 -510 -513 -514 -516 -519 -522 -524 -529 -531 -533 -536 -542 -543 -545 -554 -557 -559 -563 -567 -568 -570 -573 -574 -579 -581 -583 -595 -597 -607 -609 -618 -627 -628 -631 -635 -637 -640 -643 -645 -649 
-654 -656 -660 -661 -662 -663 -664 -670 -671 -677 -679 -695 -700 -702 -703 -708 -710 -712 -714 -715 -719 -722 -725 -727 -728 -729 -730 -744 -748 -750 -752 -754 -762 -765 -767 -775 -782 -788 -789 -790 -793 -795 -805 -809 -811 -814 -815 -816 -821 -825 -829 -831 -838 -839 -840 -843 -845 -846 -849 -851 -859 -861 -863 -864 -871 -873 -883 -884 -890 -896 -907 -910 -917 -918 -940 -941 -944 -948 -950 -951 -956 -958 -962 -971 -975 -984 -987 -989 -995 -997 -1002 -1007 -1008 -1011 -1021 -1022 -1031 -1037 -1038 -1039 -1041 -1051 -1052 -1053 -1058 -1059 -1060 -1062 -1064 -1066 -1070 -1071 -1075 -1077 -1095 -1100 -1102 -1109 -1113 -1114 -1118 -1123 -1128 -1135 -1136 -1142 -1147 -1148 -1152 -1155 -1158 -1160 -1167 -1169 -1175 -1186 -1187 -1199 -1202 -1205 -1215 -1230 -1232 -1234 -1237 -1241 -1246 -1248 -1249 -1250 -1276 -1279 -1282 -1283 -1286 -1290 -1293 -1294 -1302 -1304 -1305 -1308 -1309 -1316 -1317 -1321 -1324 -1328 -1330 -1332 -1334 -1339 -1340 -1346 -1347 -1352 -1354 -1359 -1360 -1365 -1367 -1376 -1380 -1382 -1388 -1391 -1404 -1406 -1408 -1409 -1414 -1418 -1421 -1424 -1426 -1433 -1441 -1445 -1447 -1451 -1452 -1455 -1457 -1459 -1465 -1475 -1478 -1481 -1486 -1488 -1490 -1494 -1500 -1501 -1502 -1508 -1518 -1534 -1535 -1536 -1539 -1544 -1545 -1554 -1556 -1560 -1565 -1569 -1580 -1582 -1583 -1594 -1597 -1598 -1604 -1605 -1615 -1618 -1619 -1622 -1625 -1627 -1628 -1632 -1633 -1642 -1645 -1651 -1654 -1655 -1656 -1658 -1663 -1673 -1675 -1685 -1689 -1690 -1694 -1699 -1701 -1709 -1713 -1719 -1722 -1724 -1726 -1729 -1730 -1734 -1735 -1737 -1738 -1740 -1748 -1754 -1757 -1765 -1766 -1768 -1769 -1776 -1779 -1784 -1786 -1790 -1795 -1799 -1800 -1801 -1802 -1803 -1804 -1809 -1812 -1815 -1821 -1825 -1827 -1835 -1848 -1855 -1857 -1864 -1869 -1871 -1872 -1873 -1874 -1878 -1886 -1890 -1892 -1893 -1897 -1902 -1908 -1917 -1920 -1921 -1929 -1939 -1944 -1956 -1957 -1961 -1963 -1964 -1971 -1974 -1978 -1982 -1983 -1987 -1988 -1991 -1996 -1997 -1998 -2000 -2002 -2004 -2005 -2007 -2010 -2012 -2015 -2024 
-2026 -2028 -2035 -2039 -2042 -2050 -2056 -2064 -2069 -2077 -2079 -2080 -2081 -2086 -2091 -2092 -2094 -2095 -2096 -2099 -2100 -2101 -2103 -2104 -2106 -2107 -2115 -2118 -2132 -2144 -2145 -2155 -2165 -2167 -2168 -2174 -2177 -2179 -2181 -2190 -2196 -2199 -2200 -2207 -2214 -2218 -2226 -2229 -2233 -2235 -2236 -2245 -2246 -2248 -2250 -2251 -2252 -2254 -2262 -2263 -2266 -2272 -2279 -2282 -2284 -2285 -2288 -2295 -2296 -2298 -2299 -2308 -2309 -2311 -2313 -2316 -2320 -2321 -2322 -2324 -2330 -2332 -2333 -2335 -2336 -2341 -2342 -2343 -2353 -2354 -2361 -2362 -2364 -2366 -2367 -2368 -2370 -2371 -2374 -2376 -2379 -2385 -2388 -2391 -2394 -2399 -2400 -2406 -2407 -2408 -2409 -2411 -2413 -2415 -2417 -2421 -2423 -2427 -2430 -2433 -2435 -2436 -2437 -2445 -2449 -2453 -2457 -2459 -2460 -2465 -2466 -2467 -2469 -2474 -2480 -2482 -2484 -2486 -2490 -2498 -2511 -2515 -2527 -2530 -2539 -2545 -2546 -2548 -2554 -2561 -2572 -2573 -2575 -2583 -2584 -2590 -2591 -2596 -2601 -2605 -2609 -2615 -2616 -2625 -2629 -2630 -2632 -2633 -2634 -2636 -2637 -2642 -2647 -2648 -2651 -2654 -2657 -2659 -2661 -2666 -2671 -2681 -2688 -2689 -2690 -2695 -2696 -2699 -2702 -2705 -2706 diff --git a/pygip/models/attack/mea/data/attack2_generated_graph/pubmed/graph_label.txt b/pygip/models/attack/mea/data/attack2_generated_graph/pubmed/graph_label.txt deleted file mode 100644 index 014e3795..00000000 --- a/pygip/models/attack/mea/data/attack2_generated_graph/pubmed/graph_label.txt +++ /dev/null @@ -1,1868 +0,0 @@ -0 70 -0 84 -0 88 -0 89 -0 95 -0 110 -0 141 -0 213 -0 234 -0 259 -0 291 -0 320 -0 349 -0 353 -0 363 -0 368 -0 384 -0 404 -0 425 -0 430 -0 453 -0 485 -0 503 -0 510 -0 519 -0 568 -0 603 -0 641 -0 642 -0 656 -0 663 -0 664 -0 675 -0 687 -1 135 -1 306 -1 482 -1 535 -1 659 -1 667 -1 684 -1 685 -2 46 -2 140 -2 175 -2 231 -2 235 -2 253 -2 370 -2 383 -2 407 -2 419 -2 442 -2 560 -2 585 -2 587 -2 613 -2 649 -3 40 -3 72 -4 203 -4 529 -5 97 -5 671 -6 58 -6 357 -7 135 -7 535 -8 36 -8 48 -8 83 -8 199 -8 261 -8 289 -8 378 -8 381 -8 
397 -8 413 -8 448 -8 469 -8 505 -8 519 -8 664 -8 691 -9 150 -9 644 -10 345 -10 396 -11 168 -11 627 -12 524 -13 18 -13 104 -13 231 -13 237 -13 289 -13 368 -13 370 -13 389 -13 466 -13 519 -13 640 -13 671 -13 687 -14 153 -14 379 -15 77 -15 98 -15 104 -15 128 -15 182 -15 188 -15 298 -15 315 -15 320 -15 419 -15 450 -15 472 -15 531 -15 555 -15 563 -15 606 -15 641 -16 390 -16 686 -17 333 -17 670 -18 13 -18 446 -18 641 -19 592 -19 601 -20 38 -20 522 -21 557 -21 653 -22 60 -22 465 -23 669 -24 494 -25 322 -25 637 -26 171 -26 662 -27 192 -27 457 -28 95 -28 322 -28 556 -28 566 -29 239 -29 616 -30 439 -30 609 -31 270 -31 402 -32 262 -32 427 -33 276 -33 631 -34 165 -34 565 -35 166 -35 236 -35 277 -36 8 -36 104 -36 183 -36 373 -37 403 -37 612 -38 20 -39 210 -39 566 -40 3 -40 70 -40 611 -40 657 -40 696 -41 262 -42 132 -42 175 -42 251 -42 345 -42 392 -42 545 -43 169 -43 480 -44 371 -44 448 -45 507 -45 685 -46 2 -46 283 -46 561 -47 132 -47 517 -48 8 -48 73 -48 304 -49 118 -49 319 -49 521 -50 286 -50 500 -51 171 -51 273 -52 442 -52 538 -53 334 -53 411 -53 687 -54 257 -54 293 -54 365 -55 461 -56 280 -56 553 -57 285 -57 549 -58 6 -58 389 -58 433 -59 206 -59 654 -60 22 -60 155 -61 675 -61 689 -62 151 -62 170 -62 691 -63 562 -63 575 -63 646 -64 136 -64 148 -65 565 -65 697 -66 200 -66 566 -67 136 -67 398 -68 206 -68 438 -68 687 -69 269 -69 407 -70 0 -70 40 -70 331 -70 623 -71 133 -71 640 -72 3 -72 477 -73 48 -73 304 -74 365 -74 404 -75 273 -75 419 -76 163 -76 375 -77 15 -77 289 -78 573 -78 693 -79 386 -79 451 -80 209 -80 429 -81 201 -81 332 -82 89 -82 168 -82 449 -83 8 -83 182 -83 435 -84 0 -84 270 -84 593 -85 267 -85 421 -85 452 -86 283 -86 583 -86 680 -87 455 -87 489 -88 0 -88 401 -88 455 -89 0 -89 82 -89 252 -90 204 -90 293 -91 206 -91 654 -92 195 -92 496 -93 260 -93 471 -94 488 -94 570 -95 0 -95 28 -95 234 -95 594 -95 616 -96 228 -96 554 -97 5 -97 568 -98 15 -98 134 -98 219 -99 277 -99 324 -99 619 -100 273 -100 480 -101 533 -101 692 -102 211 -102 438 -103 327 -103 443 -104 13 -104 15 
-104 36 -104 519 -105 214 -106 192 -106 614 -107 205 -108 350 -109 360 -109 420 -110 0 -110 206 -110 695 -111 499 -111 578 -112 175 -112 366 -113 140 -113 389 -114 201 -114 372 -115 393 -115 446 -116 133 -116 175 -117 186 -118 49 -118 521 -118 615 -119 124 -119 168 -119 351 -120 146 -120 300 -121 172 -121 369 -122 320 -123 267 -123 452 -124 119 -124 450 -124 594 -125 681 -126 568 -127 331 -127 629 -128 15 -128 446 -129 609 -130 444 -130 595 -131 304 -132 42 -132 47 -132 517 -133 71 -133 116 -133 478 -134 98 -134 501 -135 1 -135 7 -135 264 -135 535 -136 64 -136 67 -136 398 -136 451 -137 303 -137 678 -138 184 -138 284 -138 468 -138 681 -139 164 -139 177 -140 2 -140 113 -140 446 -141 0 -141 206 -142 398 -143 184 -143 211 -144 455 -144 559 -145 225 -145 569 -146 120 -146 382 -146 425 -147 247 -148 64 -148 252 -148 639 -149 158 -149 314 -150 9 -150 241 -150 308 -150 644 -151 62 -151 338 -151 625 -152 562 -152 600 -153 14 -153 257 -153 372 -153 437 -153 654 -154 583 -154 584 -155 60 -155 341 -155 495 -156 412 -156 586 -157 206 -158 149 -158 303 -159 423 -160 263 -160 481 -161 389 -161 537 -162 336 -162 409 -163 76 -163 394 -163 680 -164 139 -164 268 -165 34 -165 351 -166 35 -166 611 -166 657 -167 228 -167 439 -168 11 -168 82 -168 119 -168 495 -169 43 -169 188 -170 62 -171 26 -171 51 -171 233 -172 121 -172 203 -172 529 -173 645 -173 668 -174 245 -175 2 -175 42 -175 112 -175 116 -175 377 -176 227 -176 260 -177 139 -178 325 -178 361 -179 376 -179 469 -180 303 -181 319 -181 403 -182 15 -182 83 -182 435 -183 36 -183 258 -184 138 -184 143 -184 289 -185 323 -185 364 -186 117 -186 506 -187 695 -188 15 -188 169 -188 285 -189 389 -189 537 -190 434 -190 527 -191 587 -191 675 -192 27 -192 106 -192 457 -193 215 -193 544 -194 225 -194 532 -195 92 -195 496 -195 595 -196 319 -196 439 -197 393 -197 431 -198 246 -198 281 -198 299 -198 377 -198 635 -199 8 -199 413 -199 623 -200 66 -200 566 -201 81 -201 114 -201 332 -202 382 -202 552 -203 4 -203 172 -203 257 -203 503 -204 90 -204 380 -205 
107 -205 611 -206 59 -206 68 -206 91 -206 110 -206 141 -206 157 -206 654 -207 393 -207 401 -208 697 -209 80 -209 429 -210 39 -210 352 -211 102 -211 143 -212 251 -213 0 -213 353 -214 105 -215 193 -216 591 -216 638 -217 672 -218 233 -218 699 -219 98 -219 444 -220 288 -220 440 -220 579 -220 697 -221 423 -221 683 -222 228 -222 368 -223 439 -224 673 -225 145 -225 194 -225 322 -226 378 -226 512 -227 176 -227 273 -227 305 -228 96 -228 167 -228 222 -228 523 -229 455 -229 611 -230 533 -231 2 -231 13 -231 298 -232 419 -233 171 -233 218 -234 0 -234 95 -234 568 -234 658 -235 2 -235 241 -236 35 -236 464 -237 13 -237 349 -237 540 -238 460 -238 503 -239 29 -239 316 -240 525 -240 664 -240 681 -241 150 -241 235 -241 289 -242 697 -243 603 -244 482 -244 682 -245 174 -245 556 -246 198 -246 495 -246 587 -247 147 -247 391 -247 569 -248 458 -248 595 -249 368 -249 646 -250 607 -251 42 -251 212 -251 592 -251 652 -252 89 -252 148 -252 639 -253 2 -253 510 -254 375 -254 567 -255 354 -255 371 -256 374 -257 54 -257 153 -257 203 -257 293 -258 183 -258 392 -259 0 -259 378 -259 594 -260 93 -260 176 -260 471 -260 638 -261 8 -261 404 -262 32 -262 41 -262 429 -262 433 -262 532 -263 160 -263 614 -264 135 -264 535 -266 405 -266 439 -266 456 -266 494 -267 85 -267 123 -267 418 -267 452 -268 164 -269 69 -269 546 -269 669 -270 31 -270 84 -270 380 -270 496 -271 310 -271 591 -272 480 -273 51 -273 75 -273 100 -273 227 -273 419 -274 402 -274 421 -275 416 -275 675 -275 689 -276 33 -277 35 -277 99 -278 364 -278 369 -278 370 -278 401 -278 507 -278 524 -278 564 -278 597 -278 617 -278 622 -278 693 -279 452 -279 515 -280 56 -280 547 -280 553 -281 198 -281 412 -281 616 -281 622 -282 441 -283 46 -283 86 -283 561 -284 138 -284 470 -284 635 -285 57 -285 188 -285 596 -286 50 -286 409 -287 557 -287 599 -288 220 -288 363 -288 443 -289 8 -289 13 -289 77 -289 184 -289 241 -289 420 -289 560 -290 373 -291 0 -291 428 -291 597 -292 439 -292 620 -293 54 -293 90 -293 257 -294 295 -294 342 -294 486 -294 520 -294 547 -294 561 -294 
582 -294 617 -295 294 -295 439 -295 516 -296 438 -296 496 -296 653 -297 510 -297 682 -298 15 -298 231 -299 198 -300 120 -300 308 -300 394 -301 311 -301 643 -302 344 -302 378 -303 137 -303 158 -303 180 -304 48 -304 73 -304 131 -305 227 -305 501 -306 1 -306 535 -307 380 -307 680 -308 150 -308 300 -309 499 -309 625 -310 271 -310 388 -311 301 -312 592 -313 526 -313 682 -314 149 -314 655 -315 15 -315 480 -316 239 -317 499 -317 665 -318 358 -318 546 -318 694 -319 49 -319 181 -319 196 -320 0 -320 15 -320 122 -320 450 -320 687 -321 605 -322 25 -322 28 -322 225 -322 455 -322 556 -323 185 -324 99 -324 619 -325 178 -325 415 -326 478 -326 507 -327 103 -328 594 -329 448 -329 695 -330 434 -330 586 -330 646 -331 70 -331 127 -331 382 -332 81 -332 201 -333 17 -333 406 -334 53 -334 486 -334 695 -335 667 -336 162 -336 409 -337 596 -338 151 -338 570 -339 439 -341 155 -341 363 -342 294 -342 351 -342 565 -343 592 -344 302 -345 10 -345 42 -345 358 -345 396 -345 434 -345 438 -345 521 -345 639 -345 654 -347 502 -348 423 -348 683 -349 0 -349 237 -349 540 -349 698 -350 108 -350 460 -351 119 -351 165 -351 342 -351 565 -352 210 -352 599 -353 0 -353 213 -354 255 -354 383 -354 578 -356 393 -357 6 -357 648 -358 318 -358 345 -358 546 -358 669 -358 675 -359 427 -359 529 -360 109 -360 381 -361 178 -361 490 -362 461 -362 549 -363 0 -363 288 -363 341 -363 641 -364 185 -364 278 -365 54 -365 74 -365 540 -365 698 -366 112 -366 642 -367 417 -367 509 -368 0 -368 13 -368 222 -368 249 -369 121 -369 278 -369 692 -370 2 -370 13 -370 278 -370 472 -370 651 -371 44 -371 255 -372 114 -372 153 -372 409 -373 36 -373 290 -374 256 -374 577 -374 580 -375 76 -375 254 -375 435 -376 179 -376 469 -376 471 -377 175 -377 198 -377 464 -377 472 -378 8 -378 226 -378 259 -378 302 -378 512 -379 14 -379 423 -379 468 -379 471 -379 523 -379 599 -379 669 -380 204 -380 270 -380 307 -380 496 -381 8 -381 360 -382 146 -382 202 -382 331 -383 2 -383 354 -383 392 -383 565 -384 0 -384 593 -385 433 -386 79 -386 639 -388 310 -388 553 -389 13 
-389 58 -389 113 -389 161 -389 189 -389 537 -390 16 -391 247 -391 569 -392 42 -392 258 -392 383 -393 115 -393 197 -393 207 -393 356 -393 446 -393 670 -394 163 -394 300 -394 579 -395 470 -396 10 -396 345 -396 398 -396 487 -396 517 -396 546 -396 596 -396 623 -397 8 -398 67 -398 136 -398 142 -398 396 -399 521 -400 437 -401 88 -401 207 -401 278 -402 31 -402 274 -402 421 -403 37 -403 181 -403 443 -404 0 -404 74 -404 261 -404 446 -404 589 -404 617 -405 266 -405 583 -406 333 -406 471 -406 549 -407 2 -407 69 -407 596 -408 645 -409 162 -409 286 -409 336 -409 372 -410 650 -411 53 -411 623 -412 156 -412 281 -412 461 -412 586 -413 8 -413 199 -414 439 -414 471 -415 325 -415 668 -415 685 -416 275 -417 367 -418 267 -418 452 -419 2 -419 15 -419 75 -419 232 -419 273 -420 109 -420 289 -420 495 -421 85 -421 274 -421 402 -421 439 -421 459 -422 534 -423 159 -423 221 -423 348 -423 379 -423 683 -424 558 -425 0 -425 146 -426 699 -427 32 -427 359 -427 586 -428 291 -428 464 -429 80 -429 209 -429 262 -430 0 -430 445 -430 572 -431 197 -433 58 -433 262 -433 385 -434 190 -434 330 -434 345 -434 558 -434 577 -435 83 -435 182 -435 375 -435 464 -436 563 -437 153 -437 400 -437 471 -437 489 -437 533 -437 539 -437 544 -437 570 -437 573 -437 575 -437 651 -437 672 -437 688 -437 695 -437 697 -438 68 -438 102 -438 296 -438 345 -439 30 -439 167 -439 196 -439 223 -439 266 -439 292 -439 295 -439 339 -439 414 -439 421 -439 471 -440 220 -440 638 -440 690 -441 282 -442 2 -442 52 -442 538 -442 660 -443 103 -443 288 -443 403 -444 130 -444 219 -444 595 -445 430 -445 619 -446 18 -446 115 -446 128 -446 140 -446 393 -446 404 -446 670 -447 538 -448 8 -448 44 -448 329 -448 647 -448 695 -449 82 -449 583 -449 584 -450 15 -450 124 -450 320 -450 651 -451 79 -451 136 -452 85 -452 123 -452 267 -452 279 -452 418 -452 516 -453 0 -454 550 -454 699 -455 87 -455 88 -455 144 -455 229 -455 322 -455 509 -456 266 -457 27 -457 192 -457 522 -458 248 -459 421 -460 238 -460 350 -461 55 -461 362 -461 412 -461 514 -461 685 -462 556 -463 
523 -464 236 -464 377 -464 428 -464 435 -465 22 -466 13 -467 638 -468 138 -468 379 -468 674 -469 8 -469 179 -469 376 -470 284 -470 395 -470 491 -470 611 -471 93 -471 260 -471 376 -471 379 -471 406 -471 414 -471 437 -471 439 -471 616 -472 15 -472 370 -472 377 -475 696 -476 686 -477 72 -477 674 -478 133 -478 326 -479 671 -480 43 -480 100 -480 272 -480 315 -481 160 -481 628 -481 688 -482 1 -482 244 -482 535 -483 683 -485 0 -485 652 -486 294 -486 334 -487 396 -487 555 -487 620 -488 94 -489 87 -489 437 -490 361 -490 613 -490 635 -491 470 -491 542 -491 611 -491 622 -491 657 -492 551 -494 24 -494 266 -495 155 -495 168 -495 246 -495 420 -495 662 -496 92 -496 195 -496 270 -496 296 -496 380 -497 645 -498 668 -499 111 -499 309 -499 317 -500 50 -500 633 -501 134 -501 305 -502 347 -503 0 -503 203 -503 238 -503 596 -505 8 -505 698 -506 186 -507 45 -507 278 -507 326 -507 518 -507 685 -508 569 -509 367 -509 455 -510 0 -510 253 -510 297 -511 557 -512 226 -512 378 -512 555 -512 620 -513 621 -514 461 -514 615 -514 685 -515 279 -516 295 -516 452 -517 47 -517 132 -517 396 -517 555 -518 507 -519 0 -519 8 -519 13 -519 104 -520 294 -520 643 -521 49 -521 118 -521 345 -521 399 -521 615 -522 20 -522 457 -522 615 -523 228 -523 379 -523 463 -524 12 -524 278 -524 553 -524 617 -524 674 -524 685 -525 240 -525 563 -525 621 -525 681 -526 313 -527 190 -527 567 -528 690 -529 4 -529 172 -529 359 -530 646 -531 15 -532 194 -532 262 -533 101 -533 230 -533 437 -533 692 -534 422 -535 1 -535 7 -535 135 -535 264 -535 306 -535 482 -537 161 -537 189 -537 389 -538 52 -538 442 -538 447 -539 437 -539 626 -539 661 -540 237 -540 349 -540 365 -540 698 -542 491 -544 193 -544 437 -544 679 -545 42 -546 269 -546 318 -546 358 -546 396 -546 646 -546 669 -547 280 -547 294 -547 553 -547 582 -548 661 -549 57 -549 362 -549 406 -550 454 -550 699 -551 492 -551 557 -552 202 -552 696 -553 56 -553 280 -553 388 -553 524 -553 547 -554 96 -555 15 -555 487 -555 512 -555 517 -555 620 -556 28 -556 245 -556 322 -556 462 -557 21 -557 287 
-557 511 -557 551 -557 599 -557 679 -558 424 -558 434 -558 683 -559 144 -560 2 -560 289 -561 46 -561 283 -561 294 -562 63 -562 152 -562 584 -562 600 -563 15 -563 436 -563 525 -563 658 -564 278 -564 663 -565 34 -565 65 -565 342 -565 351 -565 383 -566 28 -566 39 -566 66 -566 200 -567 254 -567 527 -568 0 -568 97 -568 126 -568 234 -569 145 -569 247 -569 391 -569 508 -570 94 -570 338 -570 437 -570 656 -572 430 -573 78 -573 437 -573 634 -573 693 -574 688 -574 691 -575 63 -575 437 -575 646 -577 374 -577 434 -578 111 -578 354 -579 220 -579 394 -580 374 -580 590 -580 689 -582 294 -582 547 -582 601 -582 617 -582 642 -582 680 -582 696 -583 86 -583 154 -583 405 -583 449 -583 680 -584 154 -584 449 -584 562 -584 680 -585 2 -585 624 -586 156 -586 330 -586 412 -586 427 -586 589 -587 2 -587 191 -587 246 -587 675 -589 404 -589 586 -590 580 -591 216 -591 271 -592 19 -592 251 -592 312 -592 343 -593 84 -593 384 -593 675 -594 95 -594 124 -594 259 -594 328 -595 130 -595 195 -595 248 -595 444 -596 285 -596 337 -596 396 -596 407 -596 503 -597 278 -597 291 -599 287 -599 352 -599 379 -599 557 -599 626 -600 152 -600 562 -600 615 -601 19 -601 582 -601 611 -601 614 -601 621 -602 646 -603 0 -603 243 -605 321 -606 15 -606 651 -607 250 -609 30 -609 129 -611 40 -611 166 -611 205 -611 229 -611 470 -611 491 -611 601 -611 657 -612 37 -613 2 -613 490 -614 106 -614 263 -614 601 -615 118 -615 514 -615 521 -615 522 -615 600 -616 29 -616 95 -616 281 -616 471 -617 278 -617 294 -617 404 -617 524 -617 582 -617 635 -617 668 -618 685 -619 99 -619 324 -619 445 -620 292 -620 487 -620 512 -620 555 -621 513 -621 525 -621 601 -622 278 -622 281 -622 491 -622 685 -623 70 -623 199 -623 396 -623 411 -624 585 -625 151 -625 309 -626 539 -626 599 -627 11 -628 481 -628 688 -629 127 -631 33 -631 647 -633 500 -634 573 -634 647 -635 198 -635 284 -635 490 -635 617 -637 25 -638 216 -638 260 -638 440 -638 467 -638 656 -638 677 -639 148 -639 252 -639 345 -639 386 -640 13 -640 71 -641 0 -641 15 -641 18 -641 363 -641 670 -642 0 -642 
366 -642 582 -643 301 -643 520 -644 9 -644 150 -645 173 -645 408 -645 497 -646 63 -646 249 -646 330 -646 530 -646 546 -646 575 -646 602 -647 448 -647 631 -647 634 -648 357 -648 658 -649 2 -650 410 -651 370 -651 437 -651 450 -651 606 -652 251 -652 485 -653 21 -653 296 -654 59 -654 91 -654 153 -654 206 -654 345 -655 314 -656 0 -656 570 -656 638 -657 40 -657 166 -657 491 -657 611 -658 234 -658 563 -658 648 -659 1 -660 442 -661 539 -661 548 -662 26 -662 495 -663 0 -663 564 -664 0 -664 8 -664 240 -665 317 -667 1 -667 335 -668 173 -668 415 -668 498 -668 617 -669 23 -669 269 -669 358 -669 379 -669 546 -670 17 -670 393 -670 446 -670 641 -671 5 -671 13 -671 479 -672 217 -672 437 -673 224 -674 468 -674 477 -674 524 -675 0 -675 61 -675 191 -675 275 -675 358 -675 587 -675 593 -677 638 -678 137 -679 544 -679 557 -680 86 -680 163 -680 307 -680 582 -680 583 -680 584 -681 125 -681 138 -681 240 -681 525 -682 244 -682 297 -682 313 -683 221 -683 348 -683 423 -683 483 -683 558 -684 1 -685 1 -685 45 -685 415 -685 461 -685 507 -685 514 -685 524 -685 618 -685 622 -686 16 -686 476 -687 0 -687 13 -687 53 -687 68 -687 320 -688 437 -688 481 -688 574 -688 628 -689 61 -689 275 -689 580 -690 440 -690 528 -691 8 -691 62 -691 574 -692 101 -692 369 -692 533 -693 78 -693 278 -693 573 -694 318 -695 110 -695 187 -695 329 -695 334 -695 437 -695 448 -696 40 -696 475 -696 552 -696 582 -697 65 -697 208 -697 220 -697 242 -697 437 -698 349 -698 365 -698 505 -698 540 -699 218 -699 426 -699 454 -699 550 diff --git a/pygip/models/attack/mea/data/attack2_generated_graph/pubmed/query_labels.txt b/pygip/models/attack/mea/data/attack2_generated_graph/pubmed/query_labels.txt deleted file mode 100644 index cfd38030..00000000 --- a/pygip/models/attack/mea/data/attack2_generated_graph/pubmed/query_labels.txt +++ /dev/null @@ -1,19717 +0,0 @@ -0 1 -1 1 -2 0 -3 2 -4 0 -5 2 -6 2 -7 1 -8 2 -9 1 -10 2 -11 2 -12 2 -13 2 -14 2 -15 0 -16 1 -17 2 -18 1 -19 2 -20 2 -21 1 -22 2 -23 2 -24 2 -25 1 -26 2 -27 2 -28 2 -29 1 -30 1 
-31 1 -32 2 -33 1 -34 0 -35 0 -36 1 -37 1 -38 0 -39 2 -40 0 -41 1 -42 0 -43 1 -44 1 -45 1 -46 1 -47 0 -48 0 -49 0 -50 0 -51 0 -52 1 -53 0 -54 0 -55 0 -56 0 -57 0 -58 0 -59 0 -60 2 -61 2 -62 2 -63 0 -64 1 -65 1 -66 2 -67 1 -68 0 -69 1 -70 0 -71 2 -72 0 -73 2 -74 1 -75 2 -76 2 -77 0 -78 0 -79 0 -80 0 -81 2 -82 1 -83 0 -84 1 -85 2 -86 1 -87 2 -88 1 -89 1 -90 0 -91 1 -92 1 -93 2 -94 0 -95 0 -96 1 -97 2 -98 2 -99 1 -100 1 -101 0 -102 0 -103 2 -104 1 -105 1 -106 0 -107 1 -108 2 -109 2 -110 0 -111 1 -112 1 -113 1 -114 1 -115 0 -116 2 -117 2 -118 1 -119 1 -120 1 -121 0 -122 2 -123 0 -124 2 -125 1 -126 0 -127 0 -128 2 -129 1 -130 0 -131 2 -132 0 -133 0 -134 0 -135 2 -136 0 -137 1 -138 2 -139 0 -140 1 -141 0 -142 0 -143 2 -144 2 -145 2 -146 2 -147 1 -148 2 -149 2 -150 1 -151 0 -152 2 -153 0 -154 0 -155 0 -156 1 -157 1 -158 1 -159 1 -160 1 -161 1 -162 2 -163 1 -164 1 -165 1 -166 1 -167 2 -168 1 -169 0 -170 1 -171 1 -172 1 -173 0 -174 1 -175 2 -176 1 -177 1 -178 1 -179 0 -180 1 -181 2 -182 2 -183 1 -184 1 -185 1 -186 2 -187 0 -188 1 -189 2 -190 2 -191 1 -192 2 -193 1 -194 0 -195 2 -196 1 -197 1 -198 1 -199 2 -200 2 -201 2 -202 2 -203 1 -204 1 -205 2 -206 0 -207 0 -208 2 -209 2 -210 2 -211 2 -212 1 -213 2 -214 1 -215 1 -216 1 -217 1 -218 2 -219 1 -220 1 -221 1 -222 0 -223 0 -224 2 -225 2 -226 2 -227 2 -228 2 -229 2 -230 0 -231 2 -232 1 -233 0 -234 0 -235 1 -236 0 -237 0 -238 0 -239 0 -240 1 -241 1 -242 2 -243 1 -244 0 -245 2 -246 2 -247 2 -248 0 -249 1 -250 2 -251 1 -252 0 -253 1 -254 1 -255 0 -256 1 -257 1 -258 2 -259 0 -260 2 -261 0 -262 0 -263 1 -264 2 -265 2 -266 1 -267 1 -268 2 -269 1 -270 2 -271 2 -272 2 -273 1 -274 2 -275 1 -276 1 -277 1 -278 2 -279 1 -280 1 -281 2 -282 2 -283 2 -284 1 -285 1 -286 2 -287 2 -288 2 -289 1 -290 1 -291 1 -292 1 -293 2 -294 2 -295 2 -296 0 -297 0 -298 2 -299 1 -300 0 -301 0 -302 1 -303 2 -304 1 -305 0 -306 2 -307 0 -308 0 -309 0 -310 1 -311 2 -312 1 -313 1 -314 1 -315 2 -316 2 -317 2 -318 0 -319 1 -320 0 -321 1 -322 1 -323 0 -324 1 -325 1 
-326 2 -327 1 -328 0 -329 1 -330 1 -331 1 -332 1 -333 2 -334 0 -335 1 -336 2 -337 2 -338 2 -339 2 -340 1 -341 1 -342 2 -343 1 -344 2 -345 2 -346 1 -347 1 -348 2 -349 2 -350 1 -351 2 -352 1 -353 2 -354 1 -355 1 -356 1 -357 1 -358 0 -359 1 -360 2 -361 2 -362 1 -363 1 -364 1 -365 1 -366 1 -367 1 -368 2 -369 1 -370 2 -371 1 -372 2 -373 1 -374 2 -375 2 -376 1 -377 1 -378 1 -379 1 -380 1 -381 1 -382 2 -383 0 -384 1 -385 2 -386 2 -387 2 -388 2 -389 2 -390 2 -391 1 -392 1 -393 1 -394 0 -395 2 -396 0 -397 1 -398 2 -399 1 -400 1 -401 1 -402 1 -403 1 -404 1 -405 1 -406 0 -407 1 -408 1 -409 0 -410 2 -411 1 -412 2 -413 0 -414 1 -415 0 -416 1 -417 1 -418 1 -419 2 -420 1 -421 2 -422 1 -423 1 -424 2 -425 1 -426 1 -427 2 -428 1 -429 0 -430 1 -431 1 -432 2 -433 1 -434 1 -435 1 -436 1 -437 0 -438 1 -439 1 -440 1 -441 2 -442 2 -443 2 -444 2 -445 1 -446 1 -447 1 -448 1 -449 0 -450 2 -451 1 -452 1 -453 1 -454 2 -455 1 -456 1 -457 1 -458 2 -459 2 -460 1 -461 1 -462 1 -463 0 -464 2 -465 1 -466 1 -467 1 -468 2 -469 2 -470 0 -471 2 -472 1 -473 0 -474 2 -475 2 -476 0 -477 1 -478 1 -479 2 -480 0 -481 1 -482 0 -483 2 -484 1 -485 2 -486 2 -487 1 -488 2 -489 2 -490 1 -491 1 -492 0 -493 2 -494 1 -495 0 -496 1 -497 1 -498 1 -499 1 -500 2 -501 0 -502 1 -503 2 -504 2 -505 1 -506 2 -507 0 -508 2 -509 0 -510 1 -511 2 -512 1 -513 1 -514 2 -515 2 -516 0 -517 1 -518 2 -519 0 -520 0 -521 1 -522 1 -523 2 -524 1 -525 1 -526 1 -527 0 -528 2 -529 0 -530 1 -531 0 -532 2 -533 2 -534 2 -535 2 -536 0 -537 0 -538 1 -539 0 -540 1 -541 1 -542 0 -543 1 -544 1 -545 2 -546 1 -547 2 -548 1 -549 0 -550 0 -551 2 -552 1 -553 2 -554 1 -555 1 -556 1 -557 0 -558 2 -559 0 -560 1 -561 1 -562 1 -563 2 -564 1 -565 2 -566 2 -567 1 -568 1 -569 1 -570 0 -571 1 -572 1 -573 1 -574 2 -575 0 -576 1 -577 2 -578 2 -579 2 -580 2 -581 0 -582 0 -583 2 -584 1 -585 1 -586 1 -587 0 -588 1 -589 0 -590 1 -591 0 -592 1 -593 1 -594 2 -595 0 -596 2 -597 0 -598 0 -599 2 -600 2 -601 2 -602 2 -603 2 -604 0 -605 1 -606 1 -607 1 -608 1 -609 1 -610 1 -611 
2 -612 2 -613 1 -614 0 -615 0 -616 1 -617 2 -618 1 -619 0 -620 0 -621 1 -622 1 -623 0 -624 2 -625 1 -626 1 -627 0 -628 2 -629 1 -630 2 -631 2 -632 0 -633 0 -634 0 -635 1 -636 0 -637 2 -638 1 -639 1 -640 1 -641 1 -642 1 -643 0 -644 0 -645 1 -646 1 -647 1 -648 1 -649 2 -650 1 -651 1 -652 1 -653 2 -654 0 -655 1 -656 2 -657 0 -658 2 -659 1 -660 2 -661 0 -662 1 -663 2 -664 1 -665 1 -666 0 -667 2 -668 1 -669 2 -670 2 -671 1 -672 2 -673 1 -674 0 -675 0 -676 2 -677 0 -678 1 -679 1 -680 1 -681 2 -682 2 -683 2 -684 0 -685 0 -686 1 -687 2 -688 2 -689 1 -690 0 -691 1 -692 1 -693 0 -694 2 -695 1 -696 1 -697 1 -698 1 -699 2 -700 2 -701 2 -702 2 -703 2 -704 1 -705 1 -706 0 -707 2 -708 1 -709 2 -710 1 -711 2 -712 1 -713 1 -714 1 -715 0 -716 2 -717 2 -718 1 -719 2 -720 0 -721 1 -722 1 -723 1 -724 2 -725 1 -726 2 -727 1 -728 1 -729 0 -730 0 -731 1 -732 1 -733 2 -734 1 -735 2 -736 1 -737 0 -738 2 -739 1 -740 1 -741 1 -742 0 -743 2 -744 1 -745 1 -746 1 -747 1 -748 1 -749 2 -750 2 -751 0 -752 0 -753 2 -754 1 -755 0 -756 1 -757 1 -758 2 -759 1 -760 0 -761 1 -762 1 -763 1 -764 2 -765 2 -766 1 -767 2 -768 1 -769 0 -770 1 -771 1 -772 2 -773 1 -774 2 -775 0 -776 2 -777 2 -778 2 -779 2 -780 2 -781 1 -782 1 -783 1 -784 0 -785 1 -786 2 -787 2 -788 1 -789 0 -790 0 -791 2 -792 0 -793 1 -794 1 -795 1 -796 1 -797 1 -798 1 -799 1 -800 1 -801 1 -802 0 -803 2 -804 1 -805 2 -806 0 -807 0 -808 1 -809 2 -810 1 -811 1 -812 2 -813 1 -814 1 -815 1 -816 0 -817 0 -818 1 -819 1 -820 0 -821 1 -822 2 -823 1 -824 1 -825 2 -826 2 -827 2 -828 1 -829 1 -830 2 -831 2 -832 1 -833 1 -834 0 -835 0 -836 1 -837 1 -838 2 -839 1 -840 2 -841 1 -842 1 -843 0 -844 0 -845 2 -846 2 -847 2 -848 1 -849 2 -850 0 -851 2 -852 1 -853 0 -854 2 -855 1 -856 1 -857 1 -858 1 -859 2 -860 1 -861 2 -862 2 -863 2 -864 1 -865 2 -866 2 -867 0 -868 2 -869 0 -870 0 -871 2 -872 1 -873 1 -874 2 -875 1 -876 1 -877 2 -878 1 -879 1 -880 2 -881 2 -882 1 -883 2 -884 1 -885 1 -886 2 -887 2 -888 1 -889 1 -890 2 -891 0 -892 1 -893 2 -894 2 -895 2 -896 0 
-897 1 -898 1 -899 1 -900 0 -901 1 -902 2 -903 1 -904 2 -905 1 -906 2 -907 0 -908 1 -909 2 -910 0 -911 1 -912 1 -913 0 -914 2 -915 1 -916 2 -917 2 -918 2 -919 1 -920 2 -921 1 -922 1 -923 1 -924 2 -925 1 -926 1 -927 0 -928 1 -929 0 -930 2 -931 2 -932 2 -933 0 -934 1 -935 1 -936 0 -937 0 -938 0 -939 1 -940 0 -941 1 -942 1 -943 1 -944 2 -945 1 -946 2 -947 1 -948 2 -949 2 -950 1 -951 2 -952 1 -953 2 -954 1 -955 1 -956 2 -957 0 -958 1 -959 2 -960 1 -961 1 -962 1 -963 2 -964 1 -965 1 -966 0 -967 1 -968 1 -969 2 -970 2 -971 1 -972 1 -973 2 -974 1 -975 0 -976 0 -977 0 -978 0 -979 2 -980 1 -981 1 -982 2 -983 1 -984 0 -985 2 -986 2 -987 2 -988 1 -989 1 -990 2 -991 1 -992 2 -993 1 -994 0 -995 1 -996 0 -997 1 -998 0 -999 1 -1000 2 -1001 2 -1002 2 -1003 0 -1004 0 -1005 2 -1006 1 -1007 2 -1008 1 -1009 0 -1010 0 -1011 0 -1012 2 -1013 2 -1014 2 -1015 2 -1016 1 -1017 2 -1018 1 -1019 1 -1020 1 -1021 2 -1022 0 -1023 2 -1024 2 -1025 2 -1026 1 -1027 2 -1028 2 -1029 2 -1030 2 -1031 0 -1032 1 -1033 1 -1034 0 -1035 1 -1036 2 -1037 0 -1038 1 -1039 2 -1040 2 -1041 0 -1042 0 -1043 2 -1044 1 -1045 2 -1046 1 -1047 1 -1048 2 -1049 1 -1050 2 -1051 1 -1052 2 -1053 0 -1054 1 -1055 1 -1056 1 -1057 2 -1058 1 -1059 1 -1060 1 -1061 2 -1062 0 -1063 1 -1064 1 -1065 1 -1066 0 -1067 0 -1068 1 -1069 1 -1070 1 -1071 2 -1072 0 -1073 2 -1074 2 -1075 0 -1076 2 -1077 1 -1078 1 -1079 0 -1080 2 -1081 2 -1082 2 -1083 1 -1084 0 -1085 0 -1086 0 -1087 1 -1088 1 -1089 1 -1090 1 -1091 2 -1092 2 -1093 0 -1094 1 -1095 2 -1096 1 -1097 1 -1098 2 -1099 1 -1100 0 -1101 0 -1102 2 -1103 0 -1104 1 -1105 0 -1106 1 -1107 0 -1108 0 -1109 0 -1110 1 -1111 2 -1112 1 -1113 2 -1114 1 -1115 2 -1116 2 -1117 1 -1118 1 -1119 2 -1120 1 -1121 1 -1122 0 -1123 2 -1124 2 -1125 1 -1126 1 -1127 1 -1128 2 -1129 0 -1130 1 -1131 1 -1132 1 -1133 1 -1134 0 -1135 0 -1136 2 -1137 0 -1138 2 -1139 1 -1140 0 -1141 2 -1142 2 -1143 0 -1144 2 -1145 1 -1146 2 -1147 0 -1148 0 -1149 1 -1150 0 -1151 1 -1152 1 -1153 1 -1154 1 -1155 1 -1156 1 -1157 1 -1158 1 -1159 
2 -1160 2 -1161 1 -1162 1 -1163 0 -1164 1 -1165 2 -1166 1 -1167 2 -1168 1 -1169 1 -1170 0 -1171 1 -1172 2 -1173 1 -1174 0 -1175 2 -1176 0 -1177 1 -1178 2 -1179 1 -1180 1 -1181 1 -1182 1 -1183 0 -1184 2 -1185 1 -1186 1 -1187 2 -1188 0 -1189 1 -1190 1 -1191 0 -1192 2 -1193 1 -1194 2 -1195 1 -1196 2 -1197 2 -1198 1 -1199 2 -1200 2 -1201 0 -1202 1 -1203 0 -1204 2 -1205 2 -1206 1 -1207 1 -1208 1 -1209 1 -1210 2 -1211 2 -1212 1 -1213 2 -1214 2 -1215 1 -1216 0 -1217 1 -1218 0 -1219 1 -1220 1 -1221 2 -1222 1 -1223 2 -1224 1 -1225 2 -1226 1 -1227 1 -1228 1 -1229 0 -1230 2 -1231 2 -1232 2 -1233 1 -1234 1 -1235 2 -1236 0 -1237 2 -1238 1 -1239 1 -1240 1 -1241 1 -1242 1 -1243 1 -1244 2 -1245 1 -1246 1 -1247 1 -1248 0 -1249 2 -1250 1 -1251 0 -1252 0 -1253 2 -1254 1 -1255 2 -1256 1 -1257 2 -1258 0 -1259 0 -1260 1 -1261 2 -1262 0 -1263 2 -1264 2 -1265 2 -1266 1 -1267 1 -1268 2 -1269 1 -1270 2 -1271 2 -1272 1 -1273 1 -1274 2 -1275 2 -1276 2 -1277 2 -1278 2 -1279 1 -1280 2 -1281 0 -1282 2 -1283 1 -1284 2 -1285 1 -1286 1 -1287 0 -1288 1 -1289 2 -1290 1 -1291 0 -1292 2 -1293 1 -1294 1 -1295 1 -1296 2 -1297 1 -1298 0 -1299 1 -1300 1 -1301 2 -1302 1 -1303 2 -1304 1 -1305 1 -1306 0 -1307 2 -1308 1 -1309 0 -1310 0 -1311 0 -1312 2 -1313 1 -1314 2 -1315 0 -1316 1 -1317 0 -1318 0 -1319 2 -1320 1 -1321 0 -1322 1 -1323 0 -1324 1 -1325 1 -1326 1 -1327 0 -1328 1 -1329 2 -1330 1 -1331 1 -1332 1 -1333 0 -1334 1 -1335 1 -1336 1 -1337 1 -1338 1 -1339 1 -1340 1 -1341 0 -1342 1 -1343 1 -1344 0 -1345 2 -1346 1 -1347 2 -1348 1 -1349 2 -1350 1 -1351 2 -1352 0 -1353 2 -1354 2 -1355 2 -1356 1 -1357 2 -1358 0 -1359 0 -1360 1 -1361 1 -1362 2 -1363 1 -1364 0 -1365 2 -1366 0 -1367 1 -1368 2 -1369 1 -1370 1 -1371 1 -1372 0 -1373 1 -1374 2 -1375 1 -1376 1 -1377 1 -1378 1 -1379 1 -1380 2 -1381 2 -1382 1 -1383 2 -1384 2 -1385 1 -1386 1 -1387 1 -1388 2 -1389 1 -1390 1 -1391 1 -1392 1 -1393 0 -1394 2 -1395 1 -1396 1 -1397 2 -1398 1 -1399 1 -1400 0 -1401 0 -1402 1 -1403 0 -1404 1 -1405 1 -1406 2 -1407 1 -1408 2 -1409 
2 -1410 1 -1411 1 -1412 2 -1413 1 -1414 1 -1415 2 -1416 2 -1417 1 -1418 1 -1419 0 -1420 0 -1421 1 -1422 0 -1423 2 -1424 2 -1425 1 -1426 2 -1427 2 -1428 0 -1429 1 -1430 0 -1431 0 -1432 1 -1433 2 -1434 1 -1435 0 -1436 2 -1437 0 -1438 2 -1439 2 -1440 1 -1441 2 -1442 2 -1443 2 -1444 2 -1445 2 -1446 0 -1447 1 -1448 0 -1449 1 -1450 1 -1451 1 -1452 2 -1453 1 -1454 2 -1455 0 -1456 1 -1457 0 -1458 1 -1459 1 -1460 1 -1461 1 -1462 2 -1463 2 -1464 2 -1465 2 -1466 1 -1467 0 -1468 0 -1469 1 -1470 2 -1471 2 -1472 1 -1473 0 -1474 2 -1475 1 -1476 2 -1477 0 -1478 1 -1479 2 -1480 1 -1481 1 -1482 1 -1483 2 -1484 2 -1485 1 -1486 1 -1487 1 -1488 2 -1489 2 -1490 0 -1491 2 -1492 2 -1493 2 -1494 1 -1495 1 -1496 1 -1497 2 -1498 1 -1499 0 -1500 2 -1501 2 -1502 1 -1503 1 -1504 2 -1505 0 -1506 2 -1507 1 -1508 0 -1509 0 -1510 2 -1511 0 -1512 1 -1513 2 -1514 0 -1515 1 -1516 0 -1517 0 -1518 1 -1519 2 -1520 0 -1521 2 -1522 0 -1523 1 -1524 0 -1525 2 -1526 0 -1527 1 -1528 2 -1529 2 -1530 0 -1531 1 -1532 1 -1533 2 -1534 1 -1535 1 -1536 2 -1537 0 -1538 1 -1539 0 -1540 0 -1541 1 -1542 1 -1543 2 -1544 1 -1545 2 -1546 1 -1547 2 -1548 0 -1549 1 -1550 0 -1551 2 -1552 2 -1553 2 -1554 0 -1555 1 -1556 2 -1557 2 -1558 1 -1559 2 -1560 1 -1561 2 -1562 1 -1563 2 -1564 2 -1565 2 -1566 2 -1567 1 -1568 1 -1569 2 -1570 2 -1571 2 -1572 1 -1573 1 -1574 1 -1575 0 -1576 0 -1577 0 -1578 1 -1579 0 -1580 0 -1581 1 -1582 1 -1583 1 -1584 1 -1585 0 -1586 1 -1587 1 -1588 2 -1589 0 -1590 1 -1591 0 -1592 1 -1593 1 -1594 1 -1595 2 -1596 1 -1597 1 -1598 1 -1599 1 -1600 1 -1601 0 -1602 2 -1603 1 -1604 1 -1605 1 -1606 1 -1607 2 -1608 0 -1609 2 -1610 0 -1611 0 -1612 0 -1613 0 -1614 0 -1615 1 -1616 1 -1617 2 -1618 2 -1619 2 -1620 0 -1621 1 -1622 1 -1623 0 -1624 2 -1625 1 -1626 1 -1627 1 -1628 1 -1629 2 -1630 2 -1631 2 -1632 0 -1633 1 -1634 2 -1635 2 -1636 2 -1637 0 -1638 1 -1639 1 -1640 2 -1641 1 -1642 2 -1643 0 -1644 1 -1645 1 -1646 1 -1647 2 -1648 0 -1649 1 -1650 1 -1651 0 -1652 1 -1653 1 -1654 2 -1655 0 -1656 1 -1657 1 -1658 0 -1659 
0 -1660 1 -1661 1 -1662 1 -1663 0 -1664 0 -1665 2 -1666 1 -1667 2 -1668 2 -1669 0 -1670 1 -1671 0 -1672 1 -1673 0 -1674 1 -1675 2 -1676 1 -1677 1 -1678 2 -1679 0 -1680 1 -1681 2 -1682 2 -1683 1 -1684 0 -1685 1 -1686 1 -1687 0 -1688 1 -1689 1 -1690 2 -1691 1 -1692 2 -1693 1 -1694 2 -1695 2 -1696 1 -1697 2 -1698 0 -1699 1 -1700 0 -1701 1 -1702 2 -1703 0 -1704 2 -1705 2 -1706 1 -1707 1 -1708 1 -1709 1 -1710 0 -1711 0 -1712 0 -1713 0 -1714 1 -1715 1 -1716 1 -1717 2 -1718 1 -1719 0 -1720 1 -1721 1 -1722 1 -1723 2 -1724 2 -1725 2 -1726 1 -1727 2 -1728 1 -1729 2 -1730 1 -1731 0 -1732 1 -1733 1 -1734 0 -1735 0 -1736 2 -1737 1 -1738 1 -1739 0 -1740 1 -1741 0 -1742 2 -1743 2 -1744 1 -1745 0 -1746 2 -1747 1 -1748 1 -1749 1 -1750 0 -1751 0 -1752 0 -1753 1 -1754 1 -1755 0 -1756 2 -1757 2 -1758 0 -1759 1 -1760 1 -1761 2 -1762 1 -1763 0 -1764 1 -1765 1 -1766 1 -1767 2 -1768 1 -1769 2 -1770 0 -1771 0 -1772 1 -1773 0 -1774 0 -1775 2 -1776 2 -1777 1 -1778 2 -1779 0 -1780 2 -1781 1 -1782 1 -1783 0 -1784 1 -1785 2 -1786 1 -1787 1 -1788 2 -1789 2 -1790 0 -1791 0 -1792 0 -1793 2 -1794 2 -1795 1 -1796 1 -1797 1 -1798 2 -1799 0 -1800 1 -1801 1 -1802 0 -1803 2 -1804 1 -1805 0 -1806 1 -1807 2 -1808 1 -1809 1 -1810 1 -1811 2 -1812 1 -1813 2 -1814 1 -1815 2 -1816 0 -1817 0 -1818 2 -1819 1 -1820 1 -1821 0 -1822 1 -1823 0 -1824 1 -1825 1 -1826 1 -1827 0 -1828 2 -1829 1 -1830 0 -1831 1 -1832 2 -1833 0 -1834 1 -1835 1 -1836 1 -1837 2 -1838 0 -1839 1 -1840 1 -1841 1 -1842 2 -1843 1 -1844 1 -1845 1 -1846 2 -1847 2 -1848 0 -1849 1 -1850 2 -1851 0 -1852 1 -1853 0 -1854 1 -1855 0 -1856 1 -1857 1 -1858 2 -1859 1 -1860 2 -1861 2 -1862 2 -1863 2 -1864 1 -1865 2 -1866 2 -1867 2 -1868 2 -1869 2 -1870 1 -1871 1 -1872 0 -1873 0 -1874 1 -1875 1 -1876 2 -1877 1 -1878 0 -1879 0 -1880 1 -1881 2 -1882 2 -1883 0 -1884 2 -1885 1 -1886 0 -1887 2 -1888 2 -1889 1 -1890 2 -1891 2 -1892 1 -1893 0 -1894 1 -1895 2 -1896 1 -1897 2 -1898 0 -1899 0 -1900 1 -1901 0 -1902 2 -1903 1 -1904 1 -1905 2 -1906 1 -1907 2 -1908 1 -1909 
1 -1910 2 -1911 2 -1912 1 -1913 0 -1914 0 -1915 1 -1916 2 -1917 2 -1918 1 -1919 1 -1920 1 -1921 1 -1922 0 -1923 2 -1924 2 -1925 2 -1926 1 -1927 1 -1928 1 -1929 1 -1930 2 -1931 0 -1932 1 -1933 2 -1934 1 -1935 0 -1936 1 -1937 2 -1938 1 -1939 1 -1940 0 -1941 1 -1942 2 -1943 2 -1944 1 -1945 1 -1946 2 -1947 1 -1948 2 -1949 2 -1950 1 -1951 2 -1952 2 -1953 2 -1954 2 -1955 1 -1956 2 -1957 2 -1958 2 -1959 0 -1960 0 -1961 1 -1962 2 -1963 0 -1964 1 -1965 0 -1966 1 -1967 1 -1968 1 -1969 1 -1970 1 -1971 1 -1972 1 -1973 0 -1974 1 -1975 1 -1976 1 -1977 1 -1978 1 -1979 2 -1980 0 -1981 1 -1982 1 -1983 2 -1984 0 -1985 1 -1986 1 -1987 1 -1988 2 -1989 2 -1990 1 -1991 2 -1992 2 -1993 1 -1994 2 -1995 1 -1996 2 -1997 2 -1998 2 -1999 1 -2000 0 -2001 2 -2002 1 -2003 2 -2004 2 -2005 1 -2006 1 -2007 1 -2008 2 -2009 1 -2010 0 -2011 1 -2012 0 -2013 0 -2014 0 -2015 2 -2016 0 -2017 2 -2018 0 -2019 1 -2020 1 -2021 1 -2022 2 -2023 2 -2024 1 -2025 1 -2026 0 -2027 2 -2028 2 -2029 0 -2030 2 -2031 1 -2032 2 -2033 1 -2034 1 -2035 1 -2036 1 -2037 2 -2038 1 -2039 1 -2040 2 -2041 1 -2042 1 -2043 1 -2044 1 -2045 1 -2046 1 -2047 0 -2048 1 -2049 2 -2050 2 -2051 1 -2052 2 -2053 0 -2054 1 -2055 1 -2056 0 -2057 2 -2058 1 -2059 0 -2060 2 -2061 1 -2062 2 -2063 0 -2064 1 -2065 2 -2066 1 -2067 0 -2068 1 -2069 2 -2070 1 -2071 2 -2072 1 -2073 2 -2074 1 -2075 1 -2076 2 -2077 0 -2078 1 -2079 0 -2080 1 -2081 1 -2082 2 -2083 2 -2084 2 -2085 1 -2086 2 -2087 0 -2088 1 -2089 1 -2090 1 -2091 2 -2092 0 -2093 1 -2094 1 -2095 2 -2096 0 -2097 1 -2098 2 -2099 0 -2100 2 -2101 1 -2102 0 -2103 1 -2104 2 -2105 0 -2106 1 -2107 1 -2108 1 -2109 0 -2110 1 -2111 2 -2112 2 -2113 1 -2114 2 -2115 2 -2116 2 -2117 2 -2118 1 -2119 1 -2120 0 -2121 0 -2122 2 -2123 2 -2124 2 -2125 2 -2126 2 -2127 2 -2128 2 -2129 1 -2130 2 -2131 1 -2132 1 -2133 0 -2134 2 -2135 1 -2136 1 -2137 1 -2138 0 -2139 2 -2140 1 -2141 1 -2142 2 -2143 1 -2144 1 -2145 1 -2146 0 -2147 0 -2148 1 -2149 0 -2150 0 -2151 0 -2152 2 -2153 1 -2154 2 -2155 0 -2156 1 -2157 2 -2158 1 -2159 
2 -2160 1 -2161 0 -2162 1 -2163 0 -2164 2 -2165 1 -2166 1 -2167 0 -2168 1 -2169 1 -2170 2 -2171 1 -2172 1 -2173 2 -2174 0 -2175 2 -2176 1 -2177 0 -2178 2 -2179 1 -2180 2 -2181 2 -2182 0 -2183 0 -2184 0 -2185 1 -2186 1 -2187 2 -2188 2 -2189 1 -2190 2 -2191 0 -2192 1 -2193 2 -2194 1 -2195 2 -2196 1 -2197 2 -2198 0 -2199 2 -2200 1 -2201 1 -2202 0 -2203 1 -2204 0 -2205 0 -2206 1 -2207 1 -2208 1 -2209 0 -2210 1 -2211 1 -2212 1 -2213 1 -2214 0 -2215 2 -2216 2 -2217 2 -2218 0 -2219 1 -2220 1 -2221 2 -2222 1 -2223 1 -2224 0 -2225 1 -2226 1 -2227 2 -2228 2 -2229 1 -2230 0 -2231 1 -2232 2 -2233 0 -2234 0 -2235 1 -2236 0 -2237 2 -2238 0 -2239 0 -2240 2 -2241 1 -2242 1 -2243 2 -2244 1 -2245 2 -2246 1 -2247 2 -2248 0 -2249 2 -2250 2 -2251 2 -2252 2 -2253 2 -2254 2 -2255 1 -2256 2 -2257 1 -2258 1 -2259 2 -2260 1 -2261 2 -2262 0 -2263 1 -2264 0 -2265 1 -2266 1 -2267 0 -2268 2 -2269 1 -2270 1 -2271 2 -2272 0 -2273 1 -2274 1 -2275 0 -2276 1 -2277 2 -2278 2 -2279 1 -2280 2 -2281 0 -2282 1 -2283 2 -2284 1 -2285 2 -2286 2 -2287 2 -2288 0 -2289 2 -2290 1 -2291 2 -2292 1 -2293 1 -2294 1 -2295 0 -2296 1 -2297 2 -2298 1 -2299 2 -2300 0 -2301 2 -2302 0 -2303 0 -2304 2 -2305 0 -2306 2 -2307 0 -2308 2 -2309 1 -2310 1 -2311 1 -2312 2 -2313 2 -2314 1 -2315 2 -2316 1 -2317 2 -2318 0 -2319 2 -2320 2 -2321 2 -2322 2 -2323 1 -2324 2 -2325 2 -2326 1 -2327 1 -2328 1 -2329 2 -2330 1 -2331 1 -2332 2 -2333 2 -2334 2 -2335 2 -2336 2 -2337 0 -2338 0 -2339 2 -2340 2 -2341 2 -2342 1 -2343 2 -2344 1 -2345 2 -2346 2 -2347 2 -2348 1 -2349 1 -2350 2 -2351 2 -2352 2 -2353 1 -2354 2 -2355 2 -2356 1 -2357 1 -2358 1 -2359 0 -2360 1 -2361 2 -2362 0 -2363 1 -2364 1 -2365 2 -2366 2 -2367 0 -2368 2 -2369 1 -2370 2 -2371 1 -2372 1 -2373 2 -2374 1 -2375 1 -2376 1 -2377 2 -2378 2 -2379 1 -2380 1 -2381 0 -2382 2 -2383 2 -2384 1 -2385 2 -2386 2 -2387 1 -2388 2 -2389 2 -2390 0 -2391 0 -2392 0 -2393 0 -2394 1 -2395 1 -2396 2 -2397 0 -2398 1 -2399 1 -2400 2 -2401 1 -2402 2 -2403 1 -2404 1 -2405 1 -2406 2 -2407 2 -2408 2 -2409 
1 -2410 2 -2411 1 -2412 1 -2413 0 -2414 1 -2415 1 -2416 2 -2417 1 -2418 0 -2419 1 -2420 0 -2421 0 -2422 1 -2423 1 -2424 1 -2425 2 -2426 0 -2427 2 -2428 2 -2429 0 -2430 1 -2431 1 -2432 0 -2433 0 -2434 0 -2435 0 -2436 1 -2437 2 -2438 1 -2439 2 -2440 2 -2441 1 -2442 1 -2443 0 -2444 1 -2445 2 -2446 1 -2447 1 -2448 1 -2449 1 -2450 1 -2451 2 -2452 1 -2453 1 -2454 0 -2455 0 -2456 0 -2457 2 -2458 2 -2459 1 -2460 1 -2461 1 -2462 1 -2463 2 -2464 2 -2465 1 -2466 2 -2467 0 -2468 1 -2469 1 -2470 1 -2471 2 -2472 2 -2473 0 -2474 0 -2475 2 -2476 1 -2477 2 -2478 1 -2479 2 -2480 1 -2481 2 -2482 0 -2483 0 -2484 2 -2485 2 -2486 0 -2487 0 -2488 1 -2489 1 -2490 0 -2491 2 -2492 0 -2493 2 -2494 2 -2495 2 -2496 2 -2497 1 -2498 2 -2499 2 -2500 1 -2501 2 -2502 2 -2503 0 -2504 1 -2505 1 -2506 2 -2507 0 -2508 2 -2509 0 -2510 2 -2511 0 -2512 1 -2513 0 -2514 0 -2515 0 -2516 0 -2517 2 -2518 1 -2519 0 -2520 2 -2521 2 -2522 2 -2523 2 -2524 2 -2525 1 -2526 1 -2527 0 -2528 1 -2529 2 -2530 2 -2531 0 -2532 2 -2533 0 -2534 2 -2535 1 -2536 1 -2537 0 -2538 1 -2539 2 -2540 0 -2541 2 -2542 2 -2543 0 -2544 0 -2545 2 -2546 1 -2547 0 -2548 1 -2549 1 -2550 0 -2551 0 -2552 1 -2553 2 -2554 2 -2555 2 -2556 2 -2557 0 -2558 2 -2559 1 -2560 0 -2561 1 -2562 1 -2563 2 -2564 1 -2565 0 -2566 1 -2567 1 -2568 0 -2569 2 -2570 0 -2571 2 -2572 0 -2573 2 -2574 0 -2575 1 -2576 2 -2577 0 -2578 1 -2579 1 -2580 1 -2581 2 -2582 2 -2583 0 -2584 2 -2585 1 -2586 2 -2587 2 -2588 2 -2589 1 -2590 2 -2591 1 -2592 2 -2593 1 -2594 2 -2595 1 -2596 1 -2597 0 -2598 1 -2599 1 -2600 0 -2601 1 -2602 2 -2603 2 -2604 1 -2605 2 -2606 0 -2607 1 -2608 0 -2609 2 -2610 1 -2611 1 -2612 1 -2613 1 -2614 0 -2615 0 -2616 0 -2617 2 -2618 1 -2619 1 -2620 1 -2621 2 -2622 1 -2623 1 -2624 1 -2625 1 -2626 1 -2627 2 -2628 0 -2629 2 -2630 2 -2631 1 -2632 0 -2633 1 -2634 0 -2635 2 -2636 1 -2637 1 -2638 2 -2639 2 -2640 2 -2641 1 -2642 1 -2643 2 -2644 1 -2645 0 -2646 2 -2647 1 -2648 1 -2649 1 -2650 1 -2651 0 -2652 1 -2653 0 -2654 1 -2655 2 -2656 0 -2657 0 -2658 1 -2659 
0 -2660 2 -2661 1 -2662 2 -2663 1 -2664 1 -2665 1 -2666 0 -2667 1 -2668 1 -2669 1 -2670 2 -2671 1 -2672 1 -2673 1 -2674 2 -2675 2 -2676 1 -2677 2 -2678 1 -2679 2 -2680 0 -2681 1 -2682 2 -2683 2 -2684 1 -2685 1 -2686 1 -2687 0 -2688 0 -2689 1 -2690 1 -2691 1 -2692 2 -2693 2 -2694 1 -2695 2 -2696 2 -2697 2 -2698 0 -2699 2 -2700 0 -2701 2 -2702 2 -2703 2 -2704 1 -2705 1 -2706 2 -2707 2 -2708 2 -2709 2 -2710 2 -2711 2 -2712 2 -2713 2 -2714 2 -2715 1 -2716 2 -2717 0 -2718 0 -2719 2 -2720 1 -2721 1 -2722 0 -2723 1 -2724 1 -2725 0 -2726 1 -2727 2 -2728 0 -2729 1 -2730 1 -2731 1 -2732 0 -2733 1 -2734 1 -2735 1 -2736 1 -2737 1 -2738 1 -2739 2 -2740 1 -2741 1 -2742 2 -2743 1 -2744 2 -2745 2 -2746 2 -2747 1 -2748 1 -2749 1 -2750 0 -2751 1 -2752 2 -2753 2 -2754 2 -2755 2 -2756 1 -2757 2 -2758 1 -2759 2 -2760 2 -2761 2 -2762 0 -2763 0 -2764 0 -2765 2 -2766 1 -2767 2 -2768 1 -2769 2 -2770 1 -2771 0 -2772 2 -2773 0 -2774 0 -2775 0 -2776 0 -2777 2 -2778 0 -2779 0 -2780 2 -2781 1 -2782 1 -2783 2 -2784 2 -2785 2 -2786 1 -2787 0 -2788 1 -2789 1 -2790 1 -2791 1 -2792 1 -2793 2 -2794 2 -2795 0 -2796 2 -2797 0 -2798 2 -2799 2 -2800 1 -2801 1 -2802 1 -2803 1 -2804 2 -2805 1 -2806 0 -2807 0 -2808 2 -2809 1 -2810 1 -2811 0 -2812 0 -2813 0 -2814 0 -2815 1 -2816 1 -2817 2 -2818 0 -2819 0 -2820 0 -2821 1 -2822 0 -2823 1 -2824 2 -2825 2 -2826 2 -2827 0 -2828 1 -2829 1 -2830 2 -2831 1 -2832 0 -2833 1 -2834 2 -2835 2 -2836 2 -2837 1 -2838 1 -2839 1 -2840 0 -2841 0 -2842 1 -2843 0 -2844 1 -2845 2 -2846 0 -2847 1 -2848 1 -2849 1 -2850 0 -2851 1 -2852 1 -2853 2 -2854 1 -2855 2 -2856 0 -2857 0 -2858 1 -2859 2 -2860 2 -2861 2 -2862 0 -2863 2 -2864 0 -2865 1 -2866 1 -2867 2 -2868 1 -2869 0 -2870 2 -2871 1 -2872 1 -2873 0 -2874 1 -2875 0 -2876 1 -2877 1 -2878 2 -2879 2 -2880 0 -2881 2 -2882 2 -2883 1 -2884 2 -2885 0 -2886 0 -2887 0 -2888 2 -2889 1 -2890 1 -2891 0 -2892 1 -2893 1 -2894 0 -2895 1 -2896 1 -2897 2 -2898 1 -2899 1 -2900 1 -2901 1 -2902 2 -2903 1 -2904 1 -2905 0 -2906 1 -2907 1 -2908 1 -2909 
0 -2910 2 -2911 1 -2912 0 -2913 1 -2914 1 -2915 2 -2916 2 -2917 0 -2918 2 -2919 1 -2920 0 -2921 2 -2922 2 -2923 0 -2924 2 -2925 0 -2926 2 -2927 2 -2928 2 -2929 1 -2930 2 -2931 2 -2932 1 -2933 0 -2934 2 -2935 1 -2936 0 -2937 0 -2938 0 -2939 1 -2940 1 -2941 2 -2942 1 -2943 1 -2944 0 -2945 1 -2946 1 -2947 1 -2948 1 -2949 0 -2950 1 -2951 2 -2952 0 -2953 2 -2954 0 -2955 2 -2956 2 -2957 2 -2958 1 -2959 1 -2960 1 -2961 0 -2962 2 -2963 2 -2964 2 -2965 2 -2966 2 -2967 0 -2968 0 -2969 2 -2970 2 -2971 2 -2972 1 -2973 1 -2974 1 -2975 1 -2976 1 -2977 2 -2978 0 -2979 2 -2980 2 -2981 2 -2982 2 -2983 0 -2984 1 -2985 0 -2986 0 -2987 1 -2988 2 -2989 2 -2990 0 -2991 1 -2992 2 -2993 1 -2994 1 -2995 2 -2996 0 -2997 2 -2998 2 -2999 2 -3000 2 -3001 1 -3002 2 -3003 0 -3004 2 -3005 1 -3006 0 -3007 0 -3008 1 -3009 0 -3010 1 -3011 0 -3012 2 -3013 1 -3014 0 -3015 1 -3016 1 -3017 1 -3018 2 -3019 1 -3020 1 -3021 0 -3022 1 -3023 1 -3024 0 -3025 1 -3026 0 -3027 1 -3028 0 -3029 2 -3030 1 -3031 1 -3032 1 -3033 2 -3034 2 -3035 2 -3036 2 -3037 2 -3038 1 -3039 0 -3040 1 -3041 2 -3042 0 -3043 2 -3044 2 -3045 1 -3046 0 -3047 2 -3048 1 -3049 1 -3050 1 -3051 1 -3052 2 -3053 1 -3054 2 -3055 2 -3056 2 -3057 1 -3058 1 -3059 0 -3060 2 -3061 1 -3062 2 -3063 0 -3064 1 -3065 1 -3066 2 -3067 1 -3068 2 -3069 0 -3070 1 -3071 1 -3072 1 -3073 2 -3074 2 -3075 2 -3076 0 -3077 1 -3078 1 -3079 0 -3080 2 -3081 1 -3082 1 -3083 0 -3084 2 -3085 1 -3086 1 -3087 0 -3088 0 -3089 1 -3090 0 -3091 1 -3092 2 -3093 0 -3094 1 -3095 2 -3096 1 -3097 0 -3098 0 -3099 2 -3100 0 -3101 1 -3102 0 -3103 1 -3104 2 -3105 2 -3106 0 -3107 1 -3108 0 -3109 1 -3110 2 -3111 2 -3112 0 -3113 2 -3114 1 -3115 2 -3116 1 -3117 2 -3118 0 -3119 0 -3120 0 -3121 2 -3122 0 -3123 2 -3124 1 -3125 2 -3126 2 -3127 2 -3128 1 -3129 0 -3130 1 -3131 1 -3132 1 -3133 1 -3134 1 -3135 0 -3136 2 -3137 1 -3138 1 -3139 1 -3140 0 -3141 1 -3142 1 -3143 0 -3144 1 -3145 0 -3146 2 -3147 0 -3148 0 -3149 2 -3150 2 -3151 1 -3152 2 -3153 1 -3154 2 -3155 0 -3156 0 -3157 2 -3158 1 -3159 
1 -3160 1 -3161 1 -3162 0 -3163 0 -3164 0 -3165 2 -3166 0 -3167 1 -3168 1 -3169 1 -3170 1 -3171 2 -3172 1 -3173 1 -3174 1 -3175 1 -3176 2 -3177 1 -3178 2 -3179 1 -3180 1 -3181 1 -3182 2 -3183 1 -3184 2 -3185 0 -3186 2 -3187 2 -3188 0 -3189 2 -3190 1 -3191 1 -3192 1 -3193 2 -3194 0 -3195 2 -3196 1 -3197 0 -3198 1 -3199 2 -3200 1 -3201 1 -3202 2 -3203 2 -3204 1 -3205 2 -3206 2 -3207 1 -3208 1 -3209 2 -3210 2 -3211 2 -3212 1 -3213 2 -3214 2 -3215 1 -3216 2 -3217 2 -3218 1 -3219 1 -3220 2 -3221 1 -3222 1 -3223 2 -3224 2 -3225 2 -3226 1 -3227 0 -3228 1 -3229 0 -3230 2 -3231 1 -3232 1 -3233 2 -3234 1 -3235 1 -3236 1 -3237 1 -3238 0 -3239 2 -3240 0 -3241 2 -3242 1 -3243 1 -3244 2 -3245 2 -3246 1 -3247 1 -3248 1 -3249 1 -3250 1 -3251 1 -3252 0 -3253 0 -3254 1 -3255 0 -3256 2 -3257 0 -3258 0 -3259 0 -3260 1 -3261 0 -3262 0 -3263 0 -3264 1 -3265 2 -3266 0 -3267 1 -3268 0 -3269 2 -3270 2 -3271 1 -3272 2 -3273 2 -3274 0 -3275 2 -3276 1 -3277 1 -3278 1 -3279 2 -3280 2 -3281 1 -3282 1 -3283 0 -3284 1 -3285 1 -3286 1 -3287 2 -3288 1 -3289 2 -3290 1 -3291 0 -3292 0 -3293 1 -3294 1 -3295 2 -3296 2 -3297 0 -3298 2 -3299 0 -3300 1 -3301 1 -3302 0 -3303 0 -3304 0 -3305 1 -3306 0 -3307 1 -3308 2 -3309 0 -3310 1 -3311 2 -3312 1 -3313 1 -3314 1 -3315 0 -3316 1 -3317 1 -3318 2 -3319 2 -3320 0 -3321 2 -3322 2 -3323 2 -3324 0 -3325 1 -3326 0 -3327 1 -3328 0 -3329 2 -3330 2 -3331 1 -3332 2 -3333 0 -3334 1 -3335 1 -3336 0 -3337 2 -3338 2 -3339 0 -3340 0 -3341 1 -3342 0 -3343 2 -3344 2 -3345 1 -3346 1 -3347 2 -3348 0 -3349 0 -3350 1 -3351 0 -3352 1 -3353 1 -3354 1 -3355 1 -3356 1 -3357 0 -3358 2 -3359 2 -3360 0 -3361 2 -3362 0 -3363 2 -3364 2 -3365 1 -3366 1 -3367 1 -3368 1 -3369 0 -3370 2 -3371 2 -3372 2 -3373 1 -3374 0 -3375 1 -3376 2 -3377 1 -3378 1 -3379 2 -3380 1 -3381 0 -3382 2 -3383 1 -3384 2 -3385 1 -3386 1 -3387 1 -3388 1 -3389 2 -3390 1 -3391 1 -3392 2 -3393 1 -3394 1 -3395 1 -3396 0 -3397 2 -3398 2 -3399 0 -3400 2 -3401 1 -3402 1 -3403 1 -3404 1 -3405 2 -3406 1 -3407 2 -3408 2 -3409 
2 -3410 2 -3411 1 -3412 1 -3413 2 -3414 2 -3415 2 -3416 1 -3417 2 -3418 2 -3419 1 -3420 1 -3421 0 -3422 2 -3423 1 -3424 1 -3425 0 -3426 0 -3427 0 -3428 2 -3429 1 -3430 2 -3431 1 -3432 0 -3433 0 -3434 1 -3435 2 -3436 1 -3437 0 -3438 1 -3439 1 -3440 1 -3441 2 -3442 2 -3443 2 -3444 0 -3445 1 -3446 1 -3447 2 -3448 1 -3449 1 -3450 1 -3451 2 -3452 2 -3453 2 -3454 0 -3455 0 -3456 1 -3457 1 -3458 2 -3459 1 -3460 1 -3461 1 -3462 1 -3463 2 -3464 2 -3465 1 -3466 2 -3467 2 -3468 1 -3469 0 -3470 0 -3471 0 -3472 2 -3473 0 -3474 0 -3475 2 -3476 1 -3477 2 -3478 2 -3479 2 -3480 1 -3481 1 -3482 1 -3483 2 -3484 2 -3485 2 -3486 1 -3487 0 -3488 1 -3489 2 -3490 0 -3491 0 -3492 2 -3493 1 -3494 1 -3495 1 -3496 2 -3497 1 -3498 1 -3499 1 -3500 2 -3501 1 -3502 0 -3503 0 -3504 1 -3505 2 -3506 0 -3507 1 -3508 2 -3509 2 -3510 0 -3511 0 -3512 1 -3513 1 -3514 2 -3515 2 -3516 1 -3517 1 -3518 2 -3519 1 -3520 0 -3521 1 -3522 2 -3523 2 -3524 2 -3525 2 -3526 0 -3527 1 -3528 1 -3529 1 -3530 1 -3531 1 -3532 1 -3533 0 -3534 0 -3535 0 -3536 1 -3537 1 -3538 0 -3539 0 -3540 2 -3541 1 -3542 1 -3543 2 -3544 2 -3545 2 -3546 2 -3547 1 -3548 1 -3549 1 -3550 1 -3551 1 -3552 1 -3553 2 -3554 1 -3555 2 -3556 2 -3557 1 -3558 0 -3559 2 -3560 2 -3561 2 -3562 2 -3563 0 -3564 1 -3565 1 -3566 0 -3567 1 -3568 2 -3569 0 -3570 1 -3571 1 -3572 2 -3573 2 -3574 2 -3575 0 -3576 2 -3577 0 -3578 0 -3579 2 -3580 0 -3581 2 -3582 2 -3583 1 -3584 2 -3585 1 -3586 2 -3587 2 -3588 0 -3589 1 -3590 2 -3591 1 -3592 2 -3593 2 -3594 0 -3595 0 -3596 2 -3597 2 -3598 2 -3599 0 -3600 2 -3601 2 -3602 0 -3603 1 -3604 1 -3605 2 -3606 1 -3607 2 -3608 1 -3609 1 -3610 1 -3611 2 -3612 0 -3613 2 -3614 1 -3615 1 -3616 2 -3617 0 -3618 1 -3619 2 -3620 0 -3621 1 -3622 0 -3623 1 -3624 1 -3625 2 -3626 1 -3627 0 -3628 1 -3629 2 -3630 2 -3631 2 -3632 1 -3633 0 -3634 0 -3635 2 -3636 1 -3637 1 -3638 0 -3639 0 -3640 1 -3641 2 -3642 0 -3643 1 -3644 1 -3645 2 -3646 0 -3647 2 -3648 0 -3649 0 -3650 1 -3651 0 -3652 1 -3653 1 -3654 2 -3655 1 -3656 2 -3657 0 -3658 1 -3659 
0 -3660 2 -3661 1 -3662 1 -3663 2 -3664 1 -3665 0 -3666 1 -3667 1 -3668 1 -3669 0 -3670 2 -3671 1 -3672 2 -3673 2 -3674 1 -3675 1 -3676 2 -3677 2 -3678 0 -3679 2 -3680 0 -3681 1 -3682 1 -3683 2 -3684 1 -3685 1 -3686 2 -3687 1 -3688 1 -3689 1 -3690 1 -3691 1 -3692 2 -3693 2 -3694 1 -3695 2 -3696 1 -3697 1 -3698 1 -3699 1 -3700 0 -3701 2 -3702 1 -3703 0 -3704 0 -3705 0 -3706 1 -3707 1 -3708 1 -3709 0 -3710 2 -3711 1 -3712 1 -3713 1 -3714 1 -3715 2 -3716 2 -3717 1 -3718 0 -3719 0 -3720 0 -3721 2 -3722 1 -3723 2 -3724 1 -3725 1 -3726 2 -3727 1 -3728 2 -3729 2 -3730 2 -3731 1 -3732 1 -3733 1 -3734 1 -3735 2 -3736 1 -3737 0 -3738 2 -3739 1 -3740 2 -3741 1 -3742 0 -3743 2 -3744 2 -3745 2 -3746 1 -3747 2 -3748 2 -3749 1 -3750 1 -3751 2 -3752 0 -3753 0 -3754 2 -3755 1 -3756 0 -3757 1 -3758 1 -3759 2 -3760 0 -3761 0 -3762 1 -3763 1 -3764 2 -3765 1 -3766 1 -3767 1 -3768 1 -3769 1 -3770 0 -3771 2 -3772 1 -3773 2 -3774 0 -3775 2 -3776 1 -3777 1 -3778 1 -3779 2 -3780 1 -3781 1 -3782 2 -3783 0 -3784 0 -3785 2 -3786 0 -3787 1 -3788 1 -3789 1 -3790 2 -3791 0 -3792 1 -3793 2 -3794 1 -3795 0 -3796 1 -3797 1 -3798 1 -3799 0 -3800 2 -3801 1 -3802 2 -3803 1 -3804 1 -3805 1 -3806 1 -3807 1 -3808 0 -3809 2 -3810 1 -3811 1 -3812 1 -3813 0 -3814 1 -3815 1 -3816 1 -3817 2 -3818 1 -3819 1 -3820 1 -3821 2 -3822 2 -3823 1 -3824 2 -3825 2 -3826 0 -3827 2 -3828 2 -3829 1 -3830 0 -3831 2 -3832 1 -3833 1 -3834 0 -3835 1 -3836 0 -3837 0 -3838 0 -3839 0 -3840 2 -3841 1 -3842 0 -3843 1 -3844 0 -3845 1 -3846 1 -3847 0 -3848 2 -3849 0 -3850 2 -3851 1 -3852 1 -3853 2 -3854 2 -3855 1 -3856 1 -3857 1 -3858 1 -3859 1 -3860 0 -3861 1 -3862 0 -3863 1 -3864 1 -3865 0 -3866 0 -3867 0 -3868 0 -3869 1 -3870 1 -3871 2 -3872 1 -3873 1 -3874 1 -3875 0 -3876 0 -3877 0 -3878 2 -3879 1 -3880 2 -3881 0 -3882 1 -3883 1 -3884 1 -3885 1 -3886 0 -3887 2 -3888 1 -3889 1 -3890 1 -3891 2 -3892 1 -3893 1 -3894 2 -3895 1 -3896 2 -3897 1 -3898 1 -3899 2 -3900 2 -3901 2 -3902 2 -3903 2 -3904 1 -3905 2 -3906 2 -3907 2 -3908 1 -3909 
2 -3910 2 -3911 0 -3912 1 -3913 2 -3914 1 -3915 1 -3916 0 -3917 1 -3918 2 -3919 2 -3920 2 -3921 1 -3922 2 -3923 1 -3924 2 -3925 0 -3926 1 -3927 1 -3928 1 -3929 2 -3930 2 -3931 2 -3932 1 -3933 2 -3934 2 -3935 0 -3936 0 -3937 0 -3938 2 -3939 2 -3940 1 -3941 1 -3942 2 -3943 0 -3944 1 -3945 0 -3946 2 -3947 0 -3948 1 -3949 1 -3950 1 -3951 0 -3952 1 -3953 0 -3954 1 -3955 0 -3956 2 -3957 0 -3958 1 -3959 2 -3960 2 -3961 2 -3962 1 -3963 2 -3964 1 -3965 0 -3966 2 -3967 2 -3968 1 -3969 1 -3970 0 -3971 2 -3972 2 -3973 0 -3974 2 -3975 0 -3976 1 -3977 1 -3978 0 -3979 0 -3980 0 -3981 0 -3982 0 -3983 1 -3984 1 -3985 2 -3986 0 -3987 1 -3988 0 -3989 1 -3990 0 -3991 0 -3992 2 -3993 1 -3994 2 -3995 1 -3996 0 -3997 0 -3998 2 -3999 2 -4000 0 -4001 1 -4002 1 -4003 0 -4004 1 -4005 1 -4006 1 -4007 2 -4008 2 -4009 2 -4010 1 -4011 1 -4012 2 -4013 2 -4014 0 -4015 0 -4016 0 -4017 2 -4018 1 -4019 0 -4020 0 -4021 1 -4022 1 -4023 0 -4024 0 -4025 1 -4026 0 -4027 0 -4028 0 -4029 1 -4030 0 -4031 2 -4032 1 -4033 1 -4034 1 -4035 1 -4036 0 -4037 1 -4038 2 -4039 1 -4040 1 -4041 1 -4042 0 -4043 2 -4044 1 -4045 1 -4046 2 -4047 1 -4048 1 -4049 1 -4050 1 -4051 1 -4052 2 -4053 1 -4054 0 -4055 2 -4056 2 -4057 2 -4058 2 -4059 1 -4060 0 -4061 2 -4062 1 -4063 0 -4064 0 -4065 0 -4066 1 -4067 1 -4068 2 -4069 1 -4070 1 -4071 1 -4072 0 -4073 1 -4074 2 -4075 1 -4076 0 -4077 2 -4078 1 -4079 1 -4080 1 -4081 1 -4082 1 -4083 0 -4084 1 -4085 1 -4086 2 -4087 2 -4088 0 -4089 1 -4090 0 -4091 2 -4092 2 -4093 2 -4094 1 -4095 2 -4096 1 -4097 0 -4098 2 -4099 1 -4100 0 -4101 1 -4102 1 -4103 2 -4104 1 -4105 1 -4106 1 -4107 0 -4108 0 -4109 2 -4110 0 -4111 0 -4112 1 -4113 2 -4114 1 -4115 2 -4116 0 -4117 1 -4118 2 -4119 0 -4120 0 -4121 0 -4122 2 -4123 1 -4124 0 -4125 0 -4126 0 -4127 1 -4128 0 -4129 0 -4130 2 -4131 1 -4132 1 -4133 1 -4134 1 -4135 1 -4136 2 -4137 1 -4138 2 -4139 1 -4140 0 -4141 1 -4142 2 -4143 2 -4144 1 -4145 1 -4146 1 -4147 0 -4148 2 -4149 1 -4150 2 -4151 2 -4152 1 -4153 0 -4154 0 -4155 1 -4156 2 -4157 0 -4158 0 -4159 
0 -4160 0 -4161 2 -4162 1 -4163 2 -4164 1 -4165 1 -4166 2 -4167 0 -4168 0 -4169 0 -4170 1 -4171 1 -4172 2 -4173 2 -4174 2 -4175 0 -4176 1 -4177 2 -4178 2 -4179 1 -4180 0 -4181 2 -4182 2 -4183 2 -4184 1 -4185 2 -4186 2 -4187 1 -4188 2 -4189 1 -4190 0 -4191 1 -4192 2 -4193 2 -4194 2 -4195 0 -4196 1 -4197 2 -4198 2 -4199 1 -4200 1 -4201 1 -4202 2 -4203 1 -4204 0 -4205 0 -4206 2 -4207 1 -4208 1 -4209 0 -4210 1 -4211 0 -4212 2 -4213 2 -4214 0 -4215 0 -4216 1 -4217 1 -4218 0 -4219 0 -4220 1 -4221 0 -4222 1 -4223 0 -4224 2 -4225 2 -4226 2 -4227 0 -4228 2 -4229 2 -4230 0 -4231 1 -4232 0 -4233 1 -4234 0 -4235 1 -4236 2 -4237 2 -4238 1 -4239 1 -4240 1 -4241 2 -4242 1 -4243 1 -4244 1 -4245 2 -4246 1 -4247 1 -4248 2 -4249 1 -4250 2 -4251 0 -4252 1 -4253 2 -4254 1 -4255 1 -4256 2 -4257 2 -4258 0 -4259 1 -4260 0 -4261 1 -4262 1 -4263 2 -4264 2 -4265 0 -4266 0 -4267 0 -4268 2 -4269 2 -4270 1 -4271 2 -4272 1 -4273 2 -4274 1 -4275 1 -4276 1 -4277 2 -4278 1 -4279 1 -4280 1 -4281 1 -4282 0 -4283 1 -4284 0 -4285 0 -4286 2 -4287 1 -4288 1 -4289 1 -4290 1 -4291 0 -4292 1 -4293 2 -4294 1 -4295 0 -4296 0 -4297 1 -4298 1 -4299 0 -4300 1 -4301 1 -4302 1 -4303 0 -4304 2 -4305 1 -4306 1 -4307 2 -4308 2 -4309 0 -4310 0 -4311 0 -4312 0 -4313 2 -4314 0 -4315 1 -4316 0 -4317 2 -4318 1 -4319 2 -4320 0 -4321 1 -4322 1 -4323 2 -4324 0 -4325 1 -4326 1 -4327 2 -4328 0 -4329 0 -4330 2 -4331 2 -4332 2 -4333 2 -4334 1 -4335 1 -4336 2 -4337 2 -4338 1 -4339 0 -4340 1 -4341 1 -4342 1 -4343 2 -4344 1 -4345 0 -4346 0 -4347 1 -4348 0 -4349 1 -4350 0 -4351 2 -4352 1 -4353 2 -4354 2 -4355 0 -4356 2 -4357 1 -4358 1 -4359 1 -4360 1 -4361 0 -4362 1 -4363 1 -4364 1 -4365 2 -4366 1 -4367 2 -4368 1 -4369 0 -4370 1 -4371 1 -4372 0 -4373 1 -4374 1 -4375 1 -4376 1 -4377 2 -4378 0 -4379 2 -4380 1 -4381 2 -4382 2 -4383 1 -4384 0 -4385 1 -4386 1 -4387 1 -4388 2 -4389 0 -4390 0 -4391 1 -4392 1 -4393 2 -4394 1 -4395 0 -4396 0 -4397 2 -4398 1 -4399 0 -4400 2 -4401 1 -4402 1 -4403 1 -4404 0 -4405 2 -4406 1 -4407 2 -4408 2 -4409 
1 -4410 2 -4411 1 -4412 2 -4413 2 -4414 2 -4415 2 -4416 1 -4417 0 -4418 1 -4419 1 -4420 1 -4421 1 -4422 0 -4423 2 -4424 1 -4425 1 -4426 2 -4427 0 -4428 1 -4429 2 -4430 1 -4431 0 -4432 1 -4433 1 -4434 1 -4435 1 -4436 1 -4437 0 -4438 2 -4439 0 -4440 2 -4441 2 -4442 0 -4443 2 -4444 2 -4445 0 -4446 1 -4447 1 -4448 2 -4449 1 -4450 2 -4451 1 -4452 2 -4453 1 -4454 2 -4455 2 -4456 2 -4457 1 -4458 1 -4459 2 -4460 0 -4461 1 -4462 1 -4463 0 -4464 2 -4465 0 -4466 1 -4467 1 -4468 2 -4469 2 -4470 2 -4471 0 -4472 0 -4473 2 -4474 2 -4475 2 -4476 0 -4477 1 -4478 0 -4479 1 -4480 1 -4481 2 -4482 1 -4483 1 -4484 2 -4485 0 -4486 2 -4487 2 -4488 1 -4489 0 -4490 2 -4491 0 -4492 1 -4493 2 -4494 1 -4495 1 -4496 0 -4497 0 -4498 2 -4499 1 -4500 1 -4501 0 -4502 0 -4503 1 -4504 1 -4505 2 -4506 1 -4507 1 -4508 1 -4509 1 -4510 2 -4511 0 -4512 1 -4513 2 -4514 0 -4515 2 -4516 2 -4517 1 -4518 1 -4519 0 -4520 1 -4521 2 -4522 1 -4523 2 -4524 0 -4525 0 -4526 2 -4527 2 -4528 1 -4529 0 -4530 0 -4531 2 -4532 2 -4533 2 -4534 1 -4535 0 -4536 0 -4537 2 -4538 1 -4539 2 -4540 0 -4541 1 -4542 2 -4543 1 -4544 2 -4545 1 -4546 2 -4547 1 -4548 1 -4549 0 -4550 2 -4551 2 -4552 0 -4553 1 -4554 0 -4555 0 -4556 1 -4557 0 -4558 1 -4559 1 -4560 1 -4561 1 -4562 1 -4563 2 -4564 2 -4565 0 -4566 1 -4567 2 -4568 2 -4569 1 -4570 2 -4571 2 -4572 1 -4573 1 -4574 1 -4575 2 -4576 0 -4577 1 -4578 2 -4579 1 -4580 1 -4581 2 -4582 1 -4583 0 -4584 1 -4585 0 -4586 0 -4587 1 -4588 0 -4589 1 -4590 1 -4591 1 -4592 0 -4593 0 -4594 2 -4595 0 -4596 2 -4597 0 -4598 2 -4599 2 -4600 1 -4601 1 -4602 1 -4603 2 -4604 1 -4605 1 -4606 0 -4607 1 -4608 0 -4609 1 -4610 1 -4611 1 -4612 1 -4613 1 -4614 1 -4615 2 -4616 2 -4617 1 -4618 0 -4619 0 -4620 2 -4621 1 -4622 0 -4623 1 -4624 0 -4625 1 -4626 2 -4627 2 -4628 1 -4629 1 -4630 2 -4631 1 -4632 2 -4633 1 -4634 1 -4635 2 -4636 1 -4637 1 -4638 2 -4639 0 -4640 2 -4641 0 -4642 2 -4643 0 -4644 1 -4645 1 -4646 2 -4647 2 -4648 0 -4649 1 -4650 2 -4651 0 -4652 2 -4653 1 -4654 1 -4655 0 -4656 1 -4657 0 -4658 2 -4659 
0 -4660 2 -4661 0 -4662 0 -4663 0 -4664 2 -4665 1 -4666 1 -4667 2 -4668 2 -4669 2 -4670 0 -4671 2 -4672 2 -4673 0 -4674 0 -4675 2 -4676 1 -4677 2 -4678 2 -4679 1 -4680 1 -4681 0 -4682 0 -4683 2 -4684 2 -4685 0 -4686 2 -4687 2 -4688 1 -4689 1 -4690 1 -4691 1 -4692 1 -4693 0 -4694 1 -4695 2 -4696 1 -4697 1 -4698 1 -4699 1 -4700 0 -4701 1 -4702 2 -4703 0 -4704 2 -4705 0 -4706 0 -4707 1 -4708 1 -4709 1 -4710 2 -4711 2 -4712 1 -4713 1 -4714 1 -4715 2 -4716 0 -4717 2 -4718 1 -4719 1 -4720 0 -4721 1 -4722 1 -4723 0 -4724 1 -4725 1 -4726 0 -4727 0 -4728 1 -4729 0 -4730 1 -4731 0 -4732 1 -4733 0 -4734 0 -4735 2 -4736 2 -4737 0 -4738 1 -4739 1 -4740 2 -4741 2 -4742 2 -4743 2 -4744 2 -4745 2 -4746 2 -4747 1 -4748 0 -4749 0 -4750 1 -4751 2 -4752 1 -4753 1 -4754 0 -4755 0 -4756 0 -4757 0 -4758 2 -4759 2 -4760 2 -4761 2 -4762 0 -4763 2 -4764 0 -4765 1 -4766 2 -4767 0 -4768 1 -4769 1 -4770 2 -4771 1 -4772 1 -4773 1 -4774 2 -4775 1 -4776 1 -4777 0 -4778 0 -4779 1 -4780 2 -4781 2 -4782 0 -4783 2 -4784 1 -4785 0 -4786 0 -4787 0 -4788 1 -4789 1 -4790 1 -4791 1 -4792 1 -4793 1 -4794 0 -4795 2 -4796 1 -4797 1 -4798 2 -4799 2 -4800 2 -4801 1 -4802 1 -4803 1 -4804 1 -4805 1 -4806 1 -4807 1 -4808 2 -4809 0 -4810 1 -4811 1 -4812 1 -4813 2 -4814 1 -4815 1 -4816 1 -4817 2 -4818 0 -4819 0 -4820 0 -4821 1 -4822 0 -4823 2 -4824 1 -4825 1 -4826 1 -4827 1 -4828 1 -4829 1 -4830 1 -4831 0 -4832 2 -4833 1 -4834 1 -4835 2 -4836 2 -4837 2 -4838 1 -4839 2 -4840 0 -4841 0 -4842 1 -4843 1 -4844 1 -4845 1 -4846 2 -4847 1 -4848 0 -4849 1 -4850 1 -4851 0 -4852 1 -4853 1 -4854 2 -4855 1 -4856 2 -4857 2 -4858 1 -4859 1 -4860 2 -4861 0 -4862 1 -4863 1 -4864 0 -4865 1 -4866 2 -4867 0 -4868 1 -4869 0 -4870 2 -4871 0 -4872 2 -4873 2 -4874 2 -4875 1 -4876 2 -4877 2 -4878 1 -4879 1 -4880 2 -4881 0 -4882 1 -4883 0 -4884 2 -4885 0 -4886 2 -4887 2 -4888 1 -4889 0 -4890 1 -4891 1 -4892 2 -4893 1 -4894 2 -4895 1 -4896 0 -4897 1 -4898 2 -4899 1 -4900 1 -4901 2 -4902 1 -4903 1 -4904 1 -4905 1 -4906 0 -4907 2 -4908 0 -4909 
2 -4910 1 -4911 1 -4912 1 -4913 1 -4914 1 -4915 0 -4916 1 -4917 1 -4918 1 -4919 1 -4920 1 -4921 2 -4922 2 -4923 0 -4924 2 -4925 1 -4926 2 -4927 2 -4928 2 -4929 0 -4930 1 -4931 1 -4932 1 -4933 2 -4934 0 -4935 0 -4936 1 -4937 1 -4938 0 -4939 1 -4940 2 -4941 0 -4942 2 -4943 2 -4944 0 -4945 2 -4946 1 -4947 1 -4948 2 -4949 2 -4950 1 -4951 1 -4952 0 -4953 1 -4954 1 -4955 2 -4956 1 -4957 1 -4958 2 -4959 1 -4960 1 -4961 1 -4962 2 -4963 1 -4964 1 -4965 0 -4966 0 -4967 2 -4968 0 -4969 2 -4970 1 -4971 1 -4972 1 -4973 0 -4974 0 -4975 2 -4976 1 -4977 2 -4978 1 -4979 1 -4980 1 -4981 2 -4982 1 -4983 1 -4984 2 -4985 2 -4986 0 -4987 1 -4988 1 -4989 1 -4990 2 -4991 0 -4992 2 -4993 2 -4994 0 -4995 2 -4996 1 -4997 1 -4998 2 -4999 2 -5000 0 -5001 2 -5002 1 -5003 2 -5004 2 -5005 2 -5006 1 -5007 1 -5008 1 -5009 0 -5010 0 -5011 0 -5012 1 -5013 1 -5014 0 -5015 1 -5016 1 -5017 1 -5018 1 -5019 2 -5020 1 -5021 1 -5022 2 -5023 0 -5024 2 -5025 2 -5026 0 -5027 1 -5028 1 -5029 1 -5030 1 -5031 1 -5032 0 -5033 2 -5034 0 -5035 0 -5036 1 -5037 2 -5038 1 -5039 1 -5040 2 -5041 2 -5042 1 -5043 1 -5044 2 -5045 1 -5046 1 -5047 1 -5048 1 -5049 1 -5050 0 -5051 1 -5052 1 -5053 2 -5054 1 -5055 1 -5056 1 -5057 2 -5058 0 -5059 1 -5060 2 -5061 2 -5062 1 -5063 2 -5064 0 -5065 2 -5066 2 -5067 2 -5068 1 -5069 2 -5070 2 -5071 2 -5072 1 -5073 1 -5074 1 -5075 2 -5076 2 -5077 1 -5078 0 -5079 1 -5080 0 -5081 2 -5082 1 -5083 1 -5084 0 -5085 2 -5086 2 -5087 0 -5088 2 -5089 1 -5090 2 -5091 2 -5092 0 -5093 2 -5094 1 -5095 2 -5096 1 -5097 1 -5098 1 -5099 2 -5100 2 -5101 2 -5102 2 -5103 2 -5104 1 -5105 1 -5106 2 -5107 1 -5108 2 -5109 1 -5110 1 -5111 1 -5112 2 -5113 2 -5114 1 -5115 2 -5116 1 -5117 0 -5118 2 -5119 0 -5120 0 -5121 1 -5122 2 -5123 1 -5124 2 -5125 2 -5126 2 -5127 2 -5128 2 -5129 2 -5130 2 -5131 2 -5132 1 -5133 1 -5134 1 -5135 0 -5136 1 -5137 1 -5138 1 -5139 2 -5140 1 -5141 2 -5142 2 -5143 1 -5144 2 -5145 1 -5146 1 -5147 2 -5148 1 -5149 1 -5150 0 -5151 2 -5152 0 -5153 2 -5154 1 -5155 1 -5156 2 -5157 0 -5158 1 -5159 
2 -5160 2 -5161 2 -5162 0 -5163 1 -5164 1 -5165 0 -5166 0 -5167 2 -5168 1 -5169 1 -5170 0 -5171 1 -5172 1 -5173 1 -5174 1 -5175 1 -5176 0 -5177 1 -5178 2 -5179 1 -5180 0 -5181 2 -5182 2 -5183 1 -5184 1 -5185 2 -5186 0 -5187 1 -5188 0 -5189 1 -5190 0 -5191 2 -5192 1 -5193 1 -5194 2 -5195 2 -5196 0 -5197 1 -5198 1 -5199 1 -5200 0 -5201 2 -5202 1 -5203 2 -5204 1 -5205 2 -5206 1 -5207 2 -5208 2 -5209 2 -5210 1 -5211 0 -5212 0 -5213 1 -5214 0 -5215 2 -5216 0 -5217 1 -5218 1 -5219 0 -5220 1 -5221 1 -5222 1 -5223 1 -5224 2 -5225 1 -5226 2 -5227 1 -5228 1 -5229 2 -5230 1 -5231 1 -5232 1 -5233 0 -5234 2 -5235 0 -5236 1 -5237 1 -5238 1 -5239 1 -5240 2 -5241 0 -5242 0 -5243 1 -5244 0 -5245 1 -5246 2 -5247 0 -5248 2 -5249 1 -5250 0 -5251 1 -5252 0 -5253 1 -5254 2 -5255 1 -5256 2 -5257 2 -5258 1 -5259 1 -5260 1 -5261 1 -5262 2 -5263 2 -5264 2 -5265 2 -5266 1 -5267 1 -5268 0 -5269 2 -5270 2 -5271 1 -5272 2 -5273 1 -5274 2 -5275 2 -5276 1 -5277 2 -5278 2 -5279 2 -5280 0 -5281 1 -5282 0 -5283 2 -5284 1 -5285 2 -5286 2 -5287 1 -5288 0 -5289 1 -5290 1 -5291 0 -5292 2 -5293 2 -5294 1 -5295 0 -5296 2 -5297 1 -5298 1 -5299 2 -5300 0 -5301 1 -5302 1 -5303 1 -5304 1 -5305 1 -5306 1 -5307 1 -5308 0 -5309 0 -5310 0 -5311 0 -5312 2 -5313 2 -5314 2 -5315 1 -5316 0 -5317 1 -5318 1 -5319 2 -5320 1 -5321 2 -5322 2 -5323 0 -5324 1 -5325 1 -5326 1 -5327 1 -5328 2 -5329 2 -5330 0 -5331 2 -5332 1 -5333 1 -5334 0 -5335 2 -5336 2 -5337 1 -5338 1 -5339 1 -5340 0 -5341 0 -5342 1 -5343 1 -5344 1 -5345 1 -5346 1 -5347 0 -5348 0 -5349 2 -5350 1 -5351 0 -5352 1 -5353 2 -5354 1 -5355 1 -5356 2 -5357 1 -5358 1 -5359 1 -5360 0 -5361 2 -5362 2 -5363 2 -5364 0 -5365 1 -5366 2 -5367 0 -5368 2 -5369 0 -5370 0 -5371 1 -5372 2 -5373 1 -5374 0 -5375 2 -5376 1 -5377 2 -5378 0 -5379 2 -5380 1 -5381 1 -5382 0 -5383 1 -5384 0 -5385 1 -5386 0 -5387 0 -5388 2 -5389 2 -5390 1 -5391 1 -5392 1 -5393 2 -5394 2 -5395 1 -5396 2 -5397 2 -5398 1 -5399 0 -5400 0 -5401 1 -5402 2 -5403 0 -5404 2 -5405 2 -5406 1 -5407 0 -5408 1 -5409 
1 -5410 1 -5411 2 -5412 2 -5413 1 -5414 1 -5415 1 -5416 1 -5417 0 -5418 1 -5419 0 -5420 1 -5421 1 -5422 0 -5423 0 -5424 1 -5425 1 -5426 1 -5427 0 -5428 1 -5429 2 -5430 1 -5431 2 -5432 0 -5433 1 -5434 2 -5435 2 -5436 0 -5437 0 -5438 1 -5439 2 -5440 2 -5441 1 -5442 1 -5443 1 -5444 0 -5445 0 -5446 1 -5447 1 -5448 0 -5449 1 -5450 2 -5451 2 -5452 2 -5453 2 -5454 1 -5455 1 -5456 1 -5457 0 -5458 1 -5459 1 -5460 1 -5461 0 -5462 0 -5463 1 -5464 1 -5465 1 -5466 1 -5467 2 -5468 1 -5469 1 -5470 0 -5471 1 -5472 1 -5473 2 -5474 2 -5475 0 -5476 0 -5477 1 -5478 1 -5479 2 -5480 1 -5481 1 -5482 1 -5483 2 -5484 1 -5485 1 -5486 0 -5487 2 -5488 0 -5489 0 -5490 1 -5491 0 -5492 1 -5493 2 -5494 1 -5495 1 -5496 2 -5497 2 -5498 2 -5499 1 -5500 2 -5501 0 -5502 0 -5503 2 -5504 1 -5505 0 -5506 2 -5507 2 -5508 0 -5509 2 -5510 2 -5511 2 -5512 1 -5513 1 -5514 1 -5515 2 -5516 2 -5517 1 -5518 1 -5519 1 -5520 0 -5521 1 -5522 1 -5523 0 -5524 2 -5525 0 -5526 1 -5527 1 -5528 1 -5529 0 -5530 0 -5531 1 -5532 1 -5533 0 -5534 2 -5535 0 -5536 2 -5537 1 -5538 2 -5539 1 -5540 0 -5541 1 -5542 1 -5543 2 -5544 2 -5545 1 -5546 1 -5547 1 -5548 1 -5549 1 -5550 2 -5551 1 -5552 1 -5553 1 -5554 1 -5555 1 -5556 1 -5557 1 -5558 0 -5559 2 -5560 2 -5561 2 -5562 0 -5563 2 -5564 1 -5565 2 -5566 2 -5567 2 -5568 1 -5569 2 -5570 2 -5571 1 -5572 0 -5573 1 -5574 2 -5575 2 -5576 1 -5577 2 -5578 1 -5579 2 -5580 2 -5581 1 -5582 1 -5583 0 -5584 1 -5585 0 -5586 1 -5587 2 -5588 0 -5589 1 -5590 0 -5591 1 -5592 2 -5593 1 -5594 2 -5595 2 -5596 1 -5597 1 -5598 1 -5599 0 -5600 1 -5601 1 -5602 2 -5603 1 -5604 1 -5605 0 -5606 1 -5607 2 -5608 2 -5609 2 -5610 1 -5611 2 -5612 0 -5613 1 -5614 1 -5615 0 -5616 2 -5617 0 -5618 2 -5619 1 -5620 2 -5621 2 -5622 0 -5623 2 -5624 1 -5625 1 -5626 1 -5627 2 -5628 1 -5629 2 -5630 0 -5631 1 -5632 1 -5633 1 -5634 2 -5635 0 -5636 1 -5637 0 -5638 1 -5639 2 -5640 1 -5641 1 -5642 1 -5643 2 -5644 2 -5645 1 -5646 2 -5647 1 -5648 1 -5649 2 -5650 2 -5651 0 -5652 2 -5653 1 -5654 0 -5655 1 -5656 0 -5657 2 -5658 2 -5659 
2 -5660 0 -5661 2 -5662 2 -5663 0 -5664 2 -5665 1 -5666 2 -5667 1 -5668 0 -5669 1 -5670 0 -5671 1 -5672 1 -5673 2 -5674 1 -5675 0 -5676 2 -5677 2 -5678 1 -5679 1 -5680 1 -5681 0 -5682 0 -5683 0 -5684 0 -5685 0 -5686 0 -5687 0 -5688 0 -5689 2 -5690 1 -5691 1 -5692 0 -5693 0 -5694 2 -5695 2 -5696 0 -5697 1 -5698 2 -5699 1 -5700 0 -5701 1 -5702 0 -5703 2 -5704 2 -5705 1 -5706 0 -5707 1 -5708 1 -5709 2 -5710 2 -5711 2 -5712 2 -5713 1 -5714 2 -5715 2 -5716 0 -5717 2 -5718 1 -5719 1 -5720 2 -5721 2 -5722 0 -5723 0 -5724 2 -5725 1 -5726 0 -5727 2 -5728 0 -5729 0 -5730 1 -5731 1 -5732 1 -5733 2 -5734 0 -5735 0 -5736 1 -5737 0 -5738 0 -5739 1 -5740 1 -5741 0 -5742 2 -5743 2 -5744 2 -5745 1 -5746 0 -5747 2 -5748 1 -5749 0 -5750 1 -5751 2 -5752 1 -5753 1 -5754 2 -5755 0 -5756 2 -5757 1 -5758 1 -5759 0 -5760 2 -5761 0 -5762 2 -5763 1 -5764 1 -5765 1 -5766 1 -5767 2 -5768 2 -5769 0 -5770 1 -5771 2 -5772 0 -5773 2 -5774 1 -5775 2 -5776 0 -5777 1 -5778 1 -5779 2 -5780 1 -5781 1 -5782 1 -5783 0 -5784 1 -5785 1 -5786 2 -5787 2 -5788 1 -5789 1 -5790 1 -5791 2 -5792 0 -5793 0 -5794 0 -5795 1 -5796 2 -5797 1 -5798 0 -5799 2 -5800 2 -5801 1 -5802 1 -5803 1 -5804 1 -5805 1 -5806 1 -5807 0 -5808 2 -5809 2 -5810 1 -5811 2 -5812 2 -5813 2 -5814 2 -5815 0 -5816 2 -5817 0 -5818 1 -5819 2 -5820 1 -5821 1 -5822 1 -5823 2 -5824 2 -5825 1 -5826 0 -5827 0 -5828 1 -5829 1 -5830 1 -5831 2 -5832 1 -5833 2 -5834 0 -5835 2 -5836 1 -5837 0 -5838 1 -5839 2 -5840 0 -5841 2 -5842 2 -5843 2 -5844 1 -5845 2 -5846 0 -5847 2 -5848 1 -5849 0 -5850 2 -5851 2 -5852 2 -5853 1 -5854 2 -5855 2 -5856 1 -5857 2 -5858 1 -5859 2 -5860 0 -5861 1 -5862 1 -5863 1 -5864 1 -5865 1 -5866 0 -5867 0 -5868 0 -5869 1 -5870 1 -5871 1 -5872 1 -5873 0 -5874 2 -5875 1 -5876 0 -5877 2 -5878 1 -5879 1 -5880 0 -5881 1 -5882 2 -5883 0 -5884 2 -5885 2 -5886 1 -5887 0 -5888 1 -5889 1 -5890 0 -5891 0 -5892 1 -5893 1 -5894 1 -5895 1 -5896 2 -5897 1 -5898 1 -5899 2 -5900 1 -5901 0 -5902 1 -5903 2 -5904 2 -5905 0 -5906 1 -5907 2 -5908 2 -5909 
2 -5910 2 -5911 2 -5912 2 -5913 1 -5914 1 -5915 2 -5916 2 -5917 2 -5918 1 -5919 1 -5920 2 -5921 2 -5922 2 -5923 0 -5924 1 -5925 2 -5926 1 -5927 1 -5928 0 -5929 1 -5930 1 -5931 1 -5932 1 -5933 1 -5934 1 -5935 1 -5936 1 -5937 2 -5938 1 -5939 1 -5940 1 -5941 0 -5942 2 -5943 1 -5944 0 -5945 0 -5946 1 -5947 1 -5948 2 -5949 2 -5950 2 -5951 2 -5952 1 -5953 0 -5954 1 -5955 1 -5956 1 -5957 0 -5958 0 -5959 2 -5960 2 -5961 1 -5962 1 -5963 1 -5964 2 -5965 2 -5966 1 -5967 0 -5968 2 -5969 2 -5970 2 -5971 1 -5972 2 -5973 1 -5974 0 -5975 1 -5976 1 -5977 0 -5978 1 -5979 2 -5980 1 -5981 2 -5982 0 -5983 2 -5984 2 -5985 0 -5986 1 -5987 2 -5988 2 -5989 2 -5990 1 -5991 1 -5992 0 -5993 0 -5994 1 -5995 2 -5996 1 -5997 1 -5998 2 -5999 2 -6000 2 -6001 2 -6002 1 -6003 1 -6004 1 -6005 2 -6006 0 -6007 2 -6008 0 -6009 2 -6010 1 -6011 1 -6012 1 -6013 2 -6014 1 -6015 1 -6016 1 -6017 1 -6018 2 -6019 1 -6020 1 -6021 2 -6022 1 -6023 0 -6024 1 -6025 0 -6026 1 -6027 2 -6028 1 -6029 0 -6030 2 -6031 1 -6032 2 -6033 1 -6034 1 -6035 1 -6036 1 -6037 1 -6038 2 -6039 2 -6040 1 -6041 2 -6042 1 -6043 1 -6044 1 -6045 2 -6046 2 -6047 1 -6048 1 -6049 2 -6050 2 -6051 0 -6052 1 -6053 1 -6054 0 -6055 2 -6056 0 -6057 2 -6058 2 -6059 2 -6060 1 -6061 1 -6062 2 -6063 2 -6064 1 -6065 0 -6066 1 -6067 0 -6068 1 -6069 2 -6070 0 -6071 1 -6072 2 -6073 1 -6074 1 -6075 2 -6076 0 -6077 2 -6078 1 -6079 2 -6080 0 -6081 2 -6082 0 -6083 2 -6084 1 -6085 0 -6086 1 -6087 2 -6088 0 -6089 1 -6090 1 -6091 0 -6092 1 -6093 0 -6094 0 -6095 1 -6096 2 -6097 0 -6098 2 -6099 2 -6100 1 -6101 2 -6102 2 -6103 1 -6104 1 -6105 1 -6106 1 -6107 2 -6108 2 -6109 2 -6110 2 -6111 2 -6112 1 -6113 1 -6114 2 -6115 0 -6116 2 -6117 2 -6118 0 -6119 2 -6120 2 -6121 0 -6122 2 -6123 1 -6124 0 -6125 0 -6126 2 -6127 0 -6128 0 -6129 0 -6130 2 -6131 1 -6132 1 -6133 0 -6134 0 -6135 0 -6136 2 -6137 2 -6138 0 -6139 2 -6140 2 -6141 2 -6142 2 -6143 1 -6144 2 -6145 1 -6146 2 -6147 2 -6148 0 -6149 2 -6150 1 -6151 0 -6152 2 -6153 1 -6154 0 -6155 1 -6156 1 -6157 0 -6158 1 -6159 
1 -6160 0 -6161 2 -6162 1 -6163 1 -6164 2 -6165 1 -6166 0 -6167 0 -6168 1 -6169 1 -6170 1 -6171 1 -6172 1 -6173 0 -6174 1 -6175 1 -6176 2 -6177 1 -6178 2 -6179 1 -6180 2 -6181 2 -6182 1 -6183 2 -6184 1 -6185 2 -6186 1 -6187 2 -6188 0 -6189 0 -6190 2 -6191 2 -6192 0 -6193 2 -6194 0 -6195 1 -6196 2 -6197 1 -6198 1 -6199 2 -6200 2 -6201 1 -6202 0 -6203 2 -6204 0 -6205 2 -6206 1 -6207 0 -6208 2 -6209 1 -6210 2 -6211 1 -6212 0 -6213 1 -6214 0 -6215 2 -6216 2 -6217 2 -6218 0 -6219 2 -6220 1 -6221 2 -6222 1 -6223 1 -6224 2 -6225 0 -6226 1 -6227 1 -6228 1 -6229 1 -6230 2 -6231 0 -6232 1 -6233 1 -6234 0 -6235 1 -6236 2 -6237 1 -6238 1 -6239 2 -6240 2 -6241 1 -6242 1 -6243 2 -6244 1 -6245 1 -6246 1 -6247 1 -6248 1 -6249 2 -6250 2 -6251 2 -6252 2 -6253 1 -6254 1 -6255 1 -6256 1 -6257 1 -6258 2 -6259 2 -6260 0 -6261 1 -6262 2 -6263 2 -6264 2 -6265 1 -6266 1 -6267 0 -6268 1 -6269 0 -6270 0 -6271 2 -6272 2 -6273 1 -6274 1 -6275 0 -6276 1 -6277 2 -6278 1 -6279 2 -6280 0 -6281 1 -6282 2 -6283 2 -6284 1 -6285 0 -6286 0 -6287 0 -6288 1 -6289 2 -6290 2 -6291 0 -6292 2 -6293 1 -6294 2 -6295 0 -6296 1 -6297 0 -6298 1 -6299 0 -6300 0 -6301 1 -6302 2 -6303 1 -6304 1 -6305 2 -6306 0 -6307 0 -6308 1 -6309 1 -6310 1 -6311 0 -6312 1 -6313 0 -6314 1 -6315 2 -6316 2 -6317 0 -6318 2 -6319 0 -6320 2 -6321 1 -6322 1 -6323 1 -6324 1 -6325 2 -6326 0 -6327 1 -6328 2 -6329 1 -6330 0 -6331 0 -6332 1 -6333 1 -6334 1 -6335 1 -6336 1 -6337 1 -6338 1 -6339 2 -6340 1 -6341 2 -6342 0 -6343 1 -6344 0 -6345 1 -6346 1 -6347 0 -6348 1 -6349 0 -6350 2 -6351 1 -6352 2 -6353 1 -6354 0 -6355 1 -6356 1 -6357 0 -6358 1 -6359 1 -6360 1 -6361 0 -6362 1 -6363 2 -6364 1 -6365 2 -6366 1 -6367 0 -6368 1 -6369 1 -6370 2 -6371 0 -6372 0 -6373 1 -6374 1 -6375 1 -6376 1 -6377 1 -6378 1 -6379 1 -6380 2 -6381 1 -6382 1 -6383 1 -6384 1 -6385 1 -6386 2 -6387 2 -6388 2 -6389 1 -6390 2 -6391 1 -6392 2 -6393 1 -6394 0 -6395 1 -6396 1 -6397 2 -6398 0 -6399 2 -6400 2 -6401 2 -6402 0 -6403 1 -6404 2 -6405 2 -6406 2 -6407 2 -6408 0 -6409 
1 -6410 2 -6411 1 -6412 0 -6413 1 -6414 1 -6415 0 -6416 2 -6417 1 -6418 2 -6419 2 -6420 1 -6421 1 -6422 1 -6423 2 -6424 0 -6425 1 -6426 2 -6427 1 -6428 2 -6429 1 -6430 2 -6431 2 -6432 2 -6433 2 -6434 1 -6435 2 -6436 0 -6437 2 -6438 1 -6439 0 -6440 2 -6441 2 -6442 1 -6443 1 -6444 1 -6445 2 -6446 1 -6447 2 -6448 1 -6449 0 -6450 1 -6451 2 -6452 1 -6453 1 -6454 0 -6455 0 -6456 0 -6457 2 -6458 2 -6459 0 -6460 0 -6461 0 -6462 0 -6463 2 -6464 1 -6465 2 -6466 1 -6467 1 -6468 2 -6469 1 -6470 2 -6471 2 -6472 2 -6473 0 -6474 0 -6475 1 -6476 1 -6477 1 -6478 1 -6479 2 -6480 1 -6481 2 -6482 1 -6483 1 -6484 1 -6485 2 -6486 1 -6487 1 -6488 2 -6489 1 -6490 1 -6491 2 -6492 0 -6493 1 -6494 1 -6495 0 -6496 1 -6497 1 -6498 0 -6499 0 -6500 1 -6501 2 -6502 0 -6503 2 -6504 2 -6505 2 -6506 0 -6507 1 -6508 1 -6509 2 -6510 1 -6511 2 -6512 1 -6513 1 -6514 2 -6515 0 -6516 1 -6517 2 -6518 0 -6519 1 -6520 0 -6521 2 -6522 0 -6523 2 -6524 2 -6525 1 -6526 2 -6527 2 -6528 0 -6529 0 -6530 1 -6531 1 -6532 1 -6533 1 -6534 1 -6535 1 -6536 0 -6537 1 -6538 2 -6539 1 -6540 1 -6541 2 -6542 1 -6543 0 -6544 2 -6545 1 -6546 1 -6547 1 -6548 2 -6549 2 -6550 1 -6551 1 -6552 1 -6553 1 -6554 2 -6555 2 -6556 2 -6557 1 -6558 2 -6559 0 -6560 2 -6561 2 -6562 2 -6563 1 -6564 0 -6565 1 -6566 1 -6567 2 -6568 1 -6569 0 -6570 0 -6571 0 -6572 2 -6573 1 -6574 1 -6575 1 -6576 1 -6577 1 -6578 2 -6579 2 -6580 1 -6581 2 -6582 0 -6583 0 -6584 0 -6585 2 -6586 2 -6587 2 -6588 0 -6589 2 -6590 2 -6591 2 -6592 2 -6593 2 -6594 0 -6595 1 -6596 2 -6597 1 -6598 1 -6599 2 -6600 2 -6601 1 -6602 1 -6603 2 -6604 1 -6605 0 -6606 1 -6607 1 -6608 1 -6609 1 -6610 1 -6611 1 -6612 2 -6613 0 -6614 1 -6615 2 -6616 2 -6617 2 -6618 2 -6619 1 -6620 2 -6621 1 -6622 2 -6623 2 -6624 2 -6625 1 -6626 2 -6627 1 -6628 1 -6629 0 -6630 1 -6631 2 -6632 0 -6633 0 -6634 0 -6635 1 -6636 0 -6637 2 -6638 1 -6639 1 -6640 1 -6641 1 -6642 2 -6643 0 -6644 2 -6645 1 -6646 0 -6647 0 -6648 1 -6649 2 -6650 1 -6651 2 -6652 0 -6653 0 -6654 1 -6655 0 -6656 1 -6657 1 -6658 1 -6659 
0 -6660 1 -6661 0 -6662 1 -6663 1 -6664 2 -6665 0 -6666 0 -6667 2 -6668 2 -6669 0 -6670 2 -6671 1 -6672 2 -6673 2 -6674 1 -6675 2 -6676 1 -6677 1 -6678 1 -6679 1 -6680 2 -6681 1 -6682 1 -6683 2 -6684 1 -6685 1 -6686 0 -6687 1 -6688 1 -6689 2 -6690 1 -6691 1 -6692 2 -6693 1 -6694 2 -6695 1 -6696 1 -6697 2 -6698 1 -6699 1 -6700 1 -6701 1 -6702 1 -6703 1 -6704 1 -6705 1 -6706 0 -6707 1 -6708 1 -6709 2 -6710 2 -6711 1 -6712 2 -6713 2 -6714 2 -6715 1 -6716 2 -6717 2 -6718 1 -6719 1 -6720 2 -6721 2 -6722 1 -6723 1 -6724 2 -6725 1 -6726 2 -6727 2 -6728 2 -6729 2 -6730 2 -6731 1 -6732 2 -6733 1 -6734 0 -6735 2 -6736 0 -6737 1 -6738 2 -6739 0 -6740 2 -6741 1 -6742 1 -6743 0 -6744 1 -6745 0 -6746 1 -6747 1 -6748 2 -6749 2 -6750 2 -6751 2 -6752 1 -6753 1 -6754 2 -6755 0 -6756 1 -6757 1 -6758 1 -6759 1 -6760 2 -6761 2 -6762 2 -6763 1 -6764 2 -6765 1 -6766 0 -6767 1 -6768 0 -6769 1 -6770 0 -6771 2 -6772 2 -6773 1 -6774 0 -6775 2 -6776 2 -6777 2 -6778 0 -6779 2 -6780 0 -6781 1 -6782 0 -6783 0 -6784 1 -6785 0 -6786 2 -6787 1 -6788 0 -6789 1 -6790 1 -6791 2 -6792 0 -6793 2 -6794 1 -6795 1 -6796 1 -6797 1 -6798 2 -6799 2 -6800 2 -6801 2 -6802 1 -6803 0 -6804 1 -6805 2 -6806 2 -6807 1 -6808 1 -6809 2 -6810 1 -6811 2 -6812 2 -6813 2 -6814 0 -6815 1 -6816 1 -6817 1 -6818 0 -6819 1 -6820 2 -6821 1 -6822 0 -6823 2 -6824 2 -6825 2 -6826 0 -6827 1 -6828 2 -6829 0 -6830 0 -6831 0 -6832 1 -6833 0 -6834 2 -6835 1 -6836 1 -6837 1 -6838 2 -6839 2 -6840 2 -6841 1 -6842 0 -6843 0 -6844 1 -6845 1 -6846 1 -6847 1 -6848 1 -6849 1 -6850 2 -6851 1 -6852 2 -6853 1 -6854 1 -6855 2 -6856 2 -6857 1 -6858 1 -6859 0 -6860 0 -6861 2 -6862 1 -6863 0 -6864 0 -6865 2 -6866 1 -6867 0 -6868 1 -6869 1 -6870 2 -6871 1 -6872 2 -6873 1 -6874 0 -6875 2 -6876 2 -6877 1 -6878 0 -6879 2 -6880 1 -6881 1 -6882 0 -6883 1 -6884 2 -6885 1 -6886 2 -6887 1 -6888 2 -6889 1 -6890 0 -6891 0 -6892 1 -6893 0 -6894 2 -6895 1 -6896 0 -6897 2 -6898 1 -6899 2 -6900 2 -6901 2 -6902 0 -6903 1 -6904 1 -6905 1 -6906 1 -6907 0 -6908 0 -6909 
1 -6910 1 -6911 1 -6912 1 -6913 0 -6914 1 -6915 2 -6916 1 -6917 1 -6918 1 -6919 2 -6920 2 -6921 0 -6922 2 -6923 1 -6924 2 -6925 2 -6926 0 -6927 1 -6928 1 -6929 0 -6930 2 -6931 1 -6932 1 -6933 1 -6934 1 -6935 1 -6936 1 -6937 2 -6938 2 -6939 0 -6940 1 -6941 2 -6942 0 -6943 2 -6944 1 -6945 1 -6946 0 -6947 2 -6948 0 -6949 0 -6950 1 -6951 1 -6952 0 -6953 1 -6954 2 -6955 1 -6956 1 -6957 1 -6958 1 -6959 1 -6960 1 -6961 0 -6962 2 -6963 2 -6964 1 -6965 2 -6966 0 -6967 1 -6968 1 -6969 1 -6970 2 -6971 2 -6972 1 -6973 1 -6974 1 -6975 1 -6976 0 -6977 1 -6978 1 -6979 2 -6980 2 -6981 0 -6982 0 -6983 2 -6984 2 -6985 2 -6986 1 -6987 0 -6988 2 -6989 2 -6990 0 -6991 2 -6992 2 -6993 0 -6994 1 -6995 1 -6996 2 -6997 1 -6998 2 -6999 2 -7000 0 -7001 1 -7002 0 -7003 1 -7004 0 -7005 0 -7006 2 -7007 2 -7008 0 -7009 2 -7010 1 -7011 1 -7012 1 -7013 0 -7014 2 -7015 0 -7016 2 -7017 2 -7018 2 -7019 0 -7020 2 -7021 2 -7022 1 -7023 1 -7024 2 -7025 1 -7026 2 -7027 2 -7028 0 -7029 1 -7030 0 -7031 0 -7032 2 -7033 1 -7034 0 -7035 0 -7036 1 -7037 1 -7038 2 -7039 1 -7040 2 -7041 0 -7042 2 -7043 1 -7044 2 -7045 0 -7046 1 -7047 1 -7048 1 -7049 1 -7050 2 -7051 2 -7052 1 -7053 0 -7054 2 -7055 0 -7056 2 -7057 1 -7058 2 -7059 1 -7060 1 -7061 0 -7062 0 -7063 2 -7064 1 -7065 1 -7066 1 -7067 1 -7068 0 -7069 2 -7070 1 -7071 2 -7072 2 -7073 1 -7074 1 -7075 1 -7076 0 -7077 2 -7078 1 -7079 2 -7080 1 -7081 2 -7082 1 -7083 2 -7084 0 -7085 0 -7086 0 -7087 1 -7088 2 -7089 2 -7090 1 -7091 1 -7092 1 -7093 2 -7094 2 -7095 2 -7096 2 -7097 2 -7098 1 -7099 0 -7100 2 -7101 0 -7102 2 -7103 2 -7104 2 -7105 0 -7106 1 -7107 0 -7108 1 -7109 0 -7110 0 -7111 0 -7112 2 -7113 2 -7114 1 -7115 2 -7116 2 -7117 2 -7118 1 -7119 2 -7120 1 -7121 0 -7122 2 -7123 2 -7124 2 -7125 2 -7126 0 -7127 1 -7128 0 -7129 0 -7130 0 -7131 1 -7132 1 -7133 1 -7134 2 -7135 0 -7136 2 -7137 2 -7138 2 -7139 1 -7140 1 -7141 0 -7142 1 -7143 1 -7144 1 -7145 0 -7146 2 -7147 1 -7148 1 -7149 0 -7150 2 -7151 1 -7152 1 -7153 0 -7154 2 -7155 1 -7156 2 -7157 2 -7158 0 -7159 
1 -7160 0 -7161 0 -7162 1 -7163 1 -7164 2 -7165 0 -7166 2 -7167 1 -7168 2 -7169 2 -7170 0 -7171 2 -7172 2 -7173 2 -7174 1 -7175 1 -7176 2 -7177 2 -7178 1 -7179 1 -7180 1 -7181 0 -7182 1 -7183 1 -7184 1 -7185 2 -7186 1 -7187 1 -7188 2 -7189 1 -7190 2 -7191 1 -7192 0 -7193 2 -7194 1 -7195 2 -7196 2 -7197 2 -7198 2 -7199 2 -7200 1 -7201 2 -7202 1 -7203 1 -7204 2 -7205 1 -7206 1 -7207 0 -7208 1 -7209 0 -7210 2 -7211 1 -7212 0 -7213 1 -7214 0 -7215 2 -7216 1 -7217 2 -7218 2 -7219 2 -7220 0 -7221 2 -7222 2 -7223 2 -7224 0 -7225 0 -7226 1 -7227 0 -7228 0 -7229 1 -7230 2 -7231 1 -7232 2 -7233 2 -7234 0 -7235 0 -7236 1 -7237 0 -7238 2 -7239 1 -7240 2 -7241 2 -7242 1 -7243 1 -7244 1 -7245 1 -7246 1 -7247 1 -7248 1 -7249 0 -7250 2 -7251 2 -7252 1 -7253 1 -7254 2 -7255 2 -7256 1 -7257 2 -7258 0 -7259 0 -7260 1 -7261 0 -7262 1 -7263 0 -7264 1 -7265 2 -7266 2 -7267 0 -7268 1 -7269 0 -7270 2 -7271 1 -7272 1 -7273 2 -7274 2 -7275 1 -7276 1 -7277 1 -7278 1 -7279 1 -7280 0 -7281 1 -7282 1 -7283 1 -7284 2 -7285 0 -7286 1 -7287 1 -7288 2 -7289 2 -7290 2 -7291 2 -7292 2 -7293 2 -7294 2 -7295 0 -7296 2 -7297 1 -7298 1 -7299 1 -7300 1 -7301 1 -7302 2 -7303 0 -7304 2 -7305 1 -7306 1 -7307 2 -7308 1 -7309 1 -7310 1 -7311 1 -7312 1 -7313 1 -7314 2 -7315 2 -7316 2 -7317 1 -7318 2 -7319 2 -7320 2 -7321 1 -7322 2 -7323 1 -7324 2 -7325 1 -7326 1 -7327 2 -7328 2 -7329 2 -7330 1 -7331 1 -7332 2 -7333 1 -7334 1 -7335 2 -7336 2 -7337 2 -7338 2 -7339 0 -7340 2 -7341 0 -7342 1 -7343 2 -7344 2 -7345 1 -7346 0 -7347 0 -7348 0 -7349 1 -7350 1 -7351 1 -7352 1 -7353 1 -7354 1 -7355 1 -7356 0 -7357 0 -7358 0 -7359 2 -7360 1 -7361 1 -7362 0 -7363 1 -7364 0 -7365 1 -7366 1 -7367 1 -7368 1 -7369 2 -7370 2 -7371 1 -7372 2 -7373 2 -7374 1 -7375 0 -7376 1 -7377 2 -7378 2 -7379 1 -7380 1 -7381 2 -7382 0 -7383 0 -7384 0 -7385 1 -7386 1 -7387 1 -7388 2 -7389 1 -7390 1 -7391 1 -7392 1 -7393 2 -7394 1 -7395 0 -7396 2 -7397 1 -7398 2 -7399 2 -7400 1 -7401 1 -7402 2 -7403 2 -7404 1 -7405 0 -7406 2 -7407 1 -7408 2 -7409 
2 -7410 0 -7411 0 -7412 1 -7413 1 -7414 1 -7415 1 -7416 0 -7417 0 -7418 1 -7419 1 -7420 1 -7421 0 -7422 1 -7423 1 -7424 1 -7425 2 -7426 2 -7427 2 -7428 0 -7429 0 -7430 0 -7431 0 -7432 1 -7433 0 -7434 1 -7435 1 -7436 1 -7437 1 -7438 2 -7439 0 -7440 1 -7441 1 -7442 1 -7443 1 -7444 2 -7445 1 -7446 2 -7447 0 -7448 1 -7449 0 -7450 0 -7451 1 -7452 1 -7453 1 -7454 1 -7455 0 -7456 1 -7457 0 -7458 1 -7459 0 -7460 2 -7461 2 -7462 2 -7463 2 -7464 2 -7465 1 -7466 1 -7467 0 -7468 2 -7469 0 -7470 2 -7471 2 -7472 1 -7473 1 -7474 0 -7475 1 -7476 2 -7477 2 -7478 0 -7479 2 -7480 0 -7481 1 -7482 2 -7483 2 -7484 1 -7485 0 -7486 1 -7487 0 -7488 2 -7489 0 -7490 0 -7491 1 -7492 2 -7493 2 -7494 1 -7495 1 -7496 0 -7497 2 -7498 2 -7499 1 -7500 1 -7501 0 -7502 1 -7503 0 -7504 2 -7505 0 -7506 0 -7507 1 -7508 0 -7509 1 -7510 1 -7511 2 -7512 1 -7513 2 -7514 0 -7515 0 -7516 2 -7517 1 -7518 1 -7519 2 -7520 1 -7521 0 -7522 2 -7523 1 -7524 2 -7525 1 -7526 1 -7527 1 -7528 2 -7529 1 -7530 1 -7531 1 -7532 0 -7533 2 -7534 1 -7535 2 -7536 1 -7537 1 -7538 1 -7539 0 -7540 2 -7541 1 -7542 0 -7543 0 -7544 0 -7545 1 -7546 2 -7547 1 -7548 1 -7549 1 -7550 1 -7551 0 -7552 1 -7553 0 -7554 0 -7555 1 -7556 2 -7557 0 -7558 2 -7559 0 -7560 2 -7561 0 -7562 2 -7563 2 -7564 1 -7565 1 -7566 2 -7567 2 -7568 1 -7569 0 -7570 0 -7571 0 -7572 2 -7573 2 -7574 2 -7575 1 -7576 0 -7577 2 -7578 2 -7579 2 -7580 1 -7581 0 -7582 1 -7583 0 -7584 1 -7585 1 -7586 0 -7587 1 -7588 1 -7589 1 -7590 2 -7591 0 -7592 0 -7593 0 -7594 2 -7595 0 -7596 2 -7597 0 -7598 1 -7599 0 -7600 0 -7601 1 -7602 1 -7603 0 -7604 1 -7605 0 -7606 2 -7607 1 -7608 1 -7609 1 -7610 0 -7611 1 -7612 1 -7613 1 -7614 1 -7615 1 -7616 1 -7617 0 -7618 1 -7619 0 -7620 2 -7621 1 -7622 2 -7623 2 -7624 2 -7625 2 -7626 1 -7627 0 -7628 1 -7629 2 -7630 2 -7631 2 -7632 1 -7633 1 -7634 1 -7635 1 -7636 1 -7637 1 -7638 1 -7639 2 -7640 2 -7641 2 -7642 0 -7643 1 -7644 2 -7645 1 -7646 2 -7647 1 -7648 1 -7649 2 -7650 1 -7651 1 -7652 2 -7653 2 -7654 2 -7655 1 -7656 0 -7657 1 -7658 2 -7659 
2 -7660 1 -7661 0 -7662 0 -7663 1 -7664 0 -7665 2 -7666 2 -7667 2 -7668 2 -7669 1 -7670 1 -7671 0 -7672 1 -7673 2 -7674 0 -7675 1 -7676 0 -7677 1 -7678 2 -7679 2 -7680 1 -7681 2 -7682 0 -7683 0 -7684 1 -7685 2 -7686 0 -7687 1 -7688 0 -7689 2 -7690 0 -7691 2 -7692 1 -7693 0 -7694 2 -7695 0 -7696 0 -7697 1 -7698 1 -7699 0 -7700 0 -7701 0 -7702 0 -7703 1 -7704 2 -7705 2 -7706 2 -7707 0 -7708 2 -7709 1 -7710 0 -7711 1 -7712 2 -7713 1 -7714 0 -7715 2 -7716 1 -7717 2 -7718 0 -7719 1 -7720 1 -7721 2 -7722 2 -7723 1 -7724 1 -7725 2 -7726 2 -7727 2 -7728 0 -7729 1 -7730 1 -7731 1 -7732 1 -7733 2 -7734 1 -7735 0 -7736 1 -7737 1 -7738 2 -7739 1 -7740 1 -7741 0 -7742 1 -7743 2 -7744 1 -7745 2 -7746 2 -7747 1 -7748 1 -7749 2 -7750 0 -7751 1 -7752 0 -7753 1 -7754 1 -7755 2 -7756 2 -7757 2 -7758 1 -7759 1 -7760 1 -7761 1 -7762 0 -7763 1 -7764 0 -7765 2 -7766 2 -7767 2 -7768 1 -7769 1 -7770 1 -7771 2 -7772 0 -7773 2 -7774 2 -7775 2 -7776 1 -7777 0 -7778 1 -7779 2 -7780 1 -7781 2 -7782 2 -7783 1 -7784 2 -7785 2 -7786 1 -7787 0 -7788 1 -7789 1 -7790 2 -7791 0 -7792 2 -7793 1 -7794 0 -7795 1 -7796 0 -7797 2 -7798 1 -7799 2 -7800 1 -7801 0 -7802 2 -7803 1 -7804 2 -7805 2 -7806 1 -7807 2 -7808 0 -7809 0 -7810 0 -7811 1 -7812 0 -7813 1 -7814 1 -7815 0 -7816 2 -7817 1 -7818 0 -7819 1 -7820 0 -7821 2 -7822 1 -7823 1 -7824 2 -7825 1 -7826 0 -7827 0 -7828 2 -7829 1 -7830 2 -7831 1 -7832 0 -7833 2 -7834 2 -7835 0 -7836 2 -7837 0 -7838 1 -7839 2 -7840 1 -7841 1 -7842 2 -7843 1 -7844 1 -7845 2 -7846 0 -7847 2 -7848 2 -7849 0 -7850 1 -7851 1 -7852 2 -7853 0 -7854 1 -7855 0 -7856 1 -7857 2 -7858 1 -7859 1 -7860 2 -7861 0 -7862 1 -7863 2 -7864 1 -7865 1 -7866 1 -7867 2 -7868 0 -7869 2 -7870 0 -7871 1 -7872 0 -7873 1 -7874 1 -7875 1 -7876 0 -7877 1 -7878 1 -7879 0 -7880 0 -7881 1 -7882 1 -7883 0 -7884 2 -7885 1 -7886 1 -7887 0 -7888 1 -7889 2 -7890 2 -7891 0 -7892 2 -7893 1 -7894 0 -7895 1 -7896 0 -7897 1 -7898 0 -7899 1 -7900 2 -7901 1 -7902 1 -7903 1 -7904 1 -7905 0 -7906 1 -7907 2 -7908 1 -7909 
1 -7910 0 -7911 0 -7912 2 -7913 1 -7914 1 -7915 1 -7916 1 -7917 1 -7918 1 -7919 2 -7920 1 -7921 1 -7922 1 -7923 2 -7924 1 -7925 1 -7926 2 -7927 2 -7928 1 -7929 2 -7930 0 -7931 2 -7932 0 -7933 1 -7934 1 -7935 2 -7936 2 -7937 0 -7938 0 -7939 1 -7940 0 -7941 1 -7942 2 -7943 1 -7944 2 -7945 2 -7946 2 -7947 0 -7948 1 -7949 2 -7950 2 -7951 1 -7952 2 -7953 0 -7954 1 -7955 2 -7956 1 -7957 1 -7958 2 -7959 0 -7960 0 -7961 2 -7962 0 -7963 1 -7964 1 -7965 1 -7966 0 -7967 1 -7968 2 -7969 2 -7970 1 -7971 1 -7972 2 -7973 1 -7974 1 -7975 1 -7976 1 -7977 1 -7978 2 -7979 0 -7980 1 -7981 1 -7982 0 -7983 2 -7984 2 -7985 1 -7986 2 -7987 0 -7988 2 -7989 2 -7990 2 -7991 1 -7992 0 -7993 0 -7994 2 -7995 2 -7996 1 -7997 2 -7998 2 -7999 1 -8000 2 -8001 0 -8002 0 -8003 1 -8004 2 -8005 2 -8006 1 -8007 0 -8008 1 -8009 0 -8010 2 -8011 1 -8012 1 -8013 0 -8014 2 -8015 2 -8016 2 -8017 2 -8018 1 -8019 0 -8020 2 -8021 1 -8022 1 -8023 2 -8024 2 -8025 0 -8026 2 -8027 0 -8028 0 -8029 1 -8030 2 -8031 0 -8032 2 -8033 0 -8034 2 -8035 1 -8036 0 -8037 2 -8038 2 -8039 1 -8040 1 -8041 1 -8042 2 -8043 1 -8044 0 -8045 1 -8046 1 -8047 1 -8048 2 -8049 2 -8050 1 -8051 2 -8052 1 -8053 1 -8054 2 -8055 1 -8056 1 -8057 1 -8058 2 -8059 2 -8060 1 -8061 2 -8062 1 -8063 0 -8064 1 -8065 0 -8066 2 -8067 2 -8068 2 -8069 1 -8070 1 -8071 1 -8072 0 -8073 1 -8074 0 -8075 0 -8076 2 -8077 2 -8078 0 -8079 2 -8080 1 -8081 2 -8082 1 -8083 0 -8084 0 -8085 1 -8086 2 -8087 1 -8088 2 -8089 2 -8090 1 -8091 1 -8092 1 -8093 2 -8094 0 -8095 2 -8096 1 -8097 1 -8098 2 -8099 1 -8100 1 -8101 0 -8102 1 -8103 0 -8104 2 -8105 1 -8106 2 -8107 2 -8108 1 -8109 2 -8110 1 -8111 2 -8112 0 -8113 1 -8114 0 -8115 2 -8116 2 -8117 1 -8118 1 -8119 0 -8120 2 -8121 0 -8122 2 -8123 1 -8124 1 -8125 1 -8126 2 -8127 2 -8128 2 -8129 2 -8130 0 -8131 2 -8132 1 -8133 2 -8134 1 -8135 2 -8136 1 -8137 0 -8138 0 -8139 2 -8140 1 -8141 1 -8142 0 -8143 0 -8144 1 -8145 1 -8146 2 -8147 1 -8148 0 -8149 1 -8150 2 -8151 0 -8152 1 -8153 1 -8154 1 -8155 2 -8156 0 -8157 0 -8158 1 -8159 
0 -8160 1 -8161 1 -8162 1 -8163 2 -8164 0 -8165 1 -8166 1 -8167 1 -8168 2 -8169 1 -8170 0 -8171 0 -8172 2 -8173 2 -8174 2 -8175 2 -8176 1 -8177 2 -8178 2 -8179 0 -8180 0 -8181 1 -8182 2 -8183 2 -8184 1 -8185 1 -8186 1 -8187 1 -8188 0 -8189 0 -8190 2 -8191 1 -8192 1 -8193 2 -8194 2 -8195 0 -8196 1 -8197 2 -8198 1 -8199 1 -8200 1 -8201 2 -8202 1 -8203 0 -8204 1 -8205 2 -8206 2 -8207 1 -8208 2 -8209 2 -8210 1 -8211 2 -8212 1 -8213 1 -8214 0 -8215 1 -8216 0 -8217 0 -8218 2 -8219 1 -8220 2 -8221 2 -8222 0 -8223 1 -8224 1 -8225 2 -8226 1 -8227 1 -8228 1 -8229 1 -8230 2 -8231 1 -8232 0 -8233 1 -8234 0 -8235 2 -8236 2 -8237 2 -8238 2 -8239 2 -8240 1 -8241 0 -8242 0 -8243 2 -8244 0 -8245 1 -8246 1 -8247 1 -8248 1 -8249 2 -8250 1 -8251 1 -8252 1 -8253 2 -8254 0 -8255 2 -8256 0 -8257 1 -8258 2 -8259 2 -8260 0 -8261 0 -8262 0 -8263 2 -8264 1 -8265 2 -8266 2 -8267 1 -8268 1 -8269 0 -8270 0 -8271 1 -8272 1 -8273 1 -8274 0 -8275 1 -8276 2 -8277 1 -8278 2 -8279 1 -8280 1 -8281 0 -8282 1 -8283 1 -8284 2 -8285 1 -8286 2 -8287 1 -8288 2 -8289 1 -8290 2 -8291 2 -8292 1 -8293 1 -8294 1 -8295 2 -8296 2 -8297 2 -8298 0 -8299 1 -8300 2 -8301 2 -8302 1 -8303 1 -8304 2 -8305 1 -8306 2 -8307 1 -8308 1 -8309 1 -8310 1 -8311 1 -8312 2 -8313 2 -8314 1 -8315 0 -8316 1 -8317 1 -8318 2 -8319 1 -8320 1 -8321 1 -8322 2 -8323 1 -8324 1 -8325 1 -8326 0 -8327 1 -8328 1 -8329 0 -8330 1 -8331 2 -8332 1 -8333 1 -8334 1 -8335 1 -8336 1 -8337 1 -8338 1 -8339 2 -8340 2 -8341 1 -8342 2 -8343 1 -8344 2 -8345 1 -8346 0 -8347 2 -8348 2 -8349 1 -8350 0 -8351 1 -8352 2 -8353 1 -8354 2 -8355 0 -8356 1 -8357 0 -8358 2 -8359 2 -8360 1 -8361 2 -8362 2 -8363 0 -8364 2 -8365 0 -8366 1 -8367 2 -8368 0 -8369 0 -8370 2 -8371 2 -8372 1 -8373 2 -8374 2 -8375 1 -8376 1 -8377 1 -8378 1 -8379 2 -8380 2 -8381 1 -8382 2 -8383 0 -8384 2 -8385 1 -8386 1 -8387 2 -8388 0 -8389 1 -8390 2 -8391 2 -8392 1 -8393 0 -8394 1 -8395 1 -8396 2 -8397 2 -8398 1 -8399 2 -8400 1 -8401 1 -8402 1 -8403 2 -8404 0 -8405 1 -8406 1 -8407 1 -8408 1 -8409 
0 -8410 0 -8411 0 -8412 1 -8413 1 -8414 2 -8415 1 -8416 0 -8417 2 -8418 1 -8419 2 -8420 2 -8421 0 -8422 2 -8423 0 -8424 2 -8425 0 -8426 2 -8427 2 -8428 0 -8429 1 -8430 2 -8431 1 -8432 1 -8433 0 -8434 2 -8435 1 -8436 1 -8437 2 -8438 0 -8439 1 -8440 1 -8441 0 -8442 0 -8443 0 -8444 1 -8445 1 -8446 1 -8447 0 -8448 2 -8449 1 -8450 2 -8451 2 -8452 1 -8453 2 -8454 1 -8455 1 -8456 1 -8457 1 -8458 0 -8459 1 -8460 0 -8461 0 -8462 1 -8463 1 -8464 1 -8465 0 -8466 1 -8467 2 -8468 0 -8469 1 -8470 2 -8471 0 -8472 1 -8473 0 -8474 0 -8475 2 -8476 1 -8477 0 -8478 1 -8479 1 -8480 2 -8481 0 -8482 1 -8483 1 -8484 1 -8485 2 -8486 0 -8487 2 -8488 1 -8489 0 -8490 1 -8491 2 -8492 1 -8493 1 -8494 1 -8495 0 -8496 0 -8497 2 -8498 2 -8499 2 -8500 2 -8501 1 -8502 2 -8503 1 -8504 2 -8505 1 -8506 2 -8507 2 -8508 2 -8509 1 -8510 1 -8511 0 -8512 1 -8513 2 -8514 0 -8515 1 -8516 0 -8517 1 -8518 0 -8519 0 -8520 1 -8521 1 -8522 2 -8523 2 -8524 1 -8525 1 -8526 1 -8527 2 -8528 1 -8529 1 -8530 0 -8531 1 -8532 0 -8533 0 -8534 0 -8535 0 -8536 0 -8537 0 -8538 2 -8539 1 -8540 1 -8541 0 -8542 1 -8543 2 -8544 1 -8545 2 -8546 2 -8547 1 -8548 2 -8549 1 -8550 1 -8551 1 -8552 2 -8553 0 -8554 1 -8555 2 -8556 1 -8557 1 -8558 2 -8559 0 -8560 2 -8561 1 -8562 2 -8563 1 -8564 0 -8565 2 -8566 1 -8567 2 -8568 2 -8569 1 -8570 1 -8571 2 -8572 1 -8573 1 -8574 1 -8575 2 -8576 0 -8577 1 -8578 0 -8579 0 -8580 2 -8581 1 -8582 0 -8583 0 -8584 0 -8585 0 -8586 2 -8587 1 -8588 1 -8589 2 -8590 2 -8591 2 -8592 2 -8593 1 -8594 0 -8595 2 -8596 0 -8597 1 -8598 2 -8599 0 -8600 0 -8601 1 -8602 1 -8603 2 -8604 1 -8605 0 -8606 1 -8607 2 -8608 2 -8609 0 -8610 0 -8611 1 -8612 2 -8613 0 -8614 2 -8615 1 -8616 2 -8617 0 -8618 1 -8619 1 -8620 1 -8621 1 -8622 1 -8623 0 -8624 0 -8625 0 -8626 2 -8627 2 -8628 0 -8629 2 -8630 1 -8631 1 -8632 2 -8633 1 -8634 2 -8635 1 -8636 1 -8637 1 -8638 0 -8639 1 -8640 0 -8641 1 -8642 1 -8643 1 -8644 1 -8645 2 -8646 1 -8647 1 -8648 1 -8649 1 -8650 2 -8651 2 -8652 0 -8653 2 -8654 2 -8655 2 -8656 0 -8657 0 -8658 2 -8659 
2 -8660 2 -8661 2 -8662 2 -8663 2 -8664 1 -8665 0 -8666 1 -8667 1 -8668 2 -8669 1 -8670 0 -8671 1 -8672 2 -8673 1 -8674 2 -8675 0 -8676 1 -8677 0 -8678 2 -8679 2 -8680 2 -8681 1 -8682 1 -8683 2 -8684 1 -8685 1 -8686 2 -8687 2 -8688 1 -8689 2 -8690 2 -8691 2 -8692 2 -8693 2 -8694 2 -8695 2 -8696 0 -8697 2 -8698 1 -8699 2 -8700 1 -8701 0 -8702 1 -8703 1 -8704 0 -8705 2 -8706 1 -8707 0 -8708 2 -8709 2 -8710 1 -8711 1 -8712 2 -8713 2 -8714 1 -8715 2 -8716 1 -8717 2 -8718 0 -8719 1 -8720 0 -8721 0 -8722 2 -8723 0 -8724 2 -8725 2 -8726 2 -8727 0 -8728 1 -8729 1 -8730 1 -8731 1 -8732 1 -8733 1 -8734 2 -8735 0 -8736 2 -8737 0 -8738 0 -8739 2 -8740 0 -8741 2 -8742 2 -8743 1 -8744 1 -8745 1 -8746 2 -8747 1 -8748 0 -8749 0 -8750 2 -8751 0 -8752 2 -8753 1 -8754 1 -8755 1 -8756 2 -8757 2 -8758 1 -8759 1 -8760 0 -8761 0 -8762 1 -8763 2 -8764 1 -8765 1 -8766 1 -8767 1 -8768 2 -8769 1 -8770 0 -8771 1 -8772 1 -8773 1 -8774 2 -8775 2 -8776 0 -8777 1 -8778 1 -8779 2 -8780 0 -8781 1 -8782 0 -8783 0 -8784 2 -8785 2 -8786 2 -8787 0 -8788 1 -8789 1 -8790 0 -8791 1 -8792 2 -8793 0 -8794 1 -8795 0 -8796 2 -8797 2 -8798 1 -8799 1 -8800 2 -8801 0 -8802 2 -8803 2 -8804 1 -8805 1 -8806 1 -8807 1 -8808 1 -8809 1 -8810 0 -8811 1 -8812 1 -8813 1 -8814 2 -8815 1 -8816 1 -8817 1 -8818 2 -8819 0 -8820 1 -8821 2 -8822 1 -8823 1 -8824 1 -8825 2 -8826 1 -8827 2 -8828 2 -8829 1 -8830 1 -8831 2 -8832 1 -8833 0 -8834 1 -8835 1 -8836 1 -8837 1 -8838 1 -8839 2 -8840 1 -8841 0 -8842 1 -8843 0 -8844 2 -8845 2 -8846 1 -8847 1 -8848 2 -8849 0 -8850 1 -8851 1 -8852 2 -8853 0 -8854 0 -8855 2 -8856 2 -8857 1 -8858 0 -8859 2 -8860 0 -8861 0 -8862 2 -8863 1 -8864 0 -8865 1 -8866 0 -8867 1 -8868 0 -8869 1 -8870 0 -8871 2 -8872 2 -8873 0 -8874 1 -8875 1 -8876 1 -8877 0 -8878 2 -8879 0 -8880 2 -8881 1 -8882 1 -8883 0 -8884 1 -8885 0 -8886 2 -8887 1 -8888 0 -8889 1 -8890 1 -8891 1 -8892 2 -8893 2 -8894 1 -8895 1 -8896 1 -8897 1 -8898 2 -8899 1 -8900 1 -8901 2 -8902 2 -8903 1 -8904 2 -8905 1 -8906 2 -8907 2 -8908 2 -8909 
1 -8910 0 -8911 1 -8912 2 -8913 1 -8914 2 -8915 1 -8916 2 -8917 1 -8918 1 -8919 0 -8920 0 -8921 2 -8922 0 -8923 2 -8924 1 -8925 0 -8926 2 -8927 0 -8928 1 -8929 2 -8930 1 -8931 1 -8932 0 -8933 1 -8934 2 -8935 1 -8936 1 -8937 0 -8938 2 -8939 1 -8940 0 -8941 0 -8942 2 -8943 2 -8944 1 -8945 2 -8946 0 -8947 2 -8948 0 -8949 0 -8950 0 -8951 2 -8952 0 -8953 2 -8954 1 -8955 1 -8956 1 -8957 1 -8958 2 -8959 0 -8960 0 -8961 1 -8962 0 -8963 2 -8964 2 -8965 0 -8966 1 -8967 1 -8968 1 -8969 2 -8970 1 -8971 2 -8972 1 -8973 1 -8974 1 -8975 1 -8976 0 -8977 2 -8978 1 -8979 1 -8980 1 -8981 2 -8982 2 -8983 0 -8984 0 -8985 1 -8986 2 -8987 1 -8988 0 -8989 1 -8990 1 -8991 0 -8992 1 -8993 0 -8994 1 -8995 1 -8996 2 -8997 0 -8998 2 -8999 0 -9000 2 -9001 1 -9002 1 -9003 2 -9004 0 -9005 1 -9006 1 -9007 1 -9008 2 -9009 0 -9010 2 -9011 1 -9012 1 -9013 2 -9014 2 -9015 2 -9016 2 -9017 2 -9018 2 -9019 1 -9020 2 -9021 1 -9022 2 -9023 2 -9024 1 -9025 2 -9026 1 -9027 2 -9028 1 -9029 0 -9030 0 -9031 0 -9032 2 -9033 1 -9034 1 -9035 1 -9036 2 -9037 0 -9038 1 -9039 2 -9040 1 -9041 2 -9042 1 -9043 2 -9044 0 -9045 1 -9046 2 -9047 2 -9048 1 -9049 2 -9050 0 -9051 1 -9052 0 -9053 1 -9054 0 -9055 0 -9056 0 -9057 2 -9058 1 -9059 2 -9060 0 -9061 2 -9062 0 -9063 0 -9064 1 -9065 0 -9066 0 -9067 0 -9068 1 -9069 1 -9070 2 -9071 1 -9072 2 -9073 0 -9074 1 -9075 0 -9076 1 -9077 2 -9078 2 -9079 1 -9080 0 -9081 1 -9082 1 -9083 2 -9084 0 -9085 1 -9086 2 -9087 2 -9088 0 -9089 2 -9090 1 -9091 0 -9092 0 -9093 2 -9094 1 -9095 2 -9096 2 -9097 1 -9098 1 -9099 1 -9100 2 -9101 1 -9102 2 -9103 1 -9104 0 -9105 1 -9106 1 -9107 1 -9108 1 -9109 1 -9110 1 -9111 2 -9112 0 -9113 1 -9114 1 -9115 1 -9116 0 -9117 1 -9118 1 -9119 2 -9120 0 -9121 1 -9122 1 -9123 1 -9124 1 -9125 1 -9126 2 -9127 0 -9128 1 -9129 2 -9130 2 -9131 1 -9132 1 -9133 2 -9134 1 -9135 0 -9136 1 -9137 1 -9138 1 -9139 1 -9140 1 -9141 1 -9142 1 -9143 1 -9144 2 -9145 1 -9146 1 -9147 0 -9148 1 -9149 1 -9150 1 -9151 1 -9152 1 -9153 1 -9154 0 -9155 2 -9156 1 -9157 1 -9158 1 -9159 
2 -9160 0 -9161 2 -9162 0 -9163 1 -9164 0 -9165 1 -9166 0 -9167 1 -9168 2 -9169 2 -9170 1 -9171 2 -9172 2 -9173 2 -9174 1 -9175 1 -9176 1 -9177 2 -9178 2 -9179 1 -9180 0 -9181 1 -9182 1 -9183 2 -9184 2 -9185 0 -9186 0 -9187 0 -9188 2 -9189 1 -9190 1 -9191 1 -9192 2 -9193 2 -9194 2 -9195 1 -9196 1 -9197 0 -9198 2 -9199 2 -9200 1 -9201 0 -9202 1 -9203 2 -9204 2 -9205 1 -9206 0 -9207 1 -9208 0 -9209 0 -9210 1 -9211 1 -9212 2 -9213 1 -9214 2 -9215 0 -9216 1 -9217 1 -9218 0 -9219 2 -9220 2 -9221 0 -9222 1 -9223 2 -9224 2 -9225 1 -9226 1 -9227 1 -9228 1 -9229 0 -9230 2 -9231 2 -9232 2 -9233 1 -9234 1 -9235 1 -9236 2 -9237 1 -9238 2 -9239 2 -9240 1 -9241 2 -9242 1 -9243 0 -9244 1 -9245 1 -9246 0 -9247 0 -9248 0 -9249 1 -9250 2 -9251 1 -9252 1 -9253 0 -9254 2 -9255 0 -9256 1 -9257 1 -9258 1 -9259 1 -9260 1 -9261 2 -9262 2 -9263 2 -9264 2 -9265 1 -9266 1 -9267 1 -9268 2 -9269 1 -9270 0 -9271 1 -9272 1 -9273 2 -9274 2 -9275 2 -9276 2 -9277 2 -9278 0 -9279 2 -9280 0 -9281 1 -9282 0 -9283 1 -9284 0 -9285 2 -9286 0 -9287 0 -9288 0 -9289 2 -9290 2 -9291 0 -9292 0 -9293 1 -9294 0 -9295 2 -9296 2 -9297 1 -9298 1 -9299 0 -9300 1 -9301 0 -9302 1 -9303 1 -9304 1 -9305 1 -9306 0 -9307 1 -9308 1 -9309 1 -9310 2 -9311 0 -9312 1 -9313 0 -9314 0 -9315 2 -9316 0 -9317 0 -9318 1 -9319 0 -9320 0 -9321 2 -9322 0 -9323 0 -9324 2 -9325 1 -9326 2 -9327 1 -9328 2 -9329 0 -9330 2 -9331 2 -9332 2 -9333 1 -9334 2 -9335 2 -9336 1 -9337 1 -9338 1 -9339 0 -9340 2 -9341 2 -9342 2 -9343 2 -9344 2 -9345 2 -9346 1 -9347 1 -9348 1 -9349 0 -9350 1 -9351 1 -9352 1 -9353 0 -9354 1 -9355 1 -9356 1 -9357 1 -9358 2 -9359 2 -9360 1 -9361 1 -9362 1 -9363 1 -9364 1 -9365 2 -9366 2 -9367 2 -9368 2 -9369 0 -9370 0 -9371 0 -9372 0 -9373 0 -9374 2 -9375 2 -9376 2 -9377 0 -9378 0 -9379 2 -9380 2 -9381 2 -9382 2 -9383 0 -9384 1 -9385 2 -9386 2 -9387 2 -9388 2 -9389 0 -9390 2 -9391 2 -9392 0 -9393 1 -9394 2 -9395 0 -9396 2 -9397 2 -9398 2 -9399 2 -9400 0 -9401 1 -9402 2 -9403 0 -9404 1 -9405 1 -9406 2 -9407 2 -9408 1 -9409 
2 -9410 1 -9411 0 -9412 1 -9413 2 -9414 1 -9415 1 -9416 2 -9417 1 -9418 2 -9419 1 -9420 1 -9421 1 -9422 2 -9423 2 -9424 2 -9425 2 -9426 2 -9427 0 -9428 2 -9429 2 -9430 1 -9431 2 -9432 0 -9433 2 -9434 0 -9435 0 -9436 2 -9437 0 -9438 1 -9439 1 -9440 2 -9441 1 -9442 2 -9443 2 -9444 1 -9445 2 -9446 0 -9447 2 -9448 0 -9449 0 -9450 1 -9451 0 -9452 1 -9453 2 -9454 0 -9455 1 -9456 0 -9457 1 -9458 0 -9459 0 -9460 0 -9461 2 -9462 2 -9463 2 -9464 2 -9465 1 -9466 2 -9467 2 -9468 2 -9469 1 -9470 2 -9471 2 -9472 1 -9473 1 -9474 2 -9475 1 -9476 1 -9477 1 -9478 1 -9479 2 -9480 1 -9481 1 -9482 0 -9483 1 -9484 2 -9485 1 -9486 0 -9487 1 -9488 2 -9489 0 -9490 2 -9491 2 -9492 2 -9493 1 -9494 1 -9495 1 -9496 2 -9497 1 -9498 1 -9499 1 -9500 1 -9501 2 -9502 1 -9503 1 -9504 1 -9505 1 -9506 2 -9507 1 -9508 2 -9509 1 -9510 2 -9511 0 -9512 2 -9513 2 -9514 2 -9515 1 -9516 1 -9517 0 -9518 1 -9519 2 -9520 0 -9521 0 -9522 2 -9523 1 -9524 1 -9525 1 -9526 2 -9527 2 -9528 2 -9529 2 -9530 2 -9531 0 -9532 1 -9533 1 -9534 1 -9535 0 -9536 2 -9537 0 -9538 2 -9539 2 -9540 1 -9541 2 -9542 0 -9543 0 -9544 2 -9545 2 -9546 1 -9547 0 -9548 1 -9549 0 -9550 0 -9551 2 -9552 0 -9553 1 -9554 2 -9555 0 -9556 0 -9557 0 -9558 0 -9559 2 -9560 1 -9561 2 -9562 1 -9563 2 -9564 2 -9565 2 -9566 2 -9567 0 -9568 1 -9569 1 -9570 2 -9571 0 -9572 2 -9573 0 -9574 2 -9575 2 -9576 2 -9577 0 -9578 0 -9579 0 -9580 0 -9581 1 -9582 0 -9583 2 -9584 2 -9585 1 -9586 0 -9587 2 -9588 2 -9589 2 -9590 1 -9591 1 -9592 1 -9593 1 -9594 1 -9595 2 -9596 2 -9597 2 -9598 1 -9599 1 -9600 1 -9601 1 -9602 2 -9603 1 -9604 1 -9605 1 -9606 1 -9607 0 -9608 2 -9609 1 -9610 1 -9611 2 -9612 1 -9613 1 -9614 2 -9615 2 -9616 2 -9617 0 -9618 2 -9619 1 -9620 1 -9621 2 -9622 2 -9623 2 -9624 2 -9625 0 -9626 1 -9627 1 -9628 2 -9629 1 -9630 1 -9631 2 -9632 2 -9633 0 -9634 1 -9635 0 -9636 1 -9637 1 -9638 2 -9639 1 -9640 0 -9641 1 -9642 1 -9643 1 -9644 1 -9645 0 -9646 2 -9647 0 -9648 2 -9649 1 -9650 2 -9651 0 -9652 0 -9653 1 -9654 0 -9655 0 -9656 2 -9657 0 -9658 1 -9659 
0 -9660 1 -9661 2 -9662 1 -9663 0 -9664 2 -9665 0 -9666 1 -9667 0 -9668 2 -9669 1 -9670 1 -9671 0 -9672 2 -9673 1 -9674 2 -9675 0 -9676 1 -9677 2 -9678 1 -9679 1 -9680 2 -9681 1 -9682 2 -9683 0 -9684 0 -9685 0 -9686 0 -9687 1 -9688 2 -9689 1 -9690 1 -9691 0 -9692 2 -9693 1 -9694 0 -9695 1 -9696 2 -9697 1 -9698 1 -9699 2 -9700 0 -9701 1 -9702 2 -9703 2 -9704 1 -9705 2 -9706 2 -9707 0 -9708 2 -9709 2 -9710 1 -9711 1 -9712 2 -9713 1 -9714 2 -9715 2 -9716 1 -9717 2 -9718 1 -9719 0 -9720 2 -9721 1 -9722 1 -9723 1 -9724 0 -9725 1 -9726 1 -9727 2 -9728 0 -9729 0 -9730 0 -9731 1 -9732 1 -9733 1 -9734 2 -9735 1 -9736 0 -9737 1 -9738 1 -9739 0 -9740 2 -9741 1 -9742 1 -9743 0 -9744 2 -9745 0 -9746 0 -9747 1 -9748 1 -9749 1 -9750 1 -9751 1 -9752 2 -9753 2 -9754 1 -9755 1 -9756 0 -9757 2 -9758 2 -9759 0 -9760 0 -9761 1 -9762 0 -9763 2 -9764 2 -9765 1 -9766 1 -9767 2 -9768 1 -9769 1 -9770 0 -9771 1 -9772 1 -9773 0 -9774 1 -9775 0 -9776 2 -9777 2 -9778 2 -9779 1 -9780 1 -9781 0 -9782 1 -9783 1 -9784 1 -9785 2 -9786 0 -9787 1 -9788 2 -9789 1 -9790 0 -9791 2 -9792 1 -9793 2 -9794 1 -9795 2 -9796 0 -9797 2 -9798 1 -9799 1 -9800 2 -9801 2 -9802 1 -9803 2 -9804 2 -9805 1 -9806 2 -9807 1 -9808 1 -9809 1 -9810 0 -9811 1 -9812 0 -9813 2 -9814 2 -9815 2 -9816 0 -9817 0 -9818 1 -9819 1 -9820 2 -9821 0 -9822 1 -9823 2 -9824 2 -9825 2 -9826 1 -9827 1 -9828 0 -9829 0 -9830 2 -9831 0 -9832 0 -9833 0 -9834 2 -9835 1 -9836 1 -9837 1 -9838 2 -9839 2 -9840 1 -9841 1 -9842 2 -9843 2 -9844 2 -9845 0 -9846 2 -9847 2 -9848 0 -9849 0 -9850 1 -9851 1 -9852 1 -9853 1 -9854 2 -9855 2 -9856 1 -9857 2 -9858 0 -9859 1 -9860 2 -9861 1 -9862 0 -9863 1 -9864 2 -9865 2 -9866 0 -9867 2 -9868 1 -9869 1 -9870 0 -9871 0 -9872 0 -9873 2 -9874 0 -9875 1 -9876 1 -9877 0 -9878 1 -9879 1 -9880 1 -9881 1 -9882 1 -9883 0 -9884 2 -9885 2 -9886 2 -9887 2 -9888 1 -9889 1 -9890 2 -9891 1 -9892 0 -9893 2 -9894 0 -9895 2 -9896 1 -9897 1 -9898 1 -9899 1 -9900 0 -9901 2 -9902 2 -9903 1 -9904 0 -9905 1 -9906 2 -9907 0 -9908 1 -9909 
2 -9910 2 -9911 1 -9912 1 -9913 1 -9914 1 -9915 1 -9916 2 -9917 0 -9918 0 -9919 1 -9920 2 -9921 1 -9922 2 -9923 0 -9924 1 -9925 0 -9926 0 -9927 2 -9928 2 -9929 1 -9930 1 -9931 1 -9932 0 -9933 1 -9934 1 -9935 2 -9936 2 -9937 1 -9938 1 -9939 2 -9940 2 -9941 0 -9942 2 -9943 2 -9944 0 -9945 1 -9946 0 -9947 2 -9948 1 -9949 1 -9950 0 -9951 0 -9952 1 -9953 1 -9954 2 -9955 0 -9956 0 -9957 1 -9958 1 -9959 2 -9960 0 -9961 1 -9962 1 -9963 1 -9964 2 -9965 1 -9966 0 -9967 2 -9968 2 -9969 2 -9970 2 -9971 1 -9972 1 -9973 1 -9974 2 -9975 1 -9976 0 -9977 1 -9978 2 -9979 1 -9980 2 -9981 2 -9982 1 -9983 1 -9984 2 -9985 1 -9986 2 -9987 0 -9988 2 -9989 2 -9990 2 -9991 2 -9992 1 -9993 2 -9994 0 -9995 1 -9996 1 -9997 1 -9998 0 -9999 1 -10000 2 -10001 1 -10002 2 -10003 2 -10004 0 -10005 0 -10006 1 -10007 0 -10008 1 -10009 1 -10010 1 -10011 2 -10012 1 -10013 1 -10014 1 -10015 2 -10016 0 -10017 0 -10018 2 -10019 2 -10020 2 -10021 0 -10022 0 -10023 0 -10024 2 -10025 2 -10026 2 -10027 1 -10028 1 -10029 2 -10030 1 -10031 2 -10032 2 -10033 1 -10034 0 -10035 1 -10036 2 -10037 2 -10038 0 -10039 2 -10040 0 -10041 2 -10042 0 -10043 1 -10044 0 -10045 0 -10046 0 -10047 0 -10048 1 -10049 2 -10050 2 -10051 1 -10052 2 -10053 0 -10054 1 -10055 1 -10056 1 -10057 2 -10058 1 -10059 0 -10060 0 -10061 0 -10062 1 -10063 0 -10064 0 -10065 2 -10066 0 -10067 1 -10068 2 -10069 0 -10070 0 -10071 0 -10072 2 -10073 0 -10074 2 -10075 2 -10076 2 -10077 1 -10078 2 -10079 0 -10080 1 -10081 1 -10082 0 -10083 2 -10084 0 -10085 2 -10086 2 -10087 1 -10088 1 -10089 2 -10090 2 -10091 2 -10092 1 -10093 2 -10094 2 -10095 1 -10096 1 -10097 1 -10098 0 -10099 2 -10100 2 -10101 0 -10102 1 -10103 2 -10104 1 -10105 0 -10106 1 -10107 1 -10108 0 -10109 2 -10110 1 -10111 1 -10112 1 -10113 0 -10114 1 -10115 1 -10116 2 -10117 1 -10118 0 -10119 0 -10120 0 -10121 1 -10122 1 -10123 0 -10124 1 -10125 0 -10126 2 -10127 0 -10128 2 -10129 1 -10130 2 -10131 1 -10132 0 -10133 1 -10134 2 -10135 1 -10136 2 -10137 0 -10138 1 -10139 1 -10140 1 -10141 0 
-10142 1 -10143 0 -10144 1 -10145 2 -10146 1 -10147 0 -10148 1 -10149 1 -10150 0 -10151 2 -10152 2 -10153 2 -10154 1 -10155 1 -10156 2 -10157 2 -10158 0 -10159 2 -10160 2 -10161 2 -10162 2 -10163 0 -10164 2 -10165 1 -10166 1 -10167 2 -10168 2 -10169 0 -10170 2 -10171 1 -10172 0 -10173 0 -10174 1 -10175 1 -10176 1 -10177 1 -10178 1 -10179 2 -10180 2 -10181 1 -10182 2 -10183 1 -10184 1 -10185 2 -10186 1 -10187 2 -10188 2 -10189 0 -10190 0 -10191 2 -10192 1 -10193 1 -10194 2 -10195 1 -10196 2 -10197 1 -10198 2 -10199 1 -10200 1 -10201 0 -10202 2 -10203 1 -10204 1 -10205 1 -10206 2 -10207 1 -10208 1 -10209 2 -10210 1 -10211 0 -10212 2 -10213 1 -10214 0 -10215 1 -10216 1 -10217 2 -10218 1 -10219 2 -10220 1 -10221 2 -10222 2 -10223 0 -10224 1 -10225 2 -10226 1 -10227 1 -10228 1 -10229 0 -10230 2 -10231 1 -10232 0 -10233 0 -10234 0 -10235 2 -10236 1 -10237 1 -10238 2 -10239 1 -10240 0 -10241 2 -10242 0 -10243 1 -10244 2 -10245 1 -10246 2 -10247 0 -10248 2 -10249 1 -10250 0 -10251 0 -10252 2 -10253 1 -10254 2 -10255 0 -10256 2 -10257 2 -10258 1 -10259 1 -10260 1 -10261 0 -10262 1 -10263 1 -10264 1 -10265 2 -10266 2 -10267 1 -10268 2 -10269 2 -10270 2 -10271 1 -10272 2 -10273 1 -10274 1 -10275 2 -10276 1 -10277 2 -10278 1 -10279 0 -10280 1 -10281 1 -10282 1 -10283 1 -10284 1 -10285 2 -10286 2 -10287 1 -10288 1 -10289 1 -10290 2 -10291 1 -10292 2 -10293 2 -10294 2 -10295 0 -10296 1 -10297 2 -10298 2 -10299 1 -10300 0 -10301 1 -10302 0 -10303 0 -10304 1 -10305 1 -10306 0 -10307 0 -10308 2 -10309 1 -10310 0 -10311 1 -10312 1 -10313 2 -10314 1 -10315 2 -10316 1 -10317 1 -10318 2 -10319 2 -10320 0 -10321 1 -10322 1 -10323 1 -10324 2 -10325 2 -10326 1 -10327 0 -10328 1 -10329 2 -10330 1 -10331 0 -10332 1 -10333 1 -10334 1 -10335 2 -10336 1 -10337 2 -10338 2 -10339 2 -10340 0 -10341 0 -10342 2 -10343 1 -10344 0 -10345 1 -10346 2 -10347 0 -10348 0 -10349 1 -10350 1 -10351 0 -10352 1 -10353 2 -10354 2 -10355 2 -10356 2 -10357 1 -10358 0 -10359 2 -10360 0 -10361 1 -10362 2 -10363 2 
-10364 2 -10365 1 -10366 2 -10367 2 -10368 0 -10369 2 -10370 2 -10371 2 -10372 2 -10373 2 -10374 1 -10375 1 -10376 2 -10377 2 -10378 1 -10379 2 -10380 1 -10381 2 -10382 0 -10383 2 -10384 0 -10385 2 -10386 1 -10387 2 -10388 0 -10389 2 -10390 1 -10391 0 -10392 1 -10393 1 -10394 1 -10395 1 -10396 2 -10397 2 -10398 1 -10399 2 -10400 2 -10401 1 -10402 1 -10403 1 -10404 1 -10405 2 -10406 2 -10407 0 -10408 1 -10409 0 -10410 1 -10411 0 -10412 2 -10413 2 -10414 2 -10415 0 -10416 2 -10417 2 -10418 1 -10419 0 -10420 0 -10421 1 -10422 2 -10423 1 -10424 1 -10425 1 -10426 2 -10427 1 -10428 0 -10429 2 -10430 2 -10431 1 -10432 0 -10433 1 -10434 2 -10435 0 -10436 2 -10437 2 -10438 1 -10439 1 -10440 2 -10441 1 -10442 0 -10443 0 -10444 1 -10445 1 -10446 0 -10447 1 -10448 2 -10449 2 -10450 0 -10451 1 -10452 0 -10453 1 -10454 2 -10455 1 -10456 1 -10457 2 -10458 2 -10459 0 -10460 1 -10461 2 -10462 1 -10463 1 -10464 1 -10465 0 -10466 1 -10467 1 -10468 2 -10469 0 -10470 2 -10471 1 -10472 1 -10473 1 -10474 0 -10475 2 -10476 1 -10477 2 -10478 2 -10479 1 -10480 0 -10481 1 -10482 0 -10483 2 -10484 2 -10485 2 -10486 0 -10487 1 -10488 1 -10489 1 -10490 0 -10491 1 -10492 1 -10493 1 -10494 1 -10495 1 -10496 1 -10497 1 -10498 1 -10499 1 -10500 2 -10501 2 -10502 2 -10503 1 -10504 1 -10505 0 -10506 2 -10507 0 -10508 2 -10509 0 -10510 2 -10511 1 -10512 2 -10513 2 -10514 1 -10515 1 -10516 0 -10517 1 -10518 1 -10519 1 -10520 2 -10521 1 -10522 1 -10523 1 -10524 0 -10525 0 -10526 2 -10527 1 -10528 1 -10529 2 -10530 1 -10531 1 -10532 2 -10533 2 -10534 1 -10535 1 -10536 1 -10537 1 -10538 1 -10539 0 -10540 2 -10541 0 -10542 1 -10543 0 -10544 0 -10545 1 -10546 0 -10547 0 -10548 1 -10549 1 -10550 1 -10551 1 -10552 1 -10553 2 -10554 1 -10555 0 -10556 1 -10557 0 -10558 0 -10559 2 -10560 1 -10561 2 -10562 1 -10563 0 -10564 0 -10565 0 -10566 0 -10567 2 -10568 2 -10569 1 -10570 1 -10571 1 -10572 0 -10573 2 -10574 2 -10575 0 -10576 1 -10577 1 -10578 1 -10579 1 -10580 2 -10581 0 -10582 1 -10583 1 -10584 1 -10585 2 
-10586 1 -10587 1 -10588 2 -10589 2 -10590 2 -10591 1 -10592 2 -10593 2 -10594 0 -10595 1 -10596 1 -10597 2 -10598 1 -10599 1 -10600 2 -10601 2 -10602 2 -10603 2 -10604 2 -10605 0 -10606 2 -10607 1 -10608 1 -10609 1 -10610 2 -10611 1 -10612 0 -10613 0 -10614 2 -10615 0 -10616 2 -10617 0 -10618 2 -10619 1 -10620 1 -10621 1 -10622 1 -10623 0 -10624 1 -10625 1 -10626 1 -10627 2 -10628 2 -10629 1 -10630 1 -10631 1 -10632 1 -10633 2 -10634 1 -10635 0 -10636 1 -10637 2 -10638 1 -10639 1 -10640 1 -10641 0 -10642 0 -10643 1 -10644 1 -10645 0 -10646 0 -10647 1 -10648 2 -10649 0 -10650 1 -10651 1 -10652 2 -10653 0 -10654 2 -10655 0 -10656 0 -10657 2 -10658 1 -10659 0 -10660 2 -10661 1 -10662 1 -10663 0 -10664 1 -10665 1 -10666 1 -10667 2 -10668 0 -10669 0 -10670 0 -10671 0 -10672 1 -10673 1 -10674 0 -10675 2 -10676 2 -10677 0 -10678 0 -10679 1 -10680 1 -10681 1 -10682 1 -10683 0 -10684 1 -10685 1 -10686 1 -10687 0 -10688 1 -10689 1 -10690 2 -10691 1 -10692 1 -10693 1 -10694 0 -10695 2 -10696 0 -10697 2 -10698 2 -10699 1 -10700 2 -10701 0 -10702 1 -10703 2 -10704 2 -10705 1 -10706 2 -10707 2 -10708 1 -10709 2 -10710 2 -10711 1 -10712 1 -10713 1 -10714 1 -10715 0 -10716 1 -10717 0 -10718 2 -10719 1 -10720 1 -10721 1 -10722 0 -10723 1 -10724 2 -10725 1 -10726 1 -10727 1 -10728 1 -10729 1 -10730 2 -10731 2 -10732 2 -10733 2 -10734 0 -10735 1 -10736 0 -10737 2 -10738 2 -10739 2 -10740 1 -10741 2 -10742 2 -10743 2 -10744 1 -10745 1 -10746 2 -10747 1 -10748 2 -10749 1 -10750 2 -10751 0 -10752 0 -10753 1 -10754 2 -10755 1 -10756 2 -10757 1 -10758 2 -10759 2 -10760 2 -10761 1 -10762 1 -10763 2 -10764 0 -10765 1 -10766 1 -10767 2 -10768 1 -10769 2 -10770 1 -10771 0 -10772 1 -10773 1 -10774 0 -10775 1 -10776 2 -10777 0 -10778 1 -10779 2 -10780 0 -10781 0 -10782 0 -10783 2 -10784 1 -10785 1 -10786 0 -10787 1 -10788 0 -10789 1 -10790 1 -10791 2 -10792 1 -10793 0 -10794 1 -10795 0 -10796 1 -10797 1 -10798 1 -10799 2 -10800 1 -10801 2 -10802 2 -10803 1 -10804 2 -10805 2 -10806 0 -10807 0 
-10808 0 -10809 2 -10810 0 -10811 0 -10812 1 -10813 1 -10814 1 -10815 1 -10816 0 -10817 1 -10818 2 -10819 1 -10820 1 -10821 2 -10822 1 -10823 0 -10824 1 -10825 1 -10826 2 -10827 0 -10828 0 -10829 0 -10830 1 -10831 0 -10832 1 -10833 0 -10834 0 -10835 1 -10836 1 -10837 1 -10838 1 -10839 2 -10840 2 -10841 1 -10842 1 -10843 1 -10844 2 -10845 2 -10846 1 -10847 0 -10848 2 -10849 1 -10850 0 -10851 1 -10852 0 -10853 2 -10854 0 -10855 1 -10856 2 -10857 0 -10858 1 -10859 2 -10860 0 -10861 1 -10862 1 -10863 0 -10864 0 -10865 0 -10866 0 -10867 0 -10868 1 -10869 1 -10870 1 -10871 2 -10872 1 -10873 0 -10874 1 -10875 2 -10876 0 -10877 1 -10878 2 -10879 1 -10880 2 -10881 0 -10882 2 -10883 1 -10884 1 -10885 2 -10886 1 -10887 2 -10888 0 -10889 1 -10890 2 -10891 2 -10892 0 -10893 0 -10894 1 -10895 2 -10896 2 -10897 1 -10898 2 -10899 2 -10900 0 -10901 0 -10902 2 -10903 0 -10904 1 -10905 1 -10906 0 -10907 2 -10908 2 -10909 1 -10910 1 -10911 1 -10912 2 -10913 0 -10914 2 -10915 2 -10916 2 -10917 1 -10918 1 -10919 0 -10920 2 -10921 1 -10922 2 -10923 1 -10924 0 -10925 0 -10926 0 -10927 0 -10928 1 -10929 0 -10930 2 -10931 0 -10932 1 -10933 1 -10934 1 -10935 2 -10936 2 -10937 2 -10938 1 -10939 1 -10940 1 -10941 2 -10942 2 -10943 1 -10944 0 -10945 1 -10946 1 -10947 1 -10948 2 -10949 1 -10950 1 -10951 0 -10952 1 -10953 1 -10954 1 -10955 0 -10956 2 -10957 1 -10958 0 -10959 0 -10960 2 -10961 2 -10962 2 -10963 2 -10964 1 -10965 2 -10966 2 -10967 1 -10968 2 -10969 1 -10970 1 -10971 1 -10972 1 -10973 1 -10974 2 -10975 1 -10976 1 -10977 0 -10978 1 -10979 2 -10980 1 -10981 2 -10982 2 -10983 2 -10984 1 -10985 1 -10986 1 -10987 1 -10988 1 -10989 2 -10990 1 -10991 0 -10992 1 -10993 2 -10994 0 -10995 2 -10996 2 -10997 1 -10998 2 -10999 2 -11000 1 -11001 1 -11002 2 -11003 2 -11004 2 -11005 2 -11006 0 -11007 2 -11008 1 -11009 2 -11010 2 -11011 1 -11012 0 -11013 2 -11014 1 -11015 1 -11016 1 -11017 0 -11018 2 -11019 2 -11020 1 -11021 1 -11022 2 -11023 2 -11024 2 -11025 1 -11026 1 -11027 1 -11028 2 -11029 1 
-11030 1 -11031 1 -11032 0 -11033 0 -11034 1 -11035 0 -11036 1 -11037 1 -11038 0 -11039 2 -11040 2 -11041 2 -11042 1 -11043 0 -11044 0 -11045 2 -11046 0 -11047 0 -11048 2 -11049 1 -11050 2 -11051 2 -11052 1 -11053 1 -11054 1 -11055 2 -11056 2 -11057 2 -11058 0 -11059 1 -11060 0 -11061 1 -11062 1 -11063 2 -11064 2 -11065 1 -11066 1 -11067 1 -11068 2 -11069 2 -11070 1 -11071 1 -11072 2 -11073 0 -11074 2 -11075 0 -11076 1 -11077 1 -11078 1 -11079 2 -11080 1 -11081 0 -11082 2 -11083 0 -11084 2 -11085 2 -11086 1 -11087 2 -11088 2 -11089 2 -11090 2 -11091 2 -11092 2 -11093 1 -11094 1 -11095 2 -11096 2 -11097 1 -11098 0 -11099 0 -11100 2 -11101 1 -11102 1 -11103 1 -11104 1 -11105 1 -11106 2 -11107 2 -11108 2 -11109 1 -11110 2 -11111 1 -11112 2 -11113 0 -11114 1 -11115 0 -11116 1 -11117 2 -11118 1 -11119 1 -11120 1 -11121 1 -11122 2 -11123 0 -11124 0 -11125 2 -11126 2 -11127 0 -11128 1 -11129 0 -11130 1 -11131 0 -11132 0 -11133 1 -11134 1 -11135 2 -11136 0 -11137 0 -11138 1 -11139 1 -11140 1 -11141 0 -11142 1 -11143 0 -11144 0 -11145 1 -11146 2 -11147 2 -11148 1 -11149 2 -11150 0 -11151 1 -11152 0 -11153 0 -11154 1 -11155 1 -11156 2 -11157 2 -11158 2 -11159 1 -11160 1 -11161 1 -11162 1 -11163 2 -11164 2 -11165 2 -11166 2 -11167 1 -11168 2 -11169 0 -11170 2 -11171 2 -11172 2 -11173 0 -11174 1 -11175 1 -11176 2 -11177 2 -11178 1 -11179 0 -11180 1 -11181 2 -11182 1 -11183 1 -11184 1 -11185 0 -11186 1 -11187 1 -11188 2 -11189 1 -11190 1 -11191 2 -11192 0 -11193 0 -11194 2 -11195 1 -11196 0 -11197 1 -11198 1 -11199 0 -11200 1 -11201 2 -11202 2 -11203 2 -11204 2 -11205 1 -11206 2 -11207 2 -11208 0 -11209 0 -11210 2 -11211 2 -11212 2 -11213 1 -11214 1 -11215 1 -11216 1 -11217 0 -11218 1 -11219 2 -11220 1 -11221 1 -11222 1 -11223 1 -11224 2 -11225 0 -11226 2 -11227 1 -11228 2 -11229 0 -11230 2 -11231 2 -11232 1 -11233 0 -11234 2 -11235 1 -11236 2 -11237 1 -11238 0 -11239 2 -11240 2 -11241 1 -11242 2 -11243 2 -11244 1 -11245 1 -11246 2 -11247 0 -11248 1 -11249 1 -11250 2 -11251 2 
-11252 2 -11253 0 -11254 1 -11255 2 -11256 1 -11257 2 -11258 1 -11259 2 -11260 2 -11261 1 -11262 2 -11263 0 -11264 0 -11265 1 -11266 1 -11267 2 -11268 1 -11269 2 -11270 0 -11271 1 -11272 1 -11273 1 -11274 1 -11275 2 -11276 2 -11277 0 -11278 2 -11279 2 -11280 2 -11281 1 -11282 2 -11283 0 -11284 2 -11285 0 -11286 0 -11287 2 -11288 2 -11289 1 -11290 0 -11291 0 -11292 1 -11293 1 -11294 1 -11295 0 -11296 1 -11297 1 -11298 1 -11299 2 -11300 1 -11301 1 -11302 2 -11303 2 -11304 0 -11305 2 -11306 2 -11307 2 -11308 2 -11309 1 -11310 2 -11311 2 -11312 0 -11313 1 -11314 0 -11315 1 -11316 1 -11317 1 -11318 1 -11319 1 -11320 1 -11321 2 -11322 1 -11323 0 -11324 1 -11325 1 -11326 1 -11327 1 -11328 0 -11329 0 -11330 2 -11331 1 -11332 1 -11333 1 -11334 2 -11335 2 -11336 1 -11337 1 -11338 1 -11339 1 -11340 2 -11341 0 -11342 2 -11343 1 -11344 2 -11345 2 -11346 1 -11347 0 -11348 1 -11349 1 -11350 0 -11351 0 -11352 1 -11353 2 -11354 2 -11355 2 -11356 1 -11357 0 -11358 2 -11359 2 -11360 1 -11361 1 -11362 2 -11363 0 -11364 2 -11365 1 -11366 2 -11367 1 -11368 2 -11369 2 -11370 1 -11371 1 -11372 1 -11373 1 -11374 1 -11375 1 -11376 2 -11377 1 -11378 2 -11379 0 -11380 1 -11381 2 -11382 0 -11383 1 -11384 1 -11385 0 -11386 2 -11387 1 -11388 0 -11389 1 -11390 1 -11391 2 -11392 2 -11393 1 -11394 0 -11395 1 -11396 2 -11397 1 -11398 1 -11399 1 -11400 1 -11401 0 -11402 2 -11403 1 -11404 2 -11405 1 -11406 0 -11407 1 -11408 1 -11409 1 -11410 0 -11411 2 -11412 1 -11413 1 -11414 1 -11415 0 -11416 2 -11417 1 -11418 1 -11419 1 -11420 2 -11421 1 -11422 1 -11423 0 -11424 1 -11425 2 -11426 0 -11427 1 -11428 0 -11429 1 -11430 1 -11431 1 -11432 2 -11433 1 -11434 2 -11435 1 -11436 1 -11437 2 -11438 0 -11439 0 -11440 2 -11441 0 -11442 2 -11443 1 -11444 0 -11445 1 -11446 0 -11447 0 -11448 0 -11449 1 -11450 1 -11451 2 -11452 2 -11453 2 -11454 1 -11455 2 -11456 2 -11457 2 -11458 2 -11459 0 -11460 0 -11461 1 -11462 2 -11463 2 -11464 2 -11465 0 -11466 2 -11467 1 -11468 2 -11469 2 -11470 2 -11471 1 -11472 0 -11473 1 
-11474 2 -11475 0 -11476 0 -11477 0 -11478 1 -11479 1 -11480 2 -11481 0 -11482 1 -11483 2 -11484 2 -11485 2 -11486 1 -11487 1 -11488 1 -11489 1 -11490 0 -11491 1 -11492 1 -11493 2 -11494 0 -11495 2 -11496 1 -11497 1 -11498 1 -11499 2 -11500 0 -11501 2 -11502 2 -11503 2 -11504 1 -11505 1 -11506 1 -11507 2 -11508 2 -11509 1 -11510 1 -11511 1 -11512 1 -11513 2 -11514 1 -11515 0 -11516 1 -11517 2 -11518 2 -11519 1 -11520 0 -11521 1 -11522 2 -11523 1 -11524 1 -11525 2 -11526 2 -11527 1 -11528 1 -11529 0 -11530 1 -11531 2 -11532 1 -11533 2 -11534 1 -11535 2 -11536 2 -11537 1 -11538 1 -11539 1 -11540 1 -11541 1 -11542 1 -11543 1 -11544 2 -11545 2 -11546 2 -11547 1 -11548 2 -11549 0 -11550 1 -11551 1 -11552 1 -11553 1 -11554 2 -11555 1 -11556 2 -11557 1 -11558 2 -11559 2 -11560 0 -11561 1 -11562 0 -11563 1 -11564 2 -11565 2 -11566 1 -11567 1 -11568 1 -11569 1 -11570 1 -11571 1 -11572 0 -11573 1 -11574 0 -11575 0 -11576 2 -11577 0 -11578 1 -11579 0 -11580 0 -11581 0 -11582 1 -11583 2 -11584 1 -11585 0 -11586 1 -11587 2 -11588 0 -11589 1 -11590 1 -11591 2 -11592 2 -11593 1 -11594 2 -11595 2 -11596 2 -11597 1 -11598 2 -11599 2 -11600 2 -11601 1 -11602 1 -11603 1 -11604 0 -11605 2 -11606 2 -11607 2 -11608 1 -11609 0 -11610 0 -11611 1 -11612 1 -11613 2 -11614 1 -11615 1 -11616 2 -11617 1 -11618 0 -11619 2 -11620 1 -11621 2 -11622 0 -11623 0 -11624 2 -11625 0 -11626 0 -11627 1 -11628 2 -11629 2 -11630 1 -11631 1 -11632 2 -11633 1 -11634 1 -11635 1 -11636 0 -11637 0 -11638 1 -11639 0 -11640 1 -11641 1 -11642 2 -11643 1 -11644 2 -11645 0 -11646 0 -11647 1 -11648 0 -11649 2 -11650 0 -11651 1 -11652 1 -11653 0 -11654 1 -11655 1 -11656 1 -11657 1 -11658 1 -11659 0 -11660 1 -11661 1 -11662 0 -11663 1 -11664 0 -11665 0 -11666 1 -11667 1 -11668 0 -11669 1 -11670 0 -11671 1 -11672 1 -11673 1 -11674 2 -11675 0 -11676 1 -11677 2 -11678 2 -11679 1 -11680 1 -11681 2 -11682 1 -11683 0 -11684 2 -11685 1 -11686 1 -11687 1 -11688 2 -11689 1 -11690 2 -11691 0 -11692 1 -11693 2 -11694 0 -11695 2 
-11696 2 -11697 1 -11698 2 -11699 2 -11700 1 -11701 1 -11702 0 -11703 1 -11704 0 -11705 0 -11706 1 -11707 1 -11708 0 -11709 1 -11710 1 -11711 2 -11712 2 -11713 0 -11714 2 -11715 2 -11716 1 -11717 1 -11718 2 -11719 2 -11720 1 -11721 2 -11722 1 -11723 2 -11724 0 -11725 2 -11726 0 -11727 2 -11728 2 -11729 0 -11730 2 -11731 1 -11732 1 -11733 1 -11734 1 -11735 2 -11736 2 -11737 0 -11738 0 -11739 2 -11740 0 -11741 1 -11742 1 -11743 0 -11744 1 -11745 0 -11746 1 -11747 0 -11748 1 -11749 2 -11750 0 -11751 2 -11752 0 -11753 0 -11754 2 -11755 1 -11756 1 -11757 2 -11758 2 -11759 2 -11760 0 -11761 2 -11762 0 -11763 1 -11764 1 -11765 1 -11766 1 -11767 1 -11768 1 -11769 1 -11770 0 -11771 2 -11772 0 -11773 2 -11774 1 -11775 1 -11776 2 -11777 0 -11778 1 -11779 1 -11780 1 -11781 2 -11782 2 -11783 2 -11784 2 -11785 0 -11786 1 -11787 0 -11788 1 -11789 2 -11790 0 -11791 2 -11792 1 -11793 0 -11794 1 -11795 2 -11796 1 -11797 1 -11798 1 -11799 2 -11800 2 -11801 0 -11802 1 -11803 1 -11804 1 -11805 1 -11806 1 -11807 0 -11808 2 -11809 1 -11810 2 -11811 2 -11812 2 -11813 2 -11814 2 -11815 0 -11816 2 -11817 1 -11818 1 -11819 1 -11820 2 -11821 1 -11822 1 -11823 2 -11824 0 -11825 1 -11826 0 -11827 1 -11828 2 -11829 1 -11830 1 -11831 2 -11832 2 -11833 2 -11834 0 -11835 2 -11836 2 -11837 1 -11838 1 -11839 1 -11840 2 -11841 2 -11842 0 -11843 1 -11844 1 -11845 1 -11846 1 -11847 2 -11848 2 -11849 1 -11850 1 -11851 1 -11852 0 -11853 1 -11854 1 -11855 1 -11856 0 -11857 1 -11858 2 -11859 0 -11860 1 -11861 2 -11862 0 -11863 2 -11864 0 -11865 2 -11866 2 -11867 1 -11868 1 -11869 1 -11870 1 -11871 0 -11872 0 -11873 2 -11874 0 -11875 2 -11876 2 -11877 1 -11878 2 -11879 0 -11880 1 -11881 1 -11882 0 -11883 2 -11884 2 -11885 1 -11886 0 -11887 0 -11888 1 -11889 1 -11890 2 -11891 0 -11892 1 -11893 0 -11894 1 -11895 1 -11896 1 -11897 1 -11898 2 -11899 0 -11900 1 -11901 1 -11902 2 -11903 1 -11904 2 -11905 0 -11906 2 -11907 2 -11908 1 -11909 2 -11910 2 -11911 2 -11912 2 -11913 1 -11914 2 -11915 0 -11916 1 -11917 0 
-11918 2 -11919 2 -11920 1 -11921 0 -11922 0 -11923 2 -11924 2 -11925 1 -11926 2 -11927 2 -11928 1 -11929 1 -11930 1 -11931 1 -11932 0 -11933 1 -11934 1 -11935 2 -11936 2 -11937 0 -11938 1 -11939 2 -11940 2 -11941 2 -11942 1 -11943 1 -11944 2 -11945 1 -11946 1 -11947 0 -11948 1 -11949 2 -11950 2 -11951 2 -11952 1 -11953 1 -11954 0 -11955 2 -11956 1 -11957 2 -11958 2 -11959 2 -11960 1 -11961 2 -11962 1 -11963 1 -11964 1 -11965 2 -11966 1 -11967 2 -11968 0 -11969 2 -11970 2 -11971 1 -11972 2 -11973 0 -11974 0 -11975 0 -11976 2 -11977 1 -11978 1 -11979 2 -11980 0 -11981 0 -11982 0 -11983 1 -11984 1 -11985 2 -11986 2 -11987 1 -11988 2 -11989 2 -11990 0 -11991 0 -11992 0 -11993 1 -11994 1 -11995 1 -11996 2 -11997 2 -11998 0 -11999 1 -12000 2 -12001 2 -12002 0 -12003 1 -12004 2 -12005 1 -12006 1 -12007 1 -12008 1 -12009 1 -12010 2 -12011 2 -12012 1 -12013 2 -12014 2 -12015 1 -12016 2 -12017 2 -12018 1 -12019 1 -12020 1 -12021 2 -12022 2 -12023 1 -12024 0 -12025 2 -12026 2 -12027 1 -12028 2 -12029 2 -12030 0 -12031 1 -12032 0 -12033 1 -12034 2 -12035 0 -12036 0 -12037 1 -12038 0 -12039 0 -12040 2 -12041 1 -12042 1 -12043 1 -12044 0 -12045 1 -12046 1 -12047 1 -12048 2 -12049 2 -12050 1 -12051 0 -12052 2 -12053 2 -12054 2 -12055 1 -12056 1 -12057 2 -12058 2 -12059 0 -12060 2 -12061 1 -12062 2 -12063 1 -12064 2 -12065 0 -12066 2 -12067 1 -12068 0 -12069 2 -12070 2 -12071 0 -12072 1 -12073 0 -12074 0 -12075 0 -12076 0 -12077 0 -12078 0 -12079 1 -12080 1 -12081 1 -12082 0 -12083 1 -12084 1 -12085 1 -12086 2 -12087 2 -12088 1 -12089 1 -12090 2 -12091 0 -12092 2 -12093 2 -12094 1 -12095 1 -12096 0 -12097 1 -12098 2 -12099 2 -12100 2 -12101 1 -12102 0 -12103 1 -12104 0 -12105 1 -12106 2 -12107 0 -12108 0 -12109 2 -12110 2 -12111 2 -12112 2 -12113 0 -12114 1 -12115 1 -12116 1 -12117 2 -12118 0 -12119 2 -12120 2 -12121 0 -12122 2 -12123 1 -12124 1 -12125 1 -12126 2 -12127 0 -12128 0 -12129 1 -12130 1 -12131 2 -12132 2 -12133 0 -12134 1 -12135 2 -12136 0 -12137 1 -12138 2 -12139 0 
-12140 1 -12141 1 -12142 2 -12143 1 -12144 1 -12145 2 -12146 1 -12147 2 -12148 0 -12149 1 -12150 2 -12151 1 -12152 2 -12153 1 -12154 0 -12155 1 -12156 2 -12157 2 -12158 2 -12159 2 -12160 1 -12161 0 -12162 1 -12163 0 -12164 2 -12165 1 -12166 1 -12167 1 -12168 2 -12169 1 -12170 1 -12171 1 -12172 1 -12173 2 -12174 1 -12175 1 -12176 2 -12177 2 -12178 1 -12179 1 -12180 1 -12181 1 -12182 2 -12183 1 -12184 0 -12185 1 -12186 0 -12187 2 -12188 1 -12189 2 -12190 2 -12191 2 -12192 1 -12193 1 -12194 1 -12195 0 -12196 1 -12197 1 -12198 1 -12199 0 -12200 1 -12201 2 -12202 1 -12203 2 -12204 2 -12205 2 -12206 1 -12207 2 -12208 0 -12209 1 -12210 1 -12211 0 -12212 0 -12213 0 -12214 0 -12215 0 -12216 0 -12217 1 -12218 1 -12219 0 -12220 1 -12221 2 -12222 2 -12223 2 -12224 1 -12225 1 -12226 0 -12227 1 -12228 2 -12229 2 -12230 1 -12231 0 -12232 2 -12233 1 -12234 1 -12235 0 -12236 0 -12237 1 -12238 1 -12239 2 -12240 2 -12241 0 -12242 2 -12243 1 -12244 1 -12245 2 -12246 0 -12247 2 -12248 1 -12249 1 -12250 2 -12251 2 -12252 2 -12253 2 -12254 1 -12255 0 -12256 1 -12257 1 -12258 0 -12259 2 -12260 0 -12261 0 -12262 1 -12263 1 -12264 1 -12265 0 -12266 1 -12267 1 -12268 0 -12269 1 -12270 2 -12271 2 -12272 0 -12273 0 -12274 1 -12275 2 -12276 1 -12277 2 -12278 2 -12279 2 -12280 1 -12281 1 -12282 1 -12283 2 -12284 1 -12285 2 -12286 0 -12287 2 -12288 0 -12289 0 -12290 0 -12291 2 -12292 0 -12293 2 -12294 1 -12295 2 -12296 0 -12297 1 -12298 2 -12299 2 -12300 1 -12301 1 -12302 1 -12303 0 -12304 0 -12305 2 -12306 0 -12307 2 -12308 2 -12309 2 -12310 1 -12311 2 -12312 2 -12313 1 -12314 2 -12315 1 -12316 0 -12317 1 -12318 1 -12319 1 -12320 0 -12321 2 -12322 1 -12323 1 -12324 0 -12325 1 -12326 1 -12327 1 -12328 2 -12329 0 -12330 1 -12331 1 -12332 2 -12333 2 -12334 0 -12335 0 -12336 2 -12337 1 -12338 1 -12339 0 -12340 1 -12341 1 -12342 0 -12343 1 -12344 1 -12345 1 -12346 0 -12347 2 -12348 2 -12349 2 -12350 1 -12351 1 -12352 1 -12353 2 -12354 2 -12355 2 -12356 1 -12357 2 -12358 1 -12359 0 -12360 1 -12361 2 
-12362 1 -12363 0 -12364 0 -12365 2 -12366 1 -12367 2 -12368 1 -12369 0 -12370 2 -12371 0 -12372 2 -12373 0 -12374 0 -12375 0 -12376 1 -12377 1 -12378 0 -12379 2 -12380 0 -12381 0 -12382 2 -12383 2 -12384 0 -12385 1 -12386 2 -12387 0 -12388 1 -12389 1 -12390 1 -12391 2 -12392 1 -12393 2 -12394 0 -12395 0 -12396 2 -12397 2 -12398 0 -12399 1 -12400 1 -12401 2 -12402 0 -12403 1 -12404 2 -12405 0 -12406 0 -12407 1 -12408 1 -12409 1 -12410 1 -12411 1 -12412 1 -12413 0 -12414 1 -12415 1 -12416 1 -12417 2 -12418 1 -12419 1 -12420 1 -12421 0 -12422 2 -12423 2 -12424 1 -12425 1 -12426 1 -12427 2 -12428 2 -12429 1 -12430 0 -12431 1 -12432 2 -12433 1 -12434 1 -12435 2 -12436 1 -12437 1 -12438 1 -12439 2 -12440 1 -12441 1 -12442 0 -12443 1 -12444 2 -12445 1 -12446 2 -12447 1 -12448 2 -12449 1 -12450 1 -12451 1 -12452 2 -12453 2 -12454 1 -12455 2 -12456 1 -12457 2 -12458 0 -12459 2 -12460 0 -12461 0 -12462 1 -12463 1 -12464 1 -12465 0 -12466 0 -12467 2 -12468 0 -12469 1 -12470 2 -12471 1 -12472 1 -12473 1 -12474 2 -12475 2 -12476 1 -12477 1 -12478 1 -12479 2 -12480 1 -12481 1 -12482 0 -12483 2 -12484 1 -12485 2 -12486 0 -12487 2 -12488 1 -12489 0 -12490 0 -12491 1 -12492 2 -12493 0 -12494 1 -12495 2 -12496 2 -12497 2 -12498 2 -12499 0 -12500 2 -12501 1 -12502 2 -12503 2 -12504 2 -12505 2 -12506 0 -12507 1 -12508 2 -12509 0 -12510 0 -12511 2 -12512 2 -12513 0 -12514 2 -12515 1 -12516 1 -12517 1 -12518 1 -12519 0 -12520 0 -12521 1 -12522 2 -12523 1 -12524 2 -12525 0 -12526 0 -12527 2 -12528 1 -12529 1 -12530 1 -12531 2 -12532 1 -12533 1 -12534 2 -12535 2 -12536 0 -12537 2 -12538 1 -12539 2 -12540 0 -12541 1 -12542 2 -12543 1 -12544 1 -12545 2 -12546 1 -12547 2 -12548 1 -12549 2 -12550 2 -12551 2 -12552 1 -12553 2 -12554 0 -12555 2 -12556 2 -12557 1 -12558 0 -12559 1 -12560 0 -12561 2 -12562 2 -12563 1 -12564 1 -12565 1 -12566 1 -12567 2 -12568 1 -12569 1 -12570 1 -12571 1 -12572 1 -12573 2 -12574 1 -12575 2 -12576 1 -12577 2 -12578 0 -12579 0 -12580 1 -12581 0 -12582 1 -12583 1 
-12584 2 -12585 2 -12586 0 -12587 0 -12588 0 -12589 1 -12590 2 -12591 1 -12592 1 -12593 2 -12594 2 -12595 2 -12596 2 -12597 2 -12598 0 -12599 1 -12600 0 -12601 1 -12602 0 -12603 1 -12604 1 -12605 2 -12606 1 -12607 1 -12608 2 -12609 2 -12610 1 -12611 2 -12612 0 -12613 2 -12614 2 -12615 2 -12616 0 -12617 0 -12618 1 -12619 1 -12620 0 -12621 0 -12622 1 -12623 2 -12624 1 -12625 1 -12626 1 -12627 0 -12628 2 -12629 2 -12630 1 -12631 1 -12632 0 -12633 1 -12634 0 -12635 1 -12636 1 -12637 1 -12638 1 -12639 2 -12640 2 -12641 0 -12642 0 -12643 1 -12644 2 -12645 0 -12646 0 -12647 1 -12648 1 -12649 0 -12650 0 -12651 1 -12652 2 -12653 1 -12654 2 -12655 2 -12656 2 -12657 2 -12658 0 -12659 0 -12660 0 -12661 0 -12662 0 -12663 0 -12664 1 -12665 1 -12666 2 -12667 1 -12668 1 -12669 1 -12670 1 -12671 2 -12672 0 -12673 2 -12674 2 -12675 1 -12676 1 -12677 2 -12678 1 -12679 2 -12680 2 -12681 0 -12682 2 -12683 1 -12684 1 -12685 1 -12686 1 -12687 2 -12688 1 -12689 1 -12690 1 -12691 0 -12692 2 -12693 0 -12694 2 -12695 0 -12696 1 -12697 0 -12698 1 -12699 1 -12700 2 -12701 2 -12702 2 -12703 0 -12704 0 -12705 1 -12706 0 -12707 0 -12708 0 -12709 2 -12710 1 -12711 1 -12712 2 -12713 0 -12714 1 -12715 2 -12716 1 -12717 1 -12718 1 -12719 1 -12720 2 -12721 0 -12722 2 -12723 2 -12724 2 -12725 2 -12726 1 -12727 2 -12728 2 -12729 0 -12730 2 -12731 1 -12732 2 -12733 2 -12734 1 -12735 0 -12736 1 -12737 1 -12738 2 -12739 2 -12740 1 -12741 1 -12742 2 -12743 2 -12744 2 -12745 1 -12746 1 -12747 1 -12748 1 -12749 0 -12750 1 -12751 0 -12752 0 -12753 0 -12754 0 -12755 1 -12756 0 -12757 1 -12758 1 -12759 1 -12760 0 -12761 1 -12762 2 -12763 1 -12764 0 -12765 0 -12766 2 -12767 1 -12768 1 -12769 0 -12770 1 -12771 1 -12772 1 -12773 0 -12774 0 -12775 1 -12776 1 -12777 1 -12778 1 -12779 1 -12780 1 -12781 0 -12782 1 -12783 2 -12784 1 -12785 1 -12786 0 -12787 0 -12788 1 -12789 2 -12790 0 -12791 0 -12792 0 -12793 0 -12794 2 -12795 0 -12796 1 -12797 1 -12798 1 -12799 0 -12800 1 -12801 1 -12802 0 -12803 0 -12804 0 -12805 0 
-12806 0 -12807 1 -12808 0 -12809 2 -12810 2 -12811 1 -12812 1 -12813 0 -12814 1 -12815 1 -12816 1 -12817 0 -12818 2 -12819 0 -12820 2 -12821 1 -12822 0 -12823 1 -12824 1 -12825 0 -12826 0 -12827 1 -12828 0 -12829 1 -12830 0 -12831 1 -12832 1 -12833 1 -12834 0 -12835 0 -12836 1 -12837 2 -12838 2 -12839 1 -12840 2 -12841 2 -12842 1 -12843 2 -12844 2 -12845 1 -12846 2 -12847 0 -12848 2 -12849 2 -12850 1 -12851 2 -12852 1 -12853 1 -12854 1 -12855 2 -12856 0 -12857 2 -12858 2 -12859 1 -12860 0 -12861 0 -12862 2 -12863 2 -12864 1 -12865 2 -12866 2 -12867 2 -12868 2 -12869 1 -12870 2 -12871 0 -12872 2 -12873 0 -12874 1 -12875 2 -12876 1 -12877 1 -12878 1 -12879 2 -12880 1 -12881 1 -12882 1 -12883 2 -12884 2 -12885 2 -12886 1 -12887 1 -12888 1 -12889 0 -12890 1 -12891 2 -12892 1 -12893 0 -12894 0 -12895 1 -12896 0 -12897 2 -12898 2 -12899 0 -12900 1 -12901 1 -12902 2 -12903 0 -12904 2 -12905 2 -12906 0 -12907 1 -12908 0 -12909 2 -12910 2 -12911 2 -12912 1 -12913 1 -12914 1 -12915 0 -12916 2 -12917 2 -12918 1 -12919 0 -12920 0 -12921 1 -12922 1 -12923 0 -12924 1 -12925 1 -12926 2 -12927 0 -12928 1 -12929 1 -12930 1 -12931 1 -12932 1 -12933 2 -12934 2 -12935 1 -12936 1 -12937 1 -12938 2 -12939 1 -12940 1 -12941 2 -12942 1 -12943 1 -12944 1 -12945 1 -12946 1 -12947 0 -12948 2 -12949 0 -12950 1 -12951 2 -12952 0 -12953 1 -12954 0 -12955 2 -12956 0 -12957 2 -12958 0 -12959 1 -12960 1 -12961 0 -12962 1 -12963 1 -12964 1 -12965 2 -12966 1 -12967 1 -12968 2 -12969 0 -12970 1 -12971 1 -12972 1 -12973 1 -12974 2 -12975 1 -12976 1 -12977 1 -12978 2 -12979 1 -12980 0 -12981 2 -12982 0 -12983 1 -12984 0 -12985 0 -12986 2 -12987 0 -12988 2 -12989 2 -12990 1 -12991 1 -12992 0 -12993 1 -12994 1 -12995 1 -12996 1 -12997 1 -12998 2 -12999 2 -13000 1 -13001 0 -13002 1 -13003 1 -13004 0 -13005 2 -13006 1 -13007 1 -13008 0 -13009 0 -13010 0 -13011 2 -13012 1 -13013 0 -13014 2 -13015 1 -13016 0 -13017 2 -13018 1 -13019 1 -13020 2 -13021 0 -13022 2 -13023 1 -13024 0 -13025 1 -13026 1 -13027 1 
-13028 2 -13029 0 -13030 0 -13031 2 -13032 1 -13033 2 -13034 1 -13035 2 -13036 0 -13037 0 -13038 2 -13039 0 -13040 2 -13041 1 -13042 1 -13043 2 -13044 1 -13045 0 -13046 2 -13047 0 -13048 2 -13049 0 -13050 1 -13051 2 -13052 1 -13053 1 -13054 1 -13055 1 -13056 0 -13057 1 -13058 0 -13059 1 -13060 1 -13061 2 -13062 2 -13063 1 -13064 0 -13065 2 -13066 1 -13067 1 -13068 2 -13069 1 -13070 0 -13071 1 -13072 2 -13073 1 -13074 1 -13075 0 -13076 2 -13077 1 -13078 1 -13079 2 -13080 2 -13081 2 -13082 2 -13083 2 -13084 1 -13085 0 -13086 2 -13087 1 -13088 0 -13089 2 -13090 2 -13091 2 -13092 2 -13093 2 -13094 0 -13095 1 -13096 1 -13097 1 -13098 1 -13099 2 -13100 2 -13101 0 -13102 0 -13103 1 -13104 1 -13105 1 -13106 2 -13107 1 -13108 0 -13109 0 -13110 0 -13111 2 -13112 1 -13113 1 -13114 0 -13115 1 -13116 1 -13117 0 -13118 1 -13119 1 -13120 2 -13121 2 -13122 2 -13123 1 -13124 2 -13125 1 -13126 1 -13127 2 -13128 2 -13129 1 -13130 1 -13131 2 -13132 2 -13133 2 -13134 0 -13135 1 -13136 0 -13137 1 -13138 2 -13139 1 -13140 2 -13141 1 -13142 2 -13143 0 -13144 0 -13145 0 -13146 1 -13147 1 -13148 2 -13149 2 -13150 0 -13151 0 -13152 1 -13153 1 -13154 1 -13155 2 -13156 1 -13157 0 -13158 2 -13159 0 -13160 0 -13161 1 -13162 2 -13163 1 -13164 0 -13165 0 -13166 1 -13167 2 -13168 1 -13169 1 -13170 2 -13171 1 -13172 1 -13173 2 -13174 1 -13175 1 -13176 0 -13177 2 -13178 1 -13179 2 -13180 1 -13181 0 -13182 0 -13183 2 -13184 1 -13185 2 -13186 1 -13187 1 -13188 2 -13189 1 -13190 2 -13191 2 -13192 1 -13193 0 -13194 1 -13195 1 -13196 0 -13197 0 -13198 1 -13199 2 -13200 0 -13201 1 -13202 2 -13203 0 -13204 0 -13205 1 -13206 0 -13207 1 -13208 1 -13209 2 -13210 1 -13211 2 -13212 1 -13213 1 -13214 1 -13215 1 -13216 1 -13217 0 -13218 1 -13219 2 -13220 1 -13221 0 -13222 0 -13223 1 -13224 0 -13225 1 -13226 2 -13227 1 -13228 2 -13229 0 -13230 0 -13231 0 -13232 0 -13233 1 -13234 0 -13235 1 -13236 2 -13237 1 -13238 0 -13239 2 -13240 2 -13241 1 -13242 1 -13243 2 -13244 2 -13245 0 -13246 0 -13247 2 -13248 1 -13249 1 
-13250 0 -13251 1 -13252 1 -13253 2 -13254 2 -13255 2 -13256 1 -13257 1 -13258 2 -13259 2 -13260 1 -13261 2 -13262 1 -13263 1 -13264 2 -13265 2 -13266 0 -13267 1 -13268 1 -13269 2 -13270 2 -13271 2 -13272 1 -13273 2 -13274 1 -13275 2 -13276 1 -13277 2 -13278 1 -13279 1 -13280 1 -13281 1 -13282 1 -13283 0 -13284 2 -13285 1 -13286 0 -13287 1 -13288 2 -13289 2 -13290 0 -13291 2 -13292 1 -13293 2 -13294 0 -13295 1 -13296 0 -13297 0 -13298 1 -13299 2 -13300 1 -13301 2 -13302 2 -13303 1 -13304 2 -13305 1 -13306 1 -13307 1 -13308 1 -13309 1 -13310 0 -13311 1 -13312 2 -13313 1 -13314 0 -13315 2 -13316 2 -13317 1 -13318 1 -13319 1 -13320 0 -13321 0 -13322 2 -13323 2 -13324 2 -13325 0 -13326 2 -13327 0 -13328 1 -13329 0 -13330 1 -13331 0 -13332 2 -13333 2 -13334 2 -13335 2 -13336 1 -13337 2 -13338 1 -13339 2 -13340 1 -13341 0 -13342 1 -13343 2 -13344 0 -13345 0 -13346 1 -13347 2 -13348 2 -13349 2 -13350 1 -13351 0 -13352 1 -13353 1 -13354 2 -13355 0 -13356 1 -13357 1 -13358 1 -13359 1 -13360 2 -13361 2 -13362 2 -13363 0 -13364 2 -13365 1 -13366 1 -13367 0 -13368 1 -13369 2 -13370 2 -13371 0 -13372 2 -13373 0 -13374 0 -13375 0 -13376 2 -13377 1 -13378 0 -13379 1 -13380 1 -13381 2 -13382 2 -13383 0 -13384 0 -13385 2 -13386 0 -13387 0 -13388 1 -13389 1 -13390 1 -13391 0 -13392 2 -13393 1 -13394 0 -13395 1 -13396 1 -13397 0 -13398 1 -13399 1 -13400 1 -13401 2 -13402 1 -13403 2 -13404 2 -13405 0 -13406 2 -13407 1 -13408 2 -13409 1 -13410 1 -13411 1 -13412 0 -13413 0 -13414 0 -13415 2 -13416 2 -13417 1 -13418 2 -13419 0 -13420 2 -13421 0 -13422 1 -13423 0 -13424 0 -13425 1 -13426 2 -13427 0 -13428 2 -13429 1 -13430 2 -13431 0 -13432 0 -13433 1 -13434 1 -13435 2 -13436 1 -13437 2 -13438 0 -13439 1 -13440 0 -13441 0 -13442 1 -13443 2 -13444 0 -13445 1 -13446 0 -13447 1 -13448 2 -13449 2 -13450 2 -13451 2 -13452 1 -13453 0 -13454 0 -13455 1 -13456 1 -13457 2 -13458 1 -13459 1 -13460 0 -13461 0 -13462 2 -13463 1 -13464 0 -13465 0 -13466 1 -13467 2 -13468 1 -13469 2 -13470 1 -13471 1 
-13472 2 -13473 1 -13474 1 -13475 0 -13476 1 -13477 2 -13478 0 -13479 1 -13480 0 -13481 1 -13482 2 -13483 1 -13484 1 -13485 2 -13486 2 -13487 0 -13488 2 -13489 0 -13490 2 -13491 0 -13492 2 -13493 0 -13494 1 -13495 0 -13496 2 -13497 2 -13498 1 -13499 0 -13500 0 -13501 0 -13502 2 -13503 1 -13504 1 -13505 0 -13506 2 -13507 2 -13508 1 -13509 1 -13510 1 -13511 2 -13512 2 -13513 1 -13514 1 -13515 0 -13516 0 -13517 1 -13518 1 -13519 2 -13520 2 -13521 1 -13522 2 -13523 2 -13524 2 -13525 2 -13526 2 -13527 1 -13528 0 -13529 2 -13530 1 -13531 1 -13532 1 -13533 2 -13534 0 -13535 2 -13536 0 -13537 0 -13538 1 -13539 1 -13540 2 -13541 2 -13542 2 -13543 0 -13544 1 -13545 1 -13546 0 -13547 1 -13548 1 -13549 0 -13550 2 -13551 1 -13552 1 -13553 1 -13554 1 -13555 1 -13556 1 -13557 2 -13558 0 -13559 1 -13560 0 -13561 0 -13562 2 -13563 2 -13564 0 -13565 2 -13566 1 -13567 2 -13568 1 -13569 1 -13570 1 -13571 2 -13572 1 -13573 2 -13574 0 -13575 2 -13576 2 -13577 2 -13578 2 -13579 0 -13580 1 -13581 2 -13582 2 -13583 1 -13584 0 -13585 1 -13586 0 -13587 1 -13588 1 -13589 1 -13590 1 -13591 0 -13592 2 -13593 1 -13594 2 -13595 1 -13596 2 -13597 2 -13598 1 -13599 2 -13600 2 -13601 0 -13602 1 -13603 2 -13604 2 -13605 2 -13606 2 -13607 0 -13608 0 -13609 0 -13610 1 -13611 1 -13612 1 -13613 1 -13614 1 -13615 1 -13616 1 -13617 1 -13618 2 -13619 1 -13620 2 -13621 1 -13622 1 -13623 2 -13624 1 -13625 2 -13626 2 -13627 2 -13628 0 -13629 2 -13630 2 -13631 1 -13632 2 -13633 2 -13634 2 -13635 1 -13636 1 -13637 1 -13638 1 -13639 2 -13640 2 -13641 2 -13642 1 -13643 1 -13644 2 -13645 1 -13646 1 -13647 1 -13648 1 -13649 0 -13650 2 -13651 1 -13652 1 -13653 1 -13654 0 -13655 2 -13656 2 -13657 2 -13658 2 -13659 2 -13660 1 -13661 1 -13662 1 -13663 0 -13664 2 -13665 1 -13666 1 -13667 1 -13668 1 -13669 1 -13670 0 -13671 1 -13672 1 -13673 2 -13674 0 -13675 2 -13676 2 -13677 1 -13678 0 -13679 1 -13680 2 -13681 1 -13682 2 -13683 2 -13684 1 -13685 2 -13686 2 -13687 2 -13688 1 -13689 0 -13690 2 -13691 1 -13692 1 -13693 2 
-13694 2 -13695 1 -13696 1 -13697 1 -13698 2 -13699 1 -13700 0 -13701 2 -13702 1 -13703 0 -13704 2 -13705 2 -13706 1 -13707 2 -13708 1 -13709 2 -13710 2 -13711 0 -13712 1 -13713 1 -13714 0 -13715 1 -13716 0 -13717 2 -13718 1 -13719 2 -13720 0 -13721 1 -13722 1 -13723 1 -13724 0 -13725 2 -13726 2 -13727 2 -13728 1 -13729 2 -13730 2 -13731 2 -13732 2 -13733 1 -13734 0 -13735 2 -13736 1 -13737 2 -13738 1 -13739 1 -13740 1 -13741 0 -13742 0 -13743 0 -13744 1 -13745 1 -13746 1 -13747 1 -13748 2 -13749 2 -13750 2 -13751 2 -13752 0 -13753 1 -13754 2 -13755 1 -13756 1 -13757 0 -13758 2 -13759 1 -13760 1 -13761 0 -13762 1 -13763 1 -13764 1 -13765 0 -13766 1 -13767 1 -13768 2 -13769 0 -13770 2 -13771 1 -13772 1 -13773 2 -13774 2 -13775 1 -13776 1 -13777 2 -13778 1 -13779 2 -13780 1 -13781 2 -13782 1 -13783 1 -13784 2 -13785 1 -13786 1 -13787 1 -13788 1 -13789 1 -13790 0 -13791 1 -13792 2 -13793 1 -13794 1 -13795 1 -13796 1 -13797 2 -13798 2 -13799 2 -13800 2 -13801 2 -13802 2 -13803 1 -13804 2 -13805 2 -13806 1 -13807 1 -13808 2 -13809 0 -13810 1 -13811 2 -13812 1 -13813 2 -13814 1 -13815 0 -13816 2 -13817 0 -13818 2 -13819 1 -13820 0 -13821 2 -13822 0 -13823 1 -13824 0 -13825 2 -13826 2 -13827 2 -13828 2 -13829 2 -13830 0 -13831 0 -13832 2 -13833 1 -13834 0 -13835 0 -13836 1 -13837 0 -13838 0 -13839 0 -13840 0 -13841 1 -13842 1 -13843 0 -13844 2 -13845 1 -13846 2 -13847 1 -13848 1 -13849 0 -13850 1 -13851 0 -13852 1 -13853 1 -13854 2 -13855 0 -13856 2 -13857 1 -13858 1 -13859 1 -13860 0 -13861 1 -13862 2 -13863 0 -13864 0 -13865 1 -13866 1 -13867 1 -13868 1 -13869 2 -13870 0 -13871 1 -13872 2 -13873 2 -13874 2 -13875 0 -13876 1 -13877 2 -13878 2 -13879 2 -13880 0 -13881 0 -13882 1 -13883 0 -13884 1 -13885 0 -13886 1 -13887 2 -13888 0 -13889 1 -13890 2 -13891 1 -13892 2 -13893 1 -13894 2 -13895 0 -13896 0 -13897 1 -13898 2 -13899 1 -13900 1 -13901 1 -13902 1 -13903 0 -13904 2 -13905 0 -13906 0 -13907 1 -13908 1 -13909 0 -13910 1 -13911 1 -13912 2 -13913 1 -13914 0 -13915 2 
-13916 2 -13917 2 -13918 2 -13919 1 -13920 0 -13921 1 -13922 0 -13923 1 -13924 0 -13925 1 -13926 0 -13927 0 -13928 2 -13929 0 -13930 2 -13931 2 -13932 0 -13933 1 -13934 1 -13935 1 -13936 1 -13937 1 -13938 2 -13939 0 -13940 2 -13941 2 -13942 1 -13943 2 -13944 1 -13945 1 -13946 0 -13947 1 -13948 0 -13949 1 -13950 1 -13951 1 -13952 1 -13953 0 -13954 0 -13955 0 -13956 2 -13957 0 -13958 1 -13959 1 -13960 1 -13961 2 -13962 1 -13963 2 -13964 2 -13965 2 -13966 0 -13967 1 -13968 1 -13969 2 -13970 1 -13971 2 -13972 2 -13973 0 -13974 1 -13975 2 -13976 0 -13977 0 -13978 2 -13979 1 -13980 0 -13981 0 -13982 2 -13983 2 -13984 1 -13985 1 -13986 1 -13987 1 -13988 0 -13989 1 -13990 0 -13991 2 -13992 1 -13993 0 -13994 1 -13995 1 -13996 1 -13997 1 -13998 2 -13999 1 -14000 0 -14001 1 -14002 2 -14003 1 -14004 1 -14005 1 -14006 1 -14007 2 -14008 2 -14009 1 -14010 1 -14011 1 -14012 0 -14013 2 -14014 1 -14015 2 -14016 0 -14017 0 -14018 0 -14019 0 -14020 1 -14021 2 -14022 0 -14023 2 -14024 2 -14025 2 -14026 2 -14027 0 -14028 1 -14029 1 -14030 0 -14031 2 -14032 2 -14033 1 -14034 2 -14035 1 -14036 1 -14037 0 -14038 2 -14039 1 -14040 1 -14041 2 -14042 0 -14043 0 -14044 0 -14045 0 -14046 1 -14047 1 -14048 1 -14049 2 -14050 1 -14051 2 -14052 2 -14053 0 -14054 2 -14055 1 -14056 1 -14057 2 -14058 0 -14059 1 -14060 0 -14061 1 -14062 1 -14063 1 -14064 1 -14065 0 -14066 0 -14067 1 -14068 2 -14069 1 -14070 1 -14071 2 -14072 1 -14073 2 -14074 1 -14075 0 -14076 2 -14077 1 -14078 1 -14079 2 -14080 2 -14081 2 -14082 1 -14083 2 -14084 2 -14085 0 -14086 2 -14087 2 -14088 2 -14089 0 -14090 1 -14091 0 -14092 2 -14093 1 -14094 1 -14095 0 -14096 1 -14097 2 -14098 1 -14099 1 -14100 2 -14101 0 -14102 1 -14103 0 -14104 1 -14105 0 -14106 0 -14107 2 -14108 2 -14109 0 -14110 1 -14111 0 -14112 2 -14113 1 -14114 2 -14115 1 -14116 0 -14117 2 -14118 0 -14119 1 -14120 2 -14121 2 -14122 1 -14123 2 -14124 2 -14125 2 -14126 1 -14127 2 -14128 2 -14129 2 -14130 0 -14131 1 -14132 2 -14133 1 -14134 0 -14135 0 -14136 1 -14137 1 
-14138 1 -14139 1 -14140 0 -14141 0 -14142 1 -14143 1 -14144 1 -14145 2 -14146 1 -14147 2 -14148 1 -14149 1 -14150 1 -14151 1 -14152 0 -14153 2 -14154 2 -14155 2 -14156 1 -14157 1 -14158 1 -14159 2 -14160 2 -14161 2 -14162 2 -14163 2 -14164 1 -14165 2 -14166 1 -14167 1 -14168 1 -14169 1 -14170 1 -14171 2 -14172 1 -14173 2 -14174 0 -14175 2 -14176 2 -14177 1 -14178 1 -14179 1 -14180 0 -14181 2 -14182 0 -14183 0 -14184 1 -14185 1 -14186 2 -14187 0 -14188 2 -14189 1 -14190 1 -14191 0 -14192 1 -14193 0 -14194 2 -14195 0 -14196 1 -14197 2 -14198 0 -14199 0 -14200 1 -14201 2 -14202 2 -14203 1 -14204 2 -14205 2 -14206 1 -14207 1 -14208 2 -14209 0 -14210 1 -14211 2 -14212 1 -14213 1 -14214 0 -14215 0 -14216 1 -14217 2 -14218 1 -14219 0 -14220 2 -14221 1 -14222 1 -14223 1 -14224 1 -14225 2 -14226 1 -14227 1 -14228 1 -14229 2 -14230 1 -14231 0 -14232 1 -14233 1 -14234 1 -14235 2 -14236 1 -14237 0 -14238 1 -14239 2 -14240 1 -14241 1 -14242 1 -14243 1 -14244 1 -14245 1 -14246 0 -14247 2 -14248 1 -14249 1 -14250 2 -14251 2 -14252 1 -14253 1 -14254 1 -14255 0 -14256 2 -14257 0 -14258 2 -14259 2 -14260 0 -14261 1 -14262 1 -14263 1 -14264 1 -14265 1 -14266 0 -14267 0 -14268 1 -14269 1 -14270 0 -14271 0 -14272 1 -14273 1 -14274 1 -14275 1 -14276 1 -14277 0 -14278 2 -14279 2 -14280 2 -14281 1 -14282 2 -14283 2 -14284 2 -14285 2 -14286 2 -14287 2 -14288 2 -14289 2 -14290 2 -14291 0 -14292 1 -14293 2 -14294 2 -14295 2 -14296 1 -14297 2 -14298 1 -14299 0 -14300 1 -14301 2 -14302 1 -14303 1 -14304 2 -14305 2 -14306 2 -14307 2 -14308 1 -14309 1 -14310 2 -14311 1 -14312 0 -14313 1 -14314 1 -14315 1 -14316 2 -14317 0 -14318 1 -14319 0 -14320 2 -14321 1 -14322 1 -14323 1 -14324 1 -14325 0 -14326 1 -14327 1 -14328 2 -14329 1 -14330 1 -14331 1 -14332 1 -14333 0 -14334 1 -14335 1 -14336 0 -14337 2 -14338 1 -14339 0 -14340 1 -14341 2 -14342 2 -14343 2 -14344 0 -14345 0 -14346 1 -14347 2 -14348 1 -14349 1 -14350 0 -14351 0 -14352 2 -14353 2 -14354 0 -14355 2 -14356 0 -14357 2 -14358 2 -14359 0 
-14360 1 -14361 1 -14362 1 -14363 2 -14364 1 -14365 0 -14366 1 -14367 2 -14368 1 -14369 2 -14370 0 -14371 2 -14372 1 -14373 2 -14374 1 -14375 1 -14376 1 -14377 2 -14378 2 -14379 1 -14380 0 -14381 0 -14382 1 -14383 2 -14384 0 -14385 2 -14386 2 -14387 2 -14388 0 -14389 1 -14390 0 -14391 0 -14392 2 -14393 2 -14394 2 -14395 1 -14396 2 -14397 2 -14398 0 -14399 2 -14400 1 -14401 1 -14402 1 -14403 1 -14404 1 -14405 2 -14406 1 -14407 1 -14408 0 -14409 0 -14410 1 -14411 1 -14412 1 -14413 0 -14414 0 -14415 0 -14416 2 -14417 1 -14418 0 -14419 1 -14420 1 -14421 2 -14422 1 -14423 0 -14424 1 -14425 2 -14426 2 -14427 1 -14428 2 -14429 0 -14430 1 -14431 2 -14432 1 -14433 1 -14434 1 -14435 1 -14436 1 -14437 0 -14438 1 -14439 1 -14440 0 -14441 1 -14442 1 -14443 1 -14444 0 -14445 1 -14446 1 -14447 1 -14448 0 -14449 0 -14450 2 -14451 0 -14452 0 -14453 1 -14454 2 -14455 1 -14456 0 -14457 1 -14458 1 -14459 1 -14460 1 -14461 1 -14462 1 -14463 1 -14464 1 -14465 0 -14466 2 -14467 0 -14468 1 -14469 0 -14470 0 -14471 2 -14472 0 -14473 2 -14474 1 -14475 0 -14476 2 -14477 0 -14478 2 -14479 0 -14480 0 -14481 1 -14482 1 -14483 0 -14484 2 -14485 0 -14486 0 -14487 1 -14488 1 -14489 1 -14490 1 -14491 2 -14492 0 -14493 0 -14494 1 -14495 1 -14496 1 -14497 1 -14498 0 -14499 0 -14500 1 -14501 0 -14502 1 -14503 2 -14504 0 -14505 2 -14506 2 -14507 0 -14508 1 -14509 1 -14510 2 -14511 1 -14512 1 -14513 1 -14514 2 -14515 1 -14516 1 -14517 2 -14518 2 -14519 1 -14520 0 -14521 1 -14522 2 -14523 2 -14524 1 -14525 2 -14526 1 -14527 1 -14528 2 -14529 0 -14530 0 -14531 1 -14532 1 -14533 1 -14534 2 -14535 1 -14536 1 -14537 2 -14538 2 -14539 1 -14540 1 -14541 0 -14542 1 -14543 1 -14544 0 -14545 0 -14546 1 -14547 2 -14548 0 -14549 1 -14550 1 -14551 1 -14552 0 -14553 1 -14554 1 -14555 2 -14556 2 -14557 2 -14558 1 -14559 1 -14560 2 -14561 0 -14562 2 -14563 1 -14564 1 -14565 1 -14566 1 -14567 2 -14568 2 -14569 2 -14570 1 -14571 0 -14572 1 -14573 1 -14574 0 -14575 2 -14576 1 -14577 1 -14578 1 -14579 1 -14580 0 -14581 0 
-14582 0 -14583 2 -14584 2 -14585 0 -14586 0 -14587 1 -14588 1 -14589 2 -14590 1 -14591 0 -14592 0 -14593 2 -14594 1 -14595 2 -14596 2 -14597 2 -14598 1 -14599 1 -14600 1 -14601 2 -14602 1 -14603 1 -14604 0 -14605 1 -14606 2 -14607 1 -14608 0 -14609 1 -14610 2 -14611 2 -14612 1 -14613 1 -14614 0 -14615 2 -14616 2 -14617 1 -14618 1 -14619 0 -14620 0 -14621 2 -14622 0 -14623 1 -14624 1 -14625 1 -14626 2 -14627 1 -14628 2 -14629 1 -14630 0 -14631 0 -14632 0 -14633 0 -14634 2 -14635 1 -14636 1 -14637 2 -14638 1 -14639 0 -14640 2 -14641 2 -14642 2 -14643 2 -14644 1 -14645 1 -14646 1 -14647 1 -14648 2 -14649 2 -14650 1 -14651 2 -14652 1 -14653 1 -14654 1 -14655 0 -14656 1 -14657 0 -14658 2 -14659 1 -14660 0 -14661 0 -14662 0 -14663 0 -14664 2 -14665 1 -14666 0 -14667 2 -14668 0 -14669 0 -14670 1 -14671 2 -14672 1 -14673 0 -14674 0 -14675 0 -14676 0 -14677 2 -14678 1 -14679 0 -14680 2 -14681 1 -14682 2 -14683 2 -14684 1 -14685 1 -14686 1 -14687 1 -14688 0 -14689 2 -14690 0 -14691 1 -14692 2 -14693 1 -14694 1 -14695 1 -14696 2 -14697 1 -14698 1 -14699 1 -14700 1 -14701 0 -14702 1 -14703 0 -14704 2 -14705 0 -14706 0 -14707 0 -14708 2 -14709 1 -14710 1 -14711 1 -14712 2 -14713 2 -14714 1 -14715 2 -14716 0 -14717 1 -14718 0 -14719 2 -14720 0 -14721 2 -14722 2 -14723 0 -14724 2 -14725 2 -14726 2 -14727 2 -14728 2 -14729 1 -14730 1 -14731 0 -14732 0 -14733 1 -14734 1 -14735 2 -14736 2 -14737 2 -14738 2 -14739 2 -14740 0 -14741 2 -14742 2 -14743 0 -14744 1 -14745 0 -14746 1 -14747 1 -14748 0 -14749 2 -14750 1 -14751 1 -14752 2 -14753 2 -14754 1 -14755 0 -14756 2 -14757 2 -14758 1 -14759 0 -14760 1 -14761 2 -14762 2 -14763 0 -14764 1 -14765 2 -14766 2 -14767 1 -14768 1 -14769 0 -14770 1 -14771 2 -14772 2 -14773 2 -14774 1 -14775 0 -14776 1 -14777 2 -14778 1 -14779 1 -14780 0 -14781 0 -14782 2 -14783 2 -14784 1 -14785 1 -14786 1 -14787 0 -14788 0 -14789 2 -14790 2 -14791 1 -14792 0 -14793 1 -14794 1 -14795 0 -14796 2 -14797 1 -14798 2 -14799 0 -14800 1 -14801 1 -14802 2 -14803 0 
-14804 1 -14805 2 -14806 1 -14807 0 -14808 1 -14809 2 -14810 1 -14811 0 -14812 2 -14813 2 -14814 1 -14815 0 -14816 0 -14817 1 -14818 1 -14819 0 -14820 2 -14821 2 -14822 1 -14823 0 -14824 2 -14825 2 -14826 2 -14827 0 -14828 2 -14829 1 -14830 1 -14831 2 -14832 2 -14833 0 -14834 2 -14835 1 -14836 1 -14837 2 -14838 2 -14839 2 -14840 1 -14841 1 -14842 1 -14843 1 -14844 2 -14845 2 -14846 2 -14847 1 -14848 2 -14849 2 -14850 1 -14851 1 -14852 1 -14853 0 -14854 0 -14855 0 -14856 0 -14857 0 -14858 0 -14859 1 -14860 1 -14861 1 -14862 2 -14863 0 -14864 2 -14865 1 -14866 1 -14867 2 -14868 2 -14869 1 -14870 1 -14871 2 -14872 2 -14873 2 -14874 2 -14875 1 -14876 2 -14877 1 -14878 1 -14879 1 -14880 1 -14881 1 -14882 2 -14883 1 -14884 0 -14885 0 -14886 1 -14887 1 -14888 0 -14889 2 -14890 0 -14891 1 -14892 2 -14893 2 -14894 0 -14895 1 -14896 1 -14897 0 -14898 0 -14899 0 -14900 1 -14901 1 -14902 2 -14903 1 -14904 0 -14905 0 -14906 0 -14907 2 -14908 0 -14909 2 -14910 1 -14911 0 -14912 1 -14913 1 -14914 2 -14915 2 -14916 1 -14917 0 -14918 1 -14919 2 -14920 1 -14921 0 -14922 2 -14923 2 -14924 1 -14925 2 -14926 2 -14927 1 -14928 1 -14929 2 -14930 0 -14931 0 -14932 2 -14933 0 -14934 2 -14935 2 -14936 2 -14937 1 -14938 0 -14939 1 -14940 1 -14941 1 -14942 2 -14943 0 -14944 1 -14945 1 -14946 0 -14947 2 -14948 2 -14949 1 -14950 1 -14951 1 -14952 2 -14953 2 -14954 2 -14955 2 -14956 1 -14957 2 -14958 2 -14959 2 -14960 2 -14961 2 -14962 2 -14963 1 -14964 2 -14965 2 -14966 2 -14967 1 -14968 0 -14969 1 -14970 1 -14971 2 -14972 0 -14973 1 -14974 2 -14975 1 -14976 0 -14977 2 -14978 2 -14979 2 -14980 2 -14981 2 -14982 2 -14983 0 -14984 0 -14985 1 -14986 1 -14987 0 -14988 2 -14989 2 -14990 2 -14991 1 -14992 1 -14993 2 -14994 1 -14995 2 -14996 2 -14997 1 -14998 1 -14999 0 -15000 1 -15001 1 -15002 1 -15003 2 -15004 1 -15005 0 -15006 1 -15007 0 -15008 2 -15009 1 -15010 0 -15011 0 -15012 0 -15013 1 -15014 0 -15015 1 -15016 1 -15017 0 -15018 1 -15019 2 -15020 0 -15021 1 -15022 1 -15023 1 -15024 2 -15025 2 
-15026 2 -15027 1 -15028 1 -15029 2 -15030 1 -15031 0 -15032 1 -15033 1 -15034 1 -15035 0 -15036 2 -15037 2 -15038 0 -15039 2 -15040 1 -15041 1 -15042 1 -15043 0 -15044 2 -15045 0 -15046 1 -15047 2 -15048 0 -15049 2 -15050 1 -15051 1 -15052 0 -15053 2 -15054 0 -15055 2 -15056 0 -15057 1 -15058 0 -15059 1 -15060 1 -15061 1 -15062 2 -15063 2 -15064 0 -15065 0 -15066 2 -15067 1 -15068 1 -15069 0 -15070 1 -15071 1 -15072 1 -15073 1 -15074 1 -15075 1 -15076 0 -15077 1 -15078 1 -15079 2 -15080 1 -15081 0 -15082 0 -15083 1 -15084 0 -15085 1 -15086 1 -15087 1 -15088 1 -15089 2 -15090 2 -15091 1 -15092 1 -15093 2 -15094 1 -15095 0 -15096 1 -15097 1 -15098 2 -15099 1 -15100 2 -15101 2 -15102 1 -15103 1 -15104 0 -15105 0 -15106 1 -15107 0 -15108 1 -15109 0 -15110 2 -15111 0 -15112 1 -15113 2 -15114 0 -15115 2 -15116 1 -15117 2 -15118 1 -15119 0 -15120 1 -15121 1 -15122 1 -15123 2 -15124 1 -15125 0 -15126 0 -15127 2 -15128 1 -15129 1 -15130 2 -15131 1 -15132 1 -15133 1 -15134 2 -15135 1 -15136 0 -15137 2 -15138 1 -15139 1 -15140 1 -15141 1 -15142 1 -15143 1 -15144 1 -15145 2 -15146 0 -15147 0 -15148 1 -15149 1 -15150 1 -15151 0 -15152 1 -15153 1 -15154 1 -15155 2 -15156 1 -15157 0 -15158 1 -15159 1 -15160 1 -15161 2 -15162 1 -15163 2 -15164 1 -15165 1 -15166 1 -15167 2 -15168 1 -15169 0 -15170 2 -15171 1 -15172 1 -15173 1 -15174 0 -15175 2 -15176 0 -15177 2 -15178 0 -15179 1 -15180 0 -15181 0 -15182 2 -15183 0 -15184 2 -15185 2 -15186 1 -15187 1 -15188 0 -15189 1 -15190 0 -15191 1 -15192 0 -15193 2 -15194 2 -15195 1 -15196 1 -15197 1 -15198 0 -15199 1 -15200 0 -15201 0 -15202 2 -15203 2 -15204 2 -15205 1 -15206 1 -15207 1 -15208 1 -15209 1 -15210 1 -15211 1 -15212 2 -15213 1 -15214 2 -15215 2 -15216 1 -15217 1 -15218 1 -15219 1 -15220 1 -15221 1 -15222 2 -15223 1 -15224 1 -15225 1 -15226 2 -15227 2 -15228 1 -15229 1 -15230 1 -15231 2 -15232 0 -15233 0 -15234 1 -15235 0 -15236 0 -15237 2 -15238 1 -15239 1 -15240 0 -15241 2 -15242 1 -15243 2 -15244 0 -15245 1 -15246 1 -15247 1 
-15248 1 -15249 1 -15250 2 -15251 2 -15252 0 -15253 0 -15254 0 -15255 1 -15256 1 -15257 1 -15258 1 -15259 0 -15260 1 -15261 1 -15262 1 -15263 1 -15264 1 -15265 1 -15266 2 -15267 0 -15268 1 -15269 2 -15270 1 -15271 1 -15272 0 -15273 1 -15274 2 -15275 1 -15276 1 -15277 0 -15278 1 -15279 0 -15280 2 -15281 1 -15282 1 -15283 0 -15284 2 -15285 2 -15286 2 -15287 2 -15288 0 -15289 1 -15290 1 -15291 1 -15292 1 -15293 2 -15294 1 -15295 2 -15296 2 -15297 2 -15298 1 -15299 2 -15300 1 -15301 0 -15302 0 -15303 1 -15304 0 -15305 1 -15306 1 -15307 0 -15308 1 -15309 2 -15310 2 -15311 1 -15312 1 -15313 0 -15314 1 -15315 2 -15316 2 -15317 2 -15318 1 -15319 2 -15320 2 -15321 1 -15322 2 -15323 1 -15324 2 -15325 1 -15326 2 -15327 0 -15328 1 -15329 0 -15330 1 -15331 1 -15332 1 -15333 1 -15334 2 -15335 2 -15336 0 -15337 1 -15338 0 -15339 2 -15340 1 -15341 1 -15342 1 -15343 2 -15344 1 -15345 0 -15346 2 -15347 1 -15348 1 -15349 1 -15350 0 -15351 0 -15352 0 -15353 1 -15354 1 -15355 0 -15356 0 -15357 1 -15358 1 -15359 1 -15360 1 -15361 1 -15362 1 -15363 2 -15364 1 -15365 0 -15366 1 -15367 1 -15368 2 -15369 1 -15370 2 -15371 1 -15372 2 -15373 0 -15374 1 -15375 1 -15376 1 -15377 1 -15378 1 -15379 0 -15380 2 -15381 1 -15382 1 -15383 1 -15384 1 -15385 1 -15386 1 -15387 1 -15388 0 -15389 1 -15390 2 -15391 1 -15392 2 -15393 1 -15394 0 -15395 2 -15396 1 -15397 1 -15398 1 -15399 1 -15400 2 -15401 2 -15402 2 -15403 2 -15404 1 -15405 1 -15406 2 -15407 1 -15408 0 -15409 2 -15410 1 -15411 2 -15412 1 -15413 1 -15414 1 -15415 2 -15416 1 -15417 1 -15418 0 -15419 0 -15420 0 -15421 2 -15422 1 -15423 1 -15424 2 -15425 1 -15426 1 -15427 2 -15428 0 -15429 1 -15430 0 -15431 0 -15432 1 -15433 1 -15434 1 -15435 0 -15436 2 -15437 2 -15438 2 -15439 2 -15440 1 -15441 0 -15442 0 -15443 2 -15444 0 -15445 1 -15446 1 -15447 2 -15448 1 -15449 1 -15450 1 -15451 2 -15452 0 -15453 0 -15454 2 -15455 1 -15456 2 -15457 0 -15458 1 -15459 1 -15460 1 -15461 1 -15462 1 -15463 0 -15464 0 -15465 0 -15466 2 -15467 2 -15468 2 -15469 2 
-15470 2 -15471 2 -15472 2 -15473 2 -15474 1 -15475 1 -15476 1 -15477 1 -15478 0 -15479 1 -15480 2 -15481 2 -15482 1 -15483 1 -15484 2 -15485 1 -15486 0 -15487 1 -15488 0 -15489 1 -15490 1 -15491 2 -15492 1 -15493 1 -15494 1 -15495 1 -15496 0 -15497 2 -15498 1 -15499 1 -15500 0 -15501 1 -15502 1 -15503 1 -15504 2 -15505 1 -15506 2 -15507 1 -15508 1 -15509 1 -15510 2 -15511 2 -15512 1 -15513 1 -15514 2 -15515 1 -15516 1 -15517 1 -15518 0 -15519 1 -15520 1 -15521 0 -15522 1 -15523 1 -15524 1 -15525 1 -15526 0 -15527 1 -15528 2 -15529 0 -15530 0 -15531 0 -15532 0 -15533 0 -15534 0 -15535 0 -15536 2 -15537 0 -15538 2 -15539 2 -15540 2 -15541 1 -15542 2 -15543 2 -15544 0 -15545 1 -15546 2 -15547 0 -15548 2 -15549 2 -15550 2 -15551 0 -15552 2 -15553 2 -15554 1 -15555 1 -15556 0 -15557 0 -15558 1 -15559 1 -15560 1 -15561 1 -15562 2 -15563 1 -15564 0 -15565 0 -15566 1 -15567 2 -15568 1 -15569 1 -15570 1 -15571 1 -15572 0 -15573 0 -15574 0 -15575 1 -15576 1 -15577 1 -15578 1 -15579 2 -15580 2 -15581 1 -15582 0 -15583 2 -15584 0 -15585 1 -15586 1 -15587 1 -15588 0 -15589 1 -15590 1 -15591 2 -15592 2 -15593 0 -15594 0 -15595 2 -15596 2 -15597 1 -15598 1 -15599 2 -15600 1 -15601 1 -15602 2 -15603 0 -15604 1 -15605 1 -15606 2 -15607 2 -15608 1 -15609 1 -15610 0 -15611 2 -15612 1 -15613 2 -15614 0 -15615 1 -15616 2 -15617 2 -15618 1 -15619 2 -15620 2 -15621 2 -15622 1 -15623 1 -15624 1 -15625 1 -15626 1 -15627 0 -15628 1 -15629 1 -15630 0 -15631 1 -15632 2 -15633 0 -15634 2 -15635 2 -15636 0 -15637 2 -15638 1 -15639 2 -15640 1 -15641 0 -15642 1 -15643 2 -15644 1 -15645 1 -15646 2 -15647 1 -15648 1 -15649 0 -15650 1 -15651 1 -15652 1 -15653 1 -15654 0 -15655 1 -15656 0 -15657 1 -15658 1 -15659 0 -15660 1 -15661 1 -15662 0 -15663 0 -15664 0 -15665 2 -15666 0 -15667 0 -15668 2 -15669 1 -15670 1 -15671 2 -15672 1 -15673 1 -15674 2 -15675 1 -15676 1 -15677 0 -15678 2 -15679 1 -15680 1 -15681 1 -15682 1 -15683 1 -15684 1 -15685 0 -15686 1 -15687 2 -15688 0 -15689 1 -15690 2 -15691 1 
-15692 2 -15693 0 -15694 2 -15695 2 -15696 0 -15697 1 -15698 0 -15699 1 -15700 2 -15701 1 -15702 1 -15703 1 -15704 2 -15705 0 -15706 1 -15707 2 -15708 1 -15709 1 -15710 2 -15711 1 -15712 1 -15713 2 -15714 0 -15715 1 -15716 1 -15717 1 -15718 1 -15719 2 -15720 2 -15721 1 -15722 2 -15723 1 -15724 1 -15725 2 -15726 2 -15727 2 -15728 2 -15729 2 -15730 1 -15731 2 -15732 2 -15733 1 -15734 2 -15735 1 -15736 0 -15737 2 -15738 2 -15739 2 -15740 2 -15741 2 -15742 1 -15743 0 -15744 1 -15745 1 -15746 1 -15747 0 -15748 2 -15749 1 -15750 2 -15751 1 -15752 1 -15753 0 -15754 1 -15755 1 -15756 2 -15757 0 -15758 1 -15759 1 -15760 2 -15761 2 -15762 2 -15763 1 -15764 1 -15765 2 -15766 1 -15767 1 -15768 1 -15769 1 -15770 0 -15771 1 -15772 1 -15773 1 -15774 1 -15775 1 -15776 0 -15777 1 -15778 0 -15779 1 -15780 2 -15781 1 -15782 1 -15783 2 -15784 1 -15785 0 -15786 2 -15787 1 -15788 2 -15789 0 -15790 1 -15791 1 -15792 0 -15793 0 -15794 2 -15795 0 -15796 0 -15797 0 -15798 1 -15799 2 -15800 1 -15801 1 -15802 1 -15803 2 -15804 2 -15805 1 -15806 1 -15807 1 -15808 2 -15809 1 -15810 1 -15811 1 -15812 1 -15813 2 -15814 1 -15815 0 -15816 1 -15817 1 -15818 1 -15819 2 -15820 0 -15821 1 -15822 1 -15823 2 -15824 1 -15825 0 -15826 1 -15827 0 -15828 2 -15829 2 -15830 2 -15831 1 -15832 1 -15833 0 -15834 1 -15835 1 -15836 0 -15837 1 -15838 0 -15839 0 -15840 1 -15841 2 -15842 2 -15843 2 -15844 1 -15845 1 -15846 1 -15847 2 -15848 1 -15849 0 -15850 1 -15851 1 -15852 2 -15853 0 -15854 0 -15855 0 -15856 0 -15857 1 -15858 0 -15859 2 -15860 1 -15861 0 -15862 1 -15863 2 -15864 1 -15865 1 -15866 2 -15867 1 -15868 0 -15869 1 -15870 2 -15871 2 -15872 0 -15873 1 -15874 0 -15875 1 -15876 1 -15877 0 -15878 1 -15879 2 -15880 0 -15881 2 -15882 1 -15883 0 -15884 0 -15885 1 -15886 2 -15887 2 -15888 2 -15889 1 -15890 0 -15891 2 -15892 0 -15893 2 -15894 2 -15895 1 -15896 1 -15897 2 -15898 2 -15899 2 -15900 2 -15901 2 -15902 2 -15903 1 -15904 2 -15905 1 -15906 2 -15907 2 -15908 1 -15909 1 -15910 1 -15911 1 -15912 1 -15913 1 
-15914 2 -15915 1 -15916 1 -15917 1 -15918 0 -15919 0 -15920 2 -15921 2 -15922 1 -15923 1 -15924 1 -15925 2 -15926 1 -15927 1 -15928 0 -15929 1 -15930 0 -15931 1 -15932 1 -15933 2 -15934 1 -15935 2 -15936 2 -15937 2 -15938 2 -15939 1 -15940 1 -15941 1 -15942 0 -15943 1 -15944 2 -15945 1 -15946 1 -15947 1 -15948 1 -15949 1 -15950 1 -15951 2 -15952 2 -15953 2 -15954 1 -15955 0 -15956 2 -15957 1 -15958 2 -15959 1 -15960 2 -15961 0 -15962 1 -15963 2 -15964 2 -15965 1 -15966 0 -15967 1 -15968 0 -15969 1 -15970 2 -15971 0 -15972 1 -15973 1 -15974 1 -15975 1 -15976 2 -15977 0 -15978 2 -15979 1 -15980 2 -15981 1 -15982 2 -15983 0 -15984 0 -15985 1 -15986 1 -15987 2 -15988 2 -15989 2 -15990 1 -15991 1 -15992 2 -15993 2 -15994 2 -15995 2 -15996 2 -15997 1 -15998 2 -15999 2 -16000 2 -16001 1 -16002 0 -16003 2 -16004 2 -16005 2 -16006 2 -16007 0 -16008 2 -16009 2 -16010 0 -16011 1 -16012 1 -16013 1 -16014 0 -16015 1 -16016 2 -16017 2 -16018 2 -16019 2 -16020 2 -16021 0 -16022 1 -16023 0 -16024 1 -16025 2 -16026 1 -16027 0 -16028 1 -16029 0 -16030 2 -16031 0 -16032 0 -16033 2 -16034 0 -16035 0 -16036 1 -16037 1 -16038 0 -16039 0 -16040 0 -16041 1 -16042 2 -16043 0 -16044 1 -16045 0 -16046 1 -16047 2 -16048 2 -16049 1 -16050 2 -16051 1 -16052 1 -16053 1 -16054 1 -16055 2 -16056 1 -16057 1 -16058 1 -16059 0 -16060 2 -16061 1 -16062 1 -16063 2 -16064 2 -16065 0 -16066 1 -16067 1 -16068 1 -16069 2 -16070 2 -16071 1 -16072 1 -16073 2 -16074 1 -16075 2 -16076 2 -16077 2 -16078 1 -16079 0 -16080 1 -16081 0 -16082 0 -16083 2 -16084 2 -16085 2 -16086 1 -16087 2 -16088 0 -16089 1 -16090 2 -16091 2 -16092 1 -16093 0 -16094 0 -16095 0 -16096 2 -16097 2 -16098 1 -16099 1 -16100 1 -16101 1 -16102 0 -16103 0 -16104 0 -16105 1 -16106 0 -16107 2 -16108 1 -16109 0 -16110 2 -16111 0 -16112 2 -16113 0 -16114 0 -16115 0 -16116 1 -16117 2 -16118 2 -16119 2 -16120 0 -16121 1 -16122 1 -16123 1 -16124 2 -16125 0 -16126 2 -16127 2 -16128 2 -16129 2 -16130 2 -16131 2 -16132 0 -16133 1 -16134 2 -16135 1 
-16136 0 -16137 0 -16138 2 -16139 1 -16140 2 -16141 0 -16142 2 -16143 1 -16144 2 -16145 2 -16146 1 -16147 1 -16148 2 -16149 1 -16150 0 -16151 1 -16152 0 -16153 0 -16154 1 -16155 2 -16156 1 -16157 1 -16158 1 -16159 0 -16160 1 -16161 2 -16162 0 -16163 2 -16164 1 -16165 0 -16166 1 -16167 1 -16168 1 -16169 1 -16170 0 -16171 1 -16172 1 -16173 0 -16174 2 -16175 2 -16176 1 -16177 0 -16178 0 -16179 1 -16180 1 -16181 1 -16182 0 -16183 2 -16184 2 -16185 1 -16186 1 -16187 2 -16188 1 -16189 2 -16190 0 -16191 1 -16192 0 -16193 0 -16194 0 -16195 2 -16196 0 -16197 2 -16198 0 -16199 1 -16200 1 -16201 1 -16202 2 -16203 0 -16204 2 -16205 1 -16206 0 -16207 0 -16208 0 -16209 1 -16210 1 -16211 1 -16212 1 -16213 1 -16214 1 -16215 1 -16216 1 -16217 1 -16218 1 -16219 2 -16220 1 -16221 1 -16222 0 -16223 0 -16224 0 -16225 0 -16226 1 -16227 0 -16228 1 -16229 0 -16230 2 -16231 0 -16232 2 -16233 0 -16234 2 -16235 2 -16236 2 -16237 2 -16238 1 -16239 1 -16240 2 -16241 1 -16242 0 -16243 2 -16244 1 -16245 0 -16246 1 -16247 2 -16248 1 -16249 1 -16250 0 -16251 1 -16252 1 -16253 0 -16254 1 -16255 2 -16256 2 -16257 1 -16258 2 -16259 2 -16260 1 -16261 2 -16262 0 -16263 1 -16264 1 -16265 1 -16266 2 -16267 2 -16268 2 -16269 1 -16270 0 -16271 0 -16272 0 -16273 2 -16274 0 -16275 2 -16276 0 -16277 2 -16278 1 -16279 0 -16280 0 -16281 0 -16282 2 -16283 0 -16284 2 -16285 0 -16286 2 -16287 1 -16288 2 -16289 1 -16290 1 -16291 1 -16292 1 -16293 2 -16294 1 -16295 1 -16296 1 -16297 2 -16298 2 -16299 0 -16300 2 -16301 2 -16302 0 -16303 2 -16304 0 -16305 1 -16306 2 -16307 1 -16308 1 -16309 2 -16310 1 -16311 1 -16312 1 -16313 1 -16314 1 -16315 2 -16316 1 -16317 1 -16318 2 -16319 1 -16320 2 -16321 2 -16322 1 -16323 0 -16324 0 -16325 1 -16326 1 -16327 0 -16328 1 -16329 1 -16330 0 -16331 1 -16332 0 -16333 2 -16334 0 -16335 1 -16336 0 -16337 2 -16338 0 -16339 1 -16340 1 -16341 2 -16342 0 -16343 1 -16344 1 -16345 1 -16346 1 -16347 2 -16348 1 -16349 0 -16350 0 -16351 1 -16352 2 -16353 1 -16354 2 -16355 1 -16356 0 -16357 2 
-16358 2 -16359 1 -16360 2 -16361 2 -16362 0 -16363 0 -16364 2 -16365 1 -16366 2 -16367 2 -16368 1 -16369 0 -16370 2 -16371 2 -16372 1 -16373 2 -16374 1 -16375 2 -16376 1 -16377 2 -16378 1 -16379 2 -16380 2 -16381 1 -16382 0 -16383 0 -16384 0 -16385 2 -16386 0 -16387 2 -16388 2 -16389 1 -16390 1 -16391 2 -16392 1 -16393 2 -16394 2 -16395 2 -16396 0 -16397 2 -16398 1 -16399 2 -16400 0 -16401 1 -16402 0 -16403 0 -16404 0 -16405 0 -16406 2 -16407 0 -16408 1 -16409 2 -16410 1 -16411 1 -16412 1 -16413 2 -16414 1 -16415 2 -16416 2 -16417 1 -16418 1 -16419 1 -16420 0 -16421 1 -16422 1 -16423 0 -16424 1 -16425 1 -16426 1 -16427 1 -16428 2 -16429 2 -16430 0 -16431 1 -16432 2 -16433 0 -16434 0 -16435 2 -16436 0 -16437 0 -16438 1 -16439 2 -16440 2 -16441 1 -16442 2 -16443 2 -16444 0 -16445 2 -16446 0 -16447 1 -16448 1 -16449 1 -16450 1 -16451 1 -16452 0 -16453 2 -16454 0 -16455 2 -16456 1 -16457 1 -16458 1 -16459 1 -16460 1 -16461 0 -16462 1 -16463 1 -16464 1 -16465 1 -16466 1 -16467 2 -16468 1 -16469 1 -16470 1 -16471 1 -16472 0 -16473 1 -16474 0 -16475 0 -16476 1 -16477 0 -16478 1 -16479 1 -16480 0 -16481 1 -16482 2 -16483 1 -16484 0 -16485 1 -16486 2 -16487 2 -16488 1 -16489 1 -16490 1 -16491 0 -16492 0 -16493 0 -16494 2 -16495 2 -16496 1 -16497 1 -16498 2 -16499 1 -16500 1 -16501 0 -16502 2 -16503 0 -16504 1 -16505 1 -16506 0 -16507 1 -16508 1 -16509 2 -16510 2 -16511 1 -16512 0 -16513 1 -16514 2 -16515 0 -16516 0 -16517 1 -16518 1 -16519 1 -16520 2 -16521 0 -16522 1 -16523 1 -16524 2 -16525 0 -16526 1 -16527 1 -16528 2 -16529 0 -16530 2 -16531 2 -16532 2 -16533 0 -16534 2 -16535 1 -16536 1 -16537 1 -16538 0 -16539 2 -16540 1 -16541 2 -16542 1 -16543 1 -16544 1 -16545 1 -16546 1 -16547 1 -16548 1 -16549 2 -16550 1 -16551 2 -16552 1 -16553 1 -16554 1 -16555 1 -16556 1 -16557 0 -16558 1 -16559 1 -16560 1 -16561 1 -16562 2 -16563 1 -16564 1 -16565 1 -16566 0 -16567 1 -16568 1 -16569 0 -16570 2 -16571 1 -16572 1 -16573 0 -16574 2 -16575 1 -16576 1 -16577 1 -16578 0 -16579 1 
-16580 0 -16581 0 -16582 1 -16583 1 -16584 2 -16585 2 -16586 2 -16587 0 -16588 2 -16589 0 -16590 2 -16591 1 -16592 2 -16593 2 -16594 1 -16595 1 -16596 0 -16597 1 -16598 2 -16599 2 -16600 1 -16601 1 -16602 2 -16603 1 -16604 1 -16605 0 -16606 1 -16607 1 -16608 1 -16609 2 -16610 2 -16611 1 -16612 1 -16613 1 -16614 1 -16615 2 -16616 2 -16617 2 -16618 2 -16619 2 -16620 1 -16621 1 -16622 1 -16623 1 -16624 0 -16625 2 -16626 1 -16627 2 -16628 1 -16629 1 -16630 1 -16631 2 -16632 2 -16633 0 -16634 0 -16635 2 -16636 2 -16637 1 -16638 2 -16639 2 -16640 1 -16641 2 -16642 2 -16643 2 -16644 2 -16645 1 -16646 1 -16647 1 -16648 1 -16649 1 -16650 2 -16651 1 -16652 1 -16653 2 -16654 0 -16655 2 -16656 0 -16657 1 -16658 0 -16659 1 -16660 0 -16661 0 -16662 0 -16663 1 -16664 1 -16665 0 -16666 1 -16667 0 -16668 2 -16669 2 -16670 1 -16671 0 -16672 1 -16673 1 -16674 2 -16675 1 -16676 2 -16677 1 -16678 0 -16679 2 -16680 0 -16681 1 -16682 1 -16683 1 -16684 1 -16685 2 -16686 1 -16687 1 -16688 2 -16689 2 -16690 1 -16691 1 -16692 2 -16693 1 -16694 1 -16695 2 -16696 2 -16697 2 -16698 2 -16699 1 -16700 2 -16701 1 -16702 1 -16703 1 -16704 2 -16705 0 -16706 0 -16707 0 -16708 1 -16709 1 -16710 1 -16711 2 -16712 1 -16713 2 -16714 2 -16715 2 -16716 1 -16717 1 -16718 2 -16719 0 -16720 2 -16721 1 -16722 0 -16723 1 -16724 0 -16725 1 -16726 1 -16727 0 -16728 1 -16729 0 -16730 1 -16731 1 -16732 1 -16733 1 -16734 1 -16735 1 -16736 0 -16737 2 -16738 2 -16739 1 -16740 1 -16741 1 -16742 1 -16743 1 -16744 1 -16745 1 -16746 1 -16747 2 -16748 1 -16749 1 -16750 0 -16751 0 -16752 2 -16753 2 -16754 2 -16755 1 -16756 1 -16757 1 -16758 0 -16759 0 -16760 1 -16761 0 -16762 1 -16763 1 -16764 2 -16765 0 -16766 0 -16767 1 -16768 0 -16769 1 -16770 1 -16771 0 -16772 1 -16773 2 -16774 1 -16775 0 -16776 0 -16777 1 -16778 2 -16779 2 -16780 1 -16781 0 -16782 2 -16783 2 -16784 2 -16785 2 -16786 2 -16787 2 -16788 1 -16789 1 -16790 2 -16791 1 -16792 0 -16793 1 -16794 1 -16795 2 -16796 1 -16797 1 -16798 1 -16799 0 -16800 1 -16801 1 
-16802 1 -16803 1 -16804 1 -16805 1 -16806 1 -16807 0 -16808 2 -16809 0 -16810 2 -16811 1 -16812 1 -16813 1 -16814 1 -16815 0 -16816 1 -16817 1 -16818 1 -16819 0 -16820 0 -16821 2 -16822 1 -16823 1 -16824 1 -16825 1 -16826 2 -16827 2 -16828 2 -16829 2 -16830 1 -16831 1 -16832 2 -16833 1 -16834 1 -16835 1 -16836 0 -16837 1 -16838 2 -16839 1 -16840 1 -16841 1 -16842 1 -16843 0 -16844 1 -16845 1 -16846 1 -16847 0 -16848 1 -16849 0 -16850 1 -16851 1 -16852 1 -16853 2 -16854 2 -16855 1 -16856 2 -16857 1 -16858 1 -16859 1 -16860 2 -16861 1 -16862 0 -16863 1 -16864 1 -16865 0 -16866 1 -16867 2 -16868 1 -16869 1 -16870 0 -16871 2 -16872 1 -16873 1 -16874 0 -16875 2 -16876 2 -16877 1 -16878 0 -16879 1 -16880 0 -16881 1 -16882 2 -16883 1 -16884 1 -16885 2 -16886 1 -16887 1 -16888 0 -16889 1 -16890 0 -16891 1 -16892 0 -16893 2 -16894 2 -16895 0 -16896 2 -16897 0 -16898 1 -16899 1 -16900 2 -16901 2 -16902 1 -16903 0 -16904 1 -16905 1 -16906 1 -16907 1 -16908 2 -16909 1 -16910 0 -16911 1 -16912 0 -16913 1 -16914 2 -16915 2 -16916 0 -16917 1 -16918 0 -16919 2 -16920 1 -16921 0 -16922 0 -16923 2 -16924 2 -16925 2 -16926 2 -16927 2 -16928 2 -16929 0 -16930 0 -16931 0 -16932 1 -16933 0 -16934 2 -16935 2 -16936 1 -16937 1 -16938 2 -16939 1 -16940 0 -16941 1 -16942 1 -16943 0 -16944 2 -16945 1 -16946 2 -16947 2 -16948 0 -16949 2 -16950 1 -16951 2 -16952 1 -16953 0 -16954 2 -16955 2 -16956 0 -16957 1 -16958 1 -16959 1 -16960 0 -16961 1 -16962 2 -16963 1 -16964 2 -16965 1 -16966 2 -16967 2 -16968 1 -16969 2 -16970 2 -16971 1 -16972 2 -16973 2 -16974 2 -16975 0 -16976 1 -16977 1 -16978 0 -16979 1 -16980 2 -16981 0 -16982 2 -16983 1 -16984 0 -16985 0 -16986 2 -16987 0 -16988 2 -16989 2 -16990 2 -16991 1 -16992 1 -16993 2 -16994 1 -16995 2 -16996 1 -16997 1 -16998 0 -16999 1 -17000 2 -17001 2 -17002 2 -17003 2 -17004 1 -17005 1 -17006 2 -17007 2 -17008 1 -17009 1 -17010 1 -17011 1 -17012 1 -17013 0 -17014 1 -17015 1 -17016 0 -17017 1 -17018 2 -17019 1 -17020 0 -17021 2 -17022 2 -17023 1 
-17024 0 -17025 1 -17026 1 -17027 0 -17028 0 -17029 2 -17030 1 -17031 2 -17032 0 -17033 0 -17034 2 -17035 2 -17036 0 -17037 0 -17038 0 -17039 1 -17040 2 -17041 0 -17042 1 -17043 1 -17044 0 -17045 2 -17046 1 -17047 1 -17048 2 -17049 2 -17050 1 -17051 2 -17052 2 -17053 2 -17054 1 -17055 1 -17056 2 -17057 2 -17058 1 -17059 1 -17060 1 -17061 2 -17062 1 -17063 0 -17064 2 -17065 2 -17066 1 -17067 1 -17068 0 -17069 1 -17070 1 -17071 1 -17072 1 -17073 0 -17074 1 -17075 1 -17076 0 -17077 1 -17078 2 -17079 2 -17080 2 -17081 1 -17082 1 -17083 0 -17084 1 -17085 1 -17086 0 -17087 1 -17088 0 -17089 2 -17090 0 -17091 1 -17092 0 -17093 2 -17094 1 -17095 0 -17096 2 -17097 0 -17098 2 -17099 1 -17100 2 -17101 1 -17102 1 -17103 1 -17104 2 -17105 2 -17106 1 -17107 2 -17108 2 -17109 1 -17110 2 -17111 2 -17112 2 -17113 2 -17114 1 -17115 1 -17116 1 -17117 0 -17118 2 -17119 0 -17120 0 -17121 2 -17122 2 -17123 1 -17124 2 -17125 2 -17126 2 -17127 1 -17128 2 -17129 1 -17130 0 -17131 0 -17132 1 -17133 2 -17134 1 -17135 2 -17136 0 -17137 2 -17138 1 -17139 1 -17140 1 -17141 0 -17142 2 -17143 1 -17144 1 -17145 1 -17146 2 -17147 2 -17148 2 -17149 1 -17150 1 -17151 1 -17152 1 -17153 1 -17154 0 -17155 2 -17156 0 -17157 1 -17158 2 -17159 2 -17160 1 -17161 1 -17162 2 -17163 0 -17164 2 -17165 1 -17166 1 -17167 1 -17168 2 -17169 0 -17170 2 -17171 2 -17172 1 -17173 2 -17174 2 -17175 0 -17176 2 -17177 1 -17178 2 -17179 0 -17180 1 -17181 2 -17182 2 -17183 2 -17184 0 -17185 1 -17186 1 -17187 1 -17188 1 -17189 0 -17190 1 -17191 2 -17192 2 -17193 0 -17194 2 -17195 2 -17196 1 -17197 1 -17198 2 -17199 0 -17200 1 -17201 0 -17202 1 -17203 0 -17204 2 -17205 2 -17206 1 -17207 0 -17208 2 -17209 2 -17210 2 -17211 1 -17212 1 -17213 2 -17214 1 -17215 1 -17216 0 -17217 2 -17218 1 -17219 2 -17220 0 -17221 0 -17222 2 -17223 0 -17224 1 -17225 0 -17226 2 -17227 1 -17228 1 -17229 1 -17230 1 -17231 1 -17232 1 -17233 1 -17234 1 -17235 1 -17236 2 -17237 0 -17238 1 -17239 1 -17240 1 -17241 0 -17242 1 -17243 1 -17244 1 -17245 0 
-17246 2 -17247 1 -17248 0 -17249 1 -17250 0 -17251 0 -17252 2 -17253 1 -17254 1 -17255 2 -17256 0 -17257 1 -17258 0 -17259 1 -17260 1 -17261 1 -17262 1 -17263 0 -17264 0 -17265 0 -17266 1 -17267 1 -17268 1 -17269 0 -17270 0 -17271 1 -17272 2 -17273 0 -17274 1 -17275 2 -17276 0 -17277 1 -17278 1 -17279 2 -17280 2 -17281 1 -17282 1 -17283 1 -17284 2 -17285 1 -17286 1 -17287 1 -17288 2 -17289 0 -17290 0 -17291 0 -17292 2 -17293 0 -17294 1 -17295 2 -17296 1 -17297 0 -17298 1 -17299 2 -17300 2 -17301 2 -17302 2 -17303 0 -17304 1 -17305 2 -17306 1 -17307 1 -17308 1 -17309 0 -17310 0 -17311 1 -17312 0 -17313 0 -17314 1 -17315 2 -17316 1 -17317 2 -17318 1 -17319 1 -17320 1 -17321 1 -17322 2 -17323 2 -17324 2 -17325 0 -17326 0 -17327 2 -17328 2 -17329 1 -17330 1 -17331 2 -17332 2 -17333 2 -17334 0 -17335 2 -17336 1 -17337 2 -17338 0 -17339 0 -17340 1 -17341 2 -17342 1 -17343 0 -17344 2 -17345 1 -17346 2 -17347 1 -17348 2 -17349 0 -17350 2 -17351 2 -17352 0 -17353 0 -17354 1 -17355 1 -17356 2 -17357 1 -17358 0 -17359 1 -17360 0 -17361 1 -17362 1 -17363 0 -17364 2 -17365 1 -17366 1 -17367 2 -17368 1 -17369 1 -17370 2 -17371 0 -17372 0 -17373 2 -17374 1 -17375 2 -17376 0 -17377 2 -17378 1 -17379 2 -17380 1 -17381 2 -17382 0 -17383 1 -17384 2 -17385 1 -17386 1 -17387 2 -17388 2 -17389 2 -17390 1 -17391 2 -17392 1 -17393 0 -17394 0 -17395 0 -17396 2 -17397 0 -17398 0 -17399 1 -17400 2 -17401 1 -17402 0 -17403 0 -17404 1 -17405 0 -17406 1 -17407 1 -17408 0 -17409 2 -17410 1 -17411 1 -17412 1 -17413 2 -17414 0 -17415 1 -17416 2 -17417 1 -17418 1 -17419 1 -17420 1 -17421 2 -17422 1 -17423 0 -17424 1 -17425 2 -17426 1 -17427 1 -17428 2 -17429 1 -17430 2 -17431 1 -17432 2 -17433 1 -17434 1 -17435 2 -17436 2 -17437 2 -17438 1 -17439 1 -17440 2 -17441 2 -17442 1 -17443 1 -17444 0 -17445 1 -17446 1 -17447 0 -17448 0 -17449 0 -17450 1 -17451 0 -17452 2 -17453 0 -17454 0 -17455 0 -17456 1 -17457 1 -17458 1 -17459 1 -17460 0 -17461 2 -17462 0 -17463 1 -17464 0 -17465 2 -17466 1 -17467 2 
-17468 2 -17469 0 -17470 2 -17471 1 -17472 2 -17473 0 -17474 0 -17475 1 -17476 1 -17477 2 -17478 1 -17479 0 -17480 2 -17481 2 -17482 0 -17483 2 -17484 1 -17485 1 -17486 1 -17487 1 -17488 1 -17489 0 -17490 1 -17491 2 -17492 2 -17493 2 -17494 2 -17495 0 -17496 0 -17497 1 -17498 1 -17499 2 -17500 2 -17501 1 -17502 0 -17503 1 -17504 1 -17505 1 -17506 1 -17507 0 -17508 1 -17509 1 -17510 1 -17511 0 -17512 2 -17513 1 -17514 2 -17515 1 -17516 2 -17517 0 -17518 1 -17519 2 -17520 2 -17521 1 -17522 2 -17523 1 -17524 1 -17525 1 -17526 1 -17527 1 -17528 2 -17529 0 -17530 0 -17531 2 -17532 2 -17533 0 -17534 1 -17535 2 -17536 0 -17537 1 -17538 0 -17539 1 -17540 0 -17541 1 -17542 2 -17543 1 -17544 1 -17545 1 -17546 1 -17547 1 -17548 2 -17549 0 -17550 0 -17551 0 -17552 2 -17553 0 -17554 1 -17555 1 -17556 0 -17557 0 -17558 1 -17559 1 -17560 2 -17561 0 -17562 1 -17563 0 -17564 0 -17565 1 -17566 1 -17567 1 -17568 0 -17569 1 -17570 1 -17571 1 -17572 2 -17573 2 -17574 1 -17575 2 -17576 2 -17577 1 -17578 1 -17579 2 -17580 0 -17581 2 -17582 2 -17583 0 -17584 2 -17585 1 -17586 1 -17587 2 -17588 0 -17589 1 -17590 1 -17591 0 -17592 2 -17593 1 -17594 0 -17595 1 -17596 2 -17597 0 -17598 1 -17599 2 -17600 1 -17601 0 -17602 0 -17603 2 -17604 1 -17605 2 -17606 0 -17607 0 -17608 2 -17609 0 -17610 2 -17611 2 -17612 0 -17613 1 -17614 2 -17615 2 -17616 2 -17617 2 -17618 2 -17619 0 -17620 2 -17621 0 -17622 2 -17623 0 -17624 2 -17625 1 -17626 0 -17627 1 -17628 1 -17629 2 -17630 1 -17631 1 -17632 2 -17633 2 -17634 1 -17635 1 -17636 2 -17637 2 -17638 1 -17639 2 -17640 2 -17641 1 -17642 2 -17643 1 -17644 1 -17645 1 -17646 0 -17647 1 -17648 2 -17649 2 -17650 1 -17651 1 -17652 1 -17653 2 -17654 1 -17655 0 -17656 0 -17657 2 -17658 0 -17659 1 -17660 1 -17661 1 -17662 1 -17663 0 -17664 2 -17665 0 -17666 1 -17667 1 -17668 0 -17669 1 -17670 1 -17671 2 -17672 2 -17673 0 -17674 2 -17675 2 -17676 1 -17677 2 -17678 0 -17679 2 -17680 1 -17681 1 -17682 2 -17683 1 -17684 1 -17685 2 -17686 1 -17687 1 -17688 1 -17689 1 
-17690 1 -17691 0 -17692 0 -17693 0 -17694 1 -17695 0 -17696 2 -17697 0 -17698 1 -17699 2 -17700 2 -17701 2 -17702 1 -17703 2 -17704 1 -17705 1 -17706 0 -17707 1 -17708 2 -17709 1 -17710 0 -17711 0 -17712 0 -17713 1 -17714 2 -17715 1 -17716 2 -17717 1 -17718 0 -17719 1 -17720 1 -17721 1 -17722 1 -17723 0 -17724 2 -17725 2 -17726 2 -17727 1 -17728 2 -17729 1 -17730 2 -17731 2 -17732 2 -17733 0 -17734 1 -17735 1 -17736 1 -17737 2 -17738 0 -17739 0 -17740 0 -17741 2 -17742 2 -17743 1 -17744 1 -17745 1 -17746 2 -17747 0 -17748 1 -17749 2 -17750 2 -17751 1 -17752 1 -17753 1 -17754 2 -17755 0 -17756 0 -17757 2 -17758 1 -17759 1 -17760 0 -17761 1 -17762 1 -17763 2 -17764 1 -17765 2 -17766 1 -17767 0 -17768 1 -17769 2 -17770 2 -17771 1 -17772 1 -17773 1 -17774 0 -17775 2 -17776 0 -17777 2 -17778 0 -17779 2 -17780 0 -17781 2 -17782 1 -17783 2 -17784 2 -17785 2 -17786 1 -17787 0 -17788 0 -17789 1 -17790 0 -17791 2 -17792 0 -17793 1 -17794 0 -17795 1 -17796 0 -17797 0 -17798 0 -17799 1 -17800 1 -17801 2 -17802 2 -17803 2 -17804 1 -17805 1 -17806 0 -17807 2 -17808 0 -17809 1 -17810 1 -17811 1 -17812 1 -17813 1 -17814 2 -17815 2 -17816 2 -17817 1 -17818 2 -17819 2 -17820 2 -17821 2 -17822 2 -17823 0 -17824 2 -17825 2 -17826 0 -17827 1 -17828 0 -17829 1 -17830 0 -17831 1 -17832 0 -17833 1 -17834 0 -17835 0 -17836 1 -17837 1 -17838 1 -17839 2 -17840 2 -17841 2 -17842 0 -17843 2 -17844 1 -17845 0 -17846 1 -17847 2 -17848 1 -17849 1 -17850 2 -17851 0 -17852 0 -17853 2 -17854 2 -17855 2 -17856 2 -17857 1 -17858 2 -17859 2 -17860 1 -17861 0 -17862 0 -17863 2 -17864 1 -17865 0 -17866 1 -17867 1 -17868 1 -17869 1 -17870 1 -17871 1 -17872 1 -17873 0 -17874 1 -17875 2 -17876 1 -17877 1 -17878 1 -17879 0 -17880 2 -17881 0 -17882 0 -17883 1 -17884 2 -17885 2 -17886 1 -17887 2 -17888 0 -17889 2 -17890 0 -17891 1 -17892 1 -17893 1 -17894 2 -17895 1 -17896 1 -17897 2 -17898 1 -17899 2 -17900 0 -17901 2 -17902 0 -17903 1 -17904 0 -17905 0 -17906 2 -17907 1 -17908 2 -17909 1 -17910 1 -17911 0 
-17912 0 -17913 0 -17914 0 -17915 0 -17916 1 -17917 0 -17918 1 -17919 1 -17920 0 -17921 0 -17922 1 -17923 1 -17924 1 -17925 1 -17926 2 -17927 2 -17928 0 -17929 1 -17930 1 -17931 2 -17932 2 -17933 2 -17934 1 -17935 0 -17936 2 -17937 0 -17938 0 -17939 1 -17940 2 -17941 2 -17942 0 -17943 2 -17944 0 -17945 1 -17946 1 -17947 0 -17948 2 -17949 1 -17950 0 -17951 2 -17952 0 -17953 0 -17954 2 -17955 1 -17956 1 -17957 1 -17958 2 -17959 1 -17960 0 -17961 1 -17962 1 -17963 0 -17964 1 -17965 1 -17966 1 -17967 1 -17968 1 -17969 1 -17970 0 -17971 2 -17972 1 -17973 1 -17974 0 -17975 1 -17976 0 -17977 2 -17978 1 -17979 2 -17980 0 -17981 1 -17982 1 -17983 1 -17984 1 -17985 0 -17986 1 -17987 0 -17988 2 -17989 1 -17990 2 -17991 1 -17992 1 -17993 1 -17994 1 -17995 2 -17996 2 -17997 0 -17998 0 -17999 1 -18000 2 -18001 1 -18002 1 -18003 0 -18004 2 -18005 1 -18006 2 -18007 0 -18008 1 -18009 2 -18010 0 -18011 0 -18012 1 -18013 1 -18014 0 -18015 1 -18016 1 -18017 0 -18018 0 -18019 1 -18020 1 -18021 2 -18022 2 -18023 2 -18024 2 -18025 1 -18026 0 -18027 2 -18028 1 -18029 2 -18030 1 -18031 1 -18032 1 -18033 2 -18034 0 -18035 1 -18036 2 -18037 0 -18038 2 -18039 1 -18040 2 -18041 2 -18042 0 -18043 1 -18044 2 -18045 0 -18046 1 -18047 2 -18048 0 -18049 1 -18050 0 -18051 1 -18052 1 -18053 2 -18054 1 -18055 1 -18056 2 -18057 0 -18058 1 -18059 0 -18060 1 -18061 2 -18062 2 -18063 0 -18064 0 -18065 2 -18066 1 -18067 1 -18068 1 -18069 1 -18070 2 -18071 2 -18072 1 -18073 0 -18074 1 -18075 0 -18076 1 -18077 0 -18078 1 -18079 1 -18080 1 -18081 1 -18082 0 -18083 0 -18084 0 -18085 0 -18086 1 -18087 0 -18088 2 -18089 2 -18090 2 -18091 1 -18092 1 -18093 1 -18094 2 -18095 2 -18096 1 -18097 0 -18098 2 -18099 0 -18100 2 -18101 0 -18102 0 -18103 0 -18104 0 -18105 2 -18106 2 -18107 2 -18108 0 -18109 2 -18110 1 -18111 1 -18112 1 -18113 2 -18114 1 -18115 1 -18116 1 -18117 2 -18118 2 -18119 1 -18120 1 -18121 2 -18122 1 -18123 0 -18124 2 -18125 0 -18126 0 -18127 1 -18128 1 -18129 1 -18130 2 -18131 1 -18132 2 -18133 1 
-18134 1 -18135 1 -18136 0 -18137 2 -18138 2 -18139 1 -18140 1 -18141 2 -18142 2 -18143 0 -18144 1 -18145 1 -18146 0 -18147 0 -18148 2 -18149 1 -18150 1 -18151 1 -18152 1 -18153 1 -18154 1 -18155 1 -18156 2 -18157 2 -18158 0 -18159 2 -18160 2 -18161 2 -18162 1 -18163 1 -18164 1 -18165 2 -18166 0 -18167 1 -18168 1 -18169 2 -18170 1 -18171 1 -18172 1 -18173 2 -18174 1 -18175 2 -18176 2 -18177 0 -18178 1 -18179 0 -18180 0 -18181 1 -18182 1 -18183 1 -18184 2 -18185 0 -18186 2 -18187 2 -18188 2 -18189 0 -18190 1 -18191 1 -18192 1 -18193 0 -18194 0 -18195 0 -18196 1 -18197 2 -18198 1 -18199 1 -18200 1 -18201 1 -18202 0 -18203 2 -18204 1 -18205 2 -18206 2 -18207 1 -18208 0 -18209 0 -18210 1 -18211 1 -18212 1 -18213 0 -18214 1 -18215 0 -18216 2 -18217 0 -18218 2 -18219 2 -18220 0 -18221 1 -18222 1 -18223 1 -18224 1 -18225 0 -18226 2 -18227 1 -18228 2 -18229 1 -18230 0 -18231 0 -18232 1 -18233 1 -18234 2 -18235 2 -18236 2 -18237 0 -18238 2 -18239 0 -18240 2 -18241 1 -18242 1 -18243 1 -18244 2 -18245 1 -18246 0 -18247 1 -18248 1 -18249 2 -18250 2 -18251 1 -18252 1 -18253 0 -18254 2 -18255 2 -18256 2 -18257 0 -18258 1 -18259 1 -18260 2 -18261 1 -18262 2 -18263 0 -18264 2 -18265 1 -18266 0 -18267 1 -18268 2 -18269 1 -18270 2 -18271 2 -18272 1 -18273 0 -18274 2 -18275 2 -18276 0 -18277 2 -18278 0 -18279 2 -18280 0 -18281 1 -18282 2 -18283 0 -18284 1 -18285 1 -18286 1 -18287 1 -18288 1 -18289 1 -18290 0 -18291 1 -18292 1 -18293 0 -18294 2 -18295 2 -18296 2 -18297 0 -18298 1 -18299 1 -18300 0 -18301 2 -18302 0 -18303 1 -18304 2 -18305 2 -18306 1 -18307 2 -18308 1 -18309 2 -18310 2 -18311 1 -18312 0 -18313 0 -18314 0 -18315 2 -18316 2 -18317 1 -18318 1 -18319 2 -18320 2 -18321 0 -18322 1 -18323 2 -18324 1 -18325 0 -18326 1 -18327 2 -18328 0 -18329 2 -18330 1 -18331 0 -18332 1 -18333 1 -18334 1 -18335 0 -18336 1 -18337 2 -18338 1 -18339 2 -18340 1 -18341 2 -18342 0 -18343 0 -18344 0 -18345 1 -18346 1 -18347 2 -18348 1 -18349 2 -18350 0 -18351 2 -18352 2 -18353 0 -18354 1 -18355 0 
-18356 0 -18357 2 -18358 1 -18359 0 -18360 2 -18361 1 -18362 1 -18363 2 -18364 1 -18365 1 -18366 2 -18367 1 -18368 1 -18369 1 -18370 2 -18371 0 -18372 1 -18373 1 -18374 1 -18375 2 -18376 1 -18377 2 -18378 2 -18379 0 -18380 2 -18381 0 -18382 1 -18383 1 -18384 1 -18385 0 -18386 1 -18387 0 -18388 1 -18389 2 -18390 1 -18391 2 -18392 0 -18393 1 -18394 2 -18395 2 -18396 0 -18397 1 -18398 1 -18399 1 -18400 0 -18401 1 -18402 0 -18403 2 -18404 0 -18405 2 -18406 0 -18407 2 -18408 1 -18409 1 -18410 2 -18411 0 -18412 2 -18413 2 -18414 0 -18415 0 -18416 1 -18417 0 -18418 2 -18419 0 -18420 1 -18421 2 -18422 2 -18423 2 -18424 2 -18425 1 -18426 0 -18427 1 -18428 0 -18429 2 -18430 1 -18431 0 -18432 1 -18433 1 -18434 1 -18435 1 -18436 0 -18437 1 -18438 1 -18439 1 -18440 2 -18441 2 -18442 1 -18443 0 -18444 2 -18445 2 -18446 0 -18447 0 -18448 0 -18449 0 -18450 0 -18451 1 -18452 2 -18453 0 -18454 1 -18455 1 -18456 0 -18457 0 -18458 0 -18459 1 -18460 2 -18461 1 -18462 2 -18463 0 -18464 1 -18465 1 -18466 2 -18467 0 -18468 2 -18469 1 -18470 0 -18471 2 -18472 1 -18473 1 -18474 2 -18475 1 -18476 0 -18477 0 -18478 1 -18479 1 -18480 2 -18481 0 -18482 2 -18483 1 -18484 1 -18485 0 -18486 1 -18487 2 -18488 2 -18489 0 -18490 1 -18491 1 -18492 1 -18493 0 -18494 2 -18495 2 -18496 0 -18497 1 -18498 1 -18499 0 -18500 0 -18501 2 -18502 1 -18503 0 -18504 1 -18505 1 -18506 2 -18507 1 -18508 2 -18509 1 -18510 2 -18511 2 -18512 0 -18513 0 -18514 1 -18515 1 -18516 2 -18517 0 -18518 1 -18519 1 -18520 0 -18521 0 -18522 1 -18523 2 -18524 1 -18525 2 -18526 1 -18527 0 -18528 2 -18529 1 -18530 1 -18531 0 -18532 2 -18533 0 -18534 1 -18535 2 -18536 0 -18537 0 -18538 2 -18539 2 -18540 1 -18541 0 -18542 2 -18543 1 -18544 1 -18545 2 -18546 1 -18547 2 -18548 2 -18549 1 -18550 0 -18551 1 -18552 0 -18553 0 -18554 1 -18555 1 -18556 1 -18557 0 -18558 1 -18559 2 -18560 2 -18561 2 -18562 0 -18563 1 -18564 2 -18565 2 -18566 0 -18567 1 -18568 1 -18569 0 -18570 1 -18571 1 -18572 2 -18573 1 -18574 0 -18575 2 -18576 1 -18577 1 
-18578 2 -18579 1 -18580 2 -18581 1 -18582 0 -18583 1 -18584 0 -18585 0 -18586 2 -18587 2 -18588 2 -18589 2 -18590 1 -18591 1 -18592 2 -18593 0 -18594 1 -18595 0 -18596 1 -18597 1 -18598 1 -18599 0 -18600 2 -18601 0 -18602 2 -18603 2 -18604 0 -18605 2 -18606 1 -18607 2 -18608 2 -18609 1 -18610 1 -18611 1 -18612 1 -18613 1 -18614 2 -18615 0 -18616 0 -18617 2 -18618 2 -18619 1 -18620 2 -18621 2 -18622 2 -18623 1 -18624 0 -18625 2 -18626 1 -18627 2 -18628 2 -18629 1 -18630 1 -18631 2 -18632 1 -18633 2 -18634 1 -18635 1 -18636 1 -18637 1 -18638 1 -18639 2 -18640 1 -18641 2 -18642 2 -18643 2 -18644 1 -18645 1 -18646 1 -18647 0 -18648 1 -18649 1 -18650 1 -18651 1 -18652 1 -18653 1 -18654 1 -18655 0 -18656 0 -18657 2 -18658 2 -18659 2 -18660 0 -18661 1 -18662 1 -18663 2 -18664 2 -18665 2 -18666 2 -18667 0 -18668 2 -18669 1 -18670 2 -18671 2 -18672 0 -18673 2 -18674 2 -18675 1 -18676 1 -18677 2 -18678 1 -18679 2 -18680 2 -18681 1 -18682 0 -18683 0 -18684 1 -18685 0 -18686 2 -18687 1 -18688 1 -18689 1 -18690 1 -18691 2 -18692 1 -18693 1 -18694 2 -18695 0 -18696 1 -18697 0 -18698 2 -18699 2 -18700 1 -18701 1 -18702 1 -18703 0 -18704 1 -18705 1 -18706 2 -18707 0 -18708 0 -18709 2 -18710 1 -18711 1 -18712 1 -18713 2 -18714 2 -18715 2 -18716 0 -18717 2 -18718 2 -18719 2 -18720 1 -18721 1 -18722 2 -18723 2 -18724 2 -18725 2 -18726 2 -18727 1 -18728 2 -18729 2 -18730 1 -18731 2 -18732 2 -18733 1 -18734 2 -18735 0 -18736 2 -18737 0 -18738 2 -18739 1 -18740 2 -18741 2 -18742 2 -18743 2 -18744 2 -18745 1 -18746 0 -18747 1 -18748 1 -18749 0 -18750 2 -18751 2 -18752 2 -18753 2 -18754 1 -18755 1 -18756 1 -18757 2 -18758 2 -18759 1 -18760 2 -18761 1 -18762 2 -18763 1 -18764 0 -18765 1 -18766 0 -18767 1 -18768 1 -18769 1 -18770 2 -18771 2 -18772 1 -18773 2 -18774 2 -18775 1 -18776 0 -18777 2 -18778 2 -18779 2 -18780 0 -18781 1 -18782 1 -18783 1 -18784 2 -18785 2 -18786 2 -18787 1 -18788 0 -18789 2 -18790 2 -18791 0 -18792 1 -18793 1 -18794 2 -18795 1 -18796 1 -18797 1 -18798 2 -18799 1 
-18800 1 -18801 2 -18802 2 -18803 2 -18804 2 -18805 0 -18806 0 -18807 1 -18808 2 -18809 1 -18810 1 -18811 2 -18812 2 -18813 2 -18814 2 -18815 1 -18816 1 -18817 1 -18818 1 -18819 2 -18820 2 -18821 0 -18822 1 -18823 1 -18824 1 -18825 1 -18826 1 -18827 1 -18828 2 -18829 0 -18830 2 -18831 2 -18832 2 -18833 0 -18834 0 -18835 1 -18836 1 -18837 2 -18838 2 -18839 0 -18840 2 -18841 2 -18842 1 -18843 2 -18844 1 -18845 0 -18846 2 -18847 2 -18848 2 -18849 2 -18850 2 -18851 1 -18852 1 -18853 1 -18854 0 -18855 1 -18856 2 -18857 2 -18858 1 -18859 1 -18860 0 -18861 1 -18862 1 -18863 0 -18864 2 -18865 2 -18866 1 -18867 1 -18868 2 -18869 2 -18870 0 -18871 1 -18872 2 -18873 2 -18874 0 -18875 0 -18876 2 -18877 1 -18878 1 -18879 0 -18880 1 -18881 2 -18882 2 -18883 1 -18884 0 -18885 0 -18886 2 -18887 1 -18888 1 -18889 2 -18890 0 -18891 1 -18892 2 -18893 0 -18894 2 -18895 2 -18896 1 -18897 1 -18898 1 -18899 1 -18900 2 -18901 0 -18902 2 -18903 2 -18904 2 -18905 2 -18906 1 -18907 1 -18908 2 -18909 1 -18910 2 -18911 0 -18912 2 -18913 1 -18914 2 -18915 0 -18916 1 -18917 1 -18918 2 -18919 1 -18920 2 -18921 1 -18922 1 -18923 2 -18924 2 -18925 1 -18926 1 -18927 1 -18928 1 -18929 2 -18930 1 -18931 1 -18932 1 -18933 2 -18934 2 -18935 2 -18936 2 -18937 1 -18938 2 -18939 1 -18940 1 -18941 1 -18942 1 -18943 1 -18944 1 -18945 1 -18946 1 -18947 0 -18948 0 -18949 2 -18950 0 -18951 1 -18952 0 -18953 1 -18954 1 -18955 2 -18956 0 -18957 1 -18958 2 -18959 2 -18960 1 -18961 2 -18962 2 -18963 1 -18964 1 -18965 1 -18966 1 -18967 0 -18968 0 -18969 2 -18970 2 -18971 2 -18972 1 -18973 1 -18974 1 -18975 2 -18976 1 -18977 1 -18978 2 -18979 2 -18980 0 -18981 2 -18982 0 -18983 0 -18984 1 -18985 1 -18986 1 -18987 1 -18988 2 -18989 2 -18990 0 -18991 0 -18992 2 -18993 1 -18994 1 -18995 1 -18996 0 -18997 2 -18998 1 -18999 0 -19000 1 -19001 2 -19002 2 -19003 1 -19004 1 -19005 1 -19006 0 -19007 1 -19008 2 -19009 2 -19010 1 -19011 1 -19012 2 -19013 0 -19014 2 -19015 2 -19016 2 -19017 2 -19018 2 -19019 1 -19020 2 -19021 0 
-19022 2 -19023 1 -19024 1 -19025 1 -19026 2 -19027 0 -19028 1 -19029 2 -19030 1 -19031 2 -19032 0 -19033 0 -19034 1 -19035 2 -19036 0 -19037 2 -19038 1 -19039 1 -19040 2 -19041 0 -19042 1 -19043 1 -19044 2 -19045 1 -19046 1 -19047 2 -19048 1 -19049 2 -19050 2 -19051 2 -19052 1 -19053 0 -19054 1 -19055 1 -19056 2 -19057 1 -19058 2 -19059 2 -19060 1 -19061 2 -19062 1 -19063 0 -19064 2 -19065 2 -19066 1 -19067 2 -19068 2 -19069 2 -19070 1 -19071 1 -19072 1 -19073 0 -19074 2 -19075 2 -19076 0 -19077 2 -19078 2 -19079 2 -19080 1 -19081 1 -19082 1 -19083 2 -19084 1 -19085 0 -19086 2 -19087 2 -19088 1 -19089 1 -19090 1 -19091 1 -19092 0 -19093 2 -19094 2 -19095 0 -19096 0 -19097 1 -19098 2 -19099 0 -19100 2 -19101 2 -19102 1 -19103 2 -19104 2 -19105 1 -19106 1 -19107 0 -19108 0 -19109 0 -19110 2 -19111 1 -19112 1 -19113 1 -19114 1 -19115 1 -19116 2 -19117 0 -19118 0 -19119 2 -19120 1 -19121 1 -19122 1 -19123 0 -19124 1 -19125 2 -19126 2 -19127 1 -19128 1 -19129 1 -19130 1 -19131 2 -19132 1 -19133 2 -19134 0 -19135 1 -19136 2 -19137 0 -19138 1 -19139 1 -19140 1 -19141 1 -19142 1 -19143 1 -19144 1 -19145 0 -19146 1 -19147 1 -19148 2 -19149 0 -19150 0 -19151 1 -19152 1 -19153 1 -19154 1 -19155 0 -19156 1 -19157 0 -19158 2 -19159 2 -19160 0 -19161 0 -19162 1 -19163 2 -19164 1 -19165 1 -19166 2 -19167 1 -19168 2 -19169 2 -19170 0 -19171 1 -19172 1 -19173 1 -19174 1 -19175 2 -19176 1 -19177 1 -19178 0 -19179 2 -19180 0 -19181 2 -19182 2 -19183 1 -19184 1 -19185 1 -19186 1 -19187 1 -19188 0 -19189 1 -19190 2 -19191 1 -19192 1 -19193 1 -19194 0 -19195 2 -19196 0 -19197 1 -19198 1 -19199 0 -19200 0 -19201 0 -19202 1 -19203 0 -19204 2 -19205 1 -19206 1 -19207 2 -19208 1 -19209 1 -19210 0 -19211 1 -19212 1 -19213 2 -19214 2 -19215 2 -19216 1 -19217 0 -19218 1 -19219 1 -19220 2 -19221 0 -19222 2 -19223 2 -19224 2 -19225 1 -19226 2 -19227 1 -19228 2 -19229 1 -19230 1 -19231 2 -19232 2 -19233 1 -19234 1 -19235 1 -19236 1 -19237 1 -19238 2 -19239 1 -19240 1 -19241 1 -19242 0 -19243 1 
-19244 1 -19245 2 -19246 1 -19247 1 -19248 2 -19249 1 -19250 1 -19251 2 -19252 0 -19253 0 -19254 0 -19255 1 -19256 0 -19257 2 -19258 1 -19259 2 -19260 0 -19261 2 -19262 1 -19263 0 -19264 1 -19265 0 -19266 1 -19267 0 -19268 1 -19269 1 -19270 1 -19271 2 -19272 2 -19273 1 -19274 2 -19275 2 -19276 1 -19277 1 -19278 2 -19279 0 -19280 1 -19281 1 -19282 2 -19283 0 -19284 2 -19285 1 -19286 1 -19287 2 -19288 1 -19289 2 -19290 1 -19291 0 -19292 1 -19293 2 -19294 1 -19295 2 -19296 2 -19297 2 -19298 0 -19299 1 -19300 1 -19301 1 -19302 1 -19303 2 -19304 0 -19305 2 -19306 2 -19307 1 -19308 1 -19309 1 -19310 2 -19311 1 -19312 0 -19313 1 -19314 2 -19315 1 -19316 0 -19317 2 -19318 0 -19319 1 -19320 1 -19321 1 -19322 2 -19323 1 -19324 2 -19325 2 -19326 1 -19327 0 -19328 2 -19329 2 -19330 2 -19331 2 -19332 2 -19333 2 -19334 2 -19335 1 -19336 2 -19337 2 -19338 1 -19339 0 -19340 2 -19341 2 -19342 1 -19343 1 -19344 2 -19345 2 -19346 1 -19347 1 -19348 2 -19349 1 -19350 1 -19351 1 -19352 1 -19353 1 -19354 2 -19355 0 -19356 0 -19357 0 -19358 1 -19359 2 -19360 1 -19361 1 -19362 1 -19363 2 -19364 0 -19365 1 -19366 2 -19367 2 -19368 2 -19369 0 -19370 1 -19371 0 -19372 1 -19373 1 -19374 1 -19375 1 -19376 1 -19377 2 -19378 1 -19379 1 -19380 1 -19381 2 -19382 1 -19383 2 -19384 2 -19385 2 -19386 2 -19387 2 -19388 1 -19389 0 -19390 1 -19391 0 -19392 2 -19393 1 -19394 1 -19395 0 -19396 0 -19397 1 -19398 1 -19399 1 -19400 2 -19401 2 -19402 2 -19403 1 -19404 1 -19405 1 -19406 2 -19407 1 -19408 1 -19409 0 -19410 1 -19411 0 -19412 1 -19413 1 -19414 1 -19415 1 -19416 2 -19417 0 -19418 1 -19419 0 -19420 1 -19421 2 -19422 1 -19423 2 -19424 2 -19425 0 -19426 1 -19427 0 -19428 2 -19429 1 -19430 1 -19431 1 -19432 1 -19433 1 -19434 1 -19435 0 -19436 1 -19437 2 -19438 1 -19439 1 -19440 2 -19441 0 -19442 2 -19443 2 -19444 2 -19445 1 -19446 1 -19447 1 -19448 2 -19449 1 -19450 0 -19451 1 -19452 1 -19453 1 -19454 1 -19455 1 -19456 0 -19457 2 -19458 2 -19459 2 -19460 2 -19461 2 -19462 1 -19463 1 -19464 0 -19465 1 
-19466 0 -19467 0 -19468 1 -19469 0 -19470 1 -19471 2 -19472 1 -19473 0 -19474 0 -19475 2 -19476 0 -19477 1 -19478 0 -19479 1 -19480 1 -19481 0 -19482 1 -19483 2 -19484 1 -19485 2 -19486 0 -19487 1 -19488 1 -19489 0 -19490 1 -19491 1 -19492 1 -19493 1 -19494 2 -19495 2 -19496 2 -19497 1 -19498 1 -19499 1 -19500 1 -19501 1 -19502 0 -19503 2 -19504 1 -19505 1 -19506 1 -19507 0 -19508 0 -19509 0 -19510 1 -19511 2 -19512 1 -19513 0 -19514 2 -19515 0 -19516 1 -19517 2 -19518 1 -19519 0 -19520 1 -19521 1 -19522 0 -19523 2 -19524 1 -19525 2 -19526 1 -19527 0 -19528 1 -19529 1 -19530 2 -19531 1 -19532 2 -19533 1 -19534 1 -19535 1 -19536 1 -19537 2 -19538 1 -19539 0 -19540 1 -19541 1 -19542 1 -19543 2 -19544 1 -19545 1 -19546 1 -19547 0 -19548 2 -19549 2 -19550 1 -19551 2 -19552 1 -19553 0 -19554 2 -19555 1 -19556 0 -19557 1 -19558 0 -19559 0 -19560 0 -19561 2 -19562 0 -19563 0 -19564 1 -19565 1 -19566 1 -19567 0 -19568 1 -19569 2 -19570 1 -19571 2 -19572 0 -19573 1 -19574 1 -19575 1 -19576 2 -19577 1 -19578 0 -19579 1 -19580 1 -19581 1 -19582 1 -19583 0 -19584 0 -19585 1 -19586 1 -19587 2 -19588 0 -19589 1 -19590 1 -19591 1 -19592 2 -19593 0 -19594 1 -19595 2 -19596 1 -19597 1 -19598 2 -19599 2 -19600 1 -19601 1 -19602 0 -19603 1 -19604 1 -19605 1 -19606 2 -19607 0 -19608 1 -19609 0 -19610 1 -19611 2 -19612 0 -19613 1 -19614 2 -19615 1 -19616 2 -19617 0 -19618 0 -19619 1 -19620 0 -19621 1 -19622 2 -19623 1 -19624 2 -19625 2 -19626 1 -19627 1 -19628 1 -19629 2 -19630 2 -19631 0 -19632 1 -19633 0 -19634 1 -19635 1 -19636 0 -19637 0 -19638 2 -19639 1 -19640 2 -19641 2 -19642 1 -19643 2 -19644 0 -19645 1 -19646 1 -19647 0 -19648 2 -19649 1 -19650 0 -19651 1 -19652 1 -19653 2 -19654 0 -19655 1 -19656 2 -19657 1 -19658 2 -19659 2 -19660 1 -19661 2 -19662 1 -19663 1 -19664 2 -19665 1 -19666 1 -19667 1 -19668 1 -19669 2 -19670 2 -19671 0 -19672 0 -19673 1 -19674 0 -19675 1 -19676 0 -19677 1 -19678 0 -19679 2 -19680 2 -19681 2 -19682 1 -19683 1 -19684 2 -19685 1 -19686 1 -19687 1 
-19688 0 -19689 2 -19690 2 -19691 2 -19692 1 -19693 1 -19694 1 -19695 1 -19696 2 -19697 2 -19698 2 -19699 0 -19700 1 -19701 2 -19702 1 -19703 2 -19704 1 -19705 0 -19706 0 -19707 1 -19708 2 -19709 2 -19710 2 -19711 0 -19712 2 -19713 0 -19714 2 -19715 0 -19716 1 diff --git a/pygip/models/attack/mea/data/attack2_generated_graph/pubmed/selected_index.txt b/pygip/models/attack/mea/data/attack2_generated_graph/pubmed/selected_index.txt deleted file mode 100644 index 2dc267fa..00000000 --- a/pygip/models/attack/mea/data/attack2_generated_graph/pubmed/selected_index.txt +++ /dev/null @@ -1,700 +0,0 @@ -6 -20 -45 -119 -131 -145 -202 -211 -213 -251 -252 -255 -301 -337 -345 -351 -352 -375 -381 -391 -498 -499 -528 -559 -566 -586 -618 -628 -656 -851 -899 -911 -913 -927 -937 -971 -1006 -1007 -1008 -1057 -1063 -1079 -1138 -1184 -1189 -1222 -1231 -1271 -1274 -1283 -1331 -1336 -1356 -1432 -1477 -1538 -1568 -1615 -1628 -1639 -1659 -1689 -1707 -1721 -1741 -1788 -1795 -1823 -1826 -1890 -1931 -1935 -1939 -1951 -1984 -1990 -2006 -2026 -2060 -2111 -2160 -2166 -2211 -2261 -2263 -2304 -2325 -2328 -2423 -2424 -2441 -2461 -2482 -2494 -2518 -2546 -2579 -2675 -2704 -2720 -2753 -2756 -2782 -2843 -2868 -2936 -2965 -3008 -3017 -3051 -3082 -3117 -3140 -3148 -3154 -3174 -3232 -3267 -3316 -3356 -3394 -3413 -3461 -3506 -3632 -3655 -3698 -3704 -3735 -3743 -3776 -3895 -3903 -3908 -3926 -3929 -3941 -3943 -3991 -4050 -4051 -4133 -4138 -4164 -4207 -4214 -4371 -4374 -4413 -4420 -4431 -4440 -4509 -4514 -4522 -4548 -4550 -4610 -4633 -4641 -4691 -4702 -4704 -4727 -4752 -4791 -4888 -4901 -4932 -5003 -5042 -5104 -5129 -5154 -5182 -5204 -5236 -5272 -5281 -5339 -5345 -5354 -5356 -5371 -5440 -5494 -5562 -5585 -5605 -5611 -5622 -5735 -5752 -5808 -5817 -5838 -5929 -5930 -5936 -5962 -5984 -5986 -6028 -6049 -6064 -6111 -6141 -6159 -6198 -6213 -6221 -6222 -6231 -6234 -6273 -6295 -6322 -6338 -6369 -6377 -6383 -6392 -6405 -6414 -6419 -6420 -6421 -6431 -6504 -6509 -6544 -6563 -6588 -6597 -6612 -6623 -6665 -6670 -6685 
-6729 -6737 -6753 -6756 -6822 -6855 -7022 -7061 -7063 -7067 -7110 -7127 -7170 -7198 -7230 -7353 -7358 -7375 -7397 -7499 -7514 -7538 -7539 -7576 -7612 -7688 -7702 -7715 -7720 -7734 -7769 -7771 -7819 -7821 -7850 -7876 -7970 -8004 -8034 -8045 -8059 -8121 -8127 -8176 -8222 -8228 -8290 -8296 -8338 -8360 -8411 -8437 -8545 -8591 -8642 -8646 -8682 -8705 -8750 -8761 -8763 -8773 -8866 -8952 -8963 -8990 -8996 -9047 -9091 -9133 -9144 -9148 -9212 -9231 -9294 -9361 -9422 -9427 -9482 -9562 -9582 -9657 -9697 -9698 -9716 -9736 -9743 -9746 -9754 -9780 -9791 -9824 -9840 -9862 -9866 -9889 -9890 -10011 -10100 -10163 -10179 -10187 -10204 -10217 -10232 -10321 -10342 -10348 -10356 -10397 -10412 -10452 -10564 -10569 -10582 -10636 -10687 -10688 -10740 -10769 -10800 -10811 -10813 -10884 -10893 -10944 -10947 -10976 -11022 -11036 -11058 -11073 -11077 -11203 -11226 -11244 -11253 -11291 -11319 -11340 -11342 -11349 -11415 -11448 -11452 -11468 -11474 -11545 -11563 -11566 -11567 -11568 -11575 -11583 -11615 -11631 -11653 -11729 -11763 -11839 -11864 -11873 -11910 -11934 -11940 -11944 -11963 -11966 -12100 -12117 -12120 -12122 -12131 -12140 -12163 -12164 -12168 -12176 -12246 -12251 -12266 -12286 -12305 -12334 -12342 -12356 -12380 -12410 -12442 -12456 -12458 -12472 -12474 -12478 -12484 -12494 -12495 -12572 -12574 -12599 -12638 -12644 -12695 -12753 -12765 -12767 -12842 -12844 -12956 -12978 -12988 -13003 -13032 -13036 -13043 -13051 -13094 -13112 -13159 -13172 -13177 -13238 -13253 -13261 -13366 -13421 -13431 -13482 -13535 -13555 -13571 -13606 -13614 -13641 -13656 -13682 -13730 -13736 -13747 -13796 -13835 -13836 -13897 -13912 -13965 -13968 -14013 -14053 -14065 -14070 -14119 -14147 -14158 -14172 -14173 -14182 -14210 -14220 -14257 -14262 -14274 -14327 -14340 -14350 -14378 -14384 -14394 -14408 -14413 -14427 -14444 -14457 -14478 -14516 -14589 -14590 -14609 -14612 -14657 -14680 -14736 -14791 -14855 -14952 -14961 -14979 -15020 -15138 -15143 -15148 -15174 -15203 -15224 -15225 -15237 -15239 -15243 -15250 -15321 
-15337 -15403 -15409 -15437 -15445 -15471 -15482 -15487 -15490 -15515 -15539 -15543 -15571 -15597 -15630 -15648 -15650 -15660 -15673 -15718 -15720 -15741 -15753 -15807 -15875 -15885 -15914 -15932 -15941 -16011 -16024 -16026 -16029 -16030 -16040 -16068 -16124 -16130 -16174 -16212 -16250 -16254 -16286 -16293 -16318 -16360 -16364 -16380 -16417 -16439 -16442 -16453 -16463 -16536 -16542 -16553 -16558 -16569 -16624 -16650 -16655 -16677 -16680 -16699 -16722 -16790 -16795 -16801 -16817 -16822 -16833 -16895 -16899 -16924 -16975 -16985 -17007 -17012 -17040 -17046 -17096 -17113 -17127 -17165 -17170 -17190 -17232 -17239 -17278 -17337 -17370 -17413 -17441 -17446 -17476 -17491 -17520 -17545 -17596 -17615 -17637 -17711 -17742 -17770 -17773 -17777 -17800 -17805 -17820 -17870 -17945 -17957 -18039 -18046 -18125 -18174 -18251 -18257 -18263 -18272 -18281 -18283 -18329 -18346 -18361 -18380 -18429 -18518 -18534 -18545 -18608 -18689 -18691 -18730 -18811 -18838 -18950 -18961 -18968 -18990 -19036 -19069 -19070 -19094 -19095 -19107 -19117 -19141 -19288 -19315 -19354 -19372 -19418 -19468 -19537 -19577 -19605 -19607 -19635 -19661 -19694 -19709 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/attack_6_sub_shadow_graph_index_attack_2.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/attack_6_sub_shadow_graph_index_attack_2.txt deleted file mode 100644 index c1963f59..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/attack_6_sub_shadow_graph_index_attack_2.txt +++ /dev/null @@ -1,506 +0,0 @@ -2048 -2058 -13 -15 -16 -21 -23 -24 -2072 -2075 -40 -41 -42 -44 -47 -2099 -58 -2107 -2121 -2140 -95 -96 -2146 -2149 -104 -2153 -110 -2164 -117 -2166 -119 -2167 -125 -129 -131 -132 -2182 -2184 -2186 -139 -2187 -145 -147 -149 -2204 -156 -159 -171 -2221 -175 -2225 -180 -181 -2240 -197 -2256 -209 -210 -2263 -2264 -2269 -2272 -225 -2274 -228 -2277 -235 -240 -239 -2288 -245 -2303 -255 -2305 -259 -2309 -256 -263 -268 -284 -2332 -2333 -2334 -291 -2343 
-2344 -2345 -2346 -2347 -306 -300 -314 -317 -2366 -2367 -2368 -324 -2377 -338 -339 -340 -2390 -351 -349 -348 -2403 -2404 -2405 -358 -2408 -2412 -364 -2415 -2416 -2417 -2418 -367 -373 -374 -377 -394 -399 -407 -411 -425 -428 -2480 -2481 -440 -2490 -444 -447 -448 -2498 -2500 -459 -465 -472 -2521 -2522 -2523 -484 -2538 -2543 -495 -2541 -499 -2548 -2549 -505 -514 -531 -541 -2590 -2594 -2596 -2597 -2599 -552 -2606 -2610 -2611 -564 -2612 -568 -2625 -2626 -579 -578 -2630 -2635 -2636 -2637 -2638 -2639 -2640 -2641 -592 -2642 -596 -589 -2648 -602 -606 -2655 -616 -622 -629 -631 -639 -2693 -656 -661 -2709 -2710 -2711 -665 -2712 -672 -2724 -2726 -2727 -691 -693 -2745 -2746 -699 -700 -703 -2753 -2757 -2760 -2761 -2762 -715 -2763 -711 -2768 -2769 -722 -2781 -737 -740 -741 -742 -2789 -2790 -743 -757 -2809 -2810 -2814 -767 -2816 -2817 -2815 -773 -775 -779 -780 -2825 -791 -797 -2847 -2851 -2852 -804 -809 -2862 -816 -820 -822 -826 -831 -842 -850 -858 -870 -873 -886 -888 -2940 -2942 -2941 -2943 -912 -2973 -928 -2978 -931 -932 -2982 -935 -942 -945 -2999 -952 -954 -957 -958 -959 -3012 -3016 -970 -3022 -3023 -3024 -987 -3035 -3036 -996 -3047 -3048 -3049 -3050 -3054 -1006 -3057 -3058 -3059 -3063 -1013 -1023 -3075 -1025 -1029 -3077 -3084 -1043 -3096 -3097 -1050 -3102 -3103 -1059 -3108 -3109 -1063 -3110 -1066 -3115 -3116 -3121 -1073 -1074 -3124 -3130 -3131 -1084 -3134 -3133 -1090 -1096 -1095 -1098 -3152 -1105 -1106 -1111 -1117 -3167 -3168 -1123 -3183 -3189 -3190 -3191 -1147 -3202 -3203 -3204 -3208 -1160 -1164 -1156 -1166 -1167 -3225 -3229 -3230 -1184 -3233 -1191 -3242 -3246 -3254 -1210 -3267 -3274 -1227 -3279 -1233 -3286 -1240 -3287 -3294 -3300 -3303 -3305 -3306 -3309 -1261 -1262 -1270 -3320 -3319 -3324 -1286 -1287 -1303 -1313 -1321 -1322 -1324 -1327 -1329 -1333 -1334 -1337 -1347 -1350 -1352 -1364 -1368 -1376 -1380 -1388 -1401 -1410 -1411 -1416 -1420 -1432 -1439 -1449 -1460 -1462 -1464 -1476 -1477 -1490 -1493 -1495 -1528 -1531 -1549 -1551 -1552 -1562 -1564 -1567 -1572 -1581 -1605 -1610 -1613 
-1637 -1647 -1661 -1667 -1675 -1685 -1690 -1704 -1708 -1717 -1725 -1726 -1729 -1732 -1733 -1738 -1740 -1743 -1744 -1747 -1766 -1768 -1776 -1785 -1796 -1800 -1803 -1813 -1827 -1829 -1831 -1838 -1839 -1845 -774 -1855 -1858 -1859 -1863 -1871 -1872 -1893 -1894 -1901 -1929 -1940 -1942 -1949 -1955 -1963 -1964 -1980 -1983 -1986 -1988 -1991 -1997 -2000 -2006 -2009 -2012 -2019 -2024 -2026 -2027 -2031 -2033 -2035 -2039 -2042 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/attack_6_sub_shadow_graph_index_attack_3.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/attack_6_sub_shadow_graph_index_attack_3.txt deleted file mode 100644 index 4d6386a2..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/attack_6_sub_shadow_graph_index_attack_3.txt +++ /dev/null @@ -1,496 +0,0 @@ -4 -2053 -6 -14 -2064 -2065 -2067 -2069 -27 -29 -33 -2086 -43 -46 -2097 -2098 -51 -52 -59 -2115 -2116 -2125 -2126 -78 -2128 -2129 -2133 -2139 -91 -93 -97 -2151 -107 -2157 -111 -2162 -123 -2176 -2181 -2180 -137 -141 -2191 -2192 -2194 -146 -153 -2202 -160 -2211 -168 -2222 -2226 -2230 -2234 -2236 -188 -190 -2239 -192 -2242 -193 -2246 -2247 -202 -2253 -2254 -207 -2257 -213 -219 -220 -223 -227 -229 -2279 -2281 -2282 -237 -2287 -2291 -246 -251 -2300 -253 -2302 -2306 -258 -266 -2318 -270 -2319 -274 -275 -282 -289 -296 -2348 -2349 -302 -303 -2352 -307 -325 -2376 -330 -2392 -2393 -346 -345 -2396 -2397 -2398 -352 -2407 -361 -354 -363 -365 -372 -370 -376 -382 -387 -2435 -388 -391 -2444 -403 -410 -426 -2475 -431 -433 -2482 -2491 -2492 -449 -2499 -453 -454 -460 -461 -2512 -467 -473 -2525 -2524 -481 -492 -493 -2542 -2544 -500 -2551 -2550 -2556 -508 -510 -2558 -511 -2564 -522 -523 -2572 -527 -528 -529 -2579 -534 -2585 -544 -549 -554 -556 -560 -2619 -572 -2622 -2627 -2628 -2632 -585 -586 -2649 -2650 -2651 -2660 -614 -2663 -2664 -2665 -2667 -615 -625 -626 -627 -2678 -630 -633 -2681 -636 -2685 -2686 -640 -2689 -2688 -644 -2716 -2717 -669 -683 -2732 -2733 -686 -690 
-696 -701 -2754 -2755 -706 -2758 -710 -712 -719 -2774 -2775 -2782 -2783 -2784 -2785 -2786 -753 -755 -2808 -2818 -2820 -2821 -2822 -2824 -2823 -2829 -2830 -2826 -2827 -2828 -787 -788 -784 -2838 -2839 -798 -799 -800 -2853 -807 -2859 -2860 -823 -824 -2882 -835 -843 -845 -846 -849 -855 -869 -2925 -2926 -2927 -884 -887 -2939 -893 -895 -2944 -896 -900 -902 -917 -2967 -2968 -926 -934 -937 -941 -2991 -2995 -949 -3000 -3006 -961 -962 -3007 -3013 -965 -3014 -968 -969 -980 -3030 -3031 -983 -3040 -993 -3041 -992 -1009 -1010 -1012 -1014 -3068 -1024 -3073 -3074 -1034 -1036 -3086 -3092 -3093 -1047 -1054 -3106 -3107 -1071 -1072 -3122 -1081 -3132 -1086 -1085 -3139 -1092 -3143 -1112 -1118 -1122 -3184 -3185 -1137 -3186 -3193 -3194 -3195 -3196 -3197 -1150 -1157 -3206 -1159 -3210 -1163 -3214 -1172 -3220 -1174 -1176 -1177 -1181 -1186 -3237 -1190 -1194 -3244 -1198 -3248 -1201 -3249 -3255 -3258 -3262 -1214 -3263 -1218 -1219 -3271 -1224 -1226 -1230 -3280 -1232 -1236 -1238 -1244 -3293 -1245 -1248 -3297 -3298 -1251 -1258 -3316 -1269 -1268 -1273 -3326 -254 -1279 -1281 -1290 -1297 -1301 -1309 -1339 -1342 -1343 -1351 -1363 -1371 -1386 -1405 -1413 -1421 -1422 -1427 -1452 -1470 -1480 -1494 -1501 -1503 -1508 -1513 -1532 -1533 -1542 -1543 -1559 -1579 -1590 -1593 -1594 -1599 -1607 -1608 -1616 -1617 -1633 -1635 -1644 -1645 -1652 -1658 -1662 -1663 -1664 -1674 -1687 -1689 -1703 -1712 -1715 -1724 -1728 -1731 -1751 -1752 -1757 -1772 -1807 -1808 -1809 -1818 -1820 -1822 -1824 -1830 -1834 -1840 -1868 -1883 -1886 -1890 -1909 -1915 -1918 -1935 -1937 -1943 -1947 -1954 -1958 -1962 -1976 -1992 -1998 -2002 -2004 -2008 -2021 -2023 -2030 -2043 -2045 -2047 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/protential_1200_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/protential_1200_shadow_graph_index.txt deleted file mode 100644 index ab465c06..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/protential_1200_shadow_graph_index.txt 
+++ /dev/null @@ -1,807 +0,0 @@ -2048 -4 -2053 -6 -13 -14 -15 -2064 -2065 -16 -2069 -21 -23 -24 -27 -29 -33 -2086 -40 -41 -42 -43 -44 -46 -47 -2097 -2098 -51 -52 -58 -2107 -59 -2115 -2116 -2121 -2125 -2126 -2128 -2139 -95 -96 -97 -2146 -2149 -2151 -104 -2153 -107 -110 -111 -2162 -2164 -2166 -119 -2167 -125 -2176 -129 -131 -132 -2181 -2182 -2184 -137 -139 -2187 -141 -2191 -2192 -145 -2194 -147 -146 -153 -2204 -159 -2211 -168 -171 -2221 -175 -2226 -180 -181 -2230 -2234 -2236 -190 -2239 -192 -2240 -2242 -197 -2246 -2247 -202 -2253 -2254 -207 -2256 -2257 -213 -2263 -2264 -219 -220 -2269 -223 -2272 -225 -2274 -228 -229 -2277 -2279 -2281 -2282 -237 -2287 -240 -239 -2291 -245 -246 -251 -2300 -2302 -2303 -255 -2306 -258 -259 -2309 -263 -268 -2318 -270 -2319 -274 -275 -282 -284 -2332 -2333 -2334 -289 -291 -296 -2344 -2345 -2346 -2348 -2349 -302 -303 -2352 -2347 -306 -307 -300 -314 -317 -324 -325 -2376 -2377 -330 -338 -339 -340 -2390 -2392 -2393 -346 -345 -2396 -2397 -2398 -351 -352 -349 -2404 -2405 -358 -2407 -2408 -361 -363 -2412 -365 -2415 -2416 -2417 -2418 -372 -373 -376 -377 -382 -387 -2435 -388 -391 -394 -2444 -403 -407 -411 -425 -2475 -428 -431 -2480 -433 -2482 -2481 -440 -2490 -444 -448 -2498 -2500 -453 -454 -460 -461 -465 -467 -472 -2521 -2522 -2523 -2525 -481 -484 -2538 -492 -493 -2542 -2543 -2544 -495 -499 -505 -2556 -510 -2558 -511 -514 -2564 -522 -523 -2572 -527 -528 -529 -531 -534 -2585 -541 -2590 -544 -2594 -2596 -549 -2597 -2599 -552 -554 -2606 -560 -2610 -2611 -564 -2612 -568 -2622 -2625 -2626 -579 -2630 -2632 -585 -586 -2635 -2636 -2637 -2638 -2639 -2640 -2641 -592 -2642 -596 -589 -2648 -2649 -602 -2650 -2651 -606 -2655 -2660 -2663 -2664 -2665 -616 -2667 -622 -625 -626 -627 -629 -2678 -631 -630 -633 -2681 -636 -2685 -2686 -639 -2688 -2689 -644 -2693 -661 -2709 -2710 -2711 -665 -2712 -2717 -2724 -683 -686 -691 -693 -696 -2745 -2746 -699 -700 -701 -703 -2753 -2754 -2755 -706 -2758 -710 -2760 -2761 -2762 -715 -2763 -712 -719 -2768 -2769 -2774 -2775 -2781 -2782 
-2783 -2784 -2785 -2786 -737 -740 -741 -742 -2789 -753 -755 -2808 -2809 -2810 -2814 -767 -2816 -2817 -2818 -2815 -2820 -773 -2821 -2822 -2824 -775 -2823 -779 -780 -2829 -2830 -2826 -2827 -2828 -2825 -787 -788 -2838 -791 -2839 -799 -800 -2847 -2851 -2852 -2853 -804 -807 -809 -2859 -2860 -2862 -816 -820 -822 -823 -824 -826 -831 -2882 -835 -842 -845 -846 -850 -855 -869 -870 -873 -2925 -2926 -2927 -886 -887 -888 -893 -2942 -895 -2944 -896 -2941 -902 -912 -917 -2967 -2968 -2973 -928 -2978 -931 -932 -935 -937 -941 -2991 -945 -2995 -2999 -3000 -958 -959 -962 -3012 -3013 -965 -3014 -3016 -968 -970 -3023 -3024 -980 -3030 -3031 -983 -987 -3035 -3036 -3040 -993 -3041 -992 -996 -3047 -3048 -3049 -3050 -3054 -1006 -1009 -1010 -3057 -1012 -3058 -3059 -3063 -1013 -1014 -3068 -1023 -3073 -3074 -3075 -1025 -1034 -1036 -3086 -1043 -3092 -3093 -1047 -3096 -3097 -1050 -1054 -1059 -1063 -1066 -3115 -3116 -1071 -3121 -1073 -1074 -3124 -1081 -3130 -3131 -3132 -1086 -3134 -1090 -1092 -3143 -1096 -1095 -1098 -1105 -1112 -1117 -1118 -3167 -3168 -1122 -3184 -3185 -1137 -3186 -3189 -3190 -3191 -3193 -3194 -3195 -3196 -3197 -1150 -1147 -3202 -3203 -3204 -1157 -3206 -1159 -3208 -1160 -3210 -1163 -1164 -1166 -1167 -1174 -1176 -1177 -3225 -1181 -3229 -3230 -3233 -1186 -3237 -1190 -1194 -3242 -1198 -3246 -1201 -3254 -3255 -3258 -1210 -3262 -1214 -3263 -1218 -1219 -1224 -3274 -1227 -1226 -1230 -3280 -1232 -1233 -1236 -1238 -1240 -1244 -3293 -1245 -3294 -1248 -3297 -1251 -3300 -3303 -1258 -3309 -1261 -1262 -3316 -1269 -1268 -1270 -3320 -1273 -3319 -3324 -3326 -1281 -1290 -1297 -1301 -1303 -1313 -1321 -1322 -1324 -1327 -1329 -1333 -1337 -1339 -1342 -1343 -1347 -1350 -1351 -1352 -1364 -1368 -1371 -1376 -1380 -1386 -1388 -1410 -1413 -1416 -1421 -1422 -1427 -1432 -1439 -1449 -1452 -1460 -1462 -1464 -1470 -1476 -1477 -1480 -1490 -1493 -1494 -1495 -1501 -1503 -1508 -1513 -1528 -1531 -1532 -1533 -1542 -1549 -1551 -1552 -1559 -1562 -1567 -1572 -1579 -1581 -1590 -1593 -1594 -1599 -1605 -1607 -1608 -1617 
-1633 -1635 -1637 -1644 -1645 -1647 -1658 -1661 -1662 -1663 -1664 -1674 -1675 -1689 -1690 -1703 -1704 -1708 -1712 -1717 -1724 -1725 -1728 -1729 -1731 -1732 -1740 -1743 -1747 -1751 -1757 -1768 -1776 -1785 -1796 -1800 -1807 -1813 -1820 -1824 -1827 -1829 -1830 -1831 -1834 -1838 -1839 -1840 -1845 -774 -1858 -1859 -1868 -1871 -1872 -1883 -1886 -1893 -1894 -1901 -1909 -1915 -1918 -1929 -1937 -1940 -1942 -1943 -1947 -1949 -1954 -1955 -1958 -1962 -1963 -1964 -1976 -1980 -1986 -1988 -1997 -2000 -2002 -2004 -2006 -2008 -2009 -2012 -2019 -2021 -2023 -2024 -2027 -2031 -2033 -2035 -2039 -2042 -2045 -2047 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/protential_1300_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/protential_1300_shadow_graph_index.txt deleted file mode 100644 index 70fa0299..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/protential_1300_shadow_graph_index.txt +++ /dev/null @@ -1,1002 +0,0 @@ -2048 -4 -2053 -6 -2058 -13 -14 -15 -2064 -2065 -16 -2067 -2069 -21 -23 -24 -2072 -2075 -27 -29 -33 -2086 -40 -41 -42 -43 -44 -46 -47 -2097 -2098 -51 -52 -2099 -58 -2107 -59 -2115 -2116 -2121 -2125 -2126 -78 -2128 -2129 -2133 -2139 -91 -2140 -93 -95 -96 -97 -2146 -2149 -2151 -104 -2153 -107 -2157 -110 -111 -2162 -2164 -117 -2166 -119 -2167 -123 -125 -2176 -129 -131 -132 -2181 -2182 -2180 -2184 -137 -2186 -139 -2187 -141 -2191 -2192 -145 -2194 -146 -147 -149 -153 -2202 -2204 -156 -159 -160 -2211 -168 -171 -2221 -2222 -175 -2225 -2226 -180 -181 -2230 -2234 -2236 -188 -190 -2239 -192 -2240 -2242 -193 -197 -2246 -2247 -202 -2253 -2254 -207 -2256 -2257 -209 -210 -213 -2263 -2264 -219 -220 -2269 -223 -2272 -225 -2274 -227 -228 -229 -2277 -2279 -2281 -2282 -235 -237 -2287 -240 -239 -2288 -2291 -245 -246 -251 -2300 -253 -2302 -2303 -255 -2305 -2306 -258 -259 -2309 -256 -263 -266 -268 -2318 -270 -2319 -274 -275 -282 -284 -2332 -2333 -2334 -289 -291 -2343 -296 -2344 -2345 -2346 -2348 -2349 -302 
-303 -2352 -2347 -306 -307 -300 -314 -317 -2366 -2367 -2368 -324 -325 -2376 -2377 -330 -338 -339 -340 -2390 -2392 -2393 -346 -345 -2396 -2397 -2398 -351 -352 -349 -348 -2403 -2404 -2405 -358 -2407 -2408 -361 -354 -363 -2412 -365 -364 -2415 -2416 -2417 -2418 -367 -372 -373 -370 -374 -376 -377 -382 -387 -2435 -388 -391 -394 -2444 -399 -403 -407 -410 -411 -425 -426 -2475 -428 -431 -2480 -433 -2482 -2481 -440 -2490 -2491 -444 -2492 -447 -448 -449 -2498 -2499 -2500 -453 -454 -459 -460 -461 -2512 -465 -467 -472 -2521 -2522 -2523 -473 -2525 -2524 -481 -484 -2538 -492 -493 -2542 -2543 -2544 -495 -2541 -499 -2548 -2549 -500 -2551 -2550 -505 -2556 -508 -510 -2558 -511 -514 -2564 -522 -523 -2572 -527 -528 -529 -531 -2579 -534 -2585 -541 -2590 -544 -2594 -2596 -549 -2597 -2599 -552 -554 -556 -2606 -560 -2610 -2611 -564 -2612 -568 -2619 -572 -2622 -2625 -2626 -579 -578 -2627 -2630 -2628 -2632 -585 -586 -2635 -2636 -2637 -2638 -2639 -2640 -2641 -592 -2642 -596 -589 -2648 -2649 -602 -2650 -2651 -606 -2655 -2660 -614 -2663 -2664 -2665 -616 -2667 -615 -622 -625 -626 -627 -629 -2678 -631 -630 -633 -2681 -636 -2685 -2686 -639 -640 -2689 -2688 -644 -2693 -656 -661 -2709 -2710 -2711 -665 -2712 -2716 -2717 -669 -672 -2724 -2726 -2727 -683 -2732 -2733 -686 -690 -691 -693 -696 -2745 -2746 -699 -700 -701 -703 -2753 -2754 -2755 -706 -2757 -2758 -710 -2760 -2761 -2762 -715 -2763 -712 -711 -719 -2768 -2769 -722 -2774 -2775 -2781 -2782 -2783 -2784 -2785 -2786 -737 -740 -741 -742 -2789 -2790 -743 -753 -755 -757 -2808 -2809 -2810 -2814 -767 -2816 -2817 -2818 -2815 -2820 -773 -2821 -2822 -2824 -775 -2823 -779 -780 -2829 -2830 -2826 -2827 -2828 -2825 -787 -788 -784 -2838 -791 -2839 -797 -798 -799 -800 -2847 -2851 -2852 -2853 -804 -807 -809 -2859 -2860 -2862 -816 -820 -822 -823 -824 -826 -831 -2882 -835 -842 -843 -845 -846 -849 -850 -855 -858 -869 -870 -873 -2925 -2926 -2927 -884 -886 -887 -888 -2939 -2940 -893 -2942 -895 -2944 -896 -2941 -2943 -900 -902 -912 -917 -2967 -2968 -2973 -926 -928 -2978 
-931 -932 -2982 -935 -934 -937 -941 -942 -2991 -945 -2995 -949 -2999 -3000 -952 -954 -957 -958 -959 -3006 -961 -962 -3007 -3012 -3013 -965 -3014 -3016 -968 -970 -969 -3022 -3023 -3024 -980 -3030 -3031 -983 -987 -3035 -3036 -3040 -993 -3041 -992 -996 -3047 -3048 -3049 -3050 -3054 -1006 -1009 -1010 -3057 -1012 -3058 -3059 -3063 -1013 -1014 -3068 -1023 -1024 -3073 -3074 -3075 -1025 -1029 -3077 -1034 -1036 -3084 -3086 -1043 -3092 -3093 -1047 -3096 -3097 -1050 -1054 -3102 -3103 -3106 -1059 -3107 -3108 -3109 -1063 -3110 -1066 -3115 -3116 -1071 -1072 -3121 -1073 -1074 -3124 -3122 -1081 -3130 -3131 -3132 -1084 -1086 -3134 -1085 -3133 -1090 -3139 -1092 -3143 -1096 -1095 -1098 -3152 -1105 -1106 -1111 -1112 -1117 -1118 -3167 -3168 -1122 -1123 -3183 -3184 -3185 -1137 -3186 -3189 -3190 -3191 -3193 -3194 -3195 -3196 -3197 -1150 -1147 -3202 -3203 -3204 -1157 -3206 -1159 -3208 -1160 -3210 -1163 -1164 -1156 -1166 -1167 -3214 -1172 -3220 -1174 -1176 -1177 -3225 -1181 -3229 -3230 -1184 -3233 -1186 -3237 -1190 -1191 -1194 -3242 -3244 -1198 -3246 -3248 -1201 -3249 -3254 -3255 -3258 -1210 -3262 -1214 -3263 -1218 -1219 -3267 -3271 -1224 -3274 -1227 -1226 -1230 -3279 -3280 -1232 -1233 -1236 -1238 -3286 -1240 -3287 -1244 -3293 -1245 -3294 -1248 -3297 -3298 -1251 -3300 -3303 -3305 -1258 -3306 -3309 -1261 -1262 -3316 -1269 -1268 -1270 -3320 -1273 -3319 -3324 -3326 -254 -1279 -1281 -1286 -1287 -1290 -1297 -1301 -1303 -1309 -1313 -1321 -1322 -1324 -1327 -1329 -1333 -1334 -1337 -1339 -1342 -1343 -1347 -1350 -1351 -1352 -1363 -1364 -1368 -1371 -1376 -1380 -1386 -1388 -1401 -1405 -1410 -1411 -1413 -1416 -1420 -1421 -1422 -1427 -1432 -1439 -1449 -1452 -1460 -1462 -1464 -1470 -1476 -1477 -1480 -1490 -1493 -1494 -1495 -1501 -1503 -1508 -1513 -1528 -1531 -1532 -1533 -1542 -1543 -1549 -1551 -1552 -1559 -1562 -1564 -1567 -1572 -1579 -1581 -1590 -1593 -1594 -1599 -1605 -1607 -1608 -1610 -1613 -1616 -1617 -1633 -1635 -1637 -1644 -1645 -1647 -1652 -1658 -1661 -1662 -1663 -1664 -1667 -1674 -1675 -1685 
-1687 -1689 -1690 -1703 -1704 -1708 -1712 -1715 -1717 -1724 -1725 -1726 -1728 -1729 -1731 -1732 -1733 -1738 -1740 -1743 -1744 -1747 -1751 -1752 -1757 -1766 -1768 -1772 -1776 -1785 -1796 -1800 -1803 -1807 -1808 -1809 -1813 -1818 -1820 -1822 -1824 -1827 -1829 -1830 -1831 -1834 -1838 -1839 -1840 -1845 -774 -1855 -1858 -1859 -1863 -1868 -1871 -1872 -1883 -1886 -1890 -1893 -1894 -1901 -1909 -1915 -1918 -1929 -1935 -1937 -1940 -1942 -1943 -1947 -1949 -1954 -1955 -1958 -1962 -1963 -1964 -1976 -1980 -1983 -1986 -1988 -1991 -1992 -1997 -1998 -2000 -2002 -2004 -2006 -2008 -2009 -2012 -2019 -2021 -2023 -2024 -2026 -2027 -2030 -2031 -2033 -2035 -2039 -2042 -2043 -2045 -2047 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/protential_500_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/protential_500_shadow_graph_index.txt deleted file mode 100644 index 2372e46c..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/protential_500_shadow_graph_index.txt +++ /dev/null @@ -1,201 +0,0 @@ -514 -1542 -2572 -529 -2065 -534 -23 -2590 -544 -33 -2086 -40 -3115 -3116 -1579 -1581 -2098 -568 -1081 -1594 -58 -2107 -3132 -1086 -2625 -2626 -1092 -1608 -1096 -2635 -3068 -2638 -2639 -2640 -2641 -592 -596 -1118 -97 -2146 -107 -1645 -1647 -3184 -3185 -625 -3191 -631 -3193 -3194 -3195 -636 -1661 -1662 -3196 -2688 -2689 -3197 -131 -125 -2685 -3206 -2686 -639 -1159 -3210 -1166 -2192 -2194 -147 -661 -1689 -1690 -3233 -1186 -168 -1194 -683 -3242 -2221 -1198 -1201 -180 -3258 -1724 -1725 -190 -3262 -192 -1728 -1218 -2753 -1732 -197 -2246 -2754 -2755 -710 -202 -715 -1740 -2253 -1743 -207 -3280 -2264 -1244 -3293 -223 -2272 -740 -742 -1258 -2287 -240 -1269 -2808 -2809 -2810 -3324 -2302 -3326 -2306 -773 -268 -2318 -270 -1807 -1297 -787 -788 -275 -1820 -284 -1831 -1839 -816 -306 -824 -831 -1859 -324 -325 -1350 -2376 -2377 -330 -1868 -845 -846 -1368 -2392 -2393 -1371 -351 -352 -2404 -2405 -1380 -1893 -372 -377 -1915 -893 -2942 -2944 
-896 -387 -1413 -902 -1416 -403 -1940 -1942 -1439 -1954 -1958 -935 -1962 -428 -945 -2500 -3016 -1997 -3023 -2000 -3024 -1490 -980 -2006 -3031 -2009 -987 -996 -2542 -3054 -2033 -3063 -1532 -2558 -511 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/protential_700_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/protential_700_shadow_graph_index.txt deleted file mode 100644 index 87aaf6e0..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/protential_700_shadow_graph_index.txt +++ /dev/null @@ -1,542 +0,0 @@ -2053 -6 -15 -2064 -2065 -2069 -23 -24 -27 -29 -33 -2086 -40 -41 -43 -44 -2097 -2098 -51 -52 -58 -2107 -59 -2115 -2116 -2125 -2128 -2139 -95 -96 -97 -2146 -2149 -2151 -104 -107 -110 -2162 -2164 -119 -125 -2176 -129 -131 -132 -2181 -137 -139 -141 -2192 -145 -2194 -147 -153 -2211 -168 -2221 -2226 -180 -2230 -2234 -190 -2239 -192 -197 -2246 -2247 -202 -2253 -2254 -207 -2256 -2257 -213 -2264 -219 -220 -2269 -223 -2272 -229 -2281 -2282 -237 -2287 -240 -239 -2291 -246 -2302 -2303 -2306 -258 -2309 -268 -2318 -270 -274 -275 -284 -2332 -2333 -2334 -289 -296 -302 -303 -2352 -306 -307 -324 -325 -2376 -2377 -330 -338 -339 -2392 -2393 -346 -2396 -2397 -2398 -351 -352 -2404 -2405 -2407 -361 -363 -2412 -372 -377 -382 -387 -2435 -388 -2444 -403 -2475 -428 -431 -433 -440 -2498 -2500 -453 -454 -461 -472 -2521 -2522 -2523 -2525 -2538 -492 -493 -2542 -2543 -2544 -2556 -2558 -511 -510 -514 -2564 -2572 -527 -528 -529 -534 -2585 -541 -2590 -544 -549 -552 -554 -564 -568 -2622 -2625 -2626 -2632 -585 -586 -2635 -2636 -2638 -2639 -2640 -2641 -592 -2642 -596 -2649 -602 -2650 -2651 -606 -2660 -2663 -2664 -2665 -2667 -625 -627 -2678 -631 -633 -2681 -636 -2685 -2686 -639 -2688 -2689 -644 -661 -665 -2717 -683 -686 -696 -2745 -2746 -700 -703 -2753 -2754 -2755 -706 -710 -712 -715 -719 -2774 -2775 -2781 -2782 -2783 -2784 -2785 -2786 -740 -2789 -742 -753 -755 -2808 -2809 -2810 -767 -2818 -2820 -773 -2821 -2822 -775 
-2823 -2824 -779 -780 -2826 -2827 -2828 -2829 -2830 -787 -788 -2838 -791 -2839 -799 -800 -2853 -807 -809 -2859 -2860 -816 -822 -824 -831 -2882 -835 -845 -846 -869 -870 -2925 -2926 -2927 -887 -893 -2942 -895 -2944 -896 -902 -917 -2967 -2968 -928 -932 -935 -937 -941 -2991 -945 -3000 -958 -959 -962 -3013 -965 -3014 -3016 -968 -970 -3023 -3024 -980 -3031 -987 -3035 -993 -996 -3047 -3048 -3049 -3050 -3054 -1009 -1010 -1012 -3063 -3068 -3073 -3074 -3075 -1034 -1036 -3086 -1043 -3092 -3093 -3097 -1054 -1059 -1063 -3115 -3116 -1071 -3121 -1081 -3132 -1086 -3134 -1092 -3143 -1096 -1112 -1118 -3167 -3168 -1122 -3184 -3185 -1137 -3186 -3190 -3191 -3193 -3194 -3195 -3196 -3197 -1157 -3206 -1159 -3208 -3210 -1166 -1174 -1176 -1177 -1181 -3229 -3230 -3233 -1186 -1190 -1194 -3242 -1198 -1201 -3254 -3255 -3258 -3262 -1214 -1218 -1219 -3274 -1227 -1226 -1230 -3280 -1232 -1236 -1238 -1240 -1244 -3293 -3297 -1251 -1258 -3309 -3316 -1269 -1268 -1273 -3324 -3326 -1281 -1290 -1297 -1301 -1303 -1321 -1322 -1327 -1329 -1337 -1342 -1343 -1350 -1351 -1352 -1368 -1371 -1376 -1380 -1386 -1413 -1416 -1422 -1427 -1439 -1464 -1470 -1477 -1480 -1490 -1494 -1501 -1503 -1508 -1532 -1533 -1542 -1551 -1552 -1572 -1579 -1581 -1590 -1594 -1599 -1607 -1608 -1617 -1633 -1635 -1644 -1645 -1647 -1658 -1661 -1662 -1663 -1664 -1674 -1675 -1689 -1690 -1703 -1704 -1724 -1725 -1728 -1731 -1732 -1740 -1743 -1768 -1807 -1820 -1824 -1829 -1831 -1834 -1839 -1845 -1858 -1859 -1868 -1883 -1893 -1915 -1918 -1937 -1940 -1942 -1943 -1947 -1949 -1954 -1955 -1958 -1962 -1964 -1976 -1997 -2000 -2002 -2004 -2006 -2008 -2009 -2012 -2019 -2021 -2023 -2027 -2031 -2033 -2035 -2042 -2045 -2047 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/protential_900_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/protential_900_shadow_graph_index.txt deleted file mode 100644 index 2d683aed..00000000 --- 
a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/protential_900_shadow_graph_index.txt +++ /dev/null @@ -1,600 +0,0 @@ -4 -2053 -6 -15 -2064 -2065 -2069 -23 -24 -27 -29 -33 -2086 -40 -41 -43 -44 -46 -2097 -2098 -51 -52 -58 -2107 -59 -2115 -2116 -2121 -2125 -2128 -2139 -95 -96 -97 -2146 -2149 -2151 -104 -107 -110 -2162 -2164 -119 -125 -2176 -129 -131 -132 -2181 -137 -139 -141 -2191 -2192 -145 -2194 -147 -153 -2211 -168 -2221 -175 -2226 -180 -2230 -2234 -190 -2239 -192 -197 -2246 -2247 -202 -2253 -2254 -207 -2256 -2257 -213 -2264 -219 -220 -2269 -223 -2272 -225 -229 -2281 -2282 -237 -2287 -240 -239 -2291 -245 -246 -2302 -2303 -2306 -258 -2309 -268 -2318 -270 -274 -275 -282 -284 -2332 -2333 -2334 -289 -296 -2344 -2348 -2349 -302 -303 -2352 -306 -307 -324 -325 -2376 -2377 -330 -338 -339 -2390 -2392 -2393 -346 -2396 -2397 -2398 -351 -352 -2404 -2405 -358 -2407 -361 -363 -2412 -372 -377 -382 -387 -2435 -388 -2444 -403 -2475 -428 -431 -433 -2482 -440 -2498 -2500 -453 -454 -461 -467 -472 -2521 -2522 -2523 -2525 -2538 -492 -493 -2542 -2543 -2544 -2556 -510 -2558 -511 -514 -2564 -522 -523 -2572 -527 -528 -529 -534 -2585 -541 -2590 -544 -549 -552 -554 -560 -564 -568 -2622 -2625 -2626 -2632 -585 -586 -2635 -2636 -2638 -2639 -2640 -2641 -592 -2642 -596 -2649 -602 -2650 -2651 -606 -2660 -2663 -2664 -2665 -2667 -625 -627 -2678 -631 -633 -2681 -636 -2685 -2686 -639 -2688 -2689 -644 -661 -665 -2717 -683 -686 -696 -2745 -2746 -699 -700 -703 -2753 -2754 -2755 -706 -710 -712 -715 -719 -2774 -2775 -2781 -2782 -2783 -2784 -2785 -2786 -740 -2789 -742 -741 -753 -755 -2808 -2809 -2810 -767 -2818 -2820 -773 -2821 -2822 -775 -2823 -2824 -779 -780 -2827 -2828 -2829 -2830 -2826 -787 -788 -2838 -791 -2839 -799 -800 -2847 -2853 -807 -809 -2859 -2860 -816 -822 -824 -831 -2882 -835 -842 -845 -846 -869 -870 -2925 -2926 -2927 -887 -893 -2942 -895 -2944 -896 -2941 -902 -917 -2967 -2968 -928 -932 -935 -937 -941 -2991 -945 -3000 -958 -959 -962 -3013 -965 -3014 -3016 -968 -970 -3023 -3024 
-980 -3031 -987 -3035 -3036 -3040 -993 -3041 -996 -3047 -3048 -3049 -3050 -3054 -1009 -1010 -3057 -1012 -3058 -3059 -3063 -3068 -3073 -3074 -3075 -1034 -1036 -3086 -1043 -3092 -3093 -1047 -3097 -1054 -1059 -1063 -1066 -3115 -3116 -1071 -3121 -3124 -1081 -3132 -1086 -3134 -1092 -3143 -1096 -1112 -1118 -3167 -3168 -1122 -3184 -3185 -1137 -3186 -3189 -3190 -3191 -3193 -3194 -3195 -3196 -3197 -3203 -3204 -1157 -3206 -1159 -3208 -1160 -3210 -1163 -1164 -1166 -1174 -1176 -1177 -1181 -3229 -3230 -3233 -1186 -3237 -1190 -1194 -3242 -1198 -1201 -3254 -3255 -3258 -3262 -1214 -1218 -1219 -1224 -3274 -1227 -1226 -1230 -3280 -1232 -1236 -1238 -1240 -1244 -3293 -1245 -3297 -1251 -1258 -3309 -3316 -1269 -1268 -1270 -3320 -1273 -3319 -3324 -3326 -1281 -1290 -1297 -1301 -1303 -1321 -1322 -1327 -1329 -1337 -1342 -1343 -1347 -1350 -1351 -1352 -1368 -1371 -1376 -1380 -1386 -1413 -1416 -1422 -1427 -1432 -1439 -1464 -1470 -1477 -1480 -1490 -1494 -1495 -1501 -1503 -1508 -1528 -1532 -1533 -1542 -1549 -1551 -1552 -1559 -1572 -1579 -1581 -1590 -1593 -1594 -1599 -1607 -1608 -1617 -1633 -1635 -1644 -1645 -1647 -1658 -1661 -1662 -1663 -1664 -1674 -1675 -1689 -1690 -1703 -1704 -1724 -1725 -1728 -1731 -1732 -1740 -1743 -1747 -1768 -1800 -1807 -1820 -1824 -1827 -1829 -1830 -1831 -1834 -1839 -1840 -1845 -1858 -1859 -1868 -1883 -1893 -1915 -1918 -1929 -1937 -1940 -1942 -1943 -1947 -1949 -1954 -1955 -1958 -1962 -1963 -1964 -1976 -1997 -2000 -2002 -2004 -2006 -2008 -2009 -2012 -2019 -2021 -2023 -2027 -2031 -2033 -2035 -2042 -2045 -2047 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/target_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/target_graph_index.txt deleted file mode 100644 index 779353e3..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/citeseer/target_graph_index.txt +++ /dev/null @@ -1,2325 +0,0 @@ -0 -1 -2 -3 -5 -7 -8 -9 -10 -11 -12 -17 -18 -19 -20 -22 -25 -26 -28 -30 -31 -32 -34 -35 -36 -37 -38 -39 -45 -48 -49 -50 -53 
-54 -55 -56 -57 -60 -61 -62 -63 -64 -65 -66 -67 -68 -69 -70 -71 -72 -73 -74 -75 -76 -77 -79 -80 -81 -82 -83 -84 -85 -86 -87 -88 -89 -90 -92 -94 -98 -99 -100 -101 -102 -103 -105 -106 -108 -109 -112 -113 -114 -115 -116 -118 -120 -121 -122 -124 -126 -127 -128 -130 -133 -134 -135 -136 -138 -140 -142 -143 -144 -148 -150 -151 -152 -154 -155 -157 -158 -161 -162 -163 -164 -165 -166 -167 -169 -170 -172 -173 -174 -176 -177 -178 -179 -182 -183 -184 -185 -186 -187 -189 -191 -194 -195 -196 -198 -199 -200 -201 -203 -204 -205 -206 -208 -211 -212 -214 -215 -216 -217 -218 -221 -222 -224 -226 -230 -231 -232 -233 -234 -236 -238 -241 -242 -243 -244 -247 -248 -249 -250 -252 -257 -260 -261 -262 -264 -265 -267 -269 -271 -272 -273 -276 -277 -278 -279 -280 -281 -283 -285 -286 -287 -288 -290 -292 -293 -294 -295 -297 -298 -299 -301 -304 -305 -308 -309 -310 -311 -312 -313 -315 -316 -318 -319 -320 -321 -322 -323 -326 -327 -328 -329 -331 -332 -333 -334 -335 -336 -337 -341 -342 -343 -344 -347 -350 -353 -355 -356 -357 -359 -360 -362 -366 -368 -369 -371 -375 -378 -379 -380 -381 -383 -384 -385 -386 -389 -390 -392 -393 -395 -396 -397 -398 -400 -401 -402 -404 -405 -406 -408 -409 -412 -413 -414 -415 -416 -417 -418 -419 -420 -421 -422 -423 -424 -427 -429 -430 -432 -434 -435 -436 -437 -438 -439 -441 -442 -443 -445 -446 -450 -451 -452 -455 -456 -457 -458 -462 -463 -464 -466 -468 -469 -470 -471 -474 -475 -476 -477 -478 -479 -480 -482 -483 -485 -486 -487 -488 -489 -490 -491 -494 -496 -497 -498 -501 -502 -503 -504 -506 -507 -509 -512 -513 -515 -516 -517 -518 -519 -520 -521 -524 -525 -526 -530 -532 -533 -535 -536 -537 -538 -539 -540 -542 -543 -545 -546 -547 -548 -550 -551 -553 -555 -557 -558 -559 -561 -562 -563 -565 -566 -567 -569 -570 -571 -573 -574 -575 -576 -577 -580 -581 -582 -583 -584 -587 -588 -590 -591 -593 -594 -595 -597 -598 -599 -600 -601 -603 -604 -605 -607 -608 -609 -610 -611 -612 -613 -617 -618 -619 -620 -621 -623 -624 -628 -632 -634 -635 -637 -638 -641 -642 -643 -645 -646 -647 -648 -649 -650 
-651 -652 -653 -654 -655 -657 -658 -659 -660 -662 -663 -664 -666 -667 -668 -670 -671 -673 -674 -675 -676 -677 -678 -679 -680 -681 -682 -684 -685 -687 -688 -689 -692 -694 -695 -697 -698 -702 -704 -705 -707 -708 -709 -713 -714 -716 -717 -718 -720 -721 -723 -724 -725 -726 -727 -728 -729 -730 -731 -732 -733 -734 -735 -736 -738 -739 -744 -745 -746 -747 -748 -749 -750 -751 -752 -754 -756 -758 -759 -760 -761 -762 -763 -764 -765 -766 -768 -769 -770 -771 -772 -776 -777 -778 -781 -782 -783 -785 -786 -789 -790 -792 -793 -794 -795 -796 -801 -802 -803 -805 -806 -808 -810 -811 -812 -813 -814 -815 -817 -818 -819 -821 -825 -827 -828 -829 -830 -832 -833 -834 -836 -837 -838 -839 -840 -841 -844 -847 -848 -851 -852 -853 -854 -856 -857 -859 -860 -861 -862 -863 -864 -865 -866 -867 -868 -871 -872 -874 -875 -876 -877 -878 -879 -880 -881 -882 -883 -885 -889 -890 -891 -892 -894 -897 -898 -899 -901 -903 -904 -905 -906 -907 -908 -909 -910 -911 -913 -914 -915 -916 -918 -919 -920 -921 -922 -923 -924 -925 -927 -929 -930 -933 -936 -938 -939 -940 -943 -944 -946 -947 -948 -950 -951 -953 -955 -956 -960 -963 -964 -966 -967 -971 -972 -973 -974 -975 -976 -977 -978 -979 -981 -982 -984 -985 -986 -988 -989 -990 -991 -994 -995 -997 -998 -999 -1000 -1001 -1002 -1003 -1004 -1005 -1007 -1008 -1011 -1015 -1016 -1017 -1018 -1019 -1020 -1021 -1022 -1026 -1027 -1028 -1030 -1031 -1032 -1033 -1035 -1037 -1038 -1039 -1040 -1041 -1042 -1044 -1045 -1046 -1048 -1049 -1051 -1052 -1053 -1055 -1056 -1057 -1058 -1060 -1061 -1062 -1064 -1065 -1067 -1068 -1069 -1070 -1075 -1076 -1077 -1078 -1079 -1080 -1082 -1083 -1087 -1088 -1089 -1091 -1093 -1094 -1097 -1099 -1100 -1101 -1102 -1103 -1104 -1107 -1108 -1109 -1110 -1113 -1114 -1115 -1116 -1119 -1120 -1121 -1124 -1125 -1126 -1127 -1128 -1129 -1130 -1131 -1132 -1133 -1134 -1135 -1136 -1138 -1139 -1140 -1141 -1142 -1143 -1144 -1145 -1146 -1148 -1149 -1151 -1152 -1153 -1154 -1155 -1158 -1161 -1162 -1165 -1168 -1169 -1170 -1171 -1173 -1175 -1178 -1179 -1180 -1182 -1183 -1185 -1187 
-1188 -1189 -1192 -1193 -1195 -1196 -1197 -1199 -1200 -1202 -1203 -1204 -1205 -1206 -1207 -1208 -1209 -1211 -1212 -1213 -1215 -1216 -1217 -1220 -1221 -1222 -1223 -1225 -1228 -1229 -1231 -1234 -1235 -1237 -1239 -1241 -1242 -1243 -1246 -1247 -1249 -1250 -1252 -1253 -1254 -1255 -1256 -1257 -1259 -1260 -1263 -1264 -1265 -1266 -1267 -1271 -1272 -1274 -1275 -1276 -1277 -1278 -1280 -1282 -1283 -1284 -1285 -1288 -1289 -1291 -1292 -1293 -1294 -1295 -1296 -1298 -1299 -1300 -1302 -1304 -1305 -1306 -1307 -1308 -1310 -1311 -1312 -1314 -1315 -1316 -1317 -1318 -1319 -1320 -1323 -1325 -1326 -1328 -1330 -1331 -1332 -1335 -1336 -1338 -1340 -1341 -1344 -1345 -1346 -1348 -1349 -1353 -1354 -1355 -1356 -1357 -1358 -1359 -1360 -1361 -1362 -1365 -1366 -1367 -1369 -1370 -1372 -1373 -1374 -1375 -1377 -1378 -1379 -1381 -1382 -1383 -1384 -1385 -1387 -1389 -1390 -1391 -1392 -1393 -1394 -1395 -1396 -1397 -1398 -1399 -1400 -1402 -1403 -1404 -1406 -1407 -1408 -1409 -1412 -1414 -1415 -1417 -1418 -1419 -1423 -1424 -1425 -1426 -1428 -1429 -1430 -1431 -1433 -1434 -1435 -1436 -1437 -1438 -1440 -1441 -1442 -1443 -1444 -1445 -1446 -1447 -1448 -1450 -1451 -1453 -1454 -1455 -1456 -1457 -1458 -1459 -1461 -1463 -1465 -1466 -1467 -1468 -1469 -1471 -1472 -1473 -1474 -1475 -1478 -1479 -1481 -1482 -1483 -1484 -1485 -1486 -1487 -1488 -1489 -1491 -1492 -1496 -1497 -1498 -1499 -1500 -1502 -1504 -1505 -1506 -1507 -1509 -1510 -1511 -1512 -1514 -1515 -1516 -1517 -1518 -1519 -1520 -1521 -1522 -1523 -1524 -1525 -1526 -1527 -1529 -1530 -1534 -1535 -1536 -1537 -1538 -1539 -1540 -1541 -1544 -1545 -1546 -1547 -1548 -1550 -1553 -1554 -1555 -1556 -1557 -1558 -1560 -1561 -1563 -1565 -1566 -1568 -1569 -1570 -1571 -1573 -1574 -1575 -1576 -1577 -1578 -1580 -1582 -1583 -1584 -1585 -1586 -1587 -1588 -1589 -1591 -1592 -1595 -1596 -1597 -1598 -1600 -1601 -1602 -1603 -1604 -1606 -1609 -1611 -1612 -1614 -1615 -1618 -1619 -1620 -1621 -1622 -1623 -1624 -1625 -1626 -1627 -1628 -1629 -1630 -1631 -1632 -1634 -1636 -1638 -1639 -1640 -1641 
-1642 -1643 -1646 -1648 -1649 -1650 -1651 -1653 -1654 -1655 -1656 -1657 -1659 -1660 -1665 -1666 -1668 -1669 -1670 -1671 -1672 -1673 -1676 -1677 -1678 -1679 -1680 -1681 -1682 -1683 -1684 -1686 -1688 -1691 -1692 -1693 -1694 -1695 -1696 -1697 -1698 -1699 -1700 -1701 -1702 -1705 -1706 -1707 -1709 -1710 -1711 -1713 -1714 -1716 -1718 -1719 -1720 -1721 -1722 -1723 -1727 -1730 -1734 -1735 -1736 -1737 -1739 -1741 -1742 -1745 -1746 -1748 -1749 -1750 -1753 -1754 -1755 -1756 -1758 -1759 -1760 -1761 -1762 -1763 -1764 -1765 -1767 -1769 -1770 -1771 -1773 -1774 -1775 -1777 -1778 -1779 -1780 -1781 -1782 -1783 -1784 -1786 -1787 -1788 -1789 -1790 -1791 -1792 -1793 -1794 -1795 -1797 -1798 -1799 -1801 -1802 -1804 -1805 -1806 -1810 -1811 -1812 -1814 -1815 -1816 -1817 -1819 -1821 -1823 -1825 -1826 -1828 -1832 -1833 -1835 -1836 -1837 -1841 -1842 -1843 -1844 -1846 -1847 -1848 -1849 -1850 -1851 -1852 -1853 -1854 -1856 -1857 -1860 -1861 -1862 -1864 -1865 -1866 -1867 -1869 -1870 -1873 -1874 -1875 -1876 -1877 -1878 -1879 -1880 -1881 -1882 -1884 -1885 -1887 -1888 -1889 -1891 -1892 -1895 -1896 -1897 -1898 -1899 -1900 -1902 -1903 -1904 -1905 -1906 -1907 -1908 -1910 -1911 -1912 -1913 -1914 -1916 -1917 -1919 -1920 -1921 -1922 -1923 -1924 -1925 -1926 -1927 -1928 -1930 -1931 -1932 -1933 -1934 -1936 -1938 -1939 -1941 -1944 -1945 -1946 -1948 -1950 -1951 -1952 -1953 -1956 -1957 -1959 -1960 -1961 -1965 -1966 -1967 -1968 -1969 -1970 -1971 -1972 -1973 -1974 -1975 -1977 -1978 -1979 -1981 -1982 -1984 -1985 -1987 -1989 -1990 -1993 -1994 -1995 -1996 -1999 -2001 -2003 -2005 -2007 -2010 -2011 -2013 -2014 -2015 -2016 -2017 -2018 -2020 -2022 -2025 -2028 -2029 -2032 -2034 -2036 -2037 -2038 -2040 -2041 -2044 -2046 -2049 -2050 -2051 -2052 -2054 -2055 -2056 -2057 -2059 -2060 -2061 -2062 -2063 -2066 -2068 -2070 -2071 -2073 -2074 -2076 -2077 -2078 -2079 -2080 -2081 -2082 -2083 -2084 -2085 -2087 -2088 -2089 -2090 -2091 -2092 -2093 -2094 -2095 -2096 -2100 -2101 -2102 -2103 -2104 -2105 -2106 -2108 -2109 -2110 -2111 -2112 
-2113 -2114 -2117 -2118 -2119 -2120 -2122 -2123 -2124 -2127 -2130 -2131 -2132 -2134 -2135 -2136 -2137 -2138 -2141 -2142 -2143 -2144 -2145 -2147 -2148 -2150 -2152 -2154 -2155 -2156 -2158 -2159 -2160 -2161 -2163 -2165 -2168 -2169 -2170 -2171 -2172 -2173 -2174 -2175 -2177 -2178 -2179 -2183 -2185 -2188 -2189 -2190 -2193 -2195 -2196 -2197 -2198 -2199 -2200 -2201 -2203 -2205 -2206 -2207 -2208 -2209 -2210 -2212 -2213 -2214 -2215 -2216 -2217 -2218 -2219 -2220 -2223 -2224 -2227 -2228 -2229 -2231 -2232 -2233 -2235 -2237 -2238 -2241 -2243 -2244 -2245 -2248 -2249 -2250 -2251 -2252 -2255 -2258 -2259 -2260 -2261 -2262 -2265 -2266 -2267 -2268 -2270 -2271 -2273 -2275 -2276 -2278 -2280 -2283 -2284 -2285 -2286 -2289 -2290 -2292 -2293 -2294 -2295 -2296 -2297 -2298 -2299 -2301 -2304 -2307 -2308 -2310 -2311 -2312 -2313 -2314 -2315 -2316 -2317 -2320 -2321 -2322 -2323 -2324 -2325 -2326 -2327 -2328 -2329 -2330 -2331 -2335 -2336 -2337 -2338 -2339 -2340 -2341 -2342 -2350 -2351 -2353 -2354 -2355 -2356 -2357 -2358 -2359 -2360 -2361 -2362 -2363 -2364 -2365 -2369 -2370 -2371 -2372 -2373 -2374 -2375 -2378 -2379 -2380 -2381 -2382 -2383 -2384 -2385 -2386 -2387 -2388 -2389 -2391 -2394 -2395 -2399 -2400 -2401 -2402 -2406 -2409 -2410 -2411 -2413 -2414 -2419 -2420 -2421 -2422 -2423 -2424 -2425 -2426 -2427 -2428 -2429 -2430 -2431 -2432 -2433 -2434 -2436 -2437 -2438 -2439 -2440 -2441 -2442 -2443 -2445 -2446 -2447 -2448 -2449 -2450 -2451 -2452 -2453 -2454 -2455 -2456 -2457 -2458 -2459 -2460 -2461 -2462 -2463 -2464 -2465 -2466 -2467 -2468 -2469 -2470 -2471 -2472 -2473 -2474 -2476 -2477 -2478 -2479 -2483 -2484 -2485 -2486 -2487 -2488 -2489 -2493 -2494 -2495 -2496 -2497 -2501 -2502 -2503 -2504 -2505 -2506 -2507 -2508 -2509 -2510 -2511 -2513 -2514 -2515 -2516 -2517 -2518 -2519 -2520 -2526 -2527 -2528 -2529 -2530 -2531 -2532 -2533 -2534 -2535 -2536 -2537 -2539 -2540 -2545 -2546 -2547 -2552 -2553 -2554 -2555 -2557 -2559 -2560 -2561 -2562 -2563 -2565 -2566 -2567 -2568 -2569 -2570 -2571 -2573 -2574 -2575 -2576 
-2577 -2578 -2580 -2581 -2582 -2583 -2584 -2586 -2587 -2588 -2589 -2591 -2592 -2593 -2595 -2598 -2600 -2601 -2602 -2603 -2604 -2605 -2607 -2608 -2609 -2613 -2614 -2615 -2616 -2617 -2618 -2620 -2621 -2623 -2624 -2629 -2631 -2633 -2634 -2643 -2644 -2645 -2646 -2647 -2652 -2653 -2654 -2656 -2657 -2658 -2659 -2661 -2662 -2666 -2668 -2669 -2670 -2671 -2672 -2673 -2674 -2675 -2676 -2677 -2679 -2680 -2682 -2683 -2684 -2687 -2690 -2691 -2692 -2694 -2695 -2696 -2697 -2698 -2699 -2700 -2701 -2702 -2703 -2704 -2705 -2706 -2707 -2708 -2713 -2714 -2715 -2718 -2719 -2720 -2721 -2722 -2723 -2725 -2728 -2729 -2730 -2731 -2734 -2735 -2736 -2737 -2738 -2739 -2740 -2741 -2742 -2743 -2744 -2747 -2748 -2749 -2750 -2751 -2752 -2756 -2759 -2764 -2765 -2766 -2767 -2770 -2771 -2772 -2773 -2776 -2777 -2778 -2779 -2780 -2787 -2788 -2791 -2792 -2793 -2794 -2795 -2796 -2797 -2798 -2799 -2800 -2801 -2802 -2803 -2804 -2805 -2806 -2807 -2811 -2812 -2813 -2819 -2831 -2832 -2833 -2834 -2835 -2836 -2837 -2840 -2841 -2842 -2843 -2844 -2845 -2846 -2848 -2849 -2850 -2854 -2855 -2856 -2857 -2858 -2861 -2863 -2864 -2865 -2866 -2867 -2868 -2869 -2870 -2871 -2872 -2873 -2874 -2875 -2876 -2877 -2878 -2879 -2880 -2881 -2883 -2884 -2885 -2886 -2887 -2888 -2889 -2890 -2891 -2892 -2893 -2894 -2895 -2896 -2897 -2898 -2899 -2900 -2901 -2902 -2903 -2904 -2905 -2906 -2907 -2908 -2909 -2910 -2911 -2912 -2913 -2914 -2915 -2916 -2917 -2918 -2919 -2920 -2921 -2922 -2923 -2924 -2928 -2929 -2930 -2931 -2932 -2933 -2934 -2935 -2936 -2937 -2938 -2945 -2946 -2947 -2948 -2949 -2950 -2951 -2952 -2953 -2954 -2955 -2956 -2957 -2958 -2959 -2960 -2961 -2962 -2963 -2964 -2965 -2966 -2969 -2970 -2971 -2972 -2974 -2975 -2976 -2977 -2979 -2980 -2981 -2983 -2984 -2985 -2986 -2987 -2988 -2989 -2990 -2992 -2993 -2994 -2996 -2997 -2998 -3001 -3002 -3003 -3004 -3005 -3008 -3009 -3010 -3011 -3015 -3017 -3018 -3019 -3020 -3021 -3025 -3026 -3027 -3028 -3029 -3032 -3033 -3034 -3037 -3038 -3039 -3042 -3043 -3044 -3045 -3046 -3051 -3052 -3053 
-3055 -3056 -3060 -3061 -3062 -3064 -3065 -3066 -3067 -3069 -3070 -3071 -3072 -3076 -3078 -3079 -3080 -3081 -3082 -3083 -3085 -3087 -3088 -3089 -3090 -3091 -3094 -3095 -3098 -3099 -3100 -3101 -3104 -3105 -3111 -3112 -3113 -3114 -3117 -3118 -3119 -3120 -3123 -3125 -3126 -3127 -3128 -3129 -3135 -3136 -3137 -3138 -3140 -3141 -3142 -3144 -3145 -3146 -3147 -3148 -3149 -3150 -3151 -3153 -3154 -3155 -3156 -3157 -3158 -3159 -3160 -3161 -3162 -3163 -3164 -3165 -3166 -3169 -3170 -3171 -3172 -3173 -3174 -3175 -3176 -3177 -3178 -3179 -3180 -3181 -3182 -3187 -3188 -3192 -3198 -3199 -3200 -3201 -3205 -3207 -3209 -3211 -3212 -3213 -3215 -3216 -3217 -3218 -3219 -3221 -3222 -3223 -3224 -3226 -3227 -3228 -3231 -3232 -3234 -3235 -3236 -3238 -3239 -3240 -3241 -3243 -3245 -3247 -3250 -3251 -3252 -3253 -3256 -3257 -3259 -3260 -3261 -3264 -3265 -3266 -3268 -3269 -3270 -3272 -3273 -3275 -3276 -3277 -3278 -3281 -3282 -3283 -3284 -3285 -3288 -3289 -3290 -3291 -3292 -3295 -3296 -3299 -3301 -3302 -3304 -3307 -3308 -3310 -3311 -3312 -3313 -3314 -3315 -3317 -3318 -3321 -3322 -3323 -3325 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/attack_6_sub_shadow_graph_index_attack_2.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/cora/attack_6_sub_shadow_graph_index_attack_2.txt deleted file mode 100644 index 85d52ae2..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/attack_6_sub_shadow_graph_index_attack_2.txt +++ /dev/null @@ -1,653 +0,0 @@ -1 -2 -3 -6 -2057 -13 -12 -15 -16 -2065 -21 -2069 -24 -2074 -28 -2078 -2079 -2080 -34 -35 -2084 -39 -37 -38 -2091 -2086 -2093 -46 -2089 -2097 -51 -54 -2102 -2103 -57 -60 -61 -63 -2113 -2115 -67 -2117 -2118 -2120 -77 -2126 -79 -82 -85 -84 -92 -2143 -96 -2144 -2149 -2150 -103 -2155 -111 -105 -2162 -2160 -2161 -118 -117 -2167 -2166 -2171 -2169 -128 -2179 -132 -2183 -135 -27 -140 -2188 -143 -144 -2189 -2195 -147 -148 -152 -153 -2194 -156 -155 -2207 -2210 -162 -2211 -2213 -2215 -167 -2217 -2221 -2222 -173 -178 -180 -2229 
-186 -187 -2241 -197 -2253 -2076 -207 -2257 -210 -209 -211 -218 -227 -2278 -2282 -2283 -2284 -234 -238 -239 -240 -2290 -2291 -247 -248 -2288 -251 -2300 -252 -253 -254 -2305 -2308 -2312 -2088 -260 -269 -270 -273 -275 -2324 -277 -2326 -2328 -2329 -283 -2330 -286 -2332 -2334 -2339 -294 -295 -296 -298 -301 -303 -305 -306 -2353 -2358 -314 -316 -2368 -321 -323 -317 -325 -2381 -335 -337 -333 -340 -341 -344 -346 -348 -350 -351 -347 -2404 -2406 -2407 -2409 -363 -367 -370 -372 -374 -2424 -2426 -378 -2431 -2430 -385 -2438 -2442 -396 -2444 -2447 -399 -398 -405 -409 -410 -2461 -412 -414 -415 -2464 -2465 -419 -418 -2467 -2468 -417 -2472 -2473 -426 -425 -2476 -2480 -2485 -438 -2486 -440 -444 -2493 -446 -449 -452 -2503 -455 -463 -2512 -465 -466 -470 -471 -472 -473 -467 -2524 -477 -478 -479 -482 -483 -2536 -489 -2539 -495 -500 -2550 -2551 -502 -2555 -509 -2560 -508 -513 -2559 -521 -523 -2546 -529 -532 -2582 -2586 -2587 -2588 -2589 -2545 -2591 -548 -551 -552 -2599 -554 -2598 -2604 -559 -2609 -562 -563 -564 -565 -566 -567 -2615 -2617 -575 -2623 -568 -578 -2628 -2629 -585 -586 -588 -2639 -2636 -2642 -2637 -2647 -2648 -601 -602 -2652 -2658 -2664 -618 -619 -2668 -621 -2670 -2672 -2671 -633 -636 -638 -644 -645 -650 -2697 -2698 -653 -654 -656 -657 -2704 -664 -669 -671 -675 -682 -688 -691 -692 -699 -701 -709 -712 -713 -715 -2178 -720 -723 -729 -730 -733 -2181 -738 -739 -742 -749 -753 -754 -768 -770 -772 -773 -775 -776 -2190 -784 -790 -794 -795 -802 -804 -807 -161 -827 -835 -837 -573 -841 -842 -843 -845 -850 -852 -854 -855 -865 -868 -870 -875 -877 -2525 -882 -893 -909 -912 -913 -917 -932 -933 -937 -946 -950 -956 -963 -968 -970 -971 -973 -974 -977 -978 -993 -995 -996 -998 -999 -1005 -1007 -1008 -1010 -1011 -1012 -1015 -1027 -1033 -1034 -1036 -1045 -1061 -1073 -1076 -1078 -1081 -1082 -1086 -1099 -1104 -1114 -1117 -1135 -1137 -1141 -1147 -1160 -1165 -1166 -1168 -1174 -1177 -1178 -1179 -1180 -1182 -1183 -2082 -1187 -1189 -1192 -1193 -1201 -1202 -1213 -1217 -1226 -1228 -1230 -1231 -1232 -1234 
-1238 -1240 -1241 -1244 -1248 -1250 -1251 -1259 -1260 -1261 -1262 -1275 -1285 -1290 -1293 -1297 -1298 -2700 -1312 -1313 -1314 -1316 -1322 -1324 -1326 -1327 -2301 -1338 -1347 -1349 -1371 -1376 -1378 -1382 -1383 -1384 -462 -276 -1396 -1398 -1401 -279 -1428 -464 -1438 -1440 -1442 -1443 -1444 -1445 -1459 -1486 -1489 -1495 -1505 -1510 -1519 -1527 -1529 -1539 -1540 -1543 -1546 -1549 -1557 -1569 -312 -1578 -1585 -1587 -1588 -1592 -1596 -1600 -1602 -1603 -1606 -1609 -1616 -1620 -1622 -1623 -1627 -1631 -1632 -1636 -1643 -1647 -1649 -1651 -1654 -1655 -2365 -1662 -1672 -1676 -1679 -1681 -1684 -1682 -1686 -1687 -1688 -1690 -1693 -339 -1721 -342 -1726 -1727 -1730 -1736 -1737 -1741 -1747 -1759 -1764 -1765 -1767 -1768 -1769 -1770 -1773 -1782 -1786 -1788 -1790 -1791 -1793 -1794 -1795 -1798 -1800 -1808 -1815 -1816 -1817 -1828 -1831 -1832 -1836 -1841 -1859 -1873 -1877 -1882 -1889 -1894 -1901 -1907 -1918 -1919 -1930 -2420 -1938 -1944 -1945 -1954 -1955 -1957 -1963 -1965 -1970 -1983 -1985 -1989 -1993 -1994 -1995 -1996 -1998 -1997 -2000 -2434 -2010 -2014 -2016 -2018 -2027 -2031 -2032 -2045 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/attack_6_sub_shadow_graph_index_attack_3.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/cora/attack_6_sub_shadow_graph_index_attack_3.txt deleted file mode 100644 index 0b04d6a6..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/attack_6_sub_shadow_graph_index_attack_3.txt +++ /dev/null @@ -1,647 +0,0 @@ -4 -2053 -2054 -2050 -9 -2055 -2056 -2064 -2066 -17 -20 -2072 -25 -29 -31 -33 -36 -2090 -40 -2095 -2098 -2099 -2105 -2110 -2114 -71 -65 -74 -2124 -80 -2128 -2130 -2132 -2135 -2131 -90 -2141 -2145 -2146 -98 -102 -2151 -106 -107 -108 -2156 -109 -114 -2168 -2172 -125 -2176 -130 -133 -134 -136 -2182 -142 -146 -2198 -151 -154 -159 -2209 -164 -170 -171 -176 -2225 -2227 -2224 -2231 -179 -2230 -2246 -2247 -199 -2255 -2260 -2077 -2263 -2266 -2270 -224 -2273 -2279 -230 -2289 -244 -2083 -242 -250 -256 -257 -2307 -261 
-2303 -263 -2310 -267 -271 -2320 -2316 -2325 -282 -281 -2335 -2336 -287 -2340 -2345 -299 -2344 -2349 -2351 -304 -2350 -310 -311 -2354 -2356 -308 -2360 -319 -2369 -2364 -318 -2378 -331 -2382 -2384 -338 -2389 -2390 -345 -2386 -2391 -2394 -353 -2402 -2408 -360 -2412 -2415 -368 -2418 -2421 -2423 -376 -377 -379 -382 -383 -2433 -2427 -2437 -391 -392 -2440 -394 -397 -400 -2452 -406 -2455 -404 -2457 -2460 -413 -421 -2475 -2479 -429 -427 -2477 -2481 -434 -2483 -2484 -445 -453 -2501 -456 -459 -460 -461 -2509 -2513 -2511 -2515 -474 -480 -2528 -2530 -484 -2534 -481 -2535 -488 -2538 -493 -494 -2543 -2540 -498 -499 -2548 -505 -506 -2556 -507 -511 -514 -2562 -517 -520 -2570 -2569 -524 -526 -2576 -2579 -535 -2584 -2583 -547 -549 -550 -555 -556 -2607 -569 -2618 -2614 -2621 -574 -2627 -2630 -2632 -2634 -589 -2635 -592 -591 -597 -596 -604 -2654 -2655 -2653 -607 -610 -608 -611 -614 -2663 -615 -622 -626 -627 -2674 -2676 -2678 -631 -2675 -2681 -2683 -2685 -639 -640 -641 -642 -2689 -2692 -2690 -2695 -2696 -2701 -658 -2707 -661 -665 -667 -668 -672 -674 -677 -680 -689 -690 -694 -697 -698 -704 -706 -708 -716 -718 -732 -736 -743 -744 -746 -750 -756 -761 -767 -2595 -774 -778 -783 -785 -786 -791 -792 -796 -798 -160 -821 -822 -823 -826 -828 -832 -836 -838 -853 -858 -859 -861 -862 -864 -867 -871 -2616 -876 -884 -886 -887 -584 -895 -898 -899 -921 -926 -927 -928 -929 -931 -947 -949 -954 -2633 -960 -969 -975 -980 -983 -984 -985 -986 -987 -991 -1000 -1002 -1003 -1004 -1006 -1013 -1017 -1019 -1025 -1026 -1048 -1050 -1051 -1052 -1053 -1055 -1057 -1059 -1066 -1074 -1079 -2657 -1084 -1087 -1088 -2659 -1092 -1095 -1101 -1102 -1103 -1105 -1109 -1111 -1112 -1116 -1118 -1124 -1128 -634 -1144 -1155 -1156 -1161 -1164 -1167 -1169 -1170 -1171 -1173 -1181 -1185 -1191 -1197 -1198 -1208 -1220 -1227 -1236 -1237 -1239 -1256 -1265 -1266 -1269 -1273 -1283 -1284 -255 -1288 -1295 -1296 -1301 -1302 -1306 -1307 -1310 -1315 -1318 -1328 -1330 -1331 -1332 -1339 -1343 -1352 -1356 -1357 -1358 -1359 -1360 -1361 -1363 -1364 
-1370 -1386 -1387 -1389 -1395 -1399 -1403 -1405 -1406 -1407 -1408 -1412 -1413 -1417 -1421 -1422 -1423 -1429 -1430 -1431 -2319 -1433 -1434 -1436 -1449 -1454 -1456 -1457 -1458 -1461 -1462 -1463 -1469 -1479 -1482 -1484 -1487 -1488 -1492 -1493 -2333 -1499 -1508 -1509 -1511 -1512 -1513 -1514 -1521 -1522 -1528 -1531 -1538 -1541 -1547 -1559 -1560 -1562 -2346 -1565 -1566 -1567 -1568 -1570 -1574 -1576 -1577 -1579 -1580 -1584 -1591 -1593 -2352 -1595 -1599 -1601 -1608 -1618 -1625 -1626 -1629 -1637 -1641 -1644 -1657 -1658 -1661 -1664 -1668 -1669 -1671 -1678 -334 -1683 -1697 -1701 -1703 -1705 -1712 -1714 -1724 -1725 -1732 -1735 -1739 -1744 -1746 -1748 -2383 -1755 -1758 -2385 -1766 -2387 -1772 -1776 -1777 -1781 -1802 -1804 -1805 -1806 -1810 -1812 -1819 -1820 -1824 -1826 -1827 -1829 -1835 -1837 -1848 -1853 -1856 -1857 -1858 -1861 -1862 -1863 -1868 -1876 -375 -1888 -1892 -1893 -1895 -1897 -1904 -1906 -1912 -1914 -1921 -1923 -1928 -1929 -1931 -1935 -1939 -1948 -1949 -1951 -1960 -1962 -1964 -1974 -1975 -1987 -2432 -1999 -2003 -2004 -2005 -2006 -2009 -2011 -2012 -2013 -2020 -2023 -2028 -2030 -2035 -2036 -2037 -2038 -2040 -2041 -2043 -2044 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_1200_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_1200_shadow_graph_index.txt deleted file mode 100644 index e690671f..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_1200_shadow_graph_index.txt +++ /dev/null @@ -1,1265 +0,0 @@ -1 -2 -3 -4 -2053 -6 -2054 -2050 -2057 -9 -2055 -2056 -13 -12 -15 -16 -2064 -2066 -17 -2065 -21 -20 -2069 -2072 -24 -2074 -25 -28 -29 -2078 -2079 -2080 -31 -34 -35 -2084 -33 -36 -39 -37 -38 -2090 -2091 -2086 -2093 -46 -2089 -40 -2095 -2098 -2099 -2097 -51 -54 -2102 -2103 -57 -2105 -60 -61 -2110 -63 -419 -2113 -2114 -2115 -67 -2117 -2118 -71 -2120 -74 -2124 -77 -2126 -79 -80 -82 -2132 -85 -84 -2135 -90 -92 -2141 -2461 -96 -2145 -2146 -98 -2144 -2149 -2150 -103 -102 -2151 
-106 -2155 -107 -108 -2156 -111 -109 -105 -2162 -114 -2160 -2161 -118 -117 -2168 -2167 -2166 -2171 -2172 -125 -2169 -128 -2176 -130 -2179 -132 -133 -134 -2183 -135 -136 -2182 -27 -140 -2188 -142 -143 -144 -2189 -146 -2195 -147 -148 -2198 -151 -152 -153 -154 -2194 -156 -155 -2207 -159 -2209 -2210 -162 -2211 -2213 -164 -2215 -167 -2217 -170 -171 -2221 -2222 -173 -176 -2225 -178 -2227 -180 -2229 -2224 -2231 -179 -2230 -186 -2241 -197 -2246 -2247 -199 -2253 -2076 -207 -2255 -2257 -210 -209 -2260 -211 -2263 -2266 -218 -2270 -224 -2273 -227 -2278 -2279 -230 -2282 -2283 -2284 -234 -238 -239 -240 -2289 -2290 -2291 -244 -2083 -242 -247 -248 -250 -251 -2300 -252 -253 -254 -256 -257 -2305 -2307 -2308 -261 -2303 -263 -2312 -2310 -2088 -267 -260 -269 -270 -271 -2320 -273 -275 -2324 -277 -2325 -2326 -2328 -2329 -282 -283 -281 -2330 -286 -2335 -2336 -2332 -2334 -2339 -287 -2340 -294 -295 -296 -2345 -298 -299 -2344 -2349 -301 -303 -2351 -305 -306 -304 -2353 -2350 -310 -2358 -311 -2354 -314 -2356 -316 -308 -2360 -319 -2368 -321 -2369 -323 -2364 -317 -318 -325 -2378 -331 -2381 -2382 -335 -2384 -337 -338 -333 -340 -2389 -2390 -341 -344 -345 -346 -2386 -348 -2391 -350 -351 -2394 -353 -2402 -347 -2404 -2406 -2407 -2408 -2409 -360 -363 -2412 -2415 -367 -368 -370 -2418 -372 -2421 -374 -2423 -376 -377 -2424 -2426 -378 -379 -382 -383 -2431 -2430 -385 -2433 -2427 -2437 -2438 -391 -392 -2440 -2442 -394 -396 -2444 -397 -2447 -399 -400 -398 -2452 -405 -406 -2455 -404 -409 -410 -2457 -2460 -412 -414 -415 -2464 -2465 -418 -2467 -2468 -421 -413 -417 -2472 -2473 -426 -425 -2476 -429 -427 -2477 -2480 -2481 -434 -2475 -2483 -2485 -438 -2486 -440 -2484 -495 -444 -2493 -446 -445 -449 -452 -453 -2501 -2503 -455 -456 -459 -460 -461 -2509 -463 -2512 -2513 -465 -466 -2511 -2515 -470 -471 -472 -473 -474 -467 -2524 -477 -478 -479 -480 -2528 -2530 -482 -484 -483 -2534 -481 -2535 -2536 -489 -488 -2539 -2538 -493 -494 -2543 -2540 -498 -499 -500 -2548 -2550 -2551 -502 -505 -506 -2555 -2556 -509 -507 -511 -2560 
-508 -514 -2562 -513 -517 -2559 -520 -521 -2570 -523 -2569 -524 -526 -2546 -2576 -529 -2579 -532 -535 -2584 -2583 -2586 -2587 -2588 -2589 -2591 -547 -548 -549 -550 -551 -552 -2599 -554 -2598 -2604 -555 -556 -559 -2607 -2609 -562 -563 -564 -565 -566 -567 -2615 -2617 -569 -2618 -2614 -2621 -574 -575 -2623 -568 -578 -2627 -2628 -2629 -2630 -2632 -585 -2634 -586 -588 -589 -2635 -2639 -592 -2636 -2642 -2637 -591 -597 -596 -2647 -2648 -601 -602 -604 -2652 -2654 -2655 -2653 -607 -2658 -610 -608 -611 -614 -2663 -2664 -615 -618 -619 -2668 -621 -622 -2670 -2672 -2671 -626 -627 -2674 -2676 -2678 -631 -2675 -633 -2681 -2683 -636 -2685 -638 -639 -640 -641 -642 -2689 -2692 -644 -2690 -2695 -2696 -645 -650 -653 -654 -2701 -656 -657 -658 -2704 -2707 -661 -664 -665 -667 -668 -669 -671 -672 -674 -675 -677 -680 -682 -688 -689 -690 -691 -692 -694 -697 -698 -699 -701 -704 -706 -708 -709 -712 -713 -715 -716 -718 -720 -723 -729 -730 -732 -733 -736 -738 -739 -742 -746 -749 -750 -753 -754 -756 -761 -767 -768 -2595 -770 -772 -773 -774 -775 -776 -778 -2190 -783 -784 -785 -786 -790 -791 -792 -794 -795 -796 -798 -802 -804 -160 -807 -161 -821 -822 -823 -826 -827 -828 -832 -835 -836 -837 -838 -573 -841 -842 -843 -845 -850 -852 -853 -854 -855 -858 -859 -861 -862 -864 -865 -867 -868 -870 -871 -2616 -875 -876 -877 -2525 -882 -884 -886 -887 -584 -893 -895 -898 -899 -909 -912 -913 -917 -921 -926 -927 -928 -929 -931 -932 -933 -937 -946 -947 -949 -950 -954 -956 -2633 -960 -963 -968 -969 -970 -971 -973 -974 -975 -977 -978 -980 -983 -984 -985 -986 -987 -991 -993 -995 -996 -998 -999 -1000 -1002 -1003 -1004 -1005 -1006 -1007 -1008 -1010 -1011 -1012 -1013 -1015 -1017 -1019 -1025 -1026 -1027 -1033 -1034 -1036 -1045 -1048 -1050 -1051 -1052 -1053 -1055 -1057 -1059 -1061 -1066 -1073 -1074 -1076 -1078 -1079 -1081 -1082 -2657 -1084 -1086 -1087 -1088 -1092 -1095 -1099 -1101 -1102 -1103 -1104 -1105 -1109 -1111 -1112 -1114 -1116 -1117 -1118 -1124 -1128 -1135 -1137 -1141 -634 -1144 -1147 -1155 -1156 -1160 -1161 -1164 
-1165 -1166 -1167 -1168 -1169 -1170 -1171 -1173 -1174 -1177 -1178 -1179 -1180 -1181 -1182 -1183 -1185 -1187 -1189 -1191 -1192 -1193 -1197 -1198 -1201 -1202 -1208 -1213 -1217 -1220 -1226 -1227 -1228 -1230 -1231 -1232 -1234 -1236 -1237 -1238 -1240 -1241 -1244 -1248 -1250 -1251 -1256 -1259 -1260 -1261 -1262 -1265 -1266 -1269 -1273 -1275 -1283 -1284 -1285 -255 -1288 -1290 -1293 -1295 -1296 -1297 -1298 -1301 -1302 -1306 -1307 -1310 -1312 -1313 -1314 -1315 -1316 -1318 -1322 -1324 -1326 -1327 -1328 -1330 -1331 -1332 -2301 -1338 -1339 -1343 -1347 -1349 -1352 -1356 -1357 -1358 -1359 -1360 -1361 -1363 -1364 -1370 -1371 -1376 -1378 -1382 -1383 -1384 -1386 -1389 -276 -1396 -1398 -1399 -1401 -1403 -279 -1405 -1406 -1407 -1408 -1412 -1413 -1417 -1421 -1422 -1423 -1428 -1429 -1430 -1431 -1433 -1434 -1436 -464 -1438 -1440 -1442 -1443 -1444 -1445 -1449 -1454 -1456 -1457 -1458 -1459 -1461 -1462 -1463 -1469 -1479 -1482 -1484 -1486 -1487 -1488 -1489 -1492 -1493 -1495 -2333 -1499 -1505 -1508 -1509 -1510 -1511 -1512 -1513 -1514 -1519 -1521 -1522 -1527 -1528 -1529 -1531 -1538 -1539 -1540 -1543 -1546 -1547 -1549 -1557 -1559 -1560 -1562 -2346 -1565 -1566 -1567 -1568 -1569 -1570 -312 -1574 -1576 -1577 -1578 -1579 -1580 -1584 -1585 -1587 -1588 -1591 -1592 -1593 -2352 -1595 -1596 -1599 -1600 -1601 -1602 -1603 -1606 -1608 -1609 -1616 -1618 -1620 -1622 -1623 -1625 -1626 -1627 -1629 -1631 -1632 -1636 -1637 -1641 -1643 -1644 -1647 -1649 -1651 -1654 -1655 -1657 -1658 -2365 -1661 -1662 -1664 -1668 -1669 -1671 -1672 -1676 -1678 -1679 -334 -1681 -1683 -1684 -1682 -1686 -1687 -1688 -1690 -1693 -1697 -1701 -1703 -1705 -339 -1712 -1714 -1721 -342 -1724 -1725 -1726 -1727 -1730 -1732 -1735 -1736 -1737 -1739 -1741 -1744 -1746 -1747 -1748 -2383 -1755 -1758 -2385 -1764 -1765 -1766 -1767 -1768 -1769 -1770 -2387 -1772 -1776 -1777 -1781 -1782 -1786 -1790 -1791 -1798 -1800 -1802 -1804 -1805 -1806 -1808 -1810 -1812 -1815 -1816 -1817 -1819 -1820 -1824 -1826 -1827 -1828 -1829 -1831 -1832 -1835 -1836 -1837 -1841 
-1848 -1853 -1856 -1857 -1858 -1859 -1861 -1862 -1863 -1868 -1873 -1876 -1877 -1882 -375 -1888 -1889 -1893 -1894 -1895 -1897 -1901 -1904 -1906 -1907 -1912 -1914 -1918 -1919 -1921 -1923 -1928 -1929 -1930 -1931 -1935 -2420 -1938 -1939 -1944 -1945 -1948 -1949 -1951 -1954 -1955 -1957 -1960 -1962 -1963 -1964 -1965 -1970 -1974 -1975 -1983 -1985 -1987 -1989 -1993 -1994 -1995 -1996 -2432 -1998 -1999 -1997 -2000 -2003 -2004 -2005 -2006 -2434 -2009 -2010 -2011 -2012 -2013 -2014 -2016 -2018 -2020 -2023 -2027 -2028 -2031 -2032 -2035 -2036 -2037 -2038 -2040 -2041 -2043 -2044 -2045 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_1300_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_1300_shadow_graph_index.txt deleted file mode 100644 index fd5bd4af..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_1300_shadow_graph_index.txt +++ /dev/null @@ -1,1300 +0,0 @@ -1 -2 -3 -4 -2053 -6 -2054 -2050 -2057 -9 -2055 -2056 -13 -12 -15 -16 -2064 -2066 -17 -2065 -21 -20 -2069 -2072 -24 -2074 -25 -28 -29 -2078 -2079 -2080 -31 -34 -35 -2084 -33 -36 -39 -37 -38 -2090 -2091 -2086 -2093 -46 -2089 -40 -2095 -2098 -2099 -2097 -51 -54 -2102 -2103 -57 -2105 -60 -61 -2110 -63 -2113 -2114 -2115 -67 -2117 -2118 -71 -2120 -65 -74 -2124 -77 -2126 -79 -80 -2128 -82 -2130 -2132 -85 -84 -2135 -2131 -90 -92 -2141 -2143 -96 -2145 -2146 -98 -2144 -2149 -2150 -103 -102 -2151 -106 -2155 -107 -108 -2156 -111 -109 -105 -2162 -114 -2160 -2161 -118 -117 -2168 -2167 -2166 -2171 -2172 -125 -2169 -128 -2176 -130 -2179 -132 -133 -134 -2183 -135 -136 -2182 -27 -140 -2188 -142 -143 -144 -2189 -146 -2195 -147 -148 -2198 -151 -152 -153 -154 -2194 -156 -155 -2207 -159 -2209 -2210 -162 -2211 -2213 -164 -2215 -167 -2217 -170 -171 -2221 -2222 -173 -176 -2225 -178 -2227 -180 -2229 -2224 -2231 -179 -2230 -186 -187 -2241 -197 -2246 -2247 -199 -2253 -2076 -207 -2255 -2257 -210 -209 -2260 -211 -2077 -2263 -2266 -218 -2270 -224 -2273 
-227 -2278 -2279 -230 -2282 -2283 -2284 -234 -238 -239 -240 -2289 -2290 -2291 -244 -2083 -242 -247 -248 -2288 -250 -251 -2300 -252 -253 -254 -256 -257 -2305 -2307 -2308 -261 -2303 -263 -2312 -2310 -2088 -267 -260 -269 -270 -271 -2320 -273 -2316 -275 -2324 -277 -2325 -2326 -2328 -2329 -282 -283 -281 -2330 -286 -2335 -2336 -2332 -2334 -2339 -287 -2340 -294 -295 -296 -2345 -298 -299 -2344 -2349 -301 -303 -2351 -305 -306 -304 -2353 -2350 -310 -2358 -311 -2354 -314 -2356 -316 -308 -2360 -319 -2368 -321 -2369 -323 -2364 -317 -318 -325 -2378 -331 -2381 -2382 -335 -2384 -337 -338 -333 -340 -2389 -2390 -341 -344 -345 -346 -2386 -348 -2391 -350 -351 -2394 -353 -2402 -347 -2404 -2406 -2407 -2408 -2409 -360 -363 -2412 -2415 -367 -368 -370 -2418 -372 -2421 -374 -2423 -376 -377 -2424 -2426 -378 -379 -382 -383 -2431 -2430 -385 -2433 -2427 -2437 -2438 -391 -392 -2440 -2442 -394 -396 -2444 -397 -2447 -399 -400 -398 -2452 -405 -406 -2455 -404 -409 -410 -2457 -2460 -413 -2461 -412 -414 -415 -2464 -2465 -419 -418 -2467 -2468 -421 -417 -2472 -2475 -2473 -426 -425 -2479 -2476 -429 -427 -2477 -2480 -2481 -434 -2483 -2485 -438 -2486 -440 -2484 -444 -2493 -446 -445 -449 -452 -453 -2501 -2503 -455 -456 -459 -460 -461 -2509 -463 -2512 -2513 -465 -466 -2511 -2515 -470 -471 -472 -473 -474 -467 -2524 -477 -478 -479 -480 -2528 -2530 -482 -484 -483 -2534 -481 -2535 -2536 -489 -488 -2539 -2538 -493 -495 -494 -2543 -2540 -498 -499 -500 -2548 -2550 -2551 -502 -505 -506 -2555 -2556 -509 -507 -511 -2560 -508 -514 -2562 -513 -517 -2559 -520 -521 -2570 -523 -2569 -524 -526 -2546 -2576 -529 -2579 -532 -2582 -535 -2584 -2583 -2586 -2587 -2588 -2589 -2545 -2591 -547 -548 -549 -550 -551 -552 -2599 -554 -2598 -2604 -555 -556 -559 -2607 -2609 -562 -563 -564 -565 -566 -567 -2615 -2617 -569 -2618 -2614 -2621 -574 -575 -2623 -568 -578 -2627 -2628 -2629 -2630 -2632 -585 -2634 -586 -588 -589 -2635 -2639 -592 -2636 -2642 -2637 -591 -597 -596 -2647 -2648 -601 -602 -604 -2652 -2654 -2655 -2653 -607 -2658 -610 -608 
-611 -614 -2663 -2664 -615 -618 -619 -2668 -621 -622 -2670 -2672 -2671 -626 -627 -2674 -2676 -2678 -631 -2675 -633 -2681 -2683 -636 -2685 -638 -639 -640 -641 -642 -2689 -2692 -644 -2690 -2695 -2696 -645 -650 -2697 -2698 -653 -654 -2701 -656 -657 -658 -2704 -2707 -661 -664 -665 -667 -668 -669 -671 -672 -674 -675 -677 -680 -682 -688 -689 -690 -691 -692 -694 -697 -698 -699 -701 -704 -706 -708 -709 -712 -713 -715 -716 -718 -2178 -720 -723 -729 -730 -732 -733 -2181 -736 -738 -739 -742 -743 -744 -746 -749 -750 -753 -754 -756 -761 -767 -768 -2595 -770 -772 -773 -774 -775 -776 -778 -2190 -783 -784 -785 -786 -790 -791 -792 -794 -795 -796 -798 -802 -804 -160 -807 -161 -821 -822 -823 -826 -827 -828 -832 -835 -836 -837 -838 -573 -841 -842 -843 -845 -850 -852 -853 -854 -855 -858 -859 -861 -862 -864 -865 -867 -868 -870 -871 -2616 -875 -876 -877 -2525 -882 -884 -886 -887 -584 -893 -895 -898 -899 -909 -912 -913 -917 -921 -926 -927 -928 -929 -931 -932 -933 -937 -946 -947 -949 -950 -954 -956 -2633 -960 -963 -968 -969 -970 -971 -973 -974 -975 -977 -978 -980 -983 -984 -985 -986 -987 -991 -993 -995 -996 -998 -999 -1000 -1002 -1003 -1004 -1005 -1006 -1007 -1008 -1010 -1011 -1012 -1013 -1015 -1017 -1019 -1025 -1026 -1027 -1033 -1034 -1036 -1045 -1048 -1050 -1051 -1052 -1053 -1055 -1057 -1059 -1061 -1066 -1073 -1074 -1076 -1078 -1079 -1081 -1082 -2657 -1084 -1086 -1087 -1088 -2659 -1092 -1095 -1099 -1101 -1102 -1103 -1104 -1105 -1109 -1111 -1112 -1114 -1116 -1117 -1118 -1124 -1128 -1135 -1137 -1141 -634 -1144 -1147 -1155 -1156 -1160 -1161 -1164 -1165 -1166 -1167 -1168 -1169 -1170 -1171 -1173 -1174 -1177 -1178 -1179 -1180 -1181 -1182 -1183 -2082 -1185 -1187 -1189 -1191 -1192 -1193 -1197 -1198 -1201 -1202 -1208 -1213 -1217 -1220 -1226 -1227 -1228 -1230 -1231 -1232 -1234 -1236 -1237 -1238 -1239 -1240 -1241 -1244 -1248 -1250 -1251 -1256 -1259 -1260 -1261 -1262 -1265 -1266 -1269 -1273 -1275 -1283 -1284 -1285 -255 -1288 -1290 -1293 -1295 -1296 -1297 -1298 -2700 -1301 -1302 -1306 -1307 -1310 
-1312 -1313 -1314 -1315 -1316 -1318 -1322 -1324 -1326 -1327 -1328 -1330 -1331 -1332 -2301 -1338 -1339 -1343 -1347 -1349 -1352 -1356 -1357 -1358 -1359 -1360 -1361 -1363 -1364 -1370 -1371 -1376 -1378 -1382 -1383 -1384 -1386 -462 -1387 -1389 -276 -1395 -1396 -1398 -1399 -1401 -1403 -279 -1405 -1406 -1407 -1408 -1412 -1413 -1417 -1421 -1422 -1423 -1428 -1429 -1430 -1431 -2319 -1433 -1434 -1436 -464 -1438 -1440 -1442 -1443 -1444 -1445 -1449 -1454 -1456 -1457 -1458 -1459 -1461 -1462 -1463 -1469 -1479 -1482 -1484 -1486 -1487 -1488 -1489 -1492 -1493 -1495 -2333 -1499 -1505 -1508 -1509 -1510 -1511 -1512 -1513 -1514 -1519 -1521 -1522 -1527 -1528 -1529 -1531 -1538 -1539 -1540 -1541 -1543 -1546 -1547 -1549 -1557 -1559 -1560 -1562 -2346 -1565 -1566 -1567 -1568 -1569 -1570 -312 -1574 -1576 -1577 -1578 -1579 -1580 -1584 -1585 -1587 -1588 -1591 -1592 -1593 -2352 -1595 -1596 -1599 -1600 -1601 -1602 -1603 -1606 -1608 -1609 -1616 -1618 -1620 -1622 -1623 -1625 -1626 -1627 -1629 -1631 -1632 -1636 -1637 -1641 -1643 -1644 -1647 -1649 -1651 -1654 -1655 -1657 -1658 -2365 -1661 -1662 -1664 -1668 -1669 -1671 -1672 -1676 -1678 -1679 -334 -1681 -1683 -1684 -1682 -1686 -1687 -1688 -1690 -1693 -1697 -1701 -1703 -1705 -339 -1712 -1714 -1721 -342 -1724 -1725 -1726 -1727 -1730 -1732 -1735 -1736 -1737 -1739 -1741 -1744 -1746 -1747 -1748 -2383 -1755 -1758 -1759 -2385 -1764 -1765 -1766 -1767 -1768 -1769 -1770 -2387 -1772 -1773 -1776 -1777 -1781 -1782 -1786 -1788 -1790 -1791 -1793 -1794 -1795 -1798 -1800 -1802 -1804 -1805 -1806 -1808 -1810 -1812 -1815 -1816 -1817 -1819 -1820 -1824 -1826 -1827 -1828 -1829 -1831 -1832 -1835 -1836 -1837 -1841 -1848 -1853 -1856 -1857 -1858 -1859 -1861 -1862 -1863 -1868 -1873 -1876 -1877 -1882 -375 -1888 -1889 -1892 -1893 -1894 -1895 -1897 -1901 -1904 -1906 -1907 -1912 -1914 -1918 -1919 -1921 -1923 -1928 -1929 -1930 -1931 -1935 -2420 -1938 -1939 -1944 -1945 -1948 -1949 -1951 -1954 -1955 -1957 -1960 -1962 -1963 -1964 -1965 -1970 -1974 -1975 -1983 -1985 -1987 -1989 -1993 
-1994 -1995 -1996 -2432 -1998 -1999 -1997 -2000 -2003 -2004 -2005 -2006 -2434 -2009 -2010 -2011 -2012 -2013 -2014 -2016 -2018 -2020 -2023 -2027 -2028 -2030 -2031 -2032 -2035 -2036 -2037 -2038 -2040 -2041 -2043 -2044 -2045 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_300_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_300_shadow_graph_index.txt deleted file mode 100644 index 472365a2..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_300_shadow_graph_index.txt +++ /dev/null @@ -1,350 +0,0 @@ -1 -6 -2057 -13 -2066 -2072 -29 -2079 -2080 -2090 -2091 -2114 -2117 -71 -2120 -74 -2124 -2126 -79 -2132 -90 -92 -96 -2150 -106 -2155 -2171 -2172 -2179 -134 -2183 -135 -136 -140 -147 -2195 -154 -156 -2207 -159 -2210 -2213 -2215 -2217 -2222 -178 -2227 -2229 -2231 -2253 -2266 -218 -2270 -224 -227 -2279 -2282 -2289 -2290 -2291 -244 -251 -2300 -257 -2305 -261 -263 -2312 -2329 -286 -2335 -2336 -2339 -296 -2349 -303 -310 -2358 -314 -316 -319 -321 -323 -2382 -335 -2384 -338 -2389 -2390 -344 -345 -348 -2415 -2421 -376 -377 -2424 -2426 -378 -382 -383 -2431 -2438 -392 -2440 -396 -2447 -2460 -414 -2464 -2465 -418 -2467 -2468 -2473 -426 -429 -434 -438 -2493 -449 -2503 -2513 -470 -478 -479 -480 -2530 -482 -484 -2539 -2543 -2550 -2551 -505 -2555 -509 -511 -2560 -521 -2570 -2579 -2587 -2589 -2591 -547 -550 -551 -2609 -2617 -578 -585 -2634 -2635 -2636 -2639 -597 -604 -2654 -2663 -618 -622 -626 -627 -2674 -631 -638 -641 -642 -2692 -654 -661 -667 -669 -675 -677 -680 -692 -697 -704 -720 -750 -754 -790 -794 -795 -796 -802 -822 -826 -828 -837 -865 -867 -871 -877 -886 -893 -931 -932 -937 -949 -963 -973 -977 -985 -987 -993 -1000 -1007 -1008 -1010 -1011 -1013 -1017 -1027 -1033 -1057 -1061 -1073 -1081 -1082 -1087 -1092 -1102 -1104 -1112 -1118 -1135 -1156 -1165 -1168 -1174 -1177 -1180 -1182 -1185 -1187 -1191 -1192 -1197 -1220 -1226 -1227 -1234 -1236 -1237 -1238 -1244 -1259 -1260 -1262 -1273 -1275 
-1290 -1295 -1297 -1307 -1312 -1322 -1327 -1356 -1359 -1370 -1403 -1413 -1422 -1428 -1430 -1442 -1444 -1445 -1457 -1462 -1479 -1482 -1484 -1486 -1489 -1505 -1510 -1512 -1514 -1519 -1521 -1522 -1539 -1562 -1569 -1574 -1578 -1587 -1596 -1608 -1609 -1618 -1620 -1622 -1627 -1629 -1636 -1637 -1641 -1651 -1657 -1669 -1671 -1679 -1684 -1686 -1721 -1726 -1739 -1746 -1776 -1781 -1786 -1798 -1804 -1805 -1806 -1808 -1817 -1819 -1829 -1837 -1856 -1868 -1876 -1877 -1906 -1918 -1919 -1923 -1928 -1929 -1944 -1948 -1949 -1954 -1960 -1963 -1974 -2010 -2013 -2031 -2032 -2038 -2043 -2045 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_500_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_500_shadow_graph_index.txt deleted file mode 100644 index 7d6e4a24..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_500_shadow_graph_index.txt +++ /dev/null @@ -1,573 +0,0 @@ -1 -2 -3 -6 -2057 -13 -16 -2066 -21 -2072 -24 -28 -29 -2078 -2079 -2080 -34 -35 -39 -2090 -2091 -2093 -54 -57 -60 -63 -2114 -2117 -2118 -71 -2120 -74 -2124 -77 -2126 -79 -82 -2132 -85 -90 -92 -96 -2150 -103 -106 -2155 -111 -118 -2171 -2172 -128 -2179 -132 -134 -2183 -135 -136 -140 -143 -144 -147 -2195 -148 -153 -154 -156 -2207 -159 -2210 -162 -2213 -2215 -167 -2217 -2221 -2222 -173 -178 -2227 -180 -2229 -2231 -186 -197 -2253 -207 -210 -2266 -218 -2270 -224 -227 -2278 -2279 -2282 -2283 -2284 -239 -2289 -2290 -2291 -244 -251 -2300 -252 -257 -2305 -261 -263 -2312 -269 -277 -2328 -2329 -283 -286 -2335 -2336 -2339 -294 -295 -296 -298 -2349 -303 -305 -306 -310 -2358 -314 -316 -319 -2368 -321 -323 -2381 -2382 -335 -2384 -337 -338 -333 -2389 -2390 -344 -345 -346 -348 -350 -351 -2402 -2404 -2406 -2409 -363 -2415 -367 -370 -2421 -374 -376 -377 -2424 -2426 -378 -382 -383 -2431 -2430 -385 -2438 -392 -2440 -2442 -396 -2447 -405 -409 -410 -2460 -412 -414 -2464 -2465 -418 -2467 -2468 -2472 -2473 -426 -425 -2476 -429 -2480 -434 -2485 -438 
-440 -444 -2493 -446 -449 -2503 -455 -463 -2513 -465 -466 -470 -471 -472 -473 -477 -478 -479 -480 -2530 -482 -484 -483 -2536 -2539 -2543 -2550 -2551 -502 -505 -2555 -509 -511 -2560 -521 -2570 -523 -2579 -2587 -2589 -2591 -547 -548 -550 -551 -552 -554 -559 -2609 -562 -564 -565 -566 -567 -2617 -575 -578 -2628 -2629 -585 -2634 -2635 -2636 -586 -588 -2639 -2642 -597 -2647 -601 -604 -2654 -2658 -2663 -618 -621 -622 -2672 -626 -627 -2674 -631 -633 -636 -638 -641 -642 -2692 -644 -650 -653 -654 -656 -657 -661 -664 -667 -669 -671 -675 -677 -680 -682 -688 -691 -692 -697 -699 -704 -720 -723 -730 -733 -738 -739 -749 -750 -753 -754 -770 -773 -775 -790 -794 -795 -796 -802 -804 -807 -822 -826 -828 -835 -837 -842 -865 -867 -868 -870 -871 -875 -877 -882 -886 -893 -929 -931 -932 -933 -937 -946 -949 -963 -968 -970 -973 -974 -977 -978 -985 -987 -993 -1000 -1005 -1007 -1008 -1010 -1011 -1012 -1013 -1017 -1027 -1033 -1045 -1057 -1061 -1073 -1081 -1082 -1087 -1092 -1102 -1104 -1112 -1118 -1135 -1137 -1156 -1165 -1167 -1168 -1174 -1177 -1179 -1180 -1182 -1183 -1185 -1187 -1189 -1191 -1192 -1193 -1197 -1202 -1213 -1220 -1226 -1227 -1228 -1234 -1236 -1237 -1238 -1244 -1248 -1259 -1260 -1262 -1273 -1275 -1285 -1290 -1293 -1295 -1297 -1298 -1307 -1312 -1313 -1314 -1316 -1322 -1327 -1330 -1331 -1332 -1356 -1359 -1370 -1376 -1378 -1382 -1396 -1401 -1403 -1408 -1413 -1422 -1428 -1430 -1438 -1440 -1442 -1443 -1444 -1445 -1457 -1459 -1462 -1479 -1482 -1484 -1486 -1489 -1505 -1510 -1512 -1514 -1519 -1521 -1522 -1529 -1539 -1540 -1546 -1557 -1559 -1562 -1569 -1574 -1578 -1585 -1587 -1592 -1596 -1599 -1600 -1606 -1608 -1609 -1616 -1618 -1620 -1622 -1623 -1627 -1629 -1632 -1636 -1637 -1641 -1643 -1649 -1651 -1655 -1657 -1664 -1669 -1671 -1672 -1676 -1679 -1683 -1684 -1686 -1687 -1693 -1697 -1703 -1721 -1726 -1727 -1739 -1741 -1746 -1776 -1781 -1782 -1786 -1790 -1791 -1798 -1804 -1805 -1806 -1808 -1816 -1817 -1819 -1828 -1829 -1837 -1841 -1856 -1859 -1868 -1876 -1877 -1906 -1918 -1919 -1921 -1923 -1928 
-1929 -1931 -1944 -1948 -1949 -1954 -1957 -1960 -1963 -1974 -1983 -2010 -2013 -2014 -2018 -2027 -2031 -2032 -2038 -2043 -2045 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_700_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_700_shadow_graph_index.txt deleted file mode 100644 index d38343c6..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_700_shadow_graph_index.txt +++ /dev/null @@ -1,783 +0,0 @@ -1 -2 -3 -4 -6 -2057 -13 -15 -16 -2064 -2066 -17 -21 -2072 -24 -2074 -28 -29 -2078 -2079 -2080 -31 -34 -35 -2084 -33 -36 -39 -2090 -2091 -2093 -46 -54 -57 -60 -61 -63 -2113 -2114 -2117 -2118 -71 -2120 -74 -2124 -77 -2126 -79 -80 -82 -2132 -85 -90 -92 -2141 -96 -2145 -2150 -103 -106 -2155 -107 -108 -111 -2162 -118 -2171 -2172 -128 -2179 -132 -134 -2183 -135 -136 -140 -142 -143 -144 -146 -147 -2195 -148 -152 -153 -154 -156 -2207 -159 -2209 -2210 -162 -2213 -2215 -167 -2217 -170 -2221 -2222 -173 -176 -2225 -178 -2227 -180 -2229 -2231 -186 -197 -2247 -2253 -207 -2255 -210 -2263 -2266 -218 -2270 -224 -227 -2278 -2279 -230 -2282 -2283 -2284 -234 -238 -239 -240 -2289 -2290 -2291 -244 -250 -251 -2300 -252 -253 -254 -256 -257 -2305 -2307 -2308 -261 -263 -2312 -269 -2320 -277 -2328 -2329 -282 -283 -286 -2335 -2336 -2339 -294 -295 -296 -2345 -298 -2349 -301 -303 -2351 -305 -306 -304 -310 -2358 -311 -314 -316 -319 -2368 -321 -2369 -323 -2381 -2382 -335 -2384 -337 -338 -333 -340 -2389 -2390 -341 -344 -345 -346 -2386 -348 -350 -351 -353 -2402 -2404 -2406 -2408 -2409 -363 -2412 -2415 -367 -370 -2418 -2421 -374 -376 -377 -2424 -2426 -378 -379 -382 -383 -2431 -2430 -385 -2433 -2437 -2438 -391 -392 -2440 -2442 -396 -2447 -399 -405 -406 -409 -410 -2460 -412 -414 -2464 -2465 -418 -2467 -2468 -2472 -2473 -426 -425 -2476 -429 -427 -2477 -2480 -2481 -434 -2485 -438 -2486 -440 -444 -2493 -446 -449 -2503 -455 -461 -463 -2512 -2513 -465 -466 -2511 -2515 -470 -471 -472 -473 -477 -478 -479 -480 
-2528 -2530 -482 -484 -483 -2536 -489 -2539 -493 -494 -2543 -498 -499 -2550 -2551 -502 -505 -2555 -509 -511 -2560 -520 -521 -2570 -523 -2569 -524 -526 -2576 -529 -2579 -2584 -2586 -2587 -2589 -2591 -547 -548 -550 -551 -552 -2599 -554 -559 -2607 -2609 -562 -564 -565 -566 -567 -2615 -2617 -575 -2623 -578 -2628 -2629 -2630 -2632 -585 -2634 -586 -588 -589 -2635 -2639 -592 -2636 -2642 -2637 -597 -2647 -2648 -601 -604 -2652 -2654 -2655 -2658 -610 -2663 -618 -621 -622 -2670 -2672 -2671 -626 -627 -2674 -2678 -631 -633 -2683 -636 -638 -639 -641 -642 -2689 -2692 -644 -650 -653 -654 -2701 -656 -657 -658 -661 -664 -665 -667 -669 -671 -674 -675 -677 -680 -682 -688 -689 -691 -692 -694 -697 -699 -701 -704 -708 -709 -720 -723 -730 -733 -736 -738 -739 -749 -750 -753 -754 -756 -770 -773 -775 -784 -790 -794 -795 -796 -802 -804 -807 -822 -823 -826 -828 -832 -835 -837 -838 -842 -845 -850 -852 -855 -865 -867 -868 -870 -871 -875 -877 -882 -886 -887 -584 -893 -895 -898 -917 -928 -929 -931 -932 -933 -937 -946 -949 -954 -963 -968 -969 -970 -973 -974 -977 -978 -983 -985 -987 -993 -996 -1000 -1002 -1005 -1007 -1008 -1010 -1011 -1012 -1013 -1015 -1017 -1027 -1033 -1045 -1051 -1052 -1057 -1061 -1066 -1073 -1081 -1082 -1087 -1092 -1102 -1104 -1112 -1116 -1118 -1128 -1135 -1137 -1144 -1156 -1165 -1167 -1168 -1169 -1174 -1177 -1179 -1180 -1182 -1183 -1185 -1187 -1189 -1191 -1192 -1193 -1197 -1202 -1213 -1217 -1220 -1226 -1227 -1228 -1234 -1236 -1237 -1238 -1244 -1248 -1250 -1251 -1259 -1260 -1262 -1273 -1275 -1283 -1284 -1285 -1288 -1290 -1293 -1295 -1297 -1298 -1302 -1307 -1312 -1313 -1314 -1316 -1318 -1322 -1327 -1330 -1331 -1332 -1338 -1349 -1356 -1357 -1359 -1361 -1370 -1376 -1378 -1382 -1383 -1384 -1396 -1398 -1399 -1401 -1403 -1405 -1406 -1408 -1413 -1422 -1428 -1430 -1436 -1438 -1440 -1442 -1443 -1444 -1445 -1454 -1457 -1458 -1459 -1461 -1462 -1463 -1479 -1482 -1484 -1486 -1489 -1505 -1509 -1510 -1511 -1512 -1514 -1519 -1521 -1522 -1529 -1531 -1539 -1540 -1546 -1547 -1557 -1559 -1562 -1569 
-1574 -1577 -1578 -1580 -1585 -1587 -1591 -1592 -1593 -1596 -1599 -1600 -1606 -1608 -1609 -1616 -1618 -1620 -1622 -1623 -1625 -1627 -1629 -1632 -1636 -1637 -1641 -1643 -1647 -1649 -1651 -1654 -1655 -1657 -1664 -1669 -1671 -1672 -1676 -1679 -1683 -1684 -1686 -1687 -1693 -1697 -1701 -1703 -1714 -1721 -1726 -1727 -1735 -1739 -1741 -1744 -1746 -2383 -1758 -1764 -1765 -1766 -1770 -2387 -1772 -1776 -1777 -1781 -1782 -1786 -1790 -1791 -1798 -1802 -1804 -1805 -1806 -1808 -1810 -1815 -1816 -1817 -1819 -1820 -1826 -1828 -1829 -1837 -1841 -1853 -1856 -1858 -1859 -1861 -1868 -1873 -1876 -1877 -1882 -1889 -1906 -1912 -1918 -1919 -1921 -1923 -1928 -1929 -1930 -1931 -1938 -1939 -1944 -1948 -1949 -1954 -1957 -1960 -1963 -1970 -1974 -1975 -1983 -1987 -1989 -1998 -1999 -2003 -2005 -2010 -2013 -2014 -2018 -2027 -2031 -2032 -2038 -2041 -2043 -2044 -2045 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_900_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_900_shadow_graph_index.txt deleted file mode 100644 index b8aaaae6..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_900_shadow_graph_index.txt +++ /dev/null @@ -1,985 +0,0 @@ -1 -2 -3 -4 -2053 -6 -2054 -2057 -9 -13 -15 -16 -2064 -2066 -17 -2065 -21 -20 -2072 -24 -2074 -25 -28 -29 -2078 -2079 -2080 -31 -34 -35 -2084 -33 -36 -39 -37 -38 -2090 -2091 -2086 -2093 -46 -2089 -40 -2098 -2099 -54 -2102 -2103 -57 -60 -61 -2110 -63 -2113 -2114 -2117 -2118 -71 -2120 -74 -2124 -77 -2126 -79 -80 -82 -2132 -85 -90 -92 -2141 -96 -2145 -2146 -98 -2150 -103 -106 -2155 -107 -108 -2156 -111 -109 -2162 -114 -118 -2168 -2171 -2172 -128 -130 -2179 -132 -134 -2183 -135 -136 -2182 -140 -142 -143 -144 -146 -2195 -147 -148 -2198 -152 -153 -154 -156 -2207 -159 -2209 -2210 -162 -2211 -2213 -164 -2215 -167 -2217 -170 -171 -2221 -2222 -173 -176 -2225 -178 -2227 -180 -2229 -2224 -2231 -186 -197 -2246 -2247 -199 -2253 -207 -2255 -2257 -210 -209 -2263 -2266 -218 
-2270 -224 -2273 -227 -2278 -2279 -230 -2282 -2283 -2284 -234 -238 -239 -240 -2289 -2290 -2291 -244 -2083 -242 -247 -250 -251 -2300 -252 -253 -254 -256 -257 -2305 -2307 -2308 -261 -2303 -263 -2312 -267 -269 -2320 -273 -277 -2328 -2329 -282 -283 -281 -2330 -286 -2335 -2336 -2332 -2334 -2339 -287 -294 -295 -296 -2345 -298 -299 -2349 -301 -303 -2351 -305 -306 -304 -2353 -2350 -310 -2358 -311 -2354 -314 -2356 -316 -308 -2360 -319 -2368 -321 -2369 -323 -2381 -2382 -335 -2384 -337 -338 -333 -340 -2389 -2390 -341 -344 -345 -346 -2386 -348 -2391 -350 -351 -2394 -353 -2402 -2404 -2406 -2407 -2408 -2409 -363 -2412 -2415 -367 -370 -2418 -372 -2421 -374 -376 -377 -2424 -2426 -378 -379 -382 -383 -2431 -2430 -385 -2433 -2427 -2437 -2438 -391 -392 -2440 -2442 -396 -2444 -2447 -399 -400 -405 -406 -2455 -409 -410 -2460 -412 -414 -415 -2464 -2465 -418 -2467 -2468 -421 -2472 -2473 -426 -425 -2476 -429 -427 -2477 -2480 -2481 -434 -2475 -2483 -2485 -438 -2486 -440 -444 -2493 -446 -449 -453 -2503 -455 -456 -459 -460 -461 -2509 -463 -2512 -2513 -465 -466 -2511 -2515 -470 -471 -472 -473 -477 -478 -479 -480 -2528 -2530 -482 -484 -483 -481 -2536 -489 -488 -2539 -493 -494 -2543 -498 -499 -500 -2548 -2550 -2551 -502 -505 -506 -2555 -2556 -509 -507 -511 -2560 -514 -517 -520 -521 -2570 -523 -2569 -524 -526 -2576 -529 -2579 -535 -2584 -2586 -2587 -2589 -2591 -547 -548 -549 -550 -551 -552 -2599 -554 -2598 -2604 -555 -556 -559 -2607 -2609 -562 -563 -564 -565 -566 -567 -2615 -2617 -569 -2618 -574 -575 -2623 -578 -2628 -2629 -2630 -2632 -585 -2634 -586 -588 -589 -2635 -2639 -592 -2636 -2642 -2637 -591 -597 -596 -2647 -2648 -601 -602 -604 -2652 -2654 -2655 -2653 -2658 -610 -614 -2663 -2664 -615 -618 -621 -622 -2670 -2672 -2671 -626 -627 -2674 -2678 -631 -633 -2683 -636 -2685 -638 -639 -640 -641 -642 -2689 -2692 -644 -2690 -2695 -2696 -650 -653 -654 -2701 -656 -657 -658 -2704 -661 -664 -665 -667 -669 -671 -672 -674 -675 -677 -680 -682 -688 -689 -691 -692 -694 -697 -699 -701 -704 -706 -708 -709 -712 
-720 -723 -730 -732 -733 -736 -738 -739 -746 -749 -750 -753 -754 -756 -761 -767 -770 -773 -775 -778 -784 -786 -790 -791 -792 -794 -795 -796 -798 -802 -804 -807 -822 -823 -826 -828 -832 -835 -837 -838 -842 -845 -850 -852 -854 -855 -858 -859 -865 -867 -868 -870 -871 -875 -876 -877 -882 -884 -886 -887 -584 -893 -895 -898 -917 -928 -929 -931 -932 -933 -937 -946 -949 -954 -956 -2633 -963 -968 -969 -970 -973 -974 -977 -978 -983 -985 -987 -991 -993 -995 -996 -1000 -1002 -1003 -1004 -1005 -1006 -1007 -1008 -1010 -1011 -1012 -1013 -1015 -1017 -1025 -1026 -1027 -1033 -1034 -1036 -1045 -1051 -1052 -1057 -1061 -1066 -1073 -1074 -1081 -1082 -1084 -1087 -1092 -1095 -1099 -1102 -1104 -1105 -1112 -1114 -1116 -1117 -1118 -1124 -1128 -1135 -1137 -1144 -1156 -1160 -1165 -1166 -1167 -1168 -1169 -1170 -1174 -1177 -1178 -1179 -1180 -1181 -1182 -1183 -1185 -1187 -1189 -1191 -1192 -1193 -1197 -1198 -1202 -1213 -1217 -1220 -1226 -1227 -1228 -1230 -1231 -1232 -1234 -1236 -1237 -1238 -1244 -1248 -1250 -1251 -1259 -1260 -1261 -1262 -1269 -1273 -1275 -1283 -1284 -1285 -255 -1288 -1290 -1293 -1295 -1296 -1297 -1298 -1302 -1307 -1312 -1313 -1314 -1316 -1318 -1322 -1327 -1330 -1331 -1332 -1338 -1343 -1349 -1356 -1357 -1358 -1359 -1360 -1361 -1363 -1364 -1370 -1371 -1376 -1378 -1382 -1383 -1384 -1386 -1396 -1398 -1399 -1401 -1403 -1405 -1406 -1408 -1413 -1422 -1428 -1430 -1433 -1436 -1438 -1440 -1442 -1443 -1444 -1445 -1449 -1454 -1456 -1457 -1458 -1459 -1461 -1462 -1463 -1469 -1479 -1482 -1484 -1486 -1489 -1495 -1505 -1509 -1510 -1511 -1512 -1514 -1519 -1521 -1522 -1529 -1531 -1539 -1540 -1546 -1547 -1557 -1559 -1560 -1562 -1566 -1567 -1569 -1574 -1577 -1578 -1580 -1585 -1587 -1588 -1591 -1592 -1593 -1596 -1599 -1600 -1603 -1606 -1608 -1609 -1616 -1618 -1620 -1622 -1623 -1625 -1627 -1629 -1631 -1632 -1636 -1637 -1641 -1643 -1647 -1649 -1651 -1654 -1655 -1657 -1658 -1661 -1662 -1664 -1669 -1671 -1672 -1676 -1679 -334 -1683 -1684 -1686 -1687 -1688 -1690 -1693 -1697 -1701 -1703 -1705 -1712 -1714 
-1721 -1726 -1727 -1730 -1735 -1736 -1737 -1739 -1741 -1744 -1746 -1748 -2383 -1758 -1764 -1765 -1766 -1767 -1768 -1769 -1770 -2387 -1772 -1776 -1777 -1781 -1782 -1786 -1790 -1791 -1798 -1802 -1804 -1805 -1806 -1808 -1810 -1815 -1816 -1817 -1819 -1820 -1826 -1827 -1828 -1829 -1831 -1832 -1835 -1836 -1837 -1841 -1848 -1853 -1856 -1858 -1859 -1861 -1868 -1873 -1876 -1877 -1882 -1888 -1889 -1897 -1904 -1906 -1912 -1918 -1919 -1921 -1923 -1928 -1929 -1930 -1931 -1938 -1939 -1944 -1948 -1949 -1951 -1954 -1957 -1960 -1962 -1963 -1965 -1970 -1974 -1975 -1983 -1985 -1987 -1989 -1998 -1999 -2003 -2004 -2005 -2010 -2011 -2013 -2014 -2018 -2027 -2028 -2031 -2032 -2035 -2036 -2038 -2040 -2041 -2043 -2044 -2045 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_shadow_graph_index.txt deleted file mode 100644 index fd5bd4af..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/protential_shadow_graph_index.txt +++ /dev/null @@ -1,1300 +0,0 @@ -1 -2 -3 -4 -2053 -6 -2054 -2050 -2057 -9 -2055 -2056 -13 -12 -15 -16 -2064 -2066 -17 -2065 -21 -20 -2069 -2072 -24 -2074 -25 -28 -29 -2078 -2079 -2080 -31 -34 -35 -2084 -33 -36 -39 -37 -38 -2090 -2091 -2086 -2093 -46 -2089 -40 -2095 -2098 -2099 -2097 -51 -54 -2102 -2103 -57 -2105 -60 -61 -2110 -63 -2113 -2114 -2115 -67 -2117 -2118 -71 -2120 -65 -74 -2124 -77 -2126 -79 -80 -2128 -82 -2130 -2132 -85 -84 -2135 -2131 -90 -92 -2141 -2143 -96 -2145 -2146 -98 -2144 -2149 -2150 -103 -102 -2151 -106 -2155 -107 -108 -2156 -111 -109 -105 -2162 -114 -2160 -2161 -118 -117 -2168 -2167 -2166 -2171 -2172 -125 -2169 -128 -2176 -130 -2179 -132 -133 -134 -2183 -135 -136 -2182 -27 -140 -2188 -142 -143 -144 -2189 -146 -2195 -147 -148 -2198 -151 -152 -153 -154 -2194 -156 -155 -2207 -159 -2209 -2210 -162 -2211 -2213 -164 -2215 -167 -2217 -170 -171 -2221 -2222 -173 -176 -2225 -178 -2227 -180 -2229 -2224 -2231 -179 -2230 -186 -187 
-2241 -197 -2246 -2247 -199 -2253 -2076 -207 -2255 -2257 -210 -209 -2260 -211 -2077 -2263 -2266 -218 -2270 -224 -2273 -227 -2278 -2279 -230 -2282 -2283 -2284 -234 -238 -239 -240 -2289 -2290 -2291 -244 -2083 -242 -247 -248 -2288 -250 -251 -2300 -252 -253 -254 -256 -257 -2305 -2307 -2308 -261 -2303 -263 -2312 -2310 -2088 -267 -260 -269 -270 -271 -2320 -273 -2316 -275 -2324 -277 -2325 -2326 -2328 -2329 -282 -283 -281 -2330 -286 -2335 -2336 -2332 -2334 -2339 -287 -2340 -294 -295 -296 -2345 -298 -299 -2344 -2349 -301 -303 -2351 -305 -306 -304 -2353 -2350 -310 -2358 -311 -2354 -314 -2356 -316 -308 -2360 -319 -2368 -321 -2369 -323 -2364 -317 -318 -325 -2378 -331 -2381 -2382 -335 -2384 -337 -338 -333 -340 -2389 -2390 -341 -344 -345 -346 -2386 -348 -2391 -350 -351 -2394 -353 -2402 -347 -2404 -2406 -2407 -2408 -2409 -360 -363 -2412 -2415 -367 -368 -370 -2418 -372 -2421 -374 -2423 -376 -377 -2424 -2426 -378 -379 -382 -383 -2431 -2430 -385 -2433 -2427 -2437 -2438 -391 -392 -2440 -2442 -394 -396 -2444 -397 -2447 -399 -400 -398 -2452 -405 -406 -2455 -404 -409 -410 -2457 -2460 -413 -2461 -412 -414 -415 -2464 -2465 -419 -418 -2467 -2468 -421 -417 -2472 -2475 -2473 -426 -425 -2479 -2476 -429 -427 -2477 -2480 -2481 -434 -2483 -2485 -438 -2486 -440 -2484 -444 -2493 -446 -445 -449 -452 -453 -2501 -2503 -455 -456 -459 -460 -461 -2509 -463 -2512 -2513 -465 -466 -2511 -2515 -470 -471 -472 -473 -474 -467 -2524 -477 -478 -479 -480 -2528 -2530 -482 -484 -483 -2534 -481 -2535 -2536 -489 -488 -2539 -2538 -493 -495 -494 -2543 -2540 -498 -499 -500 -2548 -2550 -2551 -502 -505 -506 -2555 -2556 -509 -507 -511 -2560 -508 -514 -2562 -513 -517 -2559 -520 -521 -2570 -523 -2569 -524 -526 -2546 -2576 -529 -2579 -532 -2582 -535 -2584 -2583 -2586 -2587 -2588 -2589 -2545 -2591 -547 -548 -549 -550 -551 -552 -2599 -554 -2598 -2604 -555 -556 -559 -2607 -2609 -562 -563 -564 -565 -566 -567 -2615 -2617 -569 -2618 -2614 -2621 -574 -575 -2623 -568 -578 -2627 -2628 -2629 -2630 -2632 -585 -2634 -586 -588 -589 -2635 
-2639 -592 -2636 -2642 -2637 -591 -597 -596 -2647 -2648 -601 -602 -604 -2652 -2654 -2655 -2653 -607 -2658 -610 -608 -611 -614 -2663 -2664 -615 -618 -619 -2668 -621 -622 -2670 -2672 -2671 -626 -627 -2674 -2676 -2678 -631 -2675 -633 -2681 -2683 -636 -2685 -638 -639 -640 -641 -642 -2689 -2692 -644 -2690 -2695 -2696 -645 -650 -2697 -2698 -653 -654 -2701 -656 -657 -658 -2704 -2707 -661 -664 -665 -667 -668 -669 -671 -672 -674 -675 -677 -680 -682 -688 -689 -690 -691 -692 -694 -697 -698 -699 -701 -704 -706 -708 -709 -712 -713 -715 -716 -718 -2178 -720 -723 -729 -730 -732 -733 -2181 -736 -738 -739 -742 -743 -744 -746 -749 -750 -753 -754 -756 -761 -767 -768 -2595 -770 -772 -773 -774 -775 -776 -778 -2190 -783 -784 -785 -786 -790 -791 -792 -794 -795 -796 -798 -802 -804 -160 -807 -161 -821 -822 -823 -826 -827 -828 -832 -835 -836 -837 -838 -573 -841 -842 -843 -845 -850 -852 -853 -854 -855 -858 -859 -861 -862 -864 -865 -867 -868 -870 -871 -2616 -875 -876 -877 -2525 -882 -884 -886 -887 -584 -893 -895 -898 -899 -909 -912 -913 -917 -921 -926 -927 -928 -929 -931 -932 -933 -937 -946 -947 -949 -950 -954 -956 -2633 -960 -963 -968 -969 -970 -971 -973 -974 -975 -977 -978 -980 -983 -984 -985 -986 -987 -991 -993 -995 -996 -998 -999 -1000 -1002 -1003 -1004 -1005 -1006 -1007 -1008 -1010 -1011 -1012 -1013 -1015 -1017 -1019 -1025 -1026 -1027 -1033 -1034 -1036 -1045 -1048 -1050 -1051 -1052 -1053 -1055 -1057 -1059 -1061 -1066 -1073 -1074 -1076 -1078 -1079 -1081 -1082 -2657 -1084 -1086 -1087 -1088 -2659 -1092 -1095 -1099 -1101 -1102 -1103 -1104 -1105 -1109 -1111 -1112 -1114 -1116 -1117 -1118 -1124 -1128 -1135 -1137 -1141 -634 -1144 -1147 -1155 -1156 -1160 -1161 -1164 -1165 -1166 -1167 -1168 -1169 -1170 -1171 -1173 -1174 -1177 -1178 -1179 -1180 -1181 -1182 -1183 -2082 -1185 -1187 -1189 -1191 -1192 -1193 -1197 -1198 -1201 -1202 -1208 -1213 -1217 -1220 -1226 -1227 -1228 -1230 -1231 -1232 -1234 -1236 -1237 -1238 -1239 -1240 -1241 -1244 -1248 -1250 -1251 -1256 -1259 -1260 -1261 -1262 -1265 -1266 -1269 
-1273 -1275 -1283 -1284 -1285 -255 -1288 -1290 -1293 -1295 -1296 -1297 -1298 -2700 -1301 -1302 -1306 -1307 -1310 -1312 -1313 -1314 -1315 -1316 -1318 -1322 -1324 -1326 -1327 -1328 -1330 -1331 -1332 -2301 -1338 -1339 -1343 -1347 -1349 -1352 -1356 -1357 -1358 -1359 -1360 -1361 -1363 -1364 -1370 -1371 -1376 -1378 -1382 -1383 -1384 -1386 -462 -1387 -1389 -276 -1395 -1396 -1398 -1399 -1401 -1403 -279 -1405 -1406 -1407 -1408 -1412 -1413 -1417 -1421 -1422 -1423 -1428 -1429 -1430 -1431 -2319 -1433 -1434 -1436 -464 -1438 -1440 -1442 -1443 -1444 -1445 -1449 -1454 -1456 -1457 -1458 -1459 -1461 -1462 -1463 -1469 -1479 -1482 -1484 -1486 -1487 -1488 -1489 -1492 -1493 -1495 -2333 -1499 -1505 -1508 -1509 -1510 -1511 -1512 -1513 -1514 -1519 -1521 -1522 -1527 -1528 -1529 -1531 -1538 -1539 -1540 -1541 -1543 -1546 -1547 -1549 -1557 -1559 -1560 -1562 -2346 -1565 -1566 -1567 -1568 -1569 -1570 -312 -1574 -1576 -1577 -1578 -1579 -1580 -1584 -1585 -1587 -1588 -1591 -1592 -1593 -2352 -1595 -1596 -1599 -1600 -1601 -1602 -1603 -1606 -1608 -1609 -1616 -1618 -1620 -1622 -1623 -1625 -1626 -1627 -1629 -1631 -1632 -1636 -1637 -1641 -1643 -1644 -1647 -1649 -1651 -1654 -1655 -1657 -1658 -2365 -1661 -1662 -1664 -1668 -1669 -1671 -1672 -1676 -1678 -1679 -334 -1681 -1683 -1684 -1682 -1686 -1687 -1688 -1690 -1693 -1697 -1701 -1703 -1705 -339 -1712 -1714 -1721 -342 -1724 -1725 -1726 -1727 -1730 -1732 -1735 -1736 -1737 -1739 -1741 -1744 -1746 -1747 -1748 -2383 -1755 -1758 -1759 -2385 -1764 -1765 -1766 -1767 -1768 -1769 -1770 -2387 -1772 -1773 -1776 -1777 -1781 -1782 -1786 -1788 -1790 -1791 -1793 -1794 -1795 -1798 -1800 -1802 -1804 -1805 -1806 -1808 -1810 -1812 -1815 -1816 -1817 -1819 -1820 -1824 -1826 -1827 -1828 -1829 -1831 -1832 -1835 -1836 -1837 -1841 -1848 -1853 -1856 -1857 -1858 -1859 -1861 -1862 -1863 -1868 -1873 -1876 -1877 -1882 -375 -1888 -1889 -1892 -1893 -1894 -1895 -1897 -1901 -1904 -1906 -1907 -1912 -1914 -1918 -1919 -1921 -1923 -1928 -1929 -1930 -1931 -1935 -2420 -1938 -1939 -1944 -1945 -1948 
-1949 -1951 -1954 -1955 -1957 -1960 -1962 -1963 -1964 -1965 -1970 -1974 -1975 -1983 -1985 -1987 -1989 -1993 -1994 -1995 -1996 -2432 -1998 -1999 -1997 -2000 -2003 -2004 -2005 -2006 -2434 -2009 -2010 -2011 -2012 -2013 -2014 -2016 -2018 -2020 -2023 -2027 -2028 -2030 -2031 -2032 -2035 -2036 -2037 -2038 -2040 -2041 -2043 -2044 -2045 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/target_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/cora/target_graph_index.txt deleted file mode 100644 index 0eab7dcc..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/cora/target_graph_index.txt +++ /dev/null @@ -1,1408 +0,0 @@ -0 -5 -7 -8 -10 -11 -14 -18 -19 -22 -23 -26 -30 -32 -41 -42 -43 -44 -45 -47 -48 -49 -50 -52 -53 -55 -56 -58 -59 -62 -64 -66 -68 -69 -70 -72 -73 -75 -76 -78 -81 -83 -86 -87 -88 -89 -91 -93 -94 -95 -97 -99 -100 -101 -104 -110 -112 -113 -115 -116 -119 -120 -121 -122 -123 -124 -126 -127 -129 -131 -137 -138 -139 -141 -145 -149 -150 -157 -158 -163 -165 -166 -168 -169 -172 -174 -175 -177 -181 -182 -183 -184 -185 -188 -189 -190 -191 -192 -193 -194 -195 -196 -198 -200 -201 -202 -203 -204 -205 -206 -208 -212 -213 -214 -215 -216 -217 -219 -220 -221 -222 -223 -225 -226 -228 -229 -231 -232 -233 -235 -236 -237 -241 -243 -245 -246 -249 -258 -259 -262 -264 -265 -266 -268 -272 -274 -278 -280 -284 -285 -288 -289 -290 -291 -292 -293 -297 -300 -302 -307 -309 -313 -315 -320 -322 -324 -326 -327 -328 -329 -330 -332 -336 -343 -349 -352 -354 -355 -356 -357 -358 -359 -361 -362 -364 -365 -366 -369 -371 -373 -380 -381 -384 -386 -387 -388 -389 -390 -393 -395 -401 -402 -403 -407 -408 -411 -416 -420 -422 -423 -424 -428 -430 -431 -432 -433 -435 -436 -437 -439 -441 -442 -443 -447 -448 -450 -451 -454 -457 -458 -468 -469 -475 -476 -485 -486 -487 -490 -491 -492 -496 -497 -501 -503 -504 -510 -512 -515 -516 -518 -519 -522 -525 -527 -528 -530 -531 -533 -534 -536 -537 -538 -539 -540 -541 -542 -543 -544 -545 -546 -553 -557 -558 -560 -561 -570 -571 
-572 -576 -577 -579 -580 -581 -582 -583 -587 -590 -593 -594 -595 -598 -599 -600 -603 -605 -606 -609 -612 -613 -616 -617 -620 -623 -624 -625 -628 -629 -630 -632 -635 -637 -643 -646 -647 -648 -649 -651 -652 -655 -659 -660 -662 -663 -666 -670 -673 -676 -678 -679 -681 -683 -684 -685 -686 -687 -693 -695 -696 -700 -702 -703 -705 -707 -710 -711 -714 -717 -719 -721 -722 -724 -725 -726 -727 -728 -731 -734 -735 -737 -740 -741 -745 -747 -748 -751 -752 -755 -757 -758 -759 -760 -762 -763 -764 -765 -766 -769 -771 -777 -779 -780 -781 -782 -787 -788 -789 -793 -797 -799 -800 -801 -803 -805 -806 -808 -809 -810 -811 -812 -813 -814 -815 -816 -817 -818 -819 -820 -824 -825 -829 -830 -831 -833 -834 -839 -840 -844 -846 -847 -848 -849 -851 -856 -857 -860 -863 -866 -869 -872 -873 -874 -878 -879 -880 -881 -883 -885 -888 -889 -890 -891 -892 -894 -896 -897 -900 -901 -902 -903 -904 -905 -906 -907 -908 -910 -911 -914 -915 -916 -918 -919 -920 -922 -923 -924 -925 -930 -934 -935 -936 -938 -939 -940 -941 -942 -943 -944 -945 -948 -951 -952 -953 -955 -957 -958 -959 -961 -962 -964 -965 -966 -967 -972 -976 -979 -981 -982 -988 -989 -990 -992 -994 -997 -1001 -1009 -1014 -1016 -1018 -1020 -1021 -1022 -1023 -1024 -1028 -1029 -1030 -1031 -1032 -1035 -1037 -1038 -1039 -1040 -1041 -1042 -1043 -1044 -1046 -1047 -1049 -1054 -1056 -1058 -1060 -1062 -1063 -1064 -1065 -1067 -1068 -1069 -1070 -1071 -1072 -1075 -1077 -1080 -1083 -1085 -1089 -1090 -1091 -1093 -1094 -1096 -1097 -1098 -1100 -1106 -1107 -1108 -1110 -1113 -1115 -1119 -1120 -1121 -1122 -1123 -1125 -1126 -1127 -1129 -1130 -1131 -1132 -1133 -1134 -1136 -1138 -1139 -1140 -1142 -1143 -1145 -1146 -1148 -1149 -1150 -1151 -1152 -1153 -1154 -1157 -1158 -1159 -1162 -1163 -1172 -1175 -1176 -1184 -1186 -1188 -1190 -1194 -1195 -1196 -1199 -1200 -1203 -1204 -1205 -1206 -1207 -1209 -1210 -1211 -1212 -1214 -1215 -1216 -1218 -1219 -1221 -1222 -1223 -1224 -1225 -1229 -1233 -1235 -1242 -1243 -1245 -1246 -1247 -1249 -1252 -1253 -1254 -1255 -1257 -1258 -1263 -1264 -1267 -1268 
-1270 -1271 -1272 -1274 -1276 -1277 -1278 -1279 -1280 -1281 -1282 -1286 -1287 -1289 -1291 -1292 -1294 -1299 -1300 -1303 -1304 -1305 -1308 -1309 -1311 -1317 -1319 -1320 -1321 -1323 -1325 -1329 -1333 -1334 -1335 -1336 -1337 -1340 -1341 -1342 -1344 -1345 -1346 -1348 -1350 -1351 -1353 -1354 -1355 -1362 -1365 -1366 -1367 -1368 -1369 -1372 -1373 -1374 -1375 -1377 -1379 -1380 -1381 -1385 -1388 -1390 -1391 -1392 -1393 -1394 -1397 -1400 -1402 -1404 -1409 -1410 -1411 -1414 -1415 -1416 -1418 -1419 -1420 -1424 -1425 -1426 -1427 -1432 -1435 -1437 -1439 -1441 -1446 -1447 -1448 -1450 -1451 -1452 -1453 -1455 -1460 -1464 -1465 -1466 -1467 -1468 -1470 -1471 -1472 -1473 -1474 -1475 -1476 -1477 -1478 -1480 -1481 -1483 -1485 -1490 -1491 -1494 -1496 -1497 -1498 -1500 -1501 -1502 -1503 -1504 -1506 -1507 -1515 -1516 -1517 -1518 -1520 -1523 -1524 -1525 -1526 -1530 -1532 -1533 -1534 -1535 -1536 -1537 -1542 -1544 -1545 -1548 -1550 -1551 -1552 -1553 -1554 -1555 -1556 -1558 -1561 -1563 -1564 -1571 -1572 -1573 -1575 -1581 -1582 -1583 -1586 -1589 -1590 -1594 -1597 -1598 -1604 -1605 -1607 -1610 -1611 -1612 -1613 -1614 -1615 -1617 -1619 -1621 -1624 -1628 -1630 -1633 -1634 -1635 -1638 -1639 -1640 -1642 -1645 -1646 -1648 -1650 -1652 -1653 -1656 -1659 -1660 -1663 -1665 -1666 -1667 -1670 -1673 -1674 -1675 -1677 -1680 -1685 -1689 -1691 -1692 -1694 -1695 -1696 -1698 -1699 -1700 -1702 -1704 -1706 -1707 -1708 -1709 -1710 -1711 -1713 -1715 -1716 -1717 -1718 -1719 -1720 -1722 -1723 -1728 -1729 -1731 -1733 -1734 -1738 -1740 -1742 -1743 -1745 -1749 -1750 -1751 -1752 -1753 -1754 -1756 -1757 -1760 -1761 -1762 -1763 -1771 -1774 -1775 -1778 -1779 -1780 -1783 -1784 -1785 -1787 -1789 -1792 -1796 -1797 -1799 -1801 -1803 -1807 -1809 -1811 -1813 -1814 -1818 -1821 -1822 -1823 -1825 -1830 -1833 -1834 -1838 -1839 -1840 -1842 -1843 -1844 -1845 -1846 -1847 -1849 -1850 -1851 -1852 -1854 -1855 -1860 -1864 -1865 -1866 -1867 -1869 -1870 -1871 -1872 -1874 -1875 -1878 -1879 -1880 -1881 -1883 -1884 -1885 -1886 -1887 -1890 -1891 
-1896 -1898 -1899 -1900 -1902 -1903 -1905 -1908 -1909 -1910 -1911 -1913 -1915 -1916 -1917 -1920 -1922 -1924 -1925 -1926 -1927 -1932 -1933 -1934 -1936 -1937 -1940 -1941 -1942 -1943 -1946 -1947 -1950 -1952 -1953 -1956 -1958 -1959 -1961 -1966 -1967 -1968 -1969 -1971 -1972 -1973 -1976 -1977 -1978 -1979 -1980 -1981 -1982 -1984 -1986 -1988 -1990 -1991 -1992 -2001 -2002 -2007 -2008 -2015 -2017 -2019 -2021 -2022 -2024 -2025 -2026 -2029 -2033 -2034 -2039 -2042 -2046 -2047 -2048 -2049 -2051 -2052 -2058 -2059 -2060 -2061 -2062 -2063 -2067 -2068 -2070 -2071 -2073 -2075 -2081 -2085 -2087 -2092 -2094 -2096 -2100 -2101 -2104 -2106 -2107 -2108 -2109 -2111 -2112 -2116 -2119 -2121 -2122 -2123 -2125 -2127 -2129 -2133 -2134 -2136 -2137 -2138 -2139 -2140 -2142 -2147 -2148 -2152 -2153 -2154 -2157 -2158 -2159 -2163 -2164 -2165 -2170 -2173 -2174 -2175 -2177 -2180 -2184 -2185 -2186 -2187 -2191 -2192 -2193 -2196 -2197 -2199 -2200 -2201 -2202 -2203 -2204 -2205 -2206 -2208 -2212 -2214 -2216 -2218 -2219 -2220 -2223 -2226 -2228 -2232 -2233 -2234 -2235 -2236 -2237 -2238 -2239 -2240 -2242 -2243 -2244 -2245 -2248 -2249 -2250 -2251 -2252 -2254 -2256 -2258 -2259 -2261 -2262 -2264 -2265 -2267 -2268 -2269 -2271 -2272 -2274 -2275 -2276 -2277 -2280 -2281 -2285 -2286 -2287 -2292 -2293 -2294 -2295 -2296 -2297 -2298 -2299 -2302 -2304 -2306 -2309 -2311 -2313 -2314 -2315 -2317 -2318 -2321 -2322 -2323 -2327 -2331 -2337 -2338 -2341 -2342 -2343 -2347 -2348 -2355 -2357 -2359 -2361 -2362 -2363 -2366 -2367 -2370 -2371 -2372 -2373 -2374 -2375 -2376 -2377 -2379 -2380 -2388 -2392 -2393 -2395 -2396 -2397 -2398 -2399 -2400 -2401 -2403 -2405 -2410 -2411 -2413 -2414 -2416 -2417 -2419 -2422 -2425 -2428 -2429 -2435 -2436 -2439 -2441 -2443 -2445 -2446 -2448 -2449 -2450 -2451 -2453 -2454 -2456 -2458 -2459 -2462 -2463 -2466 -2469 -2470 -2471 -2474 -2478 -2482 -2487 -2488 -2489 -2490 -2491 -2492 -2494 -2495 -2496 -2497 -2498 -2499 -2500 -2502 -2504 -2505 -2506 -2507 -2508 -2510 -2514 -2516 -2517 -2518 -2519 -2520 -2521 -2522 
-2523 -2526 -2527 -2529 -2531 -2532 -2533 -2537 -2541 -2542 -2544 -2547 -2549 -2552 -2553 -2554 -2557 -2558 -2561 -2563 -2564 -2565 -2566 -2567 -2568 -2571 -2572 -2573 -2574 -2575 -2577 -2578 -2580 -2581 -2585 -2590 -2592 -2593 -2594 -2596 -2597 -2600 -2601 -2602 -2603 -2605 -2606 -2608 -2610 -2611 -2612 -2613 -2619 -2620 -2622 -2624 -2625 -2626 -2631 -2638 -2640 -2641 -2643 -2644 -2645 -2646 -2649 -2650 -2651 -2656 -2660 -2661 -2662 -2665 -2666 -2667 -2669 -2673 -2677 -2679 -2680 -2682 -2684 -2686 -2687 -2688 -2691 -2693 -2694 -2699 -2702 -2703 -2705 -2706 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/attack_6_sub_shadow_graph_index_attack_2.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/attack_6_sub_shadow_graph_index_attack_2.txt deleted file mode 100644 index f05c601f..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/attack_6_sub_shadow_graph_index_attack_2.txt +++ /dev/null @@ -1,1316 +0,0 @@ -8199 -6094 -16403 -6100 -14292 -16413 -14309 -6118 -34 -14296 -6123 -16427 -14311 -16424 -14324 -16415 -14342 -55 -14346 -8253 -16446 -6161 -6172 -68 -6214 -16474 -6218 -14418 -8282 -16469 -6237 -98 -106 -108 -6246 -16493 -6233 -14427 -14430 -14458 -6271 -112 -8307 -6275 -128 -6280 -6283 -16523 -14472 -14475 -146 -8334 -6304 -6287 -16534 -138 -6299 -36 -8354 -8349 -14513 -6331 -14530 -188 -195 -14546 -6364 -8400 -212 -14563 -6357 -8410 -6398 -222 -6401 -6399 -14597 -6405 -14599 -233 -8411 -239 -14616 -6426 -14608 -8427 -14632 -243 -8448 -16633 -14609 -14660 -273 -6466 -14663 -14668 -8465 -16662 -8473 -295 -14678 -14679 -14673 -300 -6492 -14690 -8489 -16683 -14681 -310 -6489 -317 -6514 -14714 -14718 -8514 -8515 -14720 -6529 -336 -14735 -16724 -6543 -328 -8537 -8543 -14763 -6564 -14771 -6598 -14791 -6603 -8569 -378 -6606 -16771 -6614 -8582 -16774 -6620 -396 -16776 -14808 -16785 -6633 -14810 -14833 -6642 -8600 -16796 -8617 -17488 -14863 -16819 -14869 -16821 -14870 -16828 -437 -8638 -449 -14904 -14907 -6720 
-14903 -6724 -14917 -16857 -6728 -8667 -16837 -8677 -14941 -6754 -8686 -6745 -6765 -6770 -16888 -8699 -6777 -6774 -6786 -519 -16905 -16908 -6792 -14987 -16911 -6789 -6790 -6808 -6824 -15017 -16932 -16931 -6830 -550 -6831 -557 -16948 -15020 -570 -6845 -15067 -15069 -15070 -16959 -8769 -6891 -16961 -6900 -16966 -6908 -15092 -8778 -6910 -595 -16980 -15110 -6919 -16968 -16991 -6943 -8797 -8804 -8806 -6942 -15148 -605 -17021 -16504 -8832 -8833 -15176 -17038 -8849 -15204 -15210 -7006 -15222 -17047 -7037 -7039 -15235 -8858 -680 -17064 -7062 -7068 -7066 -8880 -15279 -17071 -7102 -7105 -8899 -1151 -17103 -7126 -727 -7139 -732 -7145 -734 -737 -17117 -7165 -6257 -8934 -7179 -17129 -7195 -17132 -15392 -8944 -7207 -15415 -15421 -17141 -7237 -15428 -14464 -9355 -17152 -7244 -770 -15441 -15434 -771 -17160 -7264 -15463 -15465 -784 -15477 -15479 -8977 -15484 -15487 -17169 -792 -800 -809 -15494 -17199 -17190 -7322 -17201 -816 -16024 -7337 -823 -7344 -825 -820 -15537 -7348 -7357 -15533 -835 -7364 -9030 -15551 -9019 -7383 -15576 -7384 -7363 -17250 -7395 -17252 -870 -17239 -7408 -17256 -15606 -9073 -9075 -15610 -9086 -17279 -15627 -896 -15645 -7459 -894 -9092 -7463 -913 -15660 -15663 -7447 -929 -908 -7497 -9123 -7510 -7513 -936 -8399 -17334 -15729 -9158 -971 -7550 -17362 -9170 -17365 -9180 -7581 -17374 -17376 -994 -7593 -7599 -9197 -8409 -17393 -7622 -17397 -1011 -7626 -15818 -7627 -1027 -17405 -17407 -17414 -15815 -1036 -9217 -17424 -9233 -7664 -8418 -1041 -7674 -17429 -9239 -7676 -15870 -15868 -17435 -15874 -17449 -15879 -8423 -15880 -7693 -9265 -7690 -17460 -15889 -15890 -17462 -234 -15897 -9274 -9272 -17470 -15902 -9291 -1085 -9309 -7747 -7745 -17496 -1109 -7766 -1122 -9305 -7772 -1123 -7796 -1127 -7802 -7803 -16007 -9328 -1140 -7822 -7834 -9337 -1143 -16050 -16042 -9349 -7858 -16063 -17549 -17554 -16077 -1176 -7876 -7890 -17564 -17578 -9377 -7908 -17585 -1203 -16104 -7916 -17591 -7919 -1210 -16113 -9411 -7937 -9417 -16141 -9423 -7955 -16135 -1243 -7962 -9437 -1248 -16165 -7977 
-8454 -7982 -1258 -1263 -1262 -1277 -7991 -8010 -8009 -17664 -1285 -17667 -1298 -9480 -1299 -8027 -17685 -9493 -17691 -16229 -16254 -17710 -17712 -17718 -8081 -17723 -8084 -16281 -9540 -1351 -9543 -16303 -9547 -17741 -17743 -8121 -16315 -16296 -8130 -1372 -1374 -8142 -17774 -17780 -8154 -8153 -9600 -8170 -16370 -9605 -16374 -9616 -14659 -17813 -1433 -9630 -1451 -17842 -17844 -9657 -17851 -17867 -9683 -9684 -1499 -1500 -1501 -17894 -6480 -17903 -17911 -17915 -9724 -17919 -1539 -17928 -1548 -8513 -17935 -1561 -17948 -17950 -17952 -9769 -1583 -17967 -1585 -17970 -8520 -9790 -17986 -1603 -17992 -9807 -18000 -18001 -16164 -9827 -1648 -18042 -18075 -1693 -18083 -18084 -9900 -1711 -9908 -1722 -1723 -9915 -18108 -9918 -18113 -9924 -6520 -18124 -9939 -1750 -18137 -1771 -1772 -6528 -18164 -1780 -1783 -9982 -9986 -18182 -18185 -9996 -10000 -6537 -1809 -18194 -18193 -18202 -10021 -18218 -18222 -18230 -10042 -18239 -10047 -18246 -1869 -1870 -18253 -18256 -1875 -18260 -10063 -16307 -18271 -1890 -1892 -10084 -18278 -1894 -10093 -18296 -18303 -10112 -10115 -1931 -18346 -1967 -10162 -10164 -10161 -18353 -1975 -18372 -18374 -18379 -10189 -2001 -2012 -10214 -2033 -18417 -6604 -10232 -10234 -18430 -10242 -2062 -18447 -2071 -18463 -10283 -10291 -2102 -18491 -10307 -2118 -10313 -10319 -10329 -10342 -18536 -18554 -10368 -10382 -9297 -10387 -2198 -18582 -18589 -2207 -10401 -18595 -10404 -18597 -18599 -18601 -2219 -10421 -2230 -10432 -2241 -18628 -2248 -10449 -2269 -18666 -10477 -10483 -10484 -2294 -18682 -10495 -2303 -2309 -16845 -2317 -10509 -10511 -18703 -16846 -10514 -2326 -16850 -18724 -10533 -10543 -2360 -17497 -10555 -2384 -18770 -10582 -18775 -18777 -2395 -2396 -18781 -2399 -18790 -10598 -14559 -10645 -10646 -18839 -10658 -2477 -18863 -2485 -10681 -2494 -2505 -2511 -10720 -18918 -2542 -10734 -18943 -10751 -2575 -10768 -18982 -2600 -10798 -18996 -10810 -19006 -10815 -19014 -10823 -10830 -2639 -19023 -19033 -2651 -19042 -2673 -10867 -2686 -2688 -19073 -19078 -10888 -19086 -19091 
-2709 -19095 -19099 -19107 -10918 -2727 -2732 -10929 -10936 -19134 -19145 -2775 -2783 -19187 -2810 -17647 -2812 -19196 -2814 -11008 -19200 -2820 -11021 -2833 -11026 -11031 -11033 -2841 -11036 -11052 -2865 -19252 -2869 -19254 -19255 -19256 -11071 -19268 -19276 -19281 -15816 -2905 -11099 -2921 -2925 -19310 -2930 -7736 -2934 -2939 -2946 -9319 -11143 -16960 -19346 -11160 -19356 -19358 -11171 -19363 -16965 -11183 -11185 -19380 -15040 -11199 -17518 -19404 -11212 -19412 -19415 -3045 -19432 -19433 -3050 -11261 -3070 -3076 -8791 -19478 -11291 -11296 -19488 -19489 -3106 -19497 -3119 -11316 -3129 -19513 -11327 -19527 -3152 -11352 -11360 -11366 -3185 -11380 -19578 -19585 -3203 -3206 -19588 -19601 -19605 -11417 -19611 -11422 -19615 -19614 -19619 -3235 -19623 -3242 -11435 -19632 -11441 -19633 -3252 -11445 -7764 -19636 -3264 -19650 -132 -19663 -11471 -3294 -19678 -19684 -19688 -19690 -19701 -11528 -3349 -3356 -11560 -3381 -11578 -11581 -11584 -3395 -3397 -3399 -3402 -11601 -3409 -3427 -11623 -3449 -3450 -3458 -11653 -3462 -3463 -15190 -3469 -3470 -3472 -3480 -3487 -11678 -11698 -3515 -3521 -3525 -11718 -11727 -3535 -3538 -3542 -11736 -11739 -3559 -11762 -3577 -3581 -11774 -3594 -11790 -11793 -3602 -11801 -3630 -11829 -3638 -3639 -11832 -3644 -11837 -11850 -3665 -11870 -11874 -3687 -11889 -3701 -3703 -3711 -11905 -3717 -3728 -3739 -11939 -11941 -3778 -11980 -11981 -3800 -3804 -12016 -12019 -12022 -3837 -12040 -16112 -3847 -3851 -3849 -740 -3859 -12051 -12057 -3877 -3878 -3883 -12076 -3885 -12080 -3890 -3895 -3896 -3911 -3920 -12127 -3942 -12149 -12161 -3970 -12163 -3973 -3983 -3987 -12187 -3997 -3998 -12192 -4005 -4011 -12206 -4015 -4018 -4030 -12229 -4075 -4085 -4090 -4106 -4114 -15370 -4122 -4123 -4124 -4126 -12320 -4160 -4167 -4169 -12364 -4189 -12402 -4215 -4217 -4219 -4221 -12426 -16013 -12444 -4235 -8967 -4291 -4292 -12491 -4307 -4309 -12500 -4321 -12513 -8972 -12525 -12528 -4338 -12533 -4342 -12540 -4350 -12547 -9360 -4370 -4371 -4373 -12582 -12588 -4399 -4401 -4406 -4407 
-12605 -4423 -4431 -7262 -4489 -4491 -4497 -4504 -12697 -12712 -4524 -12732 -12733 -4543 -4552 -12752 -12765 -12782 -12788 -12798 -12802 -4620 -4625 -12820 -4640 -12835 -4648 -12851 -12854 -12871 -12900 -12920 -12922 -12923 -12936 -4754 -12950 -4763 -4764 -12960 -4778 -12970 -4788 -4805 -13001 -17221 -13013 -13021 -4841 -4851 -4852 -13045 -13061 -4869 -4883 -13075 -17230 -848 -4899 -13090 -13096 -13113 -13115 -4924 -13130 -13144 -13150 -13164 -4974 -4991 -13189 -13203 -5011 -13206 -5017 -5034 -13239 -13243 -7362 -5062 -5078 -13334 -13338 -13353 -13355 -5164 -15564 -5176 -13375 -13376 -5186 -1198 -5193 -13386 -5199 -5214 -5219 -13414 -5228 -5234 -13430 -5241 -5249 -13448 -13453 -5277 -5288 -13488 -13495 -13500 -13505 -13506 -13509 -13513 -5328 -13528 -5341 -5347 -5353 -13545 -5355 -13548 -5365 -17586 -13572 -5381 -13578 -13586 -5406 -5409 -5414 -13614 -13624 -13637 -13638 -5480 -5486 -5494 -13686 -5505 -5508 -17314 -5522 -5545 -13736 -13744 -5553 -13757 -13765 -5593 -5599 -13796 -13834 -5644 -5656 -5664 -13870 -13872 -5686 -13883 -7881 -5702 -7466 -5711 -13910 -17589 -13917 -5738 -5739 -5750 -13946 -13949 -5757 -13953 -15669 -5763 -5764 -13955 -15672 -13967 -13977 -13979 -5794 -13988 -13990 -13993 -14001 -14012 -5832 -14024 -5844 -5848 -14042 -5891 -14096 -14099 -14100 -996 -14105 -14120 -14129 -5939 -5947 -14139 -5958 -14168 -14174 -14181 -5989 -14185 -14193 -14198 -14199 -14204 -14212 -14214 -15763 -6039 -6043 -14240 -14246 -6060 -14255 -6074 -6082 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/attack_6_sub_shadow_graph_index_attack_3.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/attack_6_sub_shadow_graph_index_attack_3.txt deleted file mode 100644 index 626f85e0..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/attack_6_sub_shadow_graph_index_attack_3.txt +++ /dev/null @@ -1,1293 +0,0 @@ -16396 -16397 -16407 -27 -8222 -32 -8237 -8239 -16433 -8245 -16443 -16444 -16447 -8255 -8258 -8260 -8266 -8268 
-8283 -91 -8286 -103 -16492 -8304 -16498 -115 -16497 -8306 -140 -8339 -148 -16533 -8347 -8353 -8357 -8362 -16566 -183 -16571 -16575 -8386 -198 -214 -8425 -16619 -8433 -16627 -16639 -256 -16644 -16647 -274 -8468 -16666 -8492 -8494 -16689 -307 -16691 -16693 -16696 -8521 -8524 -16726 -16728 -344 -16737 -16738 -8548 -358 -16746 -16749 -8562 -8563 -380 -16765 -382 -16769 -388 -16783 -16784 -401 -400 -16788 -8603 -8605 -16801 -16809 -427 -16815 -8628 -8637 -8642 -8651 -16867 -8676 -16870 -487 -499 -16887 -504 -8704 -16902 -8719 -8721 -8724 -534 -537 -8731 -16929 -8739 -8742 -16939 -565 -571 -16956 -8764 -8768 -582 -16975 -8798 -8803 -619 -17011 -632 -8831 -641 -17028 -17031 -17044 -664 -17048 -669 -17061 -679 -17063 -17067 -8876 -8889 -700 -702 -17096 -17098 -17102 -719 -17112 -17113 -738 -739 -17127 -747 -8943 -753 -8948 -8951 -8952 -8953 -762 -8955 -8960 -8963 -779 -783 -8986 -795 -8993 -9003 -9008 -9010 -17204 -9013 -822 -9028 -842 -17227 -17235 -17241 -17246 -9055 -866 -9063 -17260 -9072 -17268 -17270 -17290 -9101 -917 -17302 -924 -9116 -933 -938 -17330 -9141 -17348 -975 -9182 -1005 -1008 -17409 -17413 -9222 -1030 -1038 -9236 -9237 -1050 -17441 -1067 -9260 -17456 -17457 -1072 -17461 -1113 -9318 -9329 -1138 -9331 -9335 -1155 -9348 -17540 -17548 -17555 -1178 -17581 -9396 -1208 -17596 -17598 -1220 -1226 -17611 -17619 -9432 -1245 -17633 -1252 -17638 -9470 -17673 -9492 -17682 -9501 -9507 -9508 -17711 -17713 -1330 -17721 -1342 -1348 -17740 -17746 -1366 -9559 -1368 -17761 -17785 -1403 -1414 -17797 -17801 -1419 -1423 -17808 -9618 -1438 -1447 -1456 -17841 -1459 -9653 -9655 -1468 -1472 -17876 -9692 -1508 -9701 -17893 -9703 -17898 -1517 -9713 -9716 -1530 -17920 -1538 -17940 -17947 -9755 -17949 -9757 -17957 -9781 -1592 -1593 -9789 -9792 -17985 -9795 -17989 -17995 -17998 -18002 -9824 -9836 -18028 -1653 -9860 -18054 -1673 -9868 -18061 -9876 -18076 -18081 -9893 -18089 -9904 -1733 -9935 -9936 -1745 -1755 -1759 -9955 -1766 -9961 -18156 -18173 -9984 -18176 -1796 -1801 -1810 -1812 
-1815 -10009 -18215 -1849 -1856 -1857 -18250 -1867 -10064 -10065 -10066 -1881 -10075 -10076 -18269 -10079 -1899 -18291 -10100 -1912 -10107 -1919 -18304 -1927 -18312 -10129 -10135 -18328 -18327 -10143 -1952 -10145 -1955 -18352 -1968 -18355 -10170 -1984 -18383 -10194 -2010 -10203 -10205 -2018 -2026 -2030 -2037 -18426 -18431 -10241 -18434 -2051 -2054 -18438 -2057 -10251 -10254 -2068 -10264 -2072 -2073 -10266 -10271 -18481 -18489 -18496 -2113 -18498 -2125 -2132 -10327 -18525 -18533 -10347 -2160 -10359 -2172 -2180 -18565 -10380 -18575 -18578 -2196 -2199 -2202 -10409 -10416 -10418 -18615 -10425 -10426 -18620 -2240 -2247 -10448 -18641 -2258 -18643 -2271 -18658 -18669 -2286 -2288 -10480 -2293 -18677 -2311 -2313 -18705 -2324 -10526 -10528 -18731 -2349 -2355 -10547 -2362 -2363 -18749 -2378 -2379 -10585 -18789 -2416 -10612 -2433 -10626 -18822 -2441 -2443 -10636 -18830 -2455 -2471 -18865 -2487 -2492 -2497 -2504 -10696 -10702 -2512 -18901 -10710 -10711 -18904 -18909 -10722 -10725 -10731 -10732 -18925 -10733 -2545 -10738 -18930 -18932 -2548 -10745 -18972 -18976 -2604 -2609 -10802 -2614 -2619 -10818 -2628 -2631 -2637 -19030 -10845 -2654 -2665 -19051 -10874 -10882 -2691 -2694 -10902 -2711 -2714 -19105 -2724 -19127 -19136 -2754 -19149 -2765 -10961 -2780 -10974 -19174 -2796 -19188 -2808 -11002 -2818 -2816 -19225 -11045 -2859 -19250 -11065 -19261 -11072 -2888 -2897 -19284 -2901 -2911 -11114 -2929 -19316 -2937 -2940 -2944 -11140 -19333 -11147 -19345 -11161 -19357 -19365 -2990 -19376 -2993 -2996 -2998 -19391 -11206 -11210 -19403 -19402 -11216 -19410 -3028 -11224 -19417 -19416 -3035 -3038 -3046 -3057 -11250 -19449 -19450 -11259 -3077 -11270 -11271 -11269 -3080 -11280 -3089 -3092 -19479 -3095 -3115 -19501 -19506 -11315 -11319 -11322 -11323 -19521 -3145 -19532 -19537 -11345 -11348 -3158 -3163 -19547 -3166 -3169 -19562 -19564 -3182 -11377 -19570 -11383 -11384 -3199 -19584 -19586 -11393 -11397 -3211 -3213 -11410 -3220 -11420 -11428 -19622 -3244 -3254 -11446 -19644 -19645 -3262 -11458 -3269 
-19653 -19670 -11493 -3313 -19697 -19699 -19700 -11512 -3323 -3324 -11522 -3367 -11562 -11573 -3382 -3383 -3406 -3410 -11607 -11608 -11612 -11620 -3434 -11643 -3459 -11656 -3468 -3486 -11685 -11694 -3507 -3510 -11704 -11705 -11717 -11730 -11734 -3547 -11748 -3561 -3571 -3580 -11776 -11781 -11786 -3595 -11789 -11821 -3629 -3633 -3641 -3645 -3650 -3657 -3685 -11887 -11896 -11901 -3720 -3722 -11914 -3730 -3732 -11924 -11946 -3758 -3759 -3776 -3785 -11987 -3801 -11999 -3818 -3824 -12023 -12039 -12041 -12044 -3862 -12056 -12060 -3875 -3876 -12091 -12105 -12119 -12124 -3943 -12136 -12146 -12147 -12168 -3994 -12189 -4004 -4008 -12205 -4031 -4044 -12266 -12268 -12271 -12277 -4104 -12300 -4111 -4121 -12322 -12351 -12352 -12358 -12360 -4186 -12379 -4190 -12393 -12394 -12395 -12400 -4213 -12410 -12413 -4228 -12424 -4266 -4287 -12488 -4308 -12503 -12509 -12512 -12517 -4345 -12544 -12549 -4361 -4364 -12557 -12556 -4377 -4378 -4381 -4385 -4386 -12586 -12587 -12602 -12606 -12620 -12631 -4441 -12644 -12653 -4466 -12659 -4474 -12674 -4486 -12679 -4496 -4499 -12702 -4511 -12716 -12727 -12729 -4545 -4549 -4561 -4567 -4583 -4592 -12785 -4595 -4613 -4619 -12815 -4627 -12832 -12837 -12838 -12839 -4658 -4665 -12875 -12881 -12889 -4701 -4706 -12911 -4719 -4724 -4725 -4726 -12928 -4736 -4746 -4760 -12954 -12955 -4772 -12967 -12973 -4782 -12975 -12996 -13005 -4816 -13025 -4843 -13038 -4858 -4859 -4860 -4866 -13072 -13079 -13081 -4890 -13087 -4897 -4898 -13103 -4931 -13127 -13131 -13132 -13145 -13148 -4957 -13151 -13152 -13155 -13174 -4983 -4998 -5000 -5005 -5012 -13207 -5015 -13214 -5026 -13231 -5040 -5043 -13244 -13247 -5068 -5072 -5081 -13282 -5093 -5094 -5099 -13292 -13306 -13313 -5128 -5141 -13333 -13361 -5173 -5184 -13381 -5189 -13389 -5206 -5209 -13421 -13424 -13426 -13427 -13443 -13456 -13459 -13470 -13473 -13477 -13480 -13486 -5296 -5310 -5315 -13524 -5351 -5357 -13546 -5361 -13558 -5370 -13566 -13574 -5384 -13599 -13600 -13601 -5413 -13610 -13615 -13625 -13631 -5442 -5449 -5454 
-13684 -5512 -5530 -13726 -5544 -13745 -13751 -5583 -13787 -13805 -5621 -13827 -5636 -13865 -5675 -5676 -13881 -5695 -13899 -13905 -13914 -13915 -13927 -5737 -13938 -13941 -13945 -5755 -5762 -5769 -13962 -5778 -13971 -13970 -5781 -5790 -5798 -5812 -14005 -5819 -5822 -5827 -14020 -14032 -5849 -5860 -5870 -14072 -5880 -14102 -14104 -14121 -5930 -5932 -14146 -14153 -14158 -14167 -14178 -5991 -5995 -6007 -6009 -6008 -6013 -14206 -6038 -14237 -14242 -6059 -6062 -14266 -14274 -6087 -14282 -6091 -6116 -14314 -14318 -6137 -14332 -14340 -14343 -6177 -14372 -14380 -14382 -6191 -6194 -14398 -6224 -14434 -6242 -14441 -14446 -6255 -14470 -6284 -6303 -14500 -14501 -6311 -14504 -14505 -6327 -14519 -6330 -14527 -6341 -14541 -14565 -6382 -14585 -14595 -6410 -6429 -14628 -6455 -14656 -14658 -14665 -14674 -14677 -14702 -14703 -14706 -6515 -6516 -6544 -14752 -6562 -14755 -14764 -6573 -6577 -14774 -14776 -14778 -14796 -6609 -6623 -14817 -14819 -6628 -14828 -6646 -6647 -6651 -14845 -14848 -6666 -6667 -14865 -6679 -14876 -6687 -14884 -6722 -14918 -6739 -14936 -6762 -14955 -6771 -14974 -14984 -15004 -15011 -15014 -15018 -15036 -6855 -6865 -6874 -6882 -6885 -15090 -6909 -15102 -6918 -15126 -6939 -15134 -15138 -15142 -15144 -6962 -15163 -6975 -15172 -6981 -15183 -15185 -7018 -15231 -7051 -7065 -15273 -15287 -15292 -7106 -15300 -15308 -15312 -15319 -7137 -7155 -15350 -15354 -7164 -7167 -15388 -7197 -7204 -15430 -7238 -7241 -7248 -7263 -15473 -7287 -7291 -7298 -7299 -7310 -15504 -7318 -15511 -15515 -7324 -15523 -15530 -15538 -15560 -7381 -7387 -15593 -15596 -15597 -15599 -15601 -15605 -15609 -15614 -15616 -7427 -7460 -7461 -7477 -7506 -7514 -15720 -7539 -7566 -15759 -7569 -7570 -7576 -15789 -7598 -7600 -7603 -7606 -15814 -15827 -7635 -15834 -7646 -15838 -15842 -7652 -7653 -7656 -15859 -7672 -15867 -7683 -7687 -7696 -15908 -7718 -7741 -15942 -7754 -15961 -7788 -15990 -7805 -7826 -16045 -7860 -7867 -7868 -7869 -16079 -7889 -7899 -7902 -16095 -16103 -7912 -7914 -16110 -16116 -7927 -16143 -16145 
-16148 -7965 -16161 -7978 -7983 -16180 -16182 -16203 -16205 -16207 -16213 -8021 -16216 -16218 -8028 -16222 -8051 -8058 -16251 -16256 -16262 -8070 -8072 -8078 -16275 -16285 -16289 -8114 -16309 -8124 -8151 -8152 -16354 -8169 -16363 -16371 -8180 -16376 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/protential_1200_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/protential_1200_shadow_graph_index.txt deleted file mode 100644 index 9ef380b2..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/protential_1200_shadow_graph_index.txt +++ /dev/null @@ -1,2047 +0,0 @@ -8199 -16396 -16397 -16407 -16415 -32 -36 -16424 -16427 -8237 -8239 -16433 -8245 -55 -16443 -16444 -8253 -16447 -8258 -8260 -8266 -8268 -16469 -16474 -8283 -91 -8282 -8286 -98 -106 -108 -16493 -8304 -112 -16498 -115 -16497 -8306 -16504 -128 -132 -16523 -140 -8334 -146 -8339 -148 -16533 -16534 -8347 -8349 -8353 -8357 -8362 -16566 -183 -16571 -188 -16575 -8386 -195 -198 -8399 -8400 -212 -214 -8409 -8411 -8418 -8423 -233 -234 -8427 -8425 -16619 -239 -243 -16633 -16639 -256 -8448 -8454 -16647 -274 -8468 -16666 -295 -8489 -16683 -8492 -8494 -16689 -307 -16693 -310 -16696 -317 -8513 -8514 -8515 -8520 -328 -8521 -8524 -336 -16726 -16728 -16737 -358 -16746 -16749 -8562 -8563 -8569 -378 -380 -16765 -382 -16769 -388 -8582 -16774 -396 -16783 -16784 -16785 -401 -400 -16788 -8600 -8603 -16796 -8605 -16801 -8617 -16809 -427 -8628 -16821 -437 -16828 -8637 -8638 -8642 -16837 -8651 -16845 -16846 -16850 -16857 -8667 -16867 -8676 -16870 -487 -8686 -499 -16887 -16888 -8699 -8704 -16902 -16905 -16908 -8719 -16911 -8721 -537 -8731 -16929 -8739 -16932 -16931 -8742 -16939 -557 -16948 -565 -571 -16956 -8764 -16959 -8768 -8769 -16960 -16961 -16965 -16966 -16968 -8778 -16975 -595 -16980 -8791 -605 -8798 -16991 -8797 -8803 -8804 -8806 -619 -17011 -8831 -8832 -641 -8833 -17031 -17038 -8849 -17044 -17047 -664 -17048 -8858 -669 -17061 -679 -17063 -680 -17064 -17067 
-8876 -17071 -8880 -8889 -700 -702 -8899 -17096 -17102 -719 -17103 -727 -17112 -17113 -732 -17117 -734 -738 -739 -740 -8934 -17127 -17129 -747 -17132 -8943 -8944 -753 -8951 -8953 -762 -8955 -17152 -8960 -770 -771 -8963 -8967 -17160 -779 -8972 -783 -784 -17169 -8977 -792 -800 -17190 -9003 -17201 -9010 -17204 -9013 -822 -823 -825 -9019 -9028 -17221 -842 -17227 -17230 -848 -17235 -17239 -17241 -17246 -9055 -17250 -17252 -9063 -17256 -17260 -9072 -17268 -17270 -9086 -17279 -17290 -908 -9101 -917 -17302 -924 -9116 -929 -17314 -9123 -938 -17330 -9141 -17348 -9158 -971 -17362 -9170 -17365 -9180 -9182 -17374 -996 -1005 -1008 -17397 -17407 -9217 -17409 -1027 -9222 -1036 -1038 -17424 -9233 -1041 -9236 -9237 -17429 -9239 -1050 -17441 -9260 -17456 -17457 -9265 -17460 -9272 -9274 -17470 -9291 -17488 -9297 -1109 -17497 -9309 -1122 -1123 -9318 -1127 -17518 -9335 -9337 -1151 -1155 -9348 -9349 -9355 -17548 -9360 -17554 -17555 -1176 -1178 -9377 -17578 -17581 -1198 -17585 -17586 -1203 -17589 -17591 -1208 -1210 -17598 -9411 -1220 -9417 -1226 -17611 -9423 -17619 -9432 -1243 -1245 -9437 -17633 -17638 -1258 -1262 -1263 -17647 -1277 -17664 -17667 -1285 -9480 -17673 -1299 -9493 -17685 -17691 -9501 -9508 -17711 -17713 -1330 -17721 -1342 -1348 -9540 -1351 -9543 -9547 -17741 -17743 -17746 -1366 -9559 -1368 -1372 -1374 -17761 -17774 -17785 -1403 -9600 -9605 -1414 -17797 -17801 -1419 -1423 -9616 -9618 -17813 -1433 -9630 -1438 -1447 -1451 -1456 -1459 -17844 -9653 -1472 -17867 -17876 -1499 -1500 -1501 -1508 -9701 -17893 -9703 -17898 -17903 -9713 -9716 -1530 -9724 -17919 -17920 -1538 -1548 -17935 -1561 -17947 -9755 -17949 -17948 -17957 -9769 -1583 -17967 -1592 -1593 -9789 -9790 -9792 -17985 -17986 -1603 -17989 -17992 -17995 -9807 -18000 -18001 -18002 -9827 -9836 -18028 -1653 -18042 -18054 -9868 -18061 -9876 -18075 -18076 -1693 -18081 -18083 -9893 -9900 -1711 -9904 -9908 -1723 -9915 -18108 -18113 -9924 -1733 -18124 -9935 -9936 -1745 -9939 -18137 -1755 -1759 -9955 -1766 -9961 -1771 -1772 -18164 
-1780 -18173 -9982 -9984 -18176 -9986 -1796 -18182 -18185 -1801 -9996 -10000 -1809 -1810 -18194 -1812 -10009 -18202 -10021 -18218 -18222 -1849 -10042 -18239 -1856 -1857 -18246 -1867 -1869 -1870 -18253 -10064 -10065 -18256 -1875 -18260 -10066 -1881 -10075 -10076 -18269 -10079 -18271 -1890 -1892 -18278 -1894 -1899 -10093 -18291 -1912 -18296 -10107 -18303 -1919 -18304 -10112 -10115 -1927 -1931 -10129 -18328 -10145 -1955 -18346 -1967 -18352 -1968 -10162 -18355 -10164 -10161 -1975 -18372 -18374 -18379 -18383 -2001 -10194 -10203 -2012 -10205 -2018 -2026 -2030 -2033 -2037 -10232 -10234 -18426 -18430 -18431 -18434 -2051 -2054 -18438 -10251 -10254 -2062 -2068 -10264 -2072 -10271 -18463 -10283 -18481 -10291 -2102 -18489 -18491 -18496 -2113 -18498 -2118 -10313 -2132 -10327 -10329 -10342 -10347 -2160 -18554 -2172 -10368 -2180 -18578 -10387 -2196 -2198 -2199 -18582 -2202 -18589 -2207 -10401 -18595 -10404 -18597 -18599 -18601 -2219 -10418 -10421 -2230 -18615 -10432 -2241 -2240 -18628 -2247 -2248 -10448 -18641 -2258 -18643 -10449 -2269 -18658 -18666 -10477 -18669 -10483 -2293 -2294 -18677 -18682 -10495 -2303 -2309 -2311 -2313 -2317 -10511 -18705 -10514 -2324 -2326 -10526 -18724 -10533 -2349 -2355 -2360 -2362 -2363 -10555 -2378 -2379 -2384 -18770 -10582 -18775 -10585 -18777 -2395 -2396 -18781 -2399 -18789 -18790 -10598 -2416 -2433 -10626 -18822 -2441 -2443 -10636 -18830 -10645 -2455 -10658 -2477 -18863 -18865 -2485 -10681 -2494 -2497 -2504 -2505 -10696 -10702 -2511 -2512 -18901 -10710 -10711 -18904 -18909 -10720 -10722 -18918 -10732 -18925 -2542 -10733 -10738 -18930 -18932 -2548 -10745 -18943 -2575 -10768 -18972 -18976 -18982 -2604 -10798 -2609 -10802 -18996 -2614 -2619 -10815 -10818 -19014 -2631 -10823 -2637 -10830 -2639 -19023 -19030 -2651 -10845 -2654 -19042 -2673 -10867 -10874 -2686 -19073 -10882 -2694 -19078 -10888 -19086 -19091 -2709 -10902 -19095 -2711 -19099 -19105 -19107 -2724 -10918 -2727 -2732 -19127 -10936 -19136 -2754 -19149 -2765 -2775 -2780 -10974 -19174 -2796 
-19187 -19188 -2810 -2812 -11008 -19200 -2818 -2816 -2820 -11021 -2833 -11026 -11031 -19225 -11045 -2859 -11052 -2865 -19250 -19252 -19255 -11065 -11071 -11072 -19268 -19276 -19281 -2901 -2905 -2911 -2921 -11114 -19310 -2929 -2930 -19316 -2934 -2939 -2940 -2944 -2946 -11140 -11143 -19345 -11160 -11161 -19356 -19357 -19358 -11171 -19363 -19365 -2990 -11183 -19376 -2993 -11185 -19380 -2998 -11206 -11210 -19403 -19404 -11212 -11216 -19410 -3028 -19412 -19415 -11224 -19417 -19416 -3035 -3038 -3045 -3046 -19432 -19433 -3050 -3057 -11250 -19449 -19450 -11261 -3070 -3077 -11270 -11271 -11269 -3089 -3092 -19479 -11291 -11296 -19488 -19497 -19501 -19506 -11315 -11316 -11319 -3129 -11322 -11327 -19521 -19527 -3145 -19532 -3152 -11348 -3158 -11352 -3163 -19547 -11360 -3169 -11366 -19562 -19564 -3182 -11377 -19570 -11380 -11383 -11384 -3199 -19584 -19585 -19586 -11393 -3203 -11397 -3206 -3213 -19601 -19605 -11417 -19611 -11420 -11422 -19615 -19614 -19619 -3235 -19622 -19623 -3242 -11435 -3244 -19632 -11441 -3252 -11445 -3254 -19644 -19645 -3262 -3264 -3269 -19663 -11471 -19670 -3294 -19684 -19688 -19690 -3313 -19697 -19699 -19700 -11512 -3323 -11522 -11528 -3349 -3356 -3367 -11560 -11562 -11573 -3382 -3383 -11578 -11581 -11584 -3395 -3397 -3402 -3406 -11601 -3409 -11607 -11608 -11612 -11620 -3434 -3449 -3450 -11643 -3458 -3459 -11653 -3462 -3463 -11656 -3468 -3472 -3480 -3486 -3487 -11685 -11694 -11698 -3507 -3510 -11704 -11705 -3515 -3521 -3525 -11718 -11717 -11727 -3538 -3542 -11734 -11736 -11739 -3547 -11748 -3561 -3571 -3581 -11774 -11776 -11781 -11786 -3594 -11789 -11790 -11793 -3602 -11801 -11821 -3630 -3629 -3633 -11829 -3638 -11832 -3641 -3644 -3645 -11837 -3650 -11850 -11870 -3687 -11887 -11889 -3701 -3703 -11896 -11901 -3711 -11905 -3717 -3720 -3722 -11914 -3728 -3730 -3732 -3739 -11939 -11941 -11946 -3758 -3759 -3776 -3778 -11981 -3801 -3804 -11999 -3818 -3824 -12016 -12019 -12022 -12023 -12039 -12040 -12041 -3851 -3859 -3862 -12056 -12057 -12060 -3875 -3876 -3877 
-3878 -3883 -3885 -12080 -3895 -3896 -12091 -12105 -3920 -12119 -12124 -3943 -12136 -12146 -12147 -12149 -3970 -12163 -3983 -3987 -3994 -12187 -3997 -12189 -3998 -12192 -4004 -4005 -4011 -12205 -12206 -4018 -4030 -4031 -4044 -12266 -4075 -12268 -12271 -4085 -12277 -4090 -4104 -4106 -12300 -4111 -4114 -4122 -4123 -4124 -4126 -12320 -12322 -12351 -4160 -12352 -12358 -4167 -12360 -4169 -12364 -4189 -4190 -12394 -12395 -12400 -12402 -4213 -4217 -12410 -4219 -12413 -4228 -12424 -12426 -4235 -12444 -4287 -4291 -4292 -12488 -12491 -4307 -4308 -4309 -12500 -12503 -12509 -12512 -4321 -12517 -12525 -12528 -4338 -12533 -4342 -4345 -12544 -12549 -4361 -4364 -12557 -12556 -4370 -4371 -4373 -4378 -4385 -4386 -12582 -12586 -12587 -12588 -4399 -4401 -4406 -4407 -12602 -12605 -12606 -4423 -12631 -12653 -4466 -4474 -12679 -4499 -4504 -12697 -12702 -4511 -12712 -12716 -12729 -12732 -12733 -4543 -4545 -4552 -12752 -4567 -4583 -12782 -4592 -12785 -4595 -12788 -12798 -12802 -4613 -4619 -4620 -12815 -4625 -4627 -12820 -4640 -12832 -12835 -12837 -12838 -12839 -4658 -12851 -12854 -4665 -12875 -12881 -4701 -4706 -12900 -12911 -4719 -4724 -4725 -4726 -12922 -12928 -4736 -12936 -4754 -12950 -4760 -12954 -12955 -4763 -4764 -12960 -4772 -12967 -4778 -12970 -12973 -4782 -12975 -4788 -12996 -4805 -13005 -4816 -13021 -13025 -4843 -13038 -4852 -4858 -4859 -4860 -4866 -13061 -4869 -13072 -4883 -13081 -4890 -13087 -4897 -4898 -4899 -13090 -13096 -13103 -13113 -13115 -4931 -13127 -13130 -13132 -13144 -4957 -13150 -13151 -13152 -13155 -4974 -13174 -4983 -4991 -13189 -4998 -5000 -5005 -5012 -13207 -5015 -13214 -5026 -13231 -5040 -5043 -13239 -13243 -13244 -13247 -5062 -5068 -5072 -13282 -5093 -5094 -13292 -13306 -13313 -5128 -5141 -13334 -13338 -13353 -5164 -13361 -5173 -13375 -13376 -5184 -13381 -5189 -5193 -13386 -13389 -5199 -5206 -5209 -13414 -5228 -13421 -13426 -13427 -5234 -13430 -5249 -13443 -13448 -13453 -13456 -13459 -5277 -13470 -13473 -13480 -13486 -5296 -13488 -13495 -13500 -13505 -13506 
-5315 -13509 -13513 -5328 -5353 -13545 -5355 -13548 -5357 -5361 -5365 -13558 -5370 -13572 -5381 -13574 -5384 -13578 -13586 -5406 -13599 -13601 -5409 -5413 -5414 -13610 -13614 -13615 -13624 -13625 -5442 -13637 -13638 -5449 -5454 -5480 -13684 -5494 -13686 -5512 -5522 -5544 -5545 -13736 -13744 -5553 -13745 -5593 -13787 -5599 -13796 -13805 -5621 -13827 -5636 -13834 -5644 -5664 -13865 -5675 -13870 -13872 -13881 -13883 -5702 -13899 -5711 -13905 -13910 -13914 -13915 -13917 -5737 -5739 -13938 -13941 -5750 -13945 -13946 -5755 -13949 -5757 -5762 -5763 -5764 -13962 -13967 -5778 -13971 -13970 -5781 -13979 -5790 -13988 -13990 -5798 -14001 -5812 -14005 -14012 -5822 -5827 -14020 -5832 -14024 -14032 -5844 -5848 -5849 -14042 -5860 -5870 -14072 -5891 -14096 -14099 -14100 -14102 -14104 -14120 -14121 -5930 -5932 -5939 -5947 -14139 -14146 -14153 -14158 -14167 -14168 -14174 -14178 -14181 -5989 -5991 -14185 -5995 -14193 -14204 -6013 -14206 -14212 -14214 -6038 -6039 -6043 -14240 -14242 -6059 -6060 -14255 -6074 -14266 -14274 -6087 -6091 -6094 -6100 -14292 -14296 -14309 -14311 -14314 -6123 -14318 -14324 -14332 -14340 -14342 -14346 -6161 -6172 -6177 -14372 -14380 -14382 -6191 -6194 -6224 -14418 -6233 -14427 -6237 -14430 -14434 -6242 -6246 -14441 -14446 -6255 -6257 -14458 -6271 -14464 -14470 -6280 -6283 -6284 -14475 -6287 -6299 -6303 -6304 -14500 -14501 -6311 -14504 -14505 -14513 -6327 -14519 -6330 -6331 -14527 -6341 -14546 -6357 -6364 -14559 -14563 -14565 -6382 -14585 -6399 -6401 -14595 -14597 -14599 -6410 -14609 -14616 -6426 -6429 -14628 -6455 -14656 -14658 -14659 -14660 -14665 -14668 -6480 -14673 -14678 -14681 -6489 -6492 -14702 -14703 -14706 -6515 -6516 -6514 -6520 -14714 -6528 -6537 -14735 -6544 -14752 -6562 -14755 -6564 -14764 -6573 -6577 -14771 -14774 -14776 -14778 -6598 -14791 -6603 -6604 -6606 -6609 -6614 -14808 -14810 -6620 -14817 -6628 -6633 -6642 -6646 -6647 -6651 -14845 -6666 -6667 -14865 -14869 -14870 -6679 -14876 -6687 -14884 -14903 -14904 -14907 -6720 -6722 -6724 -14918 -6728 
-14936 -14941 -6754 -6762 -14955 -6765 -6770 -6771 -6774 -6777 -14974 -6786 -6789 -6790 -14984 -6792 -6808 -15004 -15011 -15017 -15018 -15020 -6830 -6845 -15040 -6874 -15067 -15069 -15070 -6885 -15090 -6900 -15092 -6909 -15102 -6910 -15110 -6919 -6918 -15126 -6939 -15134 -6943 -6942 -15138 -15142 -15144 -15148 -6962 -15163 -6975 -6981 -15176 -15183 -15190 -7006 -15204 -15210 -7018 -15222 -7037 -15231 -7039 -15235 -7051 -7065 -7066 -15273 -15292 -7102 -7106 -15300 -15308 -15312 -15319 -7137 -7139 -7145 -7155 -15350 -15354 -7164 -7167 -7179 -7195 -15388 -15392 -7204 -15415 -15421 -15428 -15430 -7238 -15434 -7244 -7248 -15441 -7262 -15477 -7287 -7291 -15484 -15487 -7298 -7299 -15494 -7310 -15511 -7322 -15515 -7324 -15523 -7337 -15530 -7344 -15538 -15551 -7362 -7363 -7364 -15560 -15564 -7381 -15576 -7387 -7395 -15597 -15599 -7408 -15601 -15605 -15606 -15609 -15610 -15614 -15616 -7427 -7447 -15645 -7461 -7463 -7466 -15660 -7477 -15669 -15672 -7510 -7513 -7514 -15720 -7539 -7550 -7566 -15759 -7569 -15763 -7581 -7593 -15789 -7599 -7600 -15814 -7622 -15816 -15815 -7626 -15818 -7627 -15827 -7635 -15834 -7646 -15838 -15842 -7653 -15859 -7672 -7674 -15867 -7676 -15868 -15870 -7683 -15879 -7687 -7690 -7693 -7696 -15889 -15890 -15897 -15902 -15908 -7718 -7736 -7747 -7754 -7764 -7766 -15961 -7788 -15990 -7802 -7803 -7805 -16013 -7822 -7826 -16024 -7834 -16042 -16050 -7858 -7860 -7867 -7869 -16063 -7876 -7881 -16077 -16079 -7889 -7890 -7899 -7902 -7908 -7912 -7914 -7916 -16110 -7919 -16112 -16116 -7927 -16135 -16141 -16143 -16145 -7955 -16148 -7965 -16161 -16164 -7977 -7978 -7982 -7983 -16180 -16182 -7991 -8010 -16203 -16205 -16207 -16213 -16216 -16218 -8028 -16222 -8051 -8058 -16251 -16254 -16256 -16262 -8070 -8078 -8081 -16275 -8084 -16281 -16285 -16289 -16296 -8114 -16307 -16309 -16315 -8124 -8151 -8152 -8153 -8154 -8169 -8170 -16370 -16371 -8180 -16374 -16376 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/protential_1300_shadow_graph_index.txt 
b/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/protential_1300_shadow_graph_index.txt deleted file mode 100644 index 6d02458c..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/protential_1300_shadow_graph_index.txt +++ /dev/null @@ -1,2609 +0,0 @@ -8199 -16396 -16397 -16403 -16407 -27 -16413 -8222 -16415 -32 -34 -36 -16424 -16427 -8237 -8239 -16433 -8245 -55 -16443 -16444 -8253 -16446 -16447 -8255 -8258 -8260 -68 -8266 -8268 -16469 -16474 -8283 -91 -8282 -8286 -98 -103 -106 -108 -16493 -16492 -8304 -112 -16498 -115 -16497 -8306 -8307 -16504 -128 -132 -138 -16523 -140 -8334 -146 -8339 -148 -16533 -16534 -8347 -8349 -8353 -8354 -8357 -8362 -16566 -183 -16571 -188 -16575 -8386 -195 -198 -8399 -8400 -212 -214 -8409 -8410 -8411 -222 -8418 -8423 -233 -8425 -234 -16619 -8427 -239 -8433 -243 -16627 -16633 -16639 -256 -8448 -16644 -8454 -16647 -273 -274 -8465 -8468 -16662 -8473 -16666 -295 -8489 -16683 -8492 -300 -8494 -16689 -307 -16691 -16693 -310 -16696 -317 -8513 -8514 -8515 -8520 -328 -8521 -8524 -336 -16724 -16726 -16728 -344 -8537 -8543 -16737 -16738 -8548 -358 -16746 -16749 -8562 -8563 -8569 -378 -380 -16765 -382 -16769 -16771 -388 -8582 -16774 -16776 -396 -16783 -16784 -16785 -401 -400 -16788 -8600 -8603 -16796 -8605 -16801 -8617 -16809 -427 -16815 -16819 -8628 -16821 -437 -16828 -8637 -8638 -449 -8642 -16837 -8651 -16845 -16846 -16850 -16857 -8667 -16867 -8676 -8677 -16870 -487 -8686 -499 -16887 -16888 -504 -8699 -8704 -16902 -519 -16905 -16908 -8719 -16911 -8721 -8724 -534 -537 -8731 -16929 -8739 -16932 -16931 -8742 -550 -16939 -557 -16948 -565 -570 -571 -16956 -8764 -16959 -8768 -8769 -16960 -16961 -16965 -16966 -582 -16968 -8778 -16975 -595 -16980 -8791 -605 -8798 -16991 -8797 -8803 -8804 -8806 -619 -17011 -632 -17021 -8831 -8832 -641 -8833 -17028 -17031 -17038 -8849 -17044 -17047 -664 -17048 -8858 -669 -17061 -679 -17063 -680 -17064 -17067 -8876 -17071 -8880 -8889 -700 -702 -8899 -17096 -17098 -17102 -719 -17103 -727 -17112 
-17113 -732 -17117 -734 -737 -738 -739 -740 -8934 -17127 -17129 -747 -17132 -8943 -8944 -753 -8948 -17141 -8951 -8952 -8953 -762 -8955 -17152 -8960 -770 -771 -8963 -8967 -17160 -779 -8972 -783 -784 -17169 -8977 -792 -8986 -795 -800 -8993 -17190 -809 -9003 -17199 -9008 -17201 -9010 -816 -17204 -9013 -822 -823 -820 -825 -9019 -835 -9028 -17221 -9030 -842 -17227 -17230 -848 -17235 -17239 -17241 -17246 -9055 -17250 -866 -17252 -870 -9063 -17256 -17260 -9072 -9073 -9075 -17268 -17270 -9086 -17279 -894 -896 -9092 -17290 -908 -9101 -913 -917 -17302 -924 -9116 -929 -17314 -9123 -933 -936 -938 -17330 -9141 -17334 -17348 -9158 -971 -975 -17362 -9170 -17365 -9180 -9182 -17374 -17376 -994 -996 -1005 -9197 -1008 -17393 -1011 -17397 -17405 -17407 -9217 -17409 -1027 -17413 -9222 -17414 -1030 -1036 -1038 -17424 -9233 -1041 -9236 -9237 -17429 -9239 -1050 -17435 -17441 -17449 -1067 -9260 -17456 -17457 -9265 -1072 -17460 -17461 -17462 -9272 -9274 -1085 -17470 -9291 -17488 -9297 -1109 -17496 -17497 -1113 -9305 -9309 -1122 -1123 -9318 -1127 -9319 -17518 -9328 -9329 -1138 -9331 -1140 -9335 -1143 -9337 -1151 -1155 -9348 -9349 -17540 -9355 -17548 -17549 -9360 -17554 -17555 -1176 -1178 -17564 -9377 -17578 -17581 -1198 -17585 -17586 -1203 -9396 -17589 -17591 -1208 -1210 -17596 -17598 -9411 -1220 -9417 -1226 -17611 -9423 -17619 -9432 -1243 -1245 -9437 -1248 -17633 -1252 -17638 -1258 -1262 -1263 -17647 -1277 -9470 -17664 -17667 -1285 -9480 -17673 -1298 -1299 -9492 -9493 -17685 -17682 -17691 -9501 -9507 -9508 -17710 -17711 -17712 -17713 -1330 -17718 -17721 -17723 -1342 -1348 -9540 -1351 -9543 -9547 -17740 -17741 -17743 -17746 -1366 -9559 -1368 -1372 -1374 -17761 -17774 -17780 -17785 -1403 -9600 -9605 -1414 -17797 -17801 -1419 -1423 -9616 -17808 -9618 -17813 -1433 -9630 -1438 -1447 -1451 -1456 -17841 -17842 -1459 -17844 -9653 -9655 -9657 -17851 -1468 -1472 -17867 -9683 -17876 -9684 -1499 -1500 -1501 -9692 -1508 -9701 -17893 -9703 -17894 -17898 -1517 -17903 -9713 -9716 -17911 -1530 -17915 -9724 
-17919 -17920 -1538 -1539 -17928 -1548 -17935 -17940 -1561 -17947 -9755 -17949 -17948 -9757 -17950 -17952 -17957 -9769 -1583 -17967 -1585 -17970 -9781 -1592 -1593 -9789 -9790 -9792 -17985 -17986 -1603 -9795 -17989 -17992 -17995 -17998 -9807 -18000 -18001 -18002 -9824 -9827 -9836 -18028 -1648 -1653 -18042 -9860 -18054 -1673 -9868 -18061 -9876 -18075 -18076 -1693 -18081 -18083 -18084 -9893 -18089 -9900 -1711 -9904 -9908 -1722 -1723 -9915 -18108 -9918 -18113 -9924 -1733 -18124 -9935 -9936 -1745 -9939 -1750 -18137 -1755 -1759 -9955 -1766 -9961 -1771 -1772 -18156 -18164 -1780 -1783 -18173 -9982 -9984 -18176 -9986 -1796 -18182 -18185 -1801 -9996 -10000 -1809 -1810 -18194 -1812 -18193 -1815 -10009 -18202 -10021 -18215 -18218 -18222 -18230 -1849 -10042 -18239 -1856 -1857 -10047 -18246 -18250 -1867 -1869 -1870 -18253 -10064 -10065 -18256 -1875 -18260 -10066 -10063 -1881 -10075 -10076 -18269 -10079 -18271 -1890 -1892 -10084 -18278 -1894 -1899 -10093 -18291 -10100 -1912 -18296 -10107 -18303 -1919 -18304 -10112 -10115 -1927 -18312 -1931 -10129 -10135 -18328 -18327 -10143 -1952 -10145 -1955 -18346 -1967 -18352 -1968 -10162 -18355 -10164 -10161 -18353 -1975 -10170 -1984 -18372 -18374 -18379 -10189 -18383 -2001 -10194 -2010 -10203 -2012 -10205 -2018 -10214 -2026 -2030 -2033 -18417 -2037 -10232 -10234 -18426 -18430 -18431 -10241 -18434 -2051 -10242 -2054 -18438 -2057 -10251 -10254 -2062 -18447 -2068 -2071 -10264 -2072 -2073 -10266 -10271 -18463 -10283 -18481 -10291 -2102 -18489 -18491 -18496 -2113 -18498 -10307 -2118 -10313 -2125 -10319 -2132 -10327 -10329 -18525 -18533 -10342 -18536 -10347 -2160 -10359 -18554 -2172 -10368 -2180 -18565 -10380 -10382 -18575 -18578 -10387 -2196 -2198 -2199 -18582 -2202 -18589 -2207 -10401 -18595 -10404 -18597 -18599 -18601 -10409 -2219 -10416 -10418 -10421 -2230 -18615 -10425 -10426 -18620 -10432 -2241 -2240 -18628 -2247 -2248 -10448 -18641 -2258 -18643 -10449 -2269 -2271 -18658 -18666 -10477 -18669 -2286 -2288 -10480 -10483 -10484 -2293 -2294 
-18677 -18682 -10495 -2303 -2309 -2311 -2313 -2317 -10509 -10511 -18703 -18705 -10514 -2324 -2326 -10526 -10528 -18724 -10533 -18731 -2349 -10543 -2355 -10547 -2360 -2362 -2363 -10555 -18749 -2378 -2379 -2384 -18770 -10582 -18775 -10585 -18777 -2395 -2396 -18781 -2399 -18789 -18790 -10598 -2416 -10612 -2433 -10626 -18822 -2441 -2443 -10636 -18830 -10645 -10646 -2455 -18839 -10658 -2471 -2477 -18863 -18865 -2485 -2487 -10681 -2492 -2494 -2497 -2504 -2505 -10696 -10702 -2511 -2512 -18901 -10710 -10711 -18904 -18909 -10720 -10722 -10725 -18918 -10731 -10732 -18925 -2542 -10733 -10734 -2545 -10738 -18930 -18932 -2548 -10745 -18943 -10751 -2575 -10768 -18972 -18976 -18982 -2600 -2604 -10798 -2609 -10802 -18996 -2614 -10810 -2619 -19006 -10815 -10818 -2628 -19014 -2631 -10823 -2637 -10830 -2639 -19023 -19030 -19033 -2651 -10845 -2654 -19042 -2665 -19051 -2673 -10867 -10874 -2686 -2688 -19073 -10882 -2691 -2694 -19078 -10888 -19086 -19091 -2709 -10902 -19095 -2711 -2714 -19099 -19105 -19107 -2724 -10918 -2727 -2732 -10929 -19127 -10936 -19134 -19136 -2754 -19145 -19149 -2765 -10961 -2775 -2780 -10974 -2783 -19174 -2796 -19187 -19188 -2808 -2810 -11002 -2812 -19196 -2814 -11008 -19200 -2818 -2816 -2820 -11021 -2833 -11026 -11031 -19225 -11033 -2841 -11036 -11045 -2859 -11052 -2865 -19250 -19252 -2869 -19254 -19255 -19256 -11065 -19261 -11071 -11072 -19268 -2888 -19276 -19281 -2897 -19284 -2901 -2905 -11099 -2911 -2921 -11114 -2925 -19310 -2929 -2930 -19316 -2934 -2937 -2939 -2940 -2944 -2946 -11140 -19333 -11143 -11147 -19345 -19346 -11160 -11161 -19356 -19357 -19358 -11171 -19363 -19365 -2990 -11183 -19376 -2993 -11185 -19380 -2996 -2998 -11199 -19391 -11206 -11210 -19403 -19404 -11212 -19402 -11216 -19410 -3028 -19412 -19415 -11224 -19417 -19416 -3035 -3038 -3045 -3046 -19432 -19433 -3050 -3057 -11250 -19449 -19450 -11259 -11261 -3070 -3076 -3077 -11270 -11271 -11269 -3080 -11280 -3089 -3092 -19478 -19479 -3095 -11291 -11296 -19488 -19489 -3106 -19497 -3115 -19501 -3119 
-19506 -11315 -11316 -11319 -3129 -11322 -19513 -11323 -11327 -19521 -19527 -3145 -19532 -3152 -19537 -11345 -11348 -3158 -11352 -3163 -19547 -3166 -11360 -3169 -11366 -19562 -19564 -3182 -11377 -19570 -3185 -11380 -11383 -11384 -19578 -3199 -19584 -19585 -19586 -11393 -3203 -11397 -3206 -19588 -3211 -3213 -19601 -11410 -3220 -19605 -11417 -19611 -11420 -11422 -19615 -19614 -19619 -3235 -11428 -19622 -19623 -3242 -11435 -3244 -19632 -11441 -19633 -3252 -11445 -3254 -11446 -19636 -19644 -19645 -3262 -3264 -19650 -11458 -3269 -19653 -19663 -11471 -19670 -3294 -19678 -19684 -11493 -19688 -19690 -3313 -19697 -19699 -19700 -19701 -11512 -3323 -3324 -11522 -11528 -3349 -3356 -3367 -11560 -11562 -11573 -3382 -3383 -3381 -11578 -11581 -11584 -3395 -3397 -3399 -3402 -3406 -11601 -3409 -3410 -11607 -11608 -11612 -3427 -11620 -11623 -3434 -3449 -3450 -11643 -3458 -3459 -11653 -3462 -3463 -11656 -3468 -3469 -3470 -3472 -3480 -3486 -3487 -11678 -11685 -11694 -11698 -3507 -3510 -11704 -11705 -3515 -3521 -3525 -11718 -11717 -11727 -3535 -3538 -11730 -3542 -11734 -11736 -11739 -3547 -11748 -3559 -3561 -11762 -3571 -3577 -3580 -3581 -11774 -11776 -11781 -11786 -3594 -3595 -11789 -11790 -11793 -3602 -11801 -11821 -3630 -3629 -3633 -11829 -3638 -3639 -11832 -3641 -3644 -3645 -11837 -3650 -3657 -11850 -3665 -11870 -11874 -3685 -3687 -11887 -11889 -3701 -3703 -11896 -11901 -3711 -11905 -3717 -3720 -3722 -11914 -3728 -3730 -3732 -11924 -3739 -11939 -11941 -11946 -3758 -3759 -3776 -3778 -3785 -11980 -11981 -11987 -3800 -3801 -3804 -11999 -3818 -3824 -12016 -12019 -12022 -12023 -3837 -12039 -12040 -12041 -3847 -3851 -3849 -12044 -3859 -12051 -3862 -12056 -12057 -12060 -3875 -3876 -3877 -3878 -3883 -12076 -3885 -12080 -3890 -3895 -3896 -12091 -3911 -12105 -3920 -12119 -12124 -12127 -3942 -3943 -12136 -12146 -12147 -12149 -12161 -3970 -12163 -3973 -12168 -3983 -3987 -3994 -12187 -3997 -12189 -3998 -12192 -4004 -4005 -4008 -4011 -12205 -12206 -4015 -4018 -4030 -4031 -12229 -4044 -12266 -4075 
-12268 -12271 -4085 -12277 -4090 -4104 -4106 -12300 -4111 -4114 -4121 -4122 -4123 -4124 -4126 -12320 -12322 -12351 -4160 -12352 -12358 -4167 -12360 -4169 -12364 -4186 -12379 -4189 -4190 -12393 -12394 -12395 -12400 -12402 -4213 -4215 -4217 -12410 -4219 -12413 -4221 -4228 -12424 -12426 -4235 -12444 -4266 -4287 -4291 -4292 -12488 -12491 -4307 -4308 -4309 -12500 -12503 -12509 -12512 -4321 -12513 -12517 -12525 -12528 -4338 -12533 -4342 -4345 -12540 -4350 -12544 -12547 -12549 -4361 -4364 -12557 -12556 -4370 -4371 -4373 -4377 -4378 -4381 -4385 -4386 -12582 -12586 -12587 -12588 -4399 -4401 -4406 -4407 -12602 -12605 -12606 -4423 -12620 -4431 -12631 -4441 -12644 -12653 -4466 -12659 -4474 -12674 -4486 -12679 -4489 -4491 -4496 -4497 -4499 -4504 -12697 -12702 -4511 -12712 -12716 -4524 -12727 -12729 -12732 -12733 -4543 -4545 -4549 -4552 -12752 -4561 -4567 -12765 -4583 -12782 -4592 -12785 -4595 -12788 -12798 -12802 -4613 -4619 -4620 -12815 -4625 -4627 -12820 -4640 -12832 -12835 -12837 -12838 -12839 -4648 -4658 -12851 -12854 -4665 -12871 -12875 -12881 -12889 -4701 -4706 -12900 -12911 -4719 -4724 -4725 -4726 -12920 -12922 -12923 -12928 -4736 -12936 -4746 -4754 -12950 -4760 -12954 -4763 -4764 -12955 -12960 -4772 -12967 -4778 -12970 -12973 -4782 -12975 -4788 -12996 -4805 -13001 -13005 -4816 -13013 -13021 -13025 -4841 -4843 -13038 -4851 -4852 -13045 -4858 -4859 -4860 -4866 -13061 -4869 -13072 -4883 -13075 -13079 -13081 -4890 -13087 -4897 -4898 -4899 -13090 -13096 -13103 -13113 -13115 -4924 -4931 -13127 -13130 -13131 -13132 -13144 -13145 -13148 -4957 -13150 -13151 -13152 -13155 -13164 -4974 -13174 -4983 -4991 -13189 -4998 -5000 -5005 -13203 -5012 -5011 -13206 -13207 -5015 -5017 -13214 -5026 -5034 -13231 -5040 -5043 -13239 -13243 -13244 -13247 -5062 -5068 -5072 -5078 -5081 -13282 -5093 -5094 -5099 -13292 -13306 -13313 -5128 -5141 -13334 -13333 -13338 -13353 -13355 -5164 -13361 -5173 -5176 -13375 -13376 -5184 -5186 -13381 -5189 -5193 -13386 -13389 -5199 -5206 -5209 -5214 -5219 -13414 
-5228 -13421 -13424 -13426 -13427 -5234 -13430 -5241 -5249 -13443 -13448 -13453 -13456 -13459 -5277 -13470 -13473 -13477 -13480 -5288 -13486 -5296 -13488 -13495 -13500 -5310 -13505 -13506 -5315 -13509 -13513 -5328 -13524 -13528 -5341 -5347 -5351 -5353 -13545 -5355 -13548 -5357 -13546 -5361 -5365 -13558 -5370 -13566 -13572 -5381 -13574 -5384 -13578 -13586 -5406 -13599 -13600 -13601 -5409 -5413 -5414 -13610 -13614 -13615 -13624 -13625 -13631 -5442 -13637 -13638 -5449 -5454 -5480 -5486 -13684 -5494 -13686 -5505 -5508 -5512 -5522 -5530 -13726 -5544 -5545 -13736 -13744 -5553 -13745 -13751 -13757 -13765 -5583 -5593 -13787 -5599 -13796 -13805 -5621 -13827 -5636 -13834 -5644 -5656 -5664 -13865 -5675 -5676 -13870 -13872 -5686 -13881 -13883 -5695 -5702 -13899 -5711 -13905 -13910 -13914 -13915 -13917 -13927 -5737 -5738 -5739 -13938 -13941 -5750 -13945 -13946 -5755 -13949 -5757 -13953 -5762 -5763 -5764 -13955 -5769 -13962 -13967 -5778 -13971 -13970 -5781 -13977 -13979 -5790 -5794 -13988 -13990 -5798 -13993 -14001 -5812 -14005 -5819 -14012 -5822 -5827 -14020 -5832 -14024 -14032 -5844 -5848 -5849 -14042 -5860 -5870 -14072 -5880 -5891 -14096 -14099 -14100 -14102 -14104 -14105 -14120 -14121 -5930 -5932 -14129 -5939 -5947 -14139 -14146 -5958 -14153 -14158 -14167 -14168 -14174 -14178 -14181 -5989 -5991 -14185 -5995 -14193 -14198 -6007 -14199 -6009 -6008 -14204 -6013 -14206 -14212 -14214 -6038 -6039 -6043 -14237 -14240 -14242 -14246 -6059 -6060 -6062 -14255 -6074 -14266 -14274 -6082 -6087 -14282 -6091 -6094 -6100 -14292 -14296 -6116 -14309 -6118 -14311 -14314 -6123 -14318 -14324 -6137 -14332 -14340 -14342 -14343 -14346 -6161 -6172 -6177 -14372 -14380 -14382 -6191 -6194 -14398 -6214 -6218 -6224 -14418 -6233 -14427 -6237 -14430 -14434 -6242 -6246 -14441 -14446 -6255 -6257 -14458 -6271 -14464 -6275 -14470 -6280 -14472 -6283 -6284 -14475 -6287 -6299 -6303 -6304 -14500 -14501 -6311 -14504 -14505 -14513 -6327 -14519 -6330 -6331 -14527 -14530 -6341 -14541 -14546 -6357 -6364 -14559 -14563 
-14565 -6382 -14585 -6398 -6399 -6401 -14595 -14597 -6405 -14599 -6410 -14608 -14609 -14616 -6426 -6429 -14628 -14632 -6455 -14656 -14658 -14659 -14660 -6466 -14663 -14665 -14668 -6480 -14673 -14674 -14677 -14678 -14679 -14681 -6489 -6492 -14690 -14702 -14703 -14706 -6515 -6516 -6514 -6520 -14714 -14718 -6528 -14720 -6529 -6537 -14735 -6544 -6543 -14752 -6562 -14755 -6564 -14763 -14764 -6573 -6577 -14771 -14774 -14776 -14778 -6598 -14791 -6603 -6604 -14796 -6606 -6609 -6614 -14808 -14810 -6620 -6623 -14817 -14819 -6628 -6633 -14828 -14833 -6642 -6646 -6647 -6651 -14845 -14848 -6666 -6667 -14863 -14865 -14869 -14870 -6679 -14876 -6687 -14884 -14903 -14904 -14907 -6720 -6722 -6724 -14917 -14918 -6728 -6739 -14936 -6745 -14941 -6754 -6762 -14955 -6765 -6770 -6771 -6774 -6777 -14974 -6786 -6789 -6790 -14984 -6792 -14987 -6808 -15004 -15011 -15014 -6824 -15017 -15018 -15020 -6830 -6831 -15036 -6845 -15040 -6855 -6865 -6874 -15067 -15069 -15070 -6882 -6885 -6891 -15090 -6900 -15092 -6908 -6909 -15102 -6910 -15110 -6919 -6918 -15126 -6939 -15134 -6943 -6942 -15138 -15142 -15144 -15148 -6962 -15163 -6975 -15172 -6981 -15176 -15183 -15185 -15190 -7006 -15204 -15210 -7018 -15222 -7037 -15231 -7039 -15235 -7051 -7062 -7065 -7066 -7068 -15273 -15279 -15287 -15292 -7102 -7105 -7106 -15300 -15308 -15312 -7126 -15319 -7137 -7139 -7145 -7155 -15350 -15354 -7164 -7165 -7167 -15370 -7179 -7195 -15388 -7197 -15392 -7204 -7207 -15415 -15421 -15428 -7237 -15430 -7238 -7241 -15434 -7244 -7248 -15441 -7262 -7263 -7264 -15463 -15465 -15473 -15477 -7287 -15479 -7291 -15484 -15487 -7298 -7299 -15494 -7310 -15504 -7318 -15511 -7322 -15515 -7324 -15523 -7337 -15530 -15533 -7344 -15537 -15538 -7348 -7357 -15551 -7362 -7363 -7364 -15560 -15564 -7381 -7383 -15576 -7384 -7387 -7395 -15593 -15596 -15597 -15599 -7408 -15601 -15605 -15606 -15609 -15610 -15614 -15616 -7427 -15627 -7447 -15645 -7459 -7460 -7461 -7463 -7466 -15660 -15663 -7477 -15669 -15672 -7497 -7506 -7510 -7513 -7514 -15720 -15729 
-7539 -7550 -7566 -15759 -7569 -7570 -15763 -7576 -7581 -7593 -15789 -7598 -7599 -7600 -7603 -7606 -15814 -7622 -15816 -15815 -7626 -15818 -7627 -15827 -7635 -15834 -7646 -15838 -15842 -7652 -7653 -7656 -7664 -15859 -7672 -7674 -15867 -7676 -15868 -15870 -15874 -7683 -15879 -7687 -15880 -7690 -7693 -7696 -15889 -15890 -15897 -15902 -15908 -7718 -7736 -7741 -7745 -7747 -15942 -7754 -7764 -7766 -15961 -7772 -7788 -7796 -15990 -7802 -7803 -7805 -16007 -16013 -7822 -7826 -16024 -7834 -16042 -16045 -16050 -7858 -7860 -7867 -7868 -7869 -16063 -7876 -7881 -16077 -16079 -7889 -7890 -7899 -7902 -16095 -7908 -16103 -7912 -16104 -7914 -7916 -16110 -7919 -16112 -16113 -16116 -7927 -7937 -16135 -16141 -16143 -16145 -7955 -16148 -7962 -7965 -16161 -16164 -16165 -7977 -7978 -7982 -7983 -16180 -16182 -7991 -8009 -8010 -16203 -16205 -16207 -16213 -8021 -16216 -16218 -8027 -8028 -16222 -16229 -8051 -8058 -16251 -16254 -16256 -16262 -8070 -8072 -8078 -8081 -16275 -8084 -16281 -16285 -16289 -16296 -16303 -8114 -16307 -16309 -8121 -16315 -8124 -8130 -8142 -8151 -8152 -8153 -8154 -16354 -8169 -8170 -16363 -16370 -16371 -8180 -16374 -16376 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/protential_500_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/protential_500_shadow_graph_index.txt deleted file mode 100644 index 1f4f2a12..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/protential_500_shadow_graph_index.txt +++ /dev/null @@ -1,537 +0,0 @@ -14342 -4106 -12300 -10254 -4111 -4114 -2068 -16415 -36 -10283 -8237 -10291 -2118 -10313 -12364 -6224 -16469 -6233 -14427 -8283 -14430 -8286 -14441 -8304 -112 -6257 -12402 -115 -2160 -4217 -14464 -14470 -4235 -6283 -6284 -140 -8349 -2207 -6304 -10401 -10404 -18597 -14501 -14513 -10421 -188 -4287 -14527 -4292 -6341 -12491 -214 -12503 -14559 -4321 -14565 -10477 -6382 -12528 -4338 -4342 -2294 -12549 -8454 -4364 -12557 -10511 -14609 -10514 -4370 -2326 -12582 -16683 -8492 
-8494 -4399 -4401 -2355 -12602 -2363 -8513 -14658 -14659 -8520 -2379 -6480 -10582 -12631 -14681 -6489 -2395 -16749 -2416 -6515 -6516 -8563 -8569 -4474 -378 -388 -18822 -6537 -2443 -396 -16785 -401 -2455 -16796 -12702 -14755 -14764 -14774 -10681 -14778 -8642 -16837 -6598 -2504 -2505 -6604 -16845 -6606 -16846 -16850 -10710 -14808 -16857 -14810 -18909 -10720 -16867 -4583 -10732 -18925 -12782 -4592 -6646 -6647 -6651 -12798 -4619 -2575 -10768 -4625 -14869 -18976 -16932 -12839 -10802 -565 -14903 -2619 -16956 -10815 -16965 -2631 -10830 -19030 -8791 -10845 -605 -8798 -4701 -8803 -12900 -8806 -17011 -4724 -12922 -8831 -8832 -641 -6789 -6790 -12936 -19091 -17044 -12950 -4760 -6808 -12960 -10918 -2727 -679 -15017 -17063 -15018 -8876 -12975 -17071 -4788 -6845 -19136 -15040 -8899 -4805 -17096 -719 -4816 -17117 -740 -17127 -13038 -15090 -19187 -19188 -8951 -15102 -17152 -11008 -2818 -770 -771 -8960 -6919 -8967 -8972 -11021 -11026 -11031 -19225 -6939 -800 -4898 -4899 -17190 -11052 -2865 -19250 -19255 -11065 -6975 -4931 -19276 -17230 -848 -2901 -17239 -17246 -13155 -2921 -17268 -2934 -17270 -2939 -2940 -11140 -13189 -4998 -908 -5005 -9101 -13207 -11160 -7065 -929 -17314 -11183 -5040 -2993 -5043 -2998 -13247 -9158 -11210 -971 -15308 -5072 -17362 -3028 -17365 -19417 -13282 -7139 -5094 -19432 -19433 -3050 -7167 -9217 -13313 -3077 -9222 -1038 -17424 -9233 -3092 -9237 -13338 -11296 -19506 -19521 -5193 -15434 -5199 -7248 -17488 -9297 -17497 -7262 -3169 -19562 -19564 -17518 -11383 -7287 -1151 -5249 -19586 -19585 -7299 -11397 -15494 -9355 -9360 -15511 -11422 -19615 -13473 -19619 -15523 -3242 -17581 -1198 -19632 -17586 -17589 -1208 -1226 -19663 -1245 -3294 -5355 -13548 -5357 -1263 -17647 -5361 -5365 -15605 -5370 -17667 -9480 -1299 -9493 -9501 -5406 -13601 -9508 -3367 -7466 -13614 -13615 -7477 -15669 -13624 -15672 -11581 -11584 -3395 -1348 -13637 -3402 -3406 -11601 -1366 -11607 -11608 -5480 -3434 -13684 -5494 -3450 -11643 -7550 -3459 -9605 -3462 -1414 -7566 -9616 -7569 -5522 -15763 -3480 
-9630 -11685 -5545 -13744 -5553 -1456 -9653 -15816 -15827 -17876 -7635 -3542 -11734 -5593 -5599 -11748 -17898 -3571 -9716 -5621 -15867 -11774 -15870 -17919 -11786 -7696 -17947 -9755 -17949 -9769 -13865 -11821 -3633 -7736 -3641 -3644 -3645 -3650 -17992 -18001 -9827 -3687 -7788 -11889 -1653 -5750 -15990 -7805 -13949 -3711 -5757 -5763 -5764 -3717 -3722 -13967 -5778 -13971 -16024 -1693 -5790 -16042 -11946 -3759 -14001 -7860 -5812 -9908 -5822 -5832 -7881 -1745 -5844 -3801 -7912 -7914 -16112 -12019 -18164 -9982 -9984 -18182 -16135 -12039 -3851 -14096 -1809 -1810 -14099 -14104 -12056 -16161 -3875 -16164 -14120 -7977 -3885 -7983 -16182 -1849 -1857 -12105 -16205 -14158 -10064 -1875 -14167 -14168 -10075 -8028 -12124 -14178 -1892 -3943 -1899 -18303 -1919 -14212 -16262 -1927 -3983 -10129 -3987 -6038 -6039 -6043 -3997 -12192 -16296 -6060 -12206 -1967 -18352 -1968 -16307 -1975 -6074 -18372 -18374 -6100 -14292 -8152 -8153 -2018 -14314 -4075 -8180 -4085 -16376 -18426 -14332 -18430 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/protential_700_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/protential_700_shadow_graph_index.txt deleted file mode 100644 index d08b864b..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/protential_700_shadow_graph_index.txt +++ /dev/null @@ -1,1038 +0,0 @@ -2051 -14340 -14342 -2054 -4106 -10251 -12300 -16396 -10254 -4111 -16397 -4114 -2068 -10264 -4122 -4123 -4124 -4126 -16415 -10271 -12322 -36 -14372 -10283 -16427 -8237 -6191 -8239 -16433 -6194 -10291 -8245 -16443 -16444 -8253 -12351 -4160 -2113 -18498 -8258 -2118 -12358 -12360 -10313 -8266 -12364 -6224 -14418 -2132 -16469 -6233 -16474 -8283 -14427 -91 -14430 -8286 -14441 -10347 -12395 -14446 -8304 -112 -6257 -12402 -115 -2160 -16498 -4217 -18554 -12413 -14464 -4228 -2180 -14470 -6280 -12426 -4235 -6283 -6284 -140 -18578 -148 -16533 -2196 -2199 -2202 -8347 -12444 -8349 -2207 -6304 -10401 -6303 -10404 -18597 -14501 -8357 
-14505 -8362 -14513 -10421 -16566 -18615 -183 -6330 -188 -4287 -14527 -16575 -2241 -4292 -6341 -198 -2247 -12491 -8399 -18641 -2258 -18643 -214 -12503 -12509 -14559 -4321 -14565 -8425 -16619 -10477 -6382 -18669 -12528 -4338 -2293 -4342 -2294 -18677 -16639 -256 -8448 -10495 -12544 -12549 -8454 -14599 -2309 -4361 -2313 -16647 -4364 -12557 -2311 -10511 -14609 -10514 -4370 -2324 -274 -2326 -4371 -4373 -4378 -6429 -10526 -4385 -4386 -12582 -16683 -8492 -8494 -4399 -4401 -16689 -2355 -16693 -4406 -4407 -6455 -16696 -12602 -2363 -12606 -14656 -8513 -14658 -14659 -8520 -2379 -14668 -8524 -6480 -336 -10582 -12631 -14681 -6489 -2395 -10585 -16737 -358 -16746 -16749 -12653 -14703 -2416 -14702 -14706 -6515 -6516 -8563 -4466 -8569 -4474 -378 -382 -16769 -10626 -388 -18822 -6537 -2443 -396 -18830 -16783 -16784 -16785 -401 -4499 -6544 -2455 -8603 -16796 -12702 -4511 -16801 -14755 -427 -14764 -2477 -12716 -18865 -14771 -8628 -14774 -10681 -14778 -12729 -8637 -8642 -16837 -6598 -2504 -2505 -8651 -6604 -16845 -6606 -16846 -10702 -6609 -16850 -18901 -10710 -6614 -14808 -16857 -14810 -18904 -18909 -10720 -14817 -16867 -8676 -16870 -4583 -10732 -18925 -12782 -2542 -4592 -12785 -10738 -4595 -18930 -6646 -6647 -10745 -6651 -12798 -18943 -8704 -12802 -6666 -4619 -6667 -2575 -10768 -4625 -8721 -12815 -14869 -18972 -14876 -6687 -18976 -8739 -16932 -14884 -12837 -12839 -16939 -557 -2609 -10802 -4658 -12851 -565 -14903 -14904 -4665 -2619 -16956 -8764 -10815 -16959 -10818 -16965 -2631 -16968 -8778 -12875 -10830 -12881 -19030 -8791 -14936 -10845 -605 -8798 -4701 -14941 -2654 -8803 -12900 -6754 -8806 -6762 -619 -12911 -2673 -6770 -17011 -4724 -4726 -12922 -14974 -8831 -8832 -641 -8833 -10882 -12928 -6789 -6790 -17031 -12936 -14984 -2694 -19091 -17044 -12950 -4760 -6808 -12955 -15004 -12960 -17061 -10918 -2727 -679 -15017 -17063 -15018 -8876 -680 -17064 -12975 -17071 -17067 -4788 -6845 -19136 -15040 -8899 -12996 -4805 -17096 -19149 -17102 -719 -4816 -13005 -2765 -17112 -6874 -15067 -2780 -17117 
-15070 -734 -10974 -740 -6885 -19174 -17127 -17129 -747 -13038 -15090 -19187 -19188 -8951 -762 -8955 -4860 -15102 -17152 -11008 -2818 -770 -771 -8960 -4866 -6919 -8967 -779 -8972 -11021 -13072 -11026 -11031 -19225 -13081 -6939 -15134 -800 -4898 -4899 -17190 -15144 -9003 -11052 -2865 -19250 -9010 -19252 -17204 -822 -19255 -823 -11065 -15163 -6975 -4931 -9028 -842 -19276 -17230 -848 -19281 -2901 -17239 -4957 -17246 -13151 -2911 -9055 -13155 -9063 -2921 -17268 -19316 -2934 -17270 -2939 -2940 -15231 -2944 -15235 -11140 -13189 -4998 -5000 -17290 -7051 -908 -5005 -9101 -19345 -5012 -17302 -13207 -11160 -7065 -11161 -19357 -13214 -929 -17314 -11171 -938 -11183 -5040 -2993 -5043 -2998 -7102 -13247 -7106 -9158 -11210 -971 -15308 -5068 -19404 -5072 -15312 -17362 -19410 -3028 -17365 -15319 -11224 -19417 -3035 -9182 -7137 -13282 -7139 -3045 -5094 -3046 -19432 -19433 -3050 -5093 -1005 -1008 -3057 -11250 -19450 -15354 -7164 -3070 -7167 -9217 -13313 -17409 -3077 -9222 -11271 -5128 -1036 -1038 -17424 -9233 -3089 -3092 -9237 -17429 -5141 -9236 -13338 -1050 -15388 -11296 -17441 -19501 -17457 -19506 -11319 -17470 -13376 -19521 -13381 -15430 -19527 -5193 -15434 -3145 -19532 -13389 -5199 -7248 -17488 -9297 -11348 -5206 -17497 -7262 -3169 -1123 -19562 -19564 -13421 -17518 -19570 -13426 -13427 -11383 -7287 -11384 -1151 -3199 -5249 -19586 -19585 -7299 -11397 -15494 -11393 -9355 -9360 -13456 -17554 -17555 -15511 -11417 -7322 -11420 -11422 -19615 -13473 -15523 -19619 -19622 -13480 -3242 -15530 -17581 -1198 -13486 -19632 -11441 -17586 -15538 -17589 -1208 -19645 -3262 -3264 -1220 -1226 -17611 -19663 -7381 -19670 -15576 -1243 -1245 -3294 -17633 -17638 -5355 -13548 -5357 -1263 -17647 -5361 -3313 -19697 -5365 -15605 -11512 -5370 -3323 -15616 -17667 -7427 -5381 -9480 -11528 -1299 -9493 -17691 -9501 -5406 -15645 -13601 -5409 -9508 -5413 -7461 -3367 -7466 -13610 -15660 -13614 -13615 -17713 -7477 -15669 -3382 -13624 -15672 -17721 -11581 -11584 -3395 -1348 -13637 -9540 -3397 -5449 -3402 -3406 -11601 
-1366 -11607 -11608 -7510 -1368 -1374 -17761 -11620 -5480 -15720 -3434 -7539 -13684 -5494 -17785 -3450 -11643 -7550 -9600 -3458 -3459 -9605 -3462 -1414 -11656 -1419 -7566 -1423 -9616 -7569 -5522 -15763 -9618 -3480 -9630 -11685 -1447 -5544 -5545 -1451 -13744 -5553 -1456 -1459 -3507 -9653 -7600 -3515 -15814 -15816 -7626 -15818 -15827 -17876 -7635 -3542 -11734 -5593 -11739 -13787 -7646 -5599 -11748 -7653 -9701 -17893 -3561 -17898 -13805 -9713 -3571 -9716 -5621 -15859 -7672 -15867 -11774 -15870 -17919 -1538 -7683 -13827 -11781 -5636 -11786 -7696 -17947 -9755 -17949 -17948 -15908 -7718 -9769 -13865 -11821 -13872 -3633 -7736 -3641 -1592 -3644 -3645 -11837 -9789 -9792 -17985 -3650 -1603 -17989 -17992 -7754 -11850 -18001 -13905 -18002 -13910 -7766 -15961 -13914 -11870 -9827 -3687 -5737 -7788 -9836 -11889 -13938 -1653 -5750 -15990 -13941 -11896 -18042 -5755 -7805 -13949 -3711 -5757 -5762 -5763 -5764 -3717 -3720 -3722 -11914 -18061 -13967 -5778 -13971 -7826 -5781 -13970 -16024 -13979 -1693 -5790 -13988 -9893 -16042 -11946 -3759 -1711 -14001 -7860 -5812 -9908 -14005 -1723 -7869 -5822 -5832 -7881 -14024 -16079 -9935 -1745 -9936 -14032 -5844 -7890 -7889 -9939 -3801 -18137 -7899 -5860 -1766 -7912 -7914 -3818 -7919 -16112 -12019 -18164 -16116 -7927 -9982 -9984 -18176 -1796 -18182 -16135 -12039 -3851 -14096 -1809 -1810 -14099 -18194 -14104 -12056 -10009 -16161 -3875 -16164 -14120 -7977 -7978 -14121 -5932 -3885 -7983 -16180 -16182 -1849 -5947 -18239 -1857 -12105 -14153 -16203 -16205 -14158 -16207 -10064 -10065 -1875 -14167 -14168 -1881 -10075 -8028 -12124 -16222 -10079 -18269 -14178 -1892 -3943 -12136 -5991 -1899 -12146 -8051 -12147 -1912 -18303 -1919 -16256 -14212 -16262 -1927 -8070 -1931 -3983 -10129 -8081 -3987 -6038 -6039 -6043 -3997 -16285 -12189 -12192 -10145 -14242 -4004 -4005 -16296 -4011 -6060 -6059 -12206 -1967 -18352 -1968 -10162 -16307 -18355 -16309 -12205 -1975 -6074 -14266 -4031 -18372 -18374 -18379 -18383 -6100 -14292 -8152 -8153 -2018 -8169 -14314 -4075 -12266 -2026 
-6123 -2030 -12271 -8180 -4085 -12277 -16376 -18426 -14332 -18430 -18431 diff --git a/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/protential_900_shadow_graph_index.txt b/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/protential_900_shadow_graph_index.txt deleted file mode 100644 index ab12052c..00000000 --- a/pygip/models/attack/mea/data/attack3_shadow_graph/pubmed/protential_900_shadow_graph_index.txt +++ /dev/null @@ -1,1505 +0,0 @@ -16396 -16397 -16415 -32 -36 -16427 -8237 -8239 -16433 -8245 -16443 -16444 -8253 -16447 -8258 -8266 -8268 -16469 -16474 -8283 -91 -8286 -8304 -112 -16498 -115 -16497 -8306 -128 -140 -8334 -146 -8339 -148 -16533 -16534 -8347 -8349 -8353 -8357 -8362 -16566 -183 -188 -16575 -195 -198 -8399 -212 -214 -8409 -8411 -8423 -8425 -16619 -8427 -16633 -16639 -256 -8448 -8454 -16647 -274 -295 -16683 -8492 -8494 -16689 -16693 -16696 -317 -8513 -8514 -8515 -8520 -8524 -336 -16726 -16737 -358 -16746 -16749 -8562 -8563 -8569 -378 -380 -382 -16769 -388 -8582 -396 -16783 -16784 -16785 -401 -400 -8603 -16796 -16801 -427 -8628 -16821 -16828 -8637 -8642 -16837 -8651 -16845 -16846 -16850 -16857 -16867 -8676 -16870 -8686 -16887 -8699 -8704 -8719 -8721 -8739 -16932 -8742 -16939 -557 -16948 -565 -16956 -8764 -16959 -8768 -8769 -16965 -16966 -16968 -8778 -16980 -8791 -605 -8798 -16991 -8797 -8803 -8806 -619 -17011 -8831 -8832 -641 -8833 -17031 -17038 -17044 -664 -17048 -8858 -669 -17061 -679 -17063 -680 -17064 -17067 -8876 -17071 -8880 -700 -702 -8899 -17096 -17102 -719 -17103 -727 -17112 -17113 -17117 -734 -738 -740 -8934 -17127 -17129 -747 -753 -8951 -8953 -762 -8955 -17152 -8960 -770 -771 -8963 -8967 -17160 -779 -8972 -17169 -792 -800 -17190 -9003 -9010 -17204 -9013 -822 -823 -825 -9028 -842 -17230 -848 -17239 -17246 -9055 -17252 -9063 -17256 -17268 -17270 -9086 -17279 -17290 -908 -9101 -917 -17302 -924 -9116 -929 -17314 -938 -17348 -9158 -971 -17362 -17365 -9182 -1005 -1008 -17407 -9217 -17409 -1027 -9222 -1036 -1038 -17424 -9233 
-9236 -9237 -17429 -9239 -1050 -17441 -9260 -17456 -17457 -9272 -9274 -17470 -17488 -9297 -1109 -17497 -9309 -1122 -1123 -1127 -17518 -9335 -9337 -1151 -9348 -9355 -9360 -17554 -17555 -1176 -1178 -9377 -17578 -17581 -1198 -17586 -17589 -1208 -1210 -9411 -1220 -1226 -17611 -9423 -1243 -1245 -17633 -17638 -1258 -1263 -17647 -1277 -17664 -17667 -9480 -1299 -9493 -17685 -17691 -9501 -9508 -17713 -17721 -1348 -9540 -1351 -9543 -9547 -17741 -17746 -1366 -9559 -1368 -1374 -17761 -17785 -9600 -9605 -1414 -17797 -17801 -1419 -1423 -9616 -9618 -9630 -1438 -1447 -1451 -1456 -1459 -9653 -1472 -17876 -1499 -1500 -1501 -9701 -17893 -9703 -17898 -9713 -9716 -9724 -17919 -1538 -1548 -1561 -17947 -9755 -17949 -17948 -9769 -1583 -1592 -9789 -9792 -17985 -17986 -1603 -17989 -17992 -17995 -18000 -18001 -18002 -9827 -9836 -1653 -18042 -18061 -18075 -18076 -1693 -18081 -9893 -1711 -9908 -1723 -9915 -18108 -18113 -9924 -18124 -9935 -9936 -1745 -9939 -18137 -1766 -1772 -18164 -1780 -18173 -9982 -9984 -18176 -1796 -18182 -1809 -1810 -18194 -10009 -18202 -10021 -1849 -18239 -1856 -1857 -18246 -1867 -1869 -1870 -18253 -10064 -10065 -18256 -1875 -18260 -1881 -10075 -10076 -18269 -10079 -18271 -1890 -1892 -18278 -1899 -10093 -1912 -18296 -18303 -1919 -18304 -10112 -1927 -1931 -10129 -10145 -1967 -18352 -1968 -10162 -18355 -10164 -1975 -18372 -18374 -18379 -18383 -10194 -2018 -2026 -2030 -10234 -18426 -18430 -18431 -2051 -2054 -18438 -10251 -10254 -2068 -10264 -10271 -10283 -10291 -2113 -18498 -2118 -10313 -2132 -10329 -10347 -2160 -18554 -2180 -18578 -10387 -2196 -2198 -2199 -18582 -2202 -18589 -2207 -10401 -10404 -18597 -2219 -10421 -18615 -10432 -2241 -18628 -2247 -10448 -18641 -2258 -18643 -18658 -18666 -10477 -18669 -10483 -2293 -2294 -18677 -10495 -2309 -2311 -2313 -2317 -10511 -10514 -2324 -2326 -10526 -18724 -2349 -2355 -2362 -2363 -2378 -2379 -2384 -18770 -10582 -10585 -18777 -2395 -2396 -18781 -18790 -2416 -10626 -18822 -2441 -2443 -18830 -2455 -2477 -18863 -18865 -10681 -2494 -2497 
-2504 -2505 -10702 -2511 -18901 -10710 -10711 -18904 -18909 -10720 -10722 -18918 -10732 -18925 -2542 -10733 -10738 -18930 -18932 -10745 -18943 -2575 -10768 -18972 -18976 -2609 -10802 -2619 -10815 -10818 -19014 -2631 -10830 -2639 -19030 -10845 -2654 -19042 -2673 -10867 -19073 -10882 -2694 -19078 -19091 -2709 -10902 -19095 -2711 -19099 -10918 -2727 -2732 -10936 -19136 -2754 -19149 -2765 -2775 -2780 -10974 -19174 -2796 -19187 -19188 -11008 -2818 -11021 -11026 -11031 -19225 -11045 -2859 -11052 -2865 -19250 -19252 -19255 -11065 -11072 -19276 -19281 -2901 -2911 -2921 -19310 -2929 -19316 -2934 -2939 -2940 -2944 -11140 -11143 -19345 -11160 -11161 -19357 -19358 -11171 -19363 -11183 -2993 -2998 -11206 -11210 -19404 -11212 -19410 -3028 -11224 -19417 -19416 -3035 -3045 -3046 -19432 -19433 -3050 -3057 -11250 -19450 -3070 -3077 -11271 -3089 -3092 -11291 -11296 -19488 -19501 -19506 -11319 -11327 -19521 -19527 -3145 -19532 -3152 -11348 -11360 -3169 -19562 -19564 -3182 -19570 -11380 -11383 -11384 -3199 -19585 -19586 -11393 -3203 -11397 -3206 -3213 -19605 -11417 -19611 -11420 -11422 -19615 -19614 -19619 -19622 -3242 -11435 -3244 -19632 -11441 -19645 -3262 -3264 -3269 -19663 -11471 -19670 -3294 -19684 -19688 -19690 -3313 -19697 -11512 -3323 -11522 -11528 -3349 -3367 -11573 -3382 -3383 -11581 -11584 -3395 -3397 -3402 -3406 -11601 -3409 -11607 -11608 -11620 -3434 -3449 -3450 -11643 -3458 -3459 -3462 -11656 -3472 -3480 -11685 -11698 -3507 -3515 -3525 -11718 -11717 -11727 -3542 -11734 -11736 -11739 -11748 -3561 -3571 -3581 -11774 -11781 -11786 -3594 -11789 -11793 -11801 -11821 -3630 -3629 -3633 -11832 -3641 -3644 -3645 -11837 -3650 -11850 -11870 -3687 -11889 -3701 -11896 -11901 -3711 -3717 -3720 -3722 -11914 -3728 -11939 -11941 -11946 -3759 -3776 -3801 -3818 -3824 -12019 -12022 -12039 -12040 -3851 -12056 -12057 -12060 -3875 -3877 -3885 -3896 -12091 -12105 -3920 -12119 -12124 -3943 -12136 -12146 -12147 -12149 -3983 -3987 -3994 -12187 -3997 -12189 -3998 -12192 -4004 -4005 -4011 -12205 
-12206 -4030 -4031 -12266 -4075 -12271 -4085 -12277 -4106 -12300 -4111 -4114 -4122 -4123 -4124 -4126 -12320 -12322 -12351 -4160 -12352 -12358 -12360 -4169 -12364 -4190 -12395 -12402 -4213 -4217 -4219 -12413 -4228 -12426 -4235 -12444 -4287 -4291 -4292 -12488 -12491 -4307 -4308 -12503 -12509 -12512 -4321 -12517 -12528 -4338 -12533 -4342 -12544 -12549 -4361 -4364 -12557 -12556 -4370 -4371 -4373 -4378 -4385 -4386 -12582 -4399 -4401 -4406 -4407 -12602 -12605 -12606 -4423 -12631 -12653 -4466 -4474 -12679 -4499 -12702 -4511 -12712 -12716 -12729 -12732 -4545 -12752 -4567 -4583 -12782 -4592 -12785 -4595 -12798 -12802 -4613 -4619 -4620 -12815 -4625 -4627 -12820 -4640 -12835 -12837 -12838 -12839 -4658 -12851 -4665 -12875 -12881 -4701 -12900 -12911 -4719 -4724 -4725 -4726 -12922 -12928 -4736 -12936 -4754 -12950 -4760 -12955 -4763 -12960 -4778 -12973 -12975 -4788 -12996 -4805 -13005 -4816 -13038 -4859 -4860 -4866 -4869 -13061 -13072 -13081 -4898 -4899 -13090 -4931 -13130 -13132 -13144 -4957 -13151 -13155 -4991 -13189 -4998 -5000 -5005 -5012 -13207 -13214 -13231 -5040 -5043 -13239 -13243 -13244 -13247 -5068 -5072 -13282 -5093 -5094 -13292 -13313 -5128 -5141 -13334 -13338 -13353 -5173 -13375 -13376 -13381 -5193 -13389 -5199 -5206 -5209 -13421 -13426 -13427 -5234 -13430 -5249 -13443 -13448 -13453 -13456 -13473 -13480 -13486 -5296 -13488 -13500 -13506 -5328 -5355 -13548 -5357 -5361 -5365 -13558 -5370 -13572 -5381 -13578 -5406 -13599 -13601 -5409 -5413 -13610 -13614 -13615 -13624 -13625 -13637 -5449 -5480 -13684 -5494 -13686 -5512 -5522 -5544 -5545 -13744 -5553 -13745 -5593 -13787 -5599 -13796 -13805 -5621 -13827 -5636 -13834 -5664 -13865 -5675 -13872 -13905 -13910 -13914 -13917 -5737 -13938 -13941 -5750 -13945 -5755 -13949 -5757 -5762 -5763 -5764 -13967 -5778 -13971 -13970 -5781 -13979 -5790 -13988 -13990 -14001 -5812 -14005 -14012 -5822 -14020 -5832 -14024 -14032 -5844 -5848 -14042 -5860 -14096 -14099 -14104 -14120 -14121 -5932 -5947 -14139 -14153 -14158 -14167 -14168 -14178 
-14181 -5991 -14185 -5995 -14204 -6013 -14212 -6038 -6039 -6043 -14242 -6059 -6060 -14255 -6074 -14266 -6087 -6100 -14292 -14309 -14314 -6123 -14318 -14332 -14340 -14342 -14372 -6191 -6194 -6224 -14418 -6233 -14427 -6237 -14430 -14441 -14446 -6257 -14458 -6271 -14464 -14470 -6280 -6283 -6284 -6287 -6299 -6303 -6304 -14501 -14504 -14505 -14513 -6330 -6331 -14527 -6341 -6357 -6364 -14559 -14563 -14565 -6382 -6401 -14595 -14597 -14599 -14609 -14616 -6429 -14628 -6455 -14656 -14658 -14659 -14665 -14668 -6480 -14673 -14678 -14681 -6489 -14702 -14703 -14706 -6515 -6516 -6514 -6520 -14714 -6537 -14735 -6544 -14752 -6562 -14755 -14764 -14771 -14774 -14778 -6598 -14791 -6603 -6604 -6606 -6609 -6614 -14808 -14810 -14817 -6628 -6642 -6646 -6647 -6651 -14845 -6666 -6667 -14869 -14876 -6687 -14884 -14903 -14904 -14907 -6720 -6724 -6728 -14936 -14941 -6754 -6762 -14955 -6770 -6771 -6777 -14974 -6786 -6789 -6790 -14984 -6808 -15004 -15017 -15018 -6830 -6845 -15040 -6874 -15067 -15070 -6885 -15090 -6900 -15102 -6919 -6939 -15134 -6943 -15142 -15144 -15148 -15163 -6975 -7006 -15204 -15210 -7018 -15231 -7039 -15235 -7051 -7065 -7102 -7106 -15308 -15312 -15319 -7137 -7139 -7145 -15354 -7164 -7167 -7195 -15388 -7204 -15415 -15421 -15430 -7238 -15434 -7248 -15441 -7262 -7287 -7291 -15484 -7299 -15494 -15511 -7322 -7324 -15523 -7337 -15530 -7344 -15538 -7362 -7381 -15576 -15599 -7408 -15605 -15606 -15616 -7427 -7447 -15645 -7461 -7463 -7466 -15660 -7477 -15669 -15672 -7510 -7513 -15720 -7539 -7550 -7566 -7569 -15763 -7581 -7593 -7599 -7600 -15814 -7622 -15816 -15815 -7626 -15818 -15827 -7635 -7646 -15842 -7653 -15859 -7672 -15867 -7676 -15870 -7683 -15879 -7687 -7690 -7696 -15890 -15897 -15902 -15908 -7718 -7736 -7754 -7764 -7766 -15961 -7788 -15990 -7802 -7805 -7826 -16024 -7834 -16042 -16050 -7860 -7867 -7869 -16063 -7881 -16079 -7889 -7890 -7899 -7902 -7908 -7912 -7914 -7916 -16110 -7919 -16112 -16116 -7927 -16135 -16141 -16145 -16161 -16164 -7977 -7978 -7982 -7983 -16180 -16182 
-7991 -8010 -16203 -16205 -16207 -16213 -8028 -16222 -8051 -16254 -16256 -16262 -8070 -8081 -16275 -16281 -16285 -16296 -16307 -16309 -16315 -8151 -8152 -8153 -8169 -16370 -16371 -8180 -16374 -16376 diff --git a/pygip/models/defense/BackdoorWM.py b/pygip/models/defense/BackdoorWM.py deleted file mode 100644 index a57d465c..00000000 --- a/pygip/models/defense/BackdoorWM.py +++ /dev/null @@ -1,179 +0,0 @@ -import random -from time import time - -import torch -import torch.nn.functional as F - -from pygip.models.defense.base import BaseDefense -from pygip.models.nn import GCN -from pygip.utils.metrics import DefenseMetric, DefenseCompMetric - - -class BackdoorWM(BaseDefense): - supported_api_types = {"dgl"} - - def __init__(self, dataset, attack_node_fraction, model_path=None, trigger_rate=0.01, l=20, target_label=0): - super().__init__(dataset, attack_node_fraction) - # load data - self.dataset = dataset - self.graph_dataset = dataset.graph_dataset - self.graph_data = dataset.graph_data.to(device=self.device) - self.model_path = model_path - self.features = self.graph_data.ndata['feat'] - self.labels = self.graph_data.ndata['label'] - self.train_mask = self.graph_data.ndata['train_mask'] - self.test_mask = self.graph_data.ndata['test_mask'] - - # load meta data - self.num_nodes = dataset.num_nodes - self.num_features = dataset.num_features - self.num_classes = dataset.num_classes - - # wm params - self.trigger_rate = trigger_rate - self.l = l - self.target_label = target_label - - def _load_model(self): - """ - Load a pre-trained model. 
- """ - assert self.model_path, "self.model_path should be defined" - - # Create the model - self.net1 = GCN(self.num_features, self.num_classes).to(self.device) - - # Load the saved state dict - self.net1.load_state_dict(torch.load(self.model_path, map_location=self.device)) - - # Set to evaluation mode - self.net1.eval() - - def inject_backdoor_trigger(self, data, trigger_rate=None, trigger_feat_val=0.99, l=None, target_label=None): - """Feature-based Trigger Injection""" - if trigger_rate is None: - trigger_rate = self.trigger_rate - if l is None: - l = self.l - if target_label is None: - target_label = self.target_label - - num_nodes = data.shape[0] - num_feats = data.shape[1] - num_trigger_nodes = int(trigger_rate * num_nodes) - - trigger_nodes = random.sample(range(num_nodes), num_trigger_nodes) - for node in trigger_nodes: - feature_indices = random.sample(range(num_feats), l) - data[node][feature_indices] = trigger_feat_val - return data, trigger_nodes - - def train_target_model(self, metric_comp: DefenseCompMetric): - """ - Train the target model with backdoor injection. 
- """ - # Initialize GNN model - self.net1 = GCN(self.num_features, self.num_classes).to(self.device) - optimizer = torch.optim.Adam(self.net1.parameters(), lr=0.01, weight_decay=5e-4) - - # Inject backdoor trigger - defense_s = time() - poisoned_features = self.features.clone() - poisoned_labels = self.labels.clone() - - poisoned_features_cpu = poisoned_features.cpu() - poisoned_features_cpu, trigger_nodes = self.inject_backdoor_trigger( - poisoned_features_cpu, - trigger_rate=self.trigger_rate, - l=self.l, - target_label=self.target_label - ) - poisoned_features = poisoned_features_cpu.to(self.device) - - # Modify labels for trigger nodes - for node in trigger_nodes: - poisoned_labels[node] = self.target_label - - self.trigger_nodes = trigger_nodes - self.poisoned_features = poisoned_features - self.poisoned_labels = poisoned_labels - defense_e = time() - - # Training loop - for epoch in range(200): - self.net1.train() - - # Forward pass - logits = self.net1(self.graph_data, poisoned_features) - logp = F.log_softmax(logits, dim=1) - loss = F.nll_loss(logp[self.train_mask], poisoned_labels[self.train_mask]) - - # Backward pass - optimizer.zero_grad() - loss.backward() - optimizer.step() - - # Validation (optional) - if epoch % 50 == 0: - self.net1.eval() - with torch.no_grad(): - logits_val = self.net1(self.graph_data, poisoned_features) - logp_val = F.log_softmax(logits_val, dim=1) - pred = logp_val.argmax(dim=1) - acc_val = (pred[self.test_mask] == poisoned_labels[self.test_mask]).float().mean() - print(f" Epoch {epoch}: training... 
Validation Accuracy: {acc_val.item():.4f}") - - metric_comp.update(defense_time=(defense_e - defense_s)) - - return self.net1 - - def verify_backdoor(self, model, trigger_nodes): - """Verify backdoor attack success rate""" - model.eval() - with torch.no_grad(): - out = model(self.graph_data, self.poisoned_features) - backdoor_preds = out.argmax(dim=1)[trigger_nodes] - # correct = (pred[trigger_nodes] == target_label).sum().item() - return backdoor_preds - - def evaluate_model(self, model, features): - """Evaluate model performance""" - model.eval() - with torch.no_grad(): - out = model(self.graph_data, features) - logits = out[self.test_mask] - preds = logits.argmax(dim=1).cpu() - - return preds - - def defend(self): - """ - Execute the backdoor watermark defense. - """ - metric_comp = DefenseCompMetric() - metric_comp.start() - print("====================Backdoor Watermark====================") - - # If model wasn't trained yet, train it - if not hasattr(self, 'net1'): - self.train_target_model(metric_comp) - - # Evaluate the backdoored model - preds = self.evaluate_model(self.net1, self.poisoned_features) - inference_s = time() - backdoor_preds = self.verify_backdoor(self.net1, self.trigger_nodes) - inference_e = time() - - # metric - metric = DefenseMetric() - metric.update(preds, self.poisoned_labels[self.test_mask]) - target = torch.full_like(backdoor_preds, fill_value=self.target_label) - metric.update_wm(backdoor_preds, target) - metric_comp.end() - - print("====================Final Results====================") - res = metric.compute() - metric_comp.update(inference_defense_time=(inference_e - inference_s)) - res_comp = metric_comp.compute() - - return res, res_comp diff --git a/pygip/models/defense/GrOVe.py b/pygip/models/defense/GrOVe.py deleted file mode 100644 index bcab9dfe..00000000 --- a/pygip/models/defense/GrOVe.py +++ /dev/null @@ -1,303 +0,0 @@ - -import random -from time import time - -import torch -import torch.nn.functional as F -import numpy 
as np -from sklearn.neural_network import MLPClassifier - -from pygip.models.defense.base import BaseDefense -from pygip.models.nn import GCN -from pygip.utils.metrics import DefenseMetric, DefenseCompMetric - - -class GroveDefense(BaseDefense): - supported_api_types = {"dgl", "pyg"} - - def __init__(self, dataset, attack_node_fraction, model_path=None, - hidden_dim=256, verification_threshold=0.5, num_surrogate_models=3): - super().__init__(dataset, attack_node_fraction) - # load data - self.dataset = dataset - self.graph_dataset = dataset.graph_dataset - self.graph_data = dataset.graph_data.to(device=self.device) - self.model_path = model_path - self.features = self.graph_data.ndata['feat'] - self.labels = self.graph_data.ndata['label'] - self.train_mask = self.graph_data.ndata['train_mask'] - self.test_mask = self.graph_data.ndata['test_mask'] - - # load meta data - self.num_nodes = dataset.num_nodes - self.num_features = dataset.num_features - self.num_classes = dataset.num_classes - - # grove params - self.hidden_dim = hidden_dim - self.verification_threshold = verification_threshold - self.num_surrogate_models = num_surrogate_models - - # models - self.target_model = None - self.surrogate_models = [] - self.independent_models = [] - self.csim_classifier = None - - def _load_model(self): - """ - Load a pre-trained target model. - """ - if self.model_path: - # Create the model - self.target_model = GCN(self.num_features, self.num_classes).to(self.device) - # Load the saved state dict - self.target_model.load_state_dict(torch.load(self.model_path, map_location=self.device)) - else: - # Train new target model - self._train_target_model() - - # Set to evaluation mode - self.target_model.eval() - - def _train_target_model(self): - """ - Train the target model. 
- """ - self.target_model = GCN(self.num_features, self.num_classes).to(self.device) - optimizer = torch.optim.Adam(self.target_model.parameters(), lr=0.01, weight_decay=5e-4) - - for epoch in range(200): - self.target_model.train() - - # Forward pass - logits = self.target_model(self.graph_data, self.features) - logp = F.log_softmax(logits, dim=1) - loss = F.nll_loss(logp[self.train_mask], self.labels[self.train_mask]) - - # Backward pass - optimizer.zero_grad() - loss.backward() - optimizer.step() - - self.target_model.eval() - - def _train_surrogate_models(self): - """ - Train surrogate models to simulate model stealing attacks. - """ - print("Training surrogate models...") - - for i in range(self.num_surrogate_models): - # Create surrogate model with different initialization - torch.manual_seed(42 + i) - surrogate_model = GCN(self.num_features, self.num_classes).to(self.device) - optimizer = torch.optim.Adam(surrogate_model.parameters(), lr=0.01, weight_decay=5e-4) - - # Train with limited data (simulating stolen model scenario) - for epoch in range(100): - surrogate_model.train() - - logits = surrogate_model(self.graph_data, self.features) - logp = F.log_softmax(logits, dim=1) - loss = F.nll_loss(logp[self.train_mask], self.labels[self.train_mask]) - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - surrogate_model.eval() - self.surrogate_models.append(surrogate_model) - - def _train_independent_models(self): - """ - Train independent models for comparison. 
- """ - print("Training independent models...") - - for i in range(self.num_surrogate_models): - # Create independent model with different random seed - torch.manual_seed(100 + i) - independent_model = GCN(self.num_features, self.num_classes).to(self.device) - optimizer = torch.optim.Adam(independent_model.parameters(), lr=0.01, weight_decay=5e-4) - - # Train independently - for epoch in range(150): - independent_model.train() - - logits = independent_model(self.graph_data, self.features) - logp = F.log_softmax(logits, dim=1) - loss = F.nll_loss(logp[self.train_mask], self.labels[self.train_mask]) - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - independent_model.eval() - self.independent_models.append(independent_model) - - def _get_model_embeddings(self, model): - """ - Extract embeddings from a model. - """ - model.eval() - with torch.no_grad(): - # Get intermediate layer embeddings (before final classification) - embeddings = model.get_embeddings(self.graph_data, self.features) - return embeddings[self.test_mask] - - def _compute_distance_vectors(self, target_embeddings, suspect_embeddings): - """ - Compute element-wise squared distance vectors between embeddings. - """ - target_np = target_embeddings.detach().cpu().numpy() - suspect_np = suspect_embeddings.detach().cpu().numpy() - - # Ensure same shape - min_nodes = min(target_np.shape[0], suspect_np.shape[0]) - target_crop = target_np[:min_nodes] - suspect_crop = suspect_np[:min_nodes] - - # Compute element-wise squared distance - distance_vectors = (target_crop - suspect_crop) ** 2 - return distance_vectors - - def _train_csim_classifier(self): - """ - Train the CSim classifier for ownership verification. 
- """ - # Get target model embeddings - target_embeddings = self._get_model_embeddings(self.target_model) - - # Prepare training data - distance_vectors = [] - labels = [] - - # Positive samples: (target, surrogate) pairs - label 1 - for surrogate_model in self.surrogate_models: - surrogate_embeddings = self._get_model_embeddings(surrogate_model) - dist_vectors = self._compute_distance_vectors(target_embeddings, surrogate_embeddings) - distance_vectors.extend(dist_vectors) - labels.extend([1] * len(dist_vectors)) - - # Negative samples: (target, independent) pairs - label 0 - for independent_model in self.independent_models: - independent_embeddings = self._get_model_embeddings(independent_model) - dist_vectors = self._compute_distance_vectors(target_embeddings, independent_embeddings) - distance_vectors.extend(dist_vectors) - labels.extend([0] * len(dist_vectors)) - - X_train = np.array(distance_vectors) - y_train = np.array(labels) - - # Train MLP classifier - self.csim_classifier = MLPClassifier( - hidden_layer_sizes=(128,), - activation='relu', - random_state=42, - max_iter=1000 - ) - self.csim_classifier.fit(X_train, y_train) - - def verify_ownership(self, suspect_model): - """ - Verify if suspect model is a surrogate (stolen) or independent. 
- """ - target_embeddings = self._get_model_embeddings(self.target_model) - suspect_embeddings = self._get_model_embeddings(suspect_model) - - # Compute distance vectors - distance_vectors = self._compute_distance_vectors(target_embeddings, suspect_embeddings) - - # Predict using CSim classifier - predictions = self.csim_classifier.predict(distance_vectors) - probabilities = self.csim_classifier.predict_proba(distance_vectors) - - # Aggregate results - surrogate_probability = np.mean(probabilities[:, 1]) - is_surrogate = surrogate_probability > self.verification_threshold - - return { - 'is_surrogate': is_surrogate, - 'surrogate_probability': surrogate_probability, - 'confidence': abs(surrogate_probability - 0.5) * 2 - } - - def defend(self): - """ - Execute the Grove ownership verification defense. - """ - metric_comp = DefenseCompMetric() - metric_comp.start() - print("====================Grove Ownership Verification====================") - - # Load or train target model - if not hasattr(self, 'target_model') or self.target_model is None: - self._load_model() - - # Train surrogate and independent models - self._train_surrogate_models() - self._train_independent_models() - - # Train CSim classifier - defense_s = time() - self._train_csim_classifier() - defense_e = time() - - # Verify models - inference_s = time() - verification_results = [] - - # Test on surrogate models (should be detected as stolen) - for i, surrogate_model in enumerate(self.surrogate_models): - result = self.verify_ownership(surrogate_model) - result['model_type'] = 'surrogate' - result['expected_surrogate'] = True - verification_results.append(result) - - # Test on independent models (should be detected as independent) - for i, independent_model in enumerate(self.independent_models): - result = self.verify_ownership(independent_model) - result['model_type'] = 'independent' - result['expected_surrogate'] = False - verification_results.append(result) - - inference_e = time() - - # Calculate 
metrics - correct_predictions = sum( - 1 for result in verification_results - if result['is_surrogate'] == result['expected_surrogate'] - ) - total_predictions = len(verification_results) - accuracy = correct_predictions / total_predictions if total_predictions > 0 else 0 - - metric = DefenseMetric() - - # Pred and label for overall performance - dummy_preds = torch.tensor([1 if r['is_surrogate'] else 0 for r in verification_results]) - dummy_labels = torch.tensor([1 if r['expected_surrogate'] else 0 for r in verification_results]) - metric.update(dummy_preds, dummy_labels) - - wm_preds = torch.tensor([r['surrogate_probability'] for r in verification_results]) - wm_labels = torch.tensor([1.0 if r['expected_surrogate'] else 0.0 for r in verification_results]) - metric.update_wm(wm_preds, wm_labels) - - # Update computation metrics with recorded times - metric_comp.update(defense_time=(defense_e - defense_s)) - metric_comp.update(inference_defense_time=(inference_e - inference_s)) - metric_comp.end() - - print("====================Final Results====================") - print(f"Verification Accuracy: {accuracy:.4f}") - print(f"Correct Predictions: {correct_predictions}/{total_predictions}") - print(f"Defense Time: {defense_e - defense_s:.4f}s") - print(f"Inference Time: {inference_e - inference_s:.4f}s") - - res = metric.compute() - res['verification_accuracy'] = accuracy - res['correct_predictions'] = correct_predictions - res['total_predictions'] = total_predictions - res_comp = metric_comp.compute() - - return res, res_comp diff --git a/pygip/models/defense/ImperceptibleWM.py b/pygip/models/defense/ImperceptibleWM.py deleted file mode 100644 index 215e599a..00000000 --- a/pygip/models/defense/ImperceptibleWM.py +++ /dev/null @@ -1,262 +0,0 @@ -import copy -import time - -import torch -import torch.nn as nn -import torch.nn.functional as F -from tqdm import tqdm -from torch_geometric.nn import GCNConv -from torch_geometric.utils import to_dense_adj, dense_to_sparse 
-from sklearn.metrics import precision_score, recall_score, f1_score - -from .base import BaseDefense -from pygip.models.nn.backbones import GCN_PyG -from pygip.utils.metrics import DefenseCompMetric, DefenseMetric - - -class ImperceptibleWM(BaseDefense): - supported_api_types = {"pyg"} - - def __init__(self, dataset, defense_ratio=0.1, model_path=None): - super().__init__(dataset, defense_ratio) - # load data - self.model_trained = None - self.dataset = dataset - self.graph_dataset = dataset.graph_dataset - self.graph_data = dataset.graph_data.to(self.device) - self.defense_ratio = defense_ratio - self.num_triggers = int(dataset.num_nodes * defense_ratio) - self.model_path = model_path - - self.owner_id = torch.tensor([0.1, 0.3, 0.5, 0.7, 0.9], dtype=torch.float32, device=self.graph_data.x.device) - - in_feats = dataset.num_features - num_classes = dataset.num_classes - - self.generator = TriggerGenerator(in_feats, 64, self.owner_id).to(self.device) - self.model = GCN_PyG(in_feats, 128, num_classes).to(self.device) - - def defend(self): - """ - Execute the imperceptible watermark defense. 
- """ - metric_comp = DefenseCompMetric() - metric_comp.start() - print("====================Imperceptible Watermark====================") - - # If model wasn't trained yet, train it - if not hasattr(self, 'model_trained'): - self.train_target_model(metric_comp) - - # Evaluate the watermarked model - trigger_data = generate_trigger_graph(self.graph_data, self.generator, self.model, self.num_triggers) - preds = self.evaluate_model(trigger_data) - inference_s = time.time() - wm_preds = self.verify_watermark(trigger_data) - inference_e = time.time() - - # metric - metric = DefenseMetric() - metric.update(preds, trigger_data.y[trigger_data.original_test_mask]) - wm_true = trigger_data.y[trigger_data.trigger_nodes] - metric.update_wm(wm_preds, wm_true) - metric_comp.end() - - print("====================Final Results====================") - res = metric.compute() - metric_comp.update(inference_defense_time=(inference_e - inference_s)) - res_comp = metric_comp.compute() - - return res, res_comp - - def train_target_model(self, metric_comp: DefenseCompMetric): - """Train the target model with watermark injection.""" - defense_s = time.time() - - pyg_data = self.graph_data - bi_level_optimization(self.model, self.generator, pyg_data, self.num_triggers) - - self.model_trained = True - defense_e = time.time() - - metric_comp.update(defense_time=(defense_e - defense_s)) - return self.model - - def evaluate_model(self, trigger_data): - """Evaluate model performance on downstream task""" - self.model.eval() - with torch.no_grad(): - out = self.model(trigger_data.x, trigger_data.edge_index) - preds = out[trigger_data.original_test_mask].argmax(dim=1).cpu() - return preds - - def verify_watermark(self, trigger_data): - """Verify watermark success rate""" - self.model.eval() - with torch.no_grad(): - out = self.model(trigger_data.x, trigger_data.edge_index) - wm_preds = out[trigger_data.trigger_nodes].argmax(dim=1).cpu() - return wm_preds - - def _load_model(self): - if 
self.model_path: - self.model.load_state_dict(torch.load(self.model_path)) - - def _train_target_model(self): - # optional if you split training from watermarking - pass - - def _train_defense_model(self): - return self.model - - def _train_surrogate_model(self): - pass - - -class TriggerGenerator(nn.Module): - def __init__(self, in_channels, hidden_channels, owner_id): - super().__init__() - self.conv1 = GCNConv(in_channels, hidden_channels) - self.conv2 = GCNConv(hidden_channels, in_channels) - self.owner_id = owner_id - - def forward(self, x, edge_index): - x = F.relu(self.conv1(x, edge_index)) - x = torch.sigmoid(self.conv2(x, edge_index)) - out = x.clone() - out[:, -5:] = self.owner_id - return out - - -def generate_trigger_graph(data, generator, target_model, num_triggers=50): - with torch.no_grad(): - probs = F.softmax(target_model(data.x, data.edge_index), dim=1) - - selected_nodes = [] - for class_idx in range(probs.size(1)): - class_nodes = torch.where(data.y == class_idx)[0] - if len(class_nodes) > 0: - selected_nodes.append(class_nodes[probs[class_nodes, class_idx].argmax()].item()) - - trigger_features = generator(data.x, data.edge_index) - trigger_nodes = list(range(data.num_nodes, data.num_nodes + num_triggers)) - total_nodes = data.num_nodes + num_triggers - - # Create new dense adjacency matrix - adj = to_dense_adj(data.edge_index)[0] - new_adj = torch.zeros((total_nodes, total_nodes), device=adj.device) - new_adj[:adj.size(0), :adj.size(1)] = adj - - # Connect trigger nodes to selected nodes - for i, trigger in enumerate(trigger_nodes): - for node in selected_nodes: - new_adj[node, trigger] = 1 - new_adj[trigger, node] = 1 - - new_data = copy.deepcopy(data) - new_data.x = torch.cat([data.x, trigger_features[:num_triggers]], dim=0) - new_data.edge_index = dense_to_sparse(new_adj)[0] - new_data.y = torch.cat([ - data.y, - torch.zeros(num_triggers, dtype=torch.long, device=data.y.device) - ]) - - new_data.train_mask = torch.cat([ - data.train_mask, - 
torch.zeros(num_triggers, dtype=torch.bool, device=data.x.device) - ]) - new_data.val_mask = torch.cat([ - data.val_mask, - torch.zeros(num_triggers, dtype=torch.bool, device=data.x.device) - ]) - new_data.test_mask = torch.cat([ - data.test_mask, - torch.zeros(num_triggers, dtype=torch.bool, device=data.x.device) - ]) - - new_data.original_test_mask = data.test_mask.clone() - - # Add trigger info - new_data.trigger_nodes = trigger_nodes - new_data.selected_nodes = selected_nodes - new_data.trigger_mask = torch.zeros(total_nodes, dtype=torch.bool, device=data.x.device) - new_data.trigger_mask[trigger_nodes] = True - - return new_data - - -def bi_level_optimization(target_model, generator, data, num_triggers, epochs=100, inner_steps=5): - optimizer_model = torch.optim.Adam(target_model.parameters(), lr=0.01) - optimizer_gen = torch.optim.Adam(generator.parameters(), lr=0.01) - criterion = nn.CrossEntropyLoss() - - for epoch in tqdm(range(epochs)): - for _ in range(inner_steps): - optimizer_model.zero_grad() - trigger_data = generate_trigger_graph(data, generator, target_model, num_triggers) - - out_clean = target_model(data.x, data.edge_index) - out_trigger = target_model(trigger_data.x, trigger_data.edge_index) - - clean_loss = criterion(out_clean[data.train_mask], data.y[data.train_mask]) - trigger_loss = criterion(out_trigger[trigger_data.trigger_mask], - trigger_data.y[trigger_data.trigger_mask]) - - total_loss = clean_loss + trigger_loss - total_loss.backward() - optimizer_model.step() - - optimizer_gen.zero_grad() - trigger_data = generate_trigger_graph(data, generator, target_model, num_triggers) - - orig_features = data.x[trigger_data.selected_nodes] - trigger_features = trigger_data.x[trigger_data.trigger_nodes] - sim_loss = -F.cosine_similarity(orig_features.unsqueeze(1), - trigger_features.unsqueeze(0), dim=-1).mean() - - out = target_model(trigger_data.x, trigger_data.edge_index) - trigger_loss = criterion(out[trigger_data.trigger_mask], - 
trigger_data.y[trigger_data.trigger_mask]) - - owner_loss = F.binary_cross_entropy( - trigger_data.x[trigger_data.trigger_nodes, -5:], - generator.owner_id.expand(len(trigger_data.trigger_nodes), 5) - ) - - total_gen_loss = 0.4 * sim_loss + 0.4 * trigger_loss + 0.2 * owner_loss - total_gen_loss.backward() - optimizer_gen.step() - - -def calculate_metrics(model, data): - model.eval() - with torch.no_grad(): - out = model(data.x, data.edge_index) - pred = out.argmax(dim=1) - true = data.y - - # Handle both original and watermarked data cases - if hasattr(data, 'original_test_mask'): - test_mask = data.original_test_mask - if test_mask.size(0) < pred.size(0): - pad_len = pred.size(0) - test_mask.size(0) - test_mask = torch.cat([test_mask, torch.zeros(pad_len, dtype=torch.bool, device=test_mask.device)]) - else: - test_mask = data.test_mask - - metrics = { - 'accuracy': (pred[test_mask] == true[test_mask]).float().mean().item(), - 'precision': precision_score(true[test_mask].cpu(), pred[test_mask].cpu(), average='macro'), - 'recall': recall_score(true[test_mask].cpu(), pred[test_mask].cpu(), average='macro'), - 'f1': f1_score(true[test_mask].cpu(), pred[test_mask].cpu(), average='macro'), - 'wm_accuracy': None - } - - if hasattr(data, 'trigger_nodes'): - wm_mask = torch.zeros(data.x.size(0), dtype=torch.bool, device=data.x.device) - wm_mask[data.trigger_nodes] = True - wm_pred = pred[wm_mask] - wm_true = true[wm_mask] - metrics['wm_accuracy'] = (wm_pred == wm_true).float().mean().item() - - return metrics diff --git a/pygip/models/defense/ImperceptibleWM2.py b/pygip/models/defense/ImperceptibleWM2.py deleted file mode 100644 index e95dd209..00000000 --- a/pygip/models/defense/ImperceptibleWM2.py +++ /dev/null @@ -1,920 +0,0 @@ -import os -import sys - -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))) -import torch -import torch.nn as nn -import torch.nn.functional as F -import dgl -import numpy as np -from tqdm import tqdm -from 
dgl.dataloading import NeighborSampler, NodeCollator -from torch.utils.data import DataLoader -from dgl.nn import GraphConv -from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score - -from pygip.models.defense.base import BaseDefense -from pygip.models.nn import GraphSAGE - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - -class TriggerGenerator(nn.Module): - """ - Generate watermark trigger features and edge probabilities using a GCN-based architecture. - - This module constructs a small graph template and applies multiple GCN layers to produce - node features that represent the watermark trigger. It also learns a function to generate - edge probabilities between nodes using a neural edge generator. - - Parameters - ---------- - feature_dim : int - Dimension of node feature vectors. - hidden_dim : int, optional - Dimension of hidden layers in GCN and edge generator. Default is 64. - output_nodes : int, optional - Number of nodes in the generated trigger graph. Default is 50. - """ - - def __init__(self, feature_dim, hidden_dim=64, output_nodes=50): - super(TriggerGenerator, self).__init__() - self.feature_dim = feature_dim - self.output_nodes = output_nodes - - self.gcn1 = GraphConv(feature_dim, hidden_dim) - self.gcn2 = GraphConv(hidden_dim, hidden_dim) - self.gcn3 = GraphConv(hidden_dim, feature_dim) - - self.edge_generator = nn.Sequential( - nn.Linear(feature_dim * 2, hidden_dim), - nn.ReLU(), - nn.Linear(hidden_dim, 1), - nn.Sigmoid() - ) - - # Create a small template graph for GCN processing - self.template_graph = self._create_template_graph() - - def _create_template_graph(self): - """ - Create a small template DGL graph structure to serve as the base for GCN processing. - - This function builds a fully connected undirected graph (with self-loops) - consisting of up to 10 nodes. This graph serves as a structural template - for generating watermark trigger node features. 
- - Returns - ------- - dgl.DGLGraph - A small connected DGL graph with self-loops, moved to the appropriate device. - """ - # Create a small connected graph for initial processing - edges = [] - for i in range(min(10, self.output_nodes)): - for j in range(i + 1, min(10, self.output_nodes)): - edges.append((i, j)) - edges.append((j, i)) - - if not edges: - edges = [(0, 1), (1, 0)] - - src, dst = zip(*edges) if edges else ([0], [1]) - g = dgl.graph((src, dst), num_nodes=min(10, self.output_nodes)) - g = dgl.add_self_loop(g) - return g.to(device) - - def forward(self, clean_features, selected_nodes): - """ - Forward pass to generate trigger node features and edge probabilities. - - Constructs a trigger graph by first computing a prototype feature from - selected clean nodes, propagating it through GCN layers, and generating - additional nodes and edge probabilities to match the required trigger size. - - Parameters - ---------- - clean_features : torch.Tensor - Feature matrix from the clean graph (shape: [num_nodes, feature_dim]). - selected_nodes : list[int] or torch.Tensor - Indices of nodes selected for constructing the prototype vector. - - Returns - ------- - trigger_features : torch.Tensor - Feature matrix of generated trigger nodes (shape: [output_nodes, feature_dim]). - - edge_probs : torch.Tensor - A 1D tensor containing probabilities for edges between node pairs - (upper triangular, shape: [output_nodes * (output_nodes - 1) / 2]). 
- """ - # Create prototype from selected nodes - if len(selected_nodes) > 0: - sample_features = clean_features[selected_nodes[:min(len(selected_nodes), 10)]] - prototype = sample_features.mean(dim=0, keepdim=True) - else: - prototype = clean_features.mean(dim=0, keepdim=True) - - # Replicate prototype for template graph nodes - template_size = self.template_graph.num_nodes() - template_features = prototype.repeat(template_size, 1) - - # Apply GCN layers - h = F.relu(self.gcn1(self.template_graph, template_features)) - h = F.relu(self.gcn2(self.template_graph, h)) - h = torch.sigmoid(self.gcn3(self.template_graph, h)) - - # Expand to desired number of trigger nodes - if template_size < self.output_nodes: - # Replicate and add noise for additional nodes - additional_nodes = self.output_nodes - template_size - noise = torch.randn(additional_nodes, self.feature_dim, device=device) * 0.1 - additional_features = h[-1:].repeat(additional_nodes, 1) + noise - trigger_features = torch.cat([h, additional_features], dim=0) - else: - trigger_features = h[:self.output_nodes] - - # Generate edge probabilities - n_nodes = self.output_nodes - edge_probs = [] - - for i in range(n_nodes): - for j in range(i + 1, n_nodes): - pair_features = torch.cat([trigger_features[i], trigger_features[j]], dim=0) - edge_prob = self.edge_generator(pair_features.unsqueeze(0)) - edge_probs.append(edge_prob) - - edge_probs = torch.cat(edge_probs, dim=0) if edge_probs else torch.tensor([]).to(device) - - return trigger_features, edge_probs - - -class ImperceptibleWM2(BaseDefense): - def __init__(self, dataset, attack_node_fraction=0.2, wm_node=50, - target_label=None, N=50, M=5, - epsilon1=1.0, epsilon2=0.5, epsilon3=1.0, owner_id=None, - beta=0.001, T_acc=0.8): - """ - Initialize the watermark defense using bilevel optimization. 
- - Parameters - ---------- - dataset : object - The graph dataset containing features, labels, and graph structure - attack_node_fraction : float, default=0.2 - Fraction of nodes to consider for attack simulation - wm_node : int, default=50 - Number of nodes in the watermark/trigger graph - target_label : int, optional - Target label for watermark classification. If None, randomly selected - N : int, default=50 - Number of bilevel optimization iterations - M : int, default=5 - Number of embedding phase iterations per bilevel step - epsilon1 : float, default=1.0 - Weight for imperception loss in generator objective - epsilon2 : float, default=0.5 - Weight for regulation loss in generator objective - epsilon3 : float, default=1.0 - Weight for trigger loss in generator objective - owner_id : array-like, optional - Owner identifier for watermark regulation. If None, randomly generated - beta : float, default=0.001 - Learning rate for the main model optimizer - T_acc : float, default=0.8 - Accuracy threshold for ownership verification - """ - - super().__init__(dataset, attack_node_fraction) - self.dataset = dataset - self.graph = dataset.graph - - self.node_number = dataset.node_number if hasattr(dataset, 'node_number') else self.graph.num_nodes() - self.feature_number = dataset.feature_number if hasattr(dataset, 'feature_number') else \ - self.graph.ndata['feat'].shape[1] - self.label_number = dataset.label_number if hasattr(dataset, 'label_number') else ( - int(max(self.graph.ndata['label']) - min(self.graph.ndata['label'])) + 1) - self.attack_node_number = int(self.node_number * attack_node_fraction) - - self.wm_node = wm_node - self.target_label = target_label if target_label is not None else np.random.randint(0, self.label_number) - self.N = N - self.M = M - self.beta = beta - self.T_acc = T_acc - - self.epsilon1 = epsilon1 - self.epsilon2 = epsilon2 - self.epsilon3 = epsilon3 - - self.owner_id = owner_id if owner_id is not None else 
torch.rand(self.feature_number, device=device) - if isinstance(self.owner_id, (list, np.ndarray)): - self.owner_id = torch.tensor(self.owner_id, dtype=torch.float32, device=device) - elif not isinstance(self.owner_id, torch.Tensor): - self.owner_id = torch.rand(self.feature_number, device=device) - - self.features = dataset.features if hasattr(dataset, 'features') else self.graph.ndata['feat'] - self.labels = dataset.labels if hasattr(dataset, 'labels') else self.graph.ndata['label'] - self.train_mask = dataset.train_mask if hasattr(dataset, 'train_mask') else self.graph.ndata['train_mask'] - self.test_mask = dataset.test_mask if hasattr(dataset, 'test_mask') else self.graph.ndata['test_mask'] - - if device != 'cpu': - self.graph = self.graph.to(device) - self.features = self.features.to(device) - self.labels = self.labels.to(device) - self.train_mask = self.train_mask.to(device) - self.test_mask = self.test_mask.to(device) - self.owner_id = self.owner_id.to(device) - - def _select_poisoning_nodes(self, clean_model): - """ - Select nodes for watermark poisoning based on model predictions. - Uses the clean model's confidence scores to identify high-confidence nodes - across different labels for creating the watermark trigger. 
- - Parameters - ---------- - clean_model : torch.nn.Module - Pre-trained clean model used for node selection - - Returns - ------- - torch.Tensor - Tensor of selected node indices for poisoning - """ - clean_model.eval() - with torch.no_grad(): - sampler = NeighborSampler([5, 5]) - all_nids = torch.arange(self.graph.num_nodes(), device=device) - collator = NodeCollator(self.graph, all_nids, sampler) - dataloader = DataLoader( - collator.dataset, batch_size=64, shuffle=False, - collate_fn=collator.collate, drop_last=False - ) - - all_predictions = [] - node_indices = [] - - for input_nodes, output_nodes, blocks in dataloader: - blocks = [b.to(device) for b in blocks] - input_features = blocks[0].srcdata['feat'] - - logits = clean_model(blocks, input_features) - predictions = F.softmax(logits, dim=1) - - all_predictions.append(predictions) - node_indices.append(output_nodes) - - all_predictions = torch.cat(all_predictions, dim=0) - node_indices = torch.cat(node_indices, dim=0) - - poisoning_nodes = [] - nodes_per_label = max(1, self.wm_node // self.label_number) - - for label in range(self.label_number): - label_probs = all_predictions[:, label] - _, top_indices = torch.topk(label_probs, min(nodes_per_label, len(label_probs))) - selected_nodes = node_indices[top_indices] - poisoning_nodes.extend(selected_nodes.tolist()) - - if len(poisoning_nodes) < self.wm_node: - remaining_nodes = set(range(self.graph.num_nodes())) - set(poisoning_nodes) - additional_nodes = np.random.choice( - list(remaining_nodes), - size=min(self.wm_node - len(poisoning_nodes), len(remaining_nodes)), - replace=False - ) - poisoning_nodes.extend(additional_nodes) - - poisoning_nodes = poisoning_nodes[:self.wm_node] - - return torch.tensor(poisoning_nodes, device=device) - - def _generate_trigger_graph(self, f_g, V_p): - """ - Generate a watermark trigger graph using the generator network. 
- Creates trigger features and edges based on poisoning nodes and - constructs a DGL graph for watermark embedding. - - Parameters - ---------- - f_g : torch.nn.Module - Trigger generator network - V_p : torch.Tensor - Selected poisoning node indices - - Returns - ------- - dgl.DGLGraph - The generated watermark trigger graph with features and labels - """ - f_g.eval() - with torch.no_grad(): - trigger_features, edge_probs = f_g(self.features, V_p) - - edge_threshold = 0.5 - edges_src, edges_dst = [], [] - edge_idx = 0 - - for i in range(self.wm_node): - for j in range(i + 1, self.wm_node): - if edge_idx < len(edge_probs) and edge_probs[edge_idx] > edge_threshold: - edges_src.extend([i, j]) - edges_dst.extend([j, i]) - edge_idx += 1 - - if len(edges_src) == 0: - edges_src = [0, 1] - edges_dst = [1, 0] - - trigger_graph = dgl.graph((edges_src, edges_dst), num_nodes=self.wm_node) - trigger_graph = trigger_graph.to(device) - - trigger_graph.ndata['feat'] = trigger_features.detach() - trigger_graph.ndata['label'] = torch.full((self.wm_node,), self.target_label, - dtype=torch.long, device=device) - trigger_graph.ndata['train_mask'] = torch.ones(self.wm_node, dtype=torch.bool, device=device) - trigger_graph.ndata['test_mask'] = torch.ones(self.wm_node, dtype=torch.bool, device=device) - - trigger_graph = dgl.add_self_loop(trigger_graph) - - return trigger_graph - - def _construct_backdoor_graph(self, clean_graph, trigger_graph, V_p): - """ - Construct a backdoor graph by combining clean graph with trigger graph. - Merges the original graph with the watermark trigger graph by adding - connections between poisoning nodes and trigger nodes. 
- - Parameters - ---------- - clean_graph : dgl.DGLGraph - Original clean graph - trigger_graph : dgl.DGLGraph - Generated trigger/watermark graph - V_p : torch.Tensor - Poisoning node indices for connection - - Returns - ------- - dgl.DGLGraph - Combined backdoor graph with embedded watermark - """ - clean_adj = clean_graph.adj().to_dense() - trigger_adj = trigger_graph.adj().to_dense() - - clean_features = clean_graph.ndata['feat'] - trigger_features = trigger_graph.ndata['feat'] - - clean_labels = clean_graph.ndata['label'] - trigger_labels = trigger_graph.ndata['label'] - - n_clean = clean_graph.num_nodes() - n_trigger = trigger_graph.num_nodes() - - A_I = torch.zeros(n_trigger, n_clean, device=device) - - for i in range(n_trigger): - for j in V_p: - if torch.rand(1) > 0.7: - A_I[i, j] = 1 - - top_row = torch.cat([clean_adj, A_I.t()], dim=1) - bottom_row = torch.cat([A_I, trigger_adj], dim=1) - backdoor_adj = torch.cat([top_row, bottom_row], dim=0) - - backdoor_features = torch.cat([clean_features, trigger_features], dim=0) - backdoor_labels = torch.cat([clean_labels, trigger_labels], dim=0) - - edges_src, edges_dst = torch.nonzero(backdoor_adj, as_tuple=True) - backdoor_graph = dgl.graph((edges_src, edges_dst), num_nodes=n_clean + n_trigger) - backdoor_graph = backdoor_graph.to(device) - - backdoor_graph.ndata['feat'] = backdoor_features - backdoor_graph.ndata['label'] = backdoor_labels - - clean_train_mask = clean_graph.ndata['train_mask'] - clean_test_mask = clean_graph.ndata['test_mask'] - - trigger_train_mask = torch.ones(n_trigger, dtype=torch.bool, device=device) - trigger_test_mask = torch.ones(n_trigger, dtype=torch.bool, device=device) - - backdoor_graph.ndata['train_mask'] = torch.cat([clean_train_mask, trigger_train_mask]) - backdoor_graph.ndata['test_mask'] = torch.cat([clean_test_mask, trigger_test_mask]) - - backdoor_graph = dgl.add_self_loop(backdoor_graph) - - return backdoor_graph - - def _calculate_imperception_loss(self, trigger_features, 
V_p): - """ - Calculate imperception loss to make watermark features similar to clean features. - Measures cosine similarity between trigger features and poisoning node features - to ensure the watermark remains hidden. - - Parameters - ---------- - trigger_features : torch.Tensor - Generated trigger node features - V_p : torch.Tensor - Poisoning node indices - - Returns - ------- - torch.Tensor - Imperception loss value - """ - if len(V_p) == 0: - return torch.tensor(0.0, device=device) - - poisoning_features = self.features[V_p] - total_similarity = 0 - count = 0 - - for i, trigger_feat in enumerate(trigger_features): - for poison_feat in poisoning_features: - similarity = F.cosine_similarity(trigger_feat.unsqueeze(0), poison_feat.unsqueeze(0)) - total_similarity += similarity - count += 1 - - return -total_similarity / count if count > 0 else torch.tensor(0.0, device=device) - - def _calculate_regulation_loss(self, trigger_features): - """ - Calculate regulation loss based on owner ID signature. - Enforces the trigger features to embed owner identification information - using cross-entropy loss with the owner ID as target. - - Parameters - ---------- - trigger_features : torch.Tensor - Generated trigger node features - - Returns - ------- - torch.Tensor - Regulation loss value - """ - total_loss = 0 - for trigger_feat in trigger_features: - loss = -(self.owner_id * torch.log(trigger_feat + 1e-8) + - (1 - self.owner_id) * torch.log(1 - trigger_feat + 1e-8)) - total_loss += loss.mean() - - return total_loss / len(trigger_features) - - def _calculate_trigger_loss(self, f_theta, trigger_features, trigger_graph): - """ - Calculate trigger loss for watermark effectiveness. - Measures how well the model classifies trigger nodes to the target label, - ensuring the watermark functions correctly. 
- - Parameters - ---------- - f_theta : torch.nn.Module - Main classification model - trigger_features : torch.Tensor - Generated trigger node features - trigger_graph : dgl.DGLGraph - Trigger graph structure - - Returns - ------- - torch.Tensor - Trigger loss value - """ - f_theta.eval() - - sampler = NeighborSampler([5, 5]) - trigger_nids = torch.arange(trigger_graph.number_of_nodes(), device=device) - collator = NodeCollator(trigger_graph, trigger_nids, sampler) - - dataloader = DataLoader( - collator.dataset, batch_size=self.wm_node, - shuffle=False, collate_fn=collator.collate, drop_last=False - ) - - total_loss = 0 - count = 0 - - with torch.no_grad(): - for _, _, blocks in dataloader: - blocks = [b.to(device) for b in blocks] - input_features = blocks[0].srcdata['feat'] - - logits = f_theta(blocks, input_features) - probs = F.softmax(logits, dim=1) - - target_probs = probs[:, self.target_label] - loss = -torch.log(target_probs + 1e-8).mean() - total_loss += loss - count += 1 - break - - return total_loss / count if count > 0 else torch.tensor(0.0, device=device) - - def _calculate_generation_loss_integrated(self, f_theta_s, f_g, V_p): - """ - Calculate integrated generation loss combining all generator objectives. - Combines imperception, regulation, and trigger losses with respective weights - to optimize the trigger generator network. 
- - Parameters - ---------- - f_theta_s : torch.nn.Module - Current state of the main model - f_g : torch.nn.Module - Trigger generator network - V_p : torch.Tensor - Poisoning node indices - - Returns - ------- - torch.Tensor - Combined generation loss - """ - f_g.train() - f_theta_s.eval() - - trigger_features, edge_probs = f_g(self.features, V_p) - - temp_trigger_graph = self._create_temp_trigger_graph(trigger_features, edge_probs) - - L_imperception = self._calculate_imperception_loss(trigger_features, V_p) - L_regulation = self._calculate_regulation_loss(trigger_features) - L_trigger = self._calculate_trigger_loss(f_theta_s, trigger_features, temp_trigger_graph) - - L_g = (self.epsilon1 * L_imperception + - self.epsilon2 * L_regulation + - self.epsilon3 * L_trigger) - - return L_g - - def _create_temp_trigger_graph(self, trigger_features, edge_probs): - """ - Create a temporary trigger graph for loss calculation. - Constructs a temporary graph structure using generated features and edge - probabilities for intermediate computations. 
- - Parameters - ---------- - trigger_features : torch.Tensor - Generated trigger node features - edge_probs : torch.Tensor - Edge existence probabilities - - Returns - ------- - dgl.DGLGraph - Temporary trigger graph - """ - edge_threshold = 0.5 - edges_src, edges_dst = [], [] - edge_idx = 0 - - for i in range(self.wm_node): - for j in range(i + 1, self.wm_node): - if edge_idx < len(edge_probs) and edge_probs[edge_idx] > edge_threshold: - edges_src.extend([i, j]) - edges_dst.extend([j, i]) - edge_idx += 1 - - if len(edges_src) == 0: - edges_src = [0, 1] - edges_dst = [1, 0] - - temp_graph = dgl.graph((edges_src, edges_dst), num_nodes=self.wm_node) - temp_graph = temp_graph.to(device) - temp_graph.ndata['feat'] = trigger_features - temp_graph.ndata['label'] = torch.full((self.wm_node,), self.target_label, - dtype=torch.long, device=device) - temp_graph = dgl.add_self_loop(temp_graph) - - return temp_graph - - def _calculate_embedding_loss(self, f_theta, backdoor_graph): - """ - Calculate embedding loss for model training on backdoor graph. - Computes cross-entropy loss for training the main model on the combined - clean and trigger graph data. 
- - Parameters - ---------- - f_theta : torch.nn.Module - Main classification model - backdoor_graph : dgl.DGLGraph - Combined graph with embedded watermark - - Returns - ------- - torch.Tensor - Embedding loss value - """ - f_theta.train() - backdoor_train_nids = backdoor_graph.ndata['train_mask'].nonzero(as_tuple=True)[0].to(device) - sampler = NeighborSampler([5, 5]) - collator = NodeCollator(backdoor_graph, backdoor_train_nids, sampler) - dataloader = DataLoader( - collator.dataset, batch_size=32, shuffle=True, - collate_fn=collator.collate, drop_last=False - ) - - total_loss = 0 - count = 0 - - for _, _, blocks in dataloader: - blocks = [b.to(device) for b in blocks] - input_features = blocks[0].srcdata['feat'] - output_labels = blocks[-1].dstdata['label'] - - output_predictions = f_theta(blocks, input_features) - loss = F.cross_entropy(output_predictions, output_labels) - total_loss += loss - count += 1 - - if count >= 10: - break - - return total_loss / count if count > 0 else torch.tensor(0.0, device=device) - - def _inner_optimization(self, f_theta, f_g, V_p, optimizer): - """ - Execute the watermark embedding phase of bilevel optimization. - Performs M iterations of model training on the backdoor graph to embed - the watermark into the model parameters. 
- - Parameters - ---------- - f_theta : torch.nn.Module - Main classification model - f_g : torch.nn.Module - Trigger generator network - V_p : torch.Tensor - Poisoning node indices - optimizer : torch.optim.Optimizer - Optimizer for model parameters - - Returns - ------- - torch.nn.Module - Updated model with embedded watermark - """ - trigger_graph = self._generate_trigger_graph(f_g, V_p) - backdoor_graph = self._construct_backdoor_graph(self.graph, trigger_graph, V_p) - - for t in range(self.M): - L_embed = self._calculate_embedding_loss(f_theta, backdoor_graph) - - optimizer.zero_grad() - L_embed.backward() - optimizer.step() - - return f_theta - - def defend(self): - """ - Execute the complete watermark defense strategy. - Trains target model, applies watermark defense, and verifies ownership. - Returns comprehensive evaluation metrics and ownership verification results. - - Returns - ------- - dict - Dictionary containing attack metrics, defense metrics, ownership - verification status, and trained generator - """ - attack_model = self._train_target_model() - - sampler = NeighborSampler([5, 5]) - test_nids = self.test_mask.nonzero(as_tuple=True)[0].to(device) - test_collator = NodeCollator(self.graph, test_nids, sampler) - test_dataloader = DataLoader( - test_collator.dataset, batch_size=32, shuffle=False, - collate_fn=test_collator.collate, drop_last=False - ) - - attack_acc, attack_prec, attack_rec, attack_f1 = self._evaluate_with_metrics(attack_model, test_dataloader) - print("Target Model Metrics:") - print(f" Accuracy : {attack_acc * 100:.2f}%") - print(f" Precision: {attack_prec * 100:.2f}%") - print(f" Recall : {attack_rec * 100:.2f}%") - print(f" F1 Score : {attack_f1 * 100:.2f}%") - - defense_model, generator = self._train_defense_model() - - defense_acc, defense_prec, defense_rec, defense_f1 = self._evaluate_with_metrics(defense_model, test_dataloader) - print("Defense Model Metrics:") - print(f" Accuracy : {defense_acc * 100:.2f}%") - print(f" 
Precision: {defense_prec * 100:.2f}%") - print(f" Recall : {defense_rec * 100:.2f}%") - print(f" F1 Score : {defense_f1 * 100:.2f}%") - - is_owner, ownership_acc = self.verify_ownership(defense_model) - print(f"\nOwnership Verification: {is_owner}, Watermark Accuracy: {ownership_acc * 100:.2f}%") - - return { - "attack_accuracy": attack_acc, - "attack_precision": attack_prec, - "attack_recall": attack_rec, - "attack_f1": attack_f1, - "defense_accuracy": defense_acc, - "defense_precision": defense_prec, - "defense_recall": defense_rec, - "defense_f1": defense_f1, - "ownership_verified": is_owner, - "ownership_accuracy": ownership_acc, - "generator": generator - } - - def _train_target_model(self): - """ - Train the target model on clean graph data. - Creates and trains a GraphSAGE model on the original dataset without - any watermark or defense mechanisms. - - Returns - ------- - torch.nn.Module - Trained target model - """ - model = GraphSAGE(in_channels=self.feature_number, - hidden_channels=128, - out_channels=self.label_number) - model = model.to(device) - optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=5e-4) - - sampler = NeighborSampler([5, 5]) - train_nids = self.train_mask.nonzero(as_tuple=True)[0].to(device) - test_nids = self.test_mask.nonzero(as_tuple=True)[0].to(device) - - train_collator = NodeCollator(self.graph, train_nids, sampler) - test_collator = NodeCollator(self.graph, test_nids, sampler) - - train_dataloader = DataLoader( - train_collator.dataset, batch_size=32, shuffle=True, - collate_fn=train_collator.collate, drop_last=False - ) - - test_dataloader = DataLoader( - test_collator.dataset, batch_size=32, shuffle=False, - collate_fn=test_collator.collate, drop_last=False - ) - - for epoch in tqdm(range(1, 51), desc="========== Training Target Model =========="): - model.train() - for _, _, blocks in train_dataloader: - blocks = [b.to(device) for b in blocks] - input_features = blocks[0].srcdata['feat'] - output_labels = 
blocks[-1].dstdata['label'] - - optimizer.zero_grad() - output_predictions = model(blocks, input_features) - loss = F.cross_entropy(output_predictions, output_labels) - loss.backward() - optimizer.step() - - return model - - def _train_defense_model(self): - """ - Train the defense model with watermark embedding using bilevel optimization. - Implements the complete bilevel optimization process alternating between - watermark embedding and trigger generation phases. - - Returns - ------- - tuple - (trained_defense_model, trigger_generator) - """ - - f_theta = GraphSAGE(in_channels=self.feature_number, - hidden_channels=128, - out_channels=self.label_number).to(device) - - f_g = TriggerGenerator(feature_dim=self.feature_number, - output_nodes=self.wm_node).to(device) - print("\n========== Training Defense Model ==========") - # High confidence nodes from the target model will be used as trigger - print("Retraining the target model to select poisoning nodes") - clean_model = self._train_target_model() - V_p = self._select_poisoning_nodes(clean_model) - - theta_optimizer = torch.optim.Adam(f_theta.parameters(), lr=self.beta, weight_decay=5e-4) - g_optimizer = torch.optim.Adam(f_g.parameters(), lr=0.001, weight_decay=5e-4) - - for i in tqdm(range(self.N), desc="Starting BiLevelOptimization Process"): - f_theta = self._inner_optimization(f_theta, f_g, V_p, theta_optimizer) - - f_theta_s = f_theta - - L_g = self._calculate_generation_loss_integrated(f_theta_s, f_g, V_p) - - g_optimizer.zero_grad() - L_g.backward() - g_optimizer.step() - - self.watermark_graph = self._generate_trigger_graph(f_g, V_p) - self.poisoning_nodes = V_p - - return f_theta, f_g - - def _evaluate_with_metrics(self, model, dataloader): - """ - Evaluate model performance using multiple classification metrics. 
- - Parameters - ---------- - model : torch.nn.Module - The neural network model to evaluate - dataloader : torch.utils.data.DataLoader - DataLoader containing evaluation data - - Returns - ------- - tuple of float - (accuracy, precision, recall, f1_score) metrics - """ - model.eval() - all_preds = [] - all_labels = [] - - with torch.no_grad(): - for _, _, blocks in dataloader: - blocks = [b.to(device) for b in blocks] - input_features = blocks[0].srcdata['feat'] - output_labels = blocks[-1].dstdata['label'] - output_predictions = model(blocks, input_features) - pred = output_predictions.argmax(dim=1) - - all_preds.extend(pred.cpu().numpy()) - all_labels.extend(output_labels.cpu().numpy()) - - if len(all_preds) == 0: - return 0.0, 0.0, 0.0, 0.0 - - accuracy = accuracy_score(all_labels, all_preds) - precision = precision_score(all_labels, all_preds, average='weighted', zero_division=0) - recall = recall_score(all_labels, all_preds, average='weighted', zero_division=0) - f1 = f1_score(all_labels, all_preds, average='weighted', zero_division=0) - - return accuracy, precision, recall, f1 - - def verify_ownership(self, suspicious_model): - """ - Verify ownership of a suspicious model using the watermark. - Tests if the suspicious model correctly classifies the watermark trigger - graph to determine if it contains the embedded watermark. - - Parameters - ---------- - suspicious_model : torch.nn.Module - Model to test for ownership verification - - Returns - ------- - tuple - (is_owner: bool, ownership_accuracy: float) - """ - if not hasattr(self, 'watermark_graph'): - return False, 0.0 - - G_key_p = self.watermark_graph - acc, _, _, _ = self._evaluate_model_on_graph(suspicious_model, G_key_p) - - is_owner = acc > self.T_acc - return is_owner, acc - - def _evaluate_model_on_graph(self, model, graph): - """ - Evaluate model performance on a specific graph. 
- Computes classification metrics for the given model on the provided graph, - handling different model architectures appropriately. - - Parameters - ---------- - model : torch.nn.Module - Model to evaluate - graph : dgl.DGLGraph - Graph data for evaluation - - Returns - ------- - tuple of float - (accuracy, precision, recall, f1_score) metrics - """ - model_name = model.__class__.__name__ - - if model_name == 'GraphSAGE': - sampler = NeighborSampler([5, 5]) - trigger_nids = torch.arange(graph.number_of_nodes(), device=device) - trigger_collator = NodeCollator(graph, trigger_nids, sampler) - - trigger_dataloader = DataLoader( - trigger_collator.dataset, batch_size=graph.number_of_nodes(), - shuffle=False, collate_fn=trigger_collator.collate, drop_last=False - ) - - return self._evaluate_with_metrics(model, trigger_dataloader) - - else: - return 0.0, 0.0, 0.0, 0.0 diff --git a/pygip/models/defense/Integrity.py b/pygip/models/defense/Integrity.py deleted file mode 100644 index c97a2171..00000000 --- a/pygip/models/defense/Integrity.py +++ /dev/null @@ -1,1227 +0,0 @@ -import copy -import random -import time - -import dgl -import torch -import numpy as np -import torch.optim as optim -from tqdm import tqdm -from torch.optim import Adam -import torch.nn.functional as F -from torch_geometric.utils import to_networkx, from_networkx - -from .base import BaseDefense -from pygip.models.nn import GCN -from pygip.utils.metrics import DefenseCompMetric, DefenseMetric - - -class QueryBasedVerificationDefense(BaseDefense): - supported_api_types = {"dgl"} - supported_datasets = {} - - def __init__(self, dataset, defense_ratio=0.1, model_path=None): - super().__init__(dataset, defense_ratio) - self.model = None - self.defense_ratio = defense_ratio - self.graph_data = dataset.graph_data - # compute related parameters - self.k = max(1, int(dataset.num_nodes * defense_ratio)) - self.model_path = model_path - - def defend(self, fingerprint_mode='inductive', knowledge='full', 
attack_type='bitflip', - k=5, num_trials=1, use_edge_perturbation=False, verbose=True, **kwargs): - """ - Execute the query-based verification defense. - """ - metric_comp = DefenseCompMetric() - metric_comp.start() - print("====================Query Based Verification Defense====================") - - # If model wasn't trained yet, train it - if not hasattr(self, 'model_trained'): - self.train_target_model(metric_comp) - - # Generate fingerprints and evaluate the defended model - fingerprints = self._generate_fingerprints( - self.model, mode=fingerprint_mode, knowledge=knowledge, k=k, - perturb_fingerprints=use_edge_perturbation, - perturb_budget=kwargs.get('perturb_budget', 5), **kwargs - ) - - preds = self.evaluate_model(self.model, self.dataset) - inference_s = time.time() - detection_results = self.verify_defense(self.model, fingerprints, attack_type, **kwargs) - inference_e = time.time() - - # metric - metric = DefenseMetric() - labels = self.dataset.graph_data.ndata['label'][self.dataset.graph_data.ndata['test_mask']] - metric.update(preds, labels) - - # Convert detection results to binary format - detection_preds, detection_targets = self._convert_detection_to_binary(detection_results) - metric.update_wm(detection_preds, detection_targets) - metric_comp.end() - - print("====================Final Results====================") - res = metric.compute() - metric_comp.update(inference_defense_time=(inference_e - inference_s)) - res_comp = metric_comp.compute() - - return res, res_comp - - def train_target_model(self, metric_comp: DefenseCompMetric): - """Train the target model with defense mechanism.""" - defense_s = time.time() - - # Training and fingerprint generation (defense mechanism) - self.model = self._train_target_model() - self.model_trained = True - - defense_e = time.time() - metric_comp.update(defense_time=(defense_e - defense_s)) - return self.model - - def evaluate_model(self, model, dataset): - """Evaluate model performance on downstream task""" 
- model.eval() - features = self._get_features().to(self.device) - labels = dataset.graph_data.ndata['label'].to(self.device) - test_mask = dataset.graph_data.ndata['test_mask'] - - with torch.no_grad(): - logits = model(dataset.graph_data.to(self.device), features) - preds = logits.argmax(dim=1)[test_mask].cpu() - return preds - - def verify_defense(self, model, fingerprints, attack_type, **kwargs): - """Verify defense effectiveness by running attack and checking fingerprints""" - # Run attack - poisoned_model, attack_info = self._run_attack( - model, attack_type=attack_type, **kwargs - ) - - # Evaluate fingerprints - flipped_info = self._evaluate_fingerprints(poisoned_model, fingerprints) - - return { - 'flip_rate': flipped_info['flip_rate'], - 'flipped_fingerprints': flipped_info['flipped'], - 'total_fingerprints': len(fingerprints) - } - - @staticmethod - def _convert_detection_to_binary(detection_results): - """Convert detection results to binary classification format""" - total = detection_results['total_fingerprints'] - flipped = len(detection_results['flipped_fingerprints']) - - # Create binary predictions: 1 for attack detected, 0 for no attack - detection_preds = torch.tensor([1 if flipped > 0 else 0]) - # In this case, we assume attack was actually performed, so target is 1 - detection_targets = torch.tensor([1]) - - return detection_preds, detection_targets - - def _get_features(self): - return self.graph_data.ndata['feat'] if hasattr(self.graph_data, 'ndata') else self.graph_data.x - - def _train_target_model(self, epochs=200): - """ - Trains target GCN model according to protocol in - Wu et al. (2023), Section 6.1 for graph node classification. - - Returns - ------- - model : torch.nn.Module - The trained GCN model. 
- """ - model = GCN( - feature_number=self.dataset.num_features, - label_number=self.dataset.num_classes - ).to(self.device) - print(f"Training target model on device: {self.device} ...") - - optimizer = Adam(model.parameters(), lr=0.02) - loss_fn = torch.nn.NLLLoss() - - features = self._get_features().to(self.device) - labels = self.dataset.graph_data.ndata['label'].to(self.device) - train_mask = self.dataset.graph_data.ndata['train_mask'].to(self.device) - val_mask = getattr(self.dataset.graph_data.ndata, "val_mask", None) - if val_mask is None: - val_mask = self.dataset.graph_data.ndata['test_mask'] - val_mask = val_mask.to(self.device) - - for epoch in range(epochs): - model.train() - logits = model(self.graph_data.to(self.device), features) - log_probs = F.log_softmax(logits, dim=1) - loss = loss_fn(log_probs[train_mask], labels[train_mask]) - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - if (epoch + 1) % 10 == 0 or epoch == 0: - model.eval() - with torch.no_grad(): - val_logits = model(self.graph_data.to(self.device), features) - val_log_probs = F.log_softmax(val_logits, dim=1) - val_pred = val_log_probs[val_mask].max(1)[1] - val_acc = (val_pred == labels[val_mask]).float().mean().item() - print(f"Epoch {epoch + 1}: Loss={loss.item():.4f} | Val Acc={val_acc:.4f}") - - return model - - def _load_model(self, model_path): - model = GCN( - in_feats=self.dataset.feature_number, - hidden_feats=16, - out_feats=self.dataset.label_number - ) - model.load_state_dict(torch.load(model_path)) - return model - - def _generate_fingerprints(self, model, mode='transductive', knowledge='full', k=5, **kwargs): - """ - Wrapper for fingerprint generation based on mode and knowledge level. 
- Returns: - List of fingerprints - """ - if mode == 'transductive': - generator = TransductiveFingerprintGenerator( - model=model, - dataset=self.dataset, - candidate_fraction=kwargs.get('candidate_fraction', 1.0), - random_seed=kwargs.get('random_seed', None), - device=self.device, - randomize=kwargs.get('randomize', True), - ) - fingerprints = generator.generate_fingerprints(k=k, method=knowledge) - - unified_fingerprints = [(self.graph_data, node_id, label) for (node_id, label) in fingerprints] - - elif mode == 'inductive': - generator = InductiveFingerprintGenerator( - model=model, - dataset=self.dataset, - shadow_graph=self.dataset.graph_data, - knowledge=knowledge, - candidate_fraction=kwargs.get('candidate_fraction', 0.3), - num_fingerprints=k, - randomize=kwargs.get('randomize', True), - random_seed=kwargs.get('random_seed', None), - device=self.device, - perturb_fingerprints=kwargs.get('perturb_fingerprints', False), - perturb_budget=kwargs.get('perturb_budget', 5), - ) - fingerprints = generator.generate_fingerprints(method=knowledge) - if kwargs.get('perturb_fingerprints', False): - for i, (graph, node_idx, label) in enumerate(fingerprints): - generator.shadow_graph = graph - generator.greedy_edge_perturbation( - node_idx=node_idx, - perturb_budget=kwargs.get('perturb_budget', 5), - knowledge=knowledge - ) - fingerprints[i] = (generator.shadow_graph, node_idx, label) - - unified_fingerprints = fingerprints - - else: - raise ValueError("Unknown fingerprinting mode. Use 'transductive' or 'inductive'.") - - return unified_fingerprints - - def _evaluate_fingerprints(self, model, fingerprints): - """ - Checks if fingerprinted nodes have changed labels under the given model. - - Args: - model: The model to evaluate. - fingerprints: List of (graph, node_id, label) tuples. 
- - Returns: - results: { - 'flipped': List[Tuple[node_id, old_label, new_label]], - 'flip_rate': float - } - """ - model.eval() - flipped = [] - - with torch.no_grad(): - for graph, node_id, expected_label in fingerprints: - x = graph.ndata['feat'] if hasattr(graph, 'ndata') else graph.x - logits = model(graph.to(self.device), x.to(self.device)) - pred = logits[node_id].argmax().item() - if pred != expected_label: - flipped.append((node_id, expected_label, pred)) - - return { - 'flipped': flipped, - 'flip_rate': len(flipped) / len(fingerprints) if fingerprints else 0.0 - } - - def _run_attack(self, model, attack_type='mettack', knowledge='full', **kwargs): - """ - Run the specified attack on the model. - Returns: - poisoned_model: torch.nn.Module - metadata: dict with info about the attack - """ - if attack_type == 'bitflip': - bit = kwargs.get('bit', 30) - bfa_variant = kwargs.get('bfa_variant', 'BFA') - attacker = BitFlipAttack(model, attack_type=bfa_variant, bit=bit) - attack_info = attacker.apply() - return model, attack_info - - elif attack_type == 'random': - perturbed_graph = self._random_edge_addition_poisoning( - node_fraction=kwargs.get('node_fraction', 0.1), - edges_per_node=kwargs.get('edges_per_node', 5), - random_seed=kwargs.get('random_seed', None), - ) - poisoned_model = self._retrain_poisoned_model( - poisoned_graph=perturbed_graph, - epochs=kwargs.get('epochs', 200), - ) - return poisoned_model, {'type': 'random_poison', 'graph': perturbed_graph} - - elif attack_type == 'mettack': - num_edges = self.graph_data.num_edges() - poison_frac = kwargs.get('poison_frac', 0.05) - n_perturbations = int(poison_frac * num_edges) - - helper = MettackHelper( - graph=self.graph_data, - features=self._get_features(), - labels=self.dataset.labels, - train_mask=self.dataset.train_mask, - val_mask=getattr(self.dataset, 'val_mask', None), - test_mask=self.dataset.test_mask, - n_perturbations=n_perturbations, - device=self.device, - 
max_perturbations=kwargs.get('max_perturbations', 50), - surrogate_epochs=kwargs.get('surrogate_epochs', 30), - candidate_sample_size=kwargs.get('candidate_sample_size', 20), - ) - poisoned_graph, attack_metrics = helper.run() - poisoned_model = self._retrain_poisoned_model( - poisoned_graph=poisoned_graph, - epochs=kwargs.get('epochs', 200), - ) - return poisoned_model, {'type': 'mettack', 'metrics': attack_metrics, 'graph': poisoned_graph} - - else: - raise ValueError(f"Unsupported attack_type: {attack_type}") - - def _random_edge_addition_poisoning(self, node_fraction=0.1, edges_per_node=5, random_seed=None): - """ - Poison a fraction of nodes by adding random edges. - - Args: - dataset: Dataset object (DGL-based) - node_fraction: Fraction of nodes to poison (e.g., 0.1 = 10%) - edges_per_node: Number of random edges to add per poisoned node - random_seed: Optional seed - - Returns: - poisoned_graph: DGLGraph - """ - if random_seed is not None: - random.seed(random_seed) - torch.manual_seed(random_seed) - - poisoned_graph = copy.deepcopy(self.graph_data) - num_nodes = poisoned_graph.num_nodes() - num_poisoned_nodes = int(node_fraction * num_nodes) - poisoned_nodes = random.sample(range(num_nodes), num_poisoned_nodes) - - new_edges = [] - - for src in poisoned_nodes: - for _ in range(edges_per_node): - dst = random.randint(0, num_nodes - 1) - if src != dst and \ - not poisoned_graph.has_edges_between(src, dst) and \ - not poisoned_graph.has_edges_between(dst, src): - new_edges.append((src, dst)) - new_edges.append((dst, src)) - - if new_edges: - src, dst = zip(*new_edges) - poisoned_graph.add_edges(src, dst) - - return poisoned_graph - - def _retrain_poisoned_model(self, poisoned_graph, epochs=200): - """ - Retrain target GCN using the poisoned graph structure. 
- - Args: - dataset: Original Dataset object (provides features, labels, masks) - poisoned_graph: DGLGraph (with new random edges added) - defense_class: The defense class to use for model training (e.g., QueryBasedVerificationDefense) - device: 'cpu' or 'cuda' - - Returns: - model: Trained GCN model - """ - dataset_poisoned = copy.deepcopy(self.dataset) - dataset_poisoned.graph_data = poisoned_graph - - defense = QueryBasedVerificationDefense(dataset=dataset_poisoned, defense_ratio=0.1) - model = defense._train_target_model(epochs=epochs) - return model - - def _evaluate_accuracy(self, model, dataset): - """ - Evaluates test accuracy of the given model on the dataset. - - Args: - model: Trained GCN model - dataset: Dataset object (provides features, labels, test_mask, graph) - - Returns: - accuracy: float (test accuracy, 0-1) - """ - model.eval() - features = self._get_features().to(self.device) - labels = dataset.graph_data.ndata['label'].to(self.device) - test_mask = dataset.graph_data.ndata['test_mask'] - - with torch.no_grad(): - logits = model(dataset.graph_data.to(self.device), features) - pred = logits.argmax(dim=1) - correct = (pred[test_mask] == labels[test_mask]).float() - accuracy = correct.sum().item() / test_mask.sum().item() - return accuracy - - def run_full_pipeline(self, attack_type='random', mode='transductive', knowledge='full', k=5, trials=1, **kwargs): - """ - Runs the full fingerprinting + attack + evaluation pipeline. - - Parameters: - attack_type: 'random', 'bitflip', or 'mettack' - mode: 'transductive' or 'inductive' - knowledge: 'full' or 'limited' - k: number of fingerprints - trials: number of repeated trials - kwargs: extra params for attack or fingerprinting - - Prints per-trial results and summary statistics. 
- """ - flip_rates = [] - acc_drops = [] - - for trial in range(trials): - print(f"\n=== Trial {trial + 1}/{trials} ===") - - model_clean = self._train_target_model() - acc_clean = self._evaluate_accuracy(model_clean, self.dataset) - print(f"Clean model accuracy: {acc_clean:.4f}") - - fingerprints = self._generate_fingerprints(model_clean, mode=mode, knowledge=knowledge, k=k, **kwargs) - - model_poisoned, attack_meta = self._run_attack(model_clean, attack_type=attack_type, knowledge=knowledge, - **kwargs) - acc_poisoned = self._evaluate_accuracy(model_poisoned, self.dataset) - print(f"Poisoned model accuracy: {acc_poisoned:.4f}") - - eval_result = self._evaluate_fingerprints(model_poisoned, fingerprints) - flip_rate = eval_result['flip_rate'] - print(f"Fingerprint flip rate: {flip_rate:.4f}") - for (nid, old, new) in eval_result['flipped']: - print(f" Node {nid}: {old} → {new}") - - flip_rates.append(flip_rate) - acc_drops.append(acc_clean - acc_poisoned) - - print("\n=== Summary ===") - print(f"Avg Accuracy Drop: {np.mean(acc_drops):.4f}") - print(f"Avg Fingerprint Flip Rate: {np.mean(flip_rates):.4f}") - - -class TransductiveFingerprintGenerator: - def __init__(self, model, dataset, candidate_fraction=0.3, random_seed=None, device='cpu', randomize=True): - self.device = torch.device(device) - self.model = model.to(self.device) - self.dataset = dataset - self.graph_data = dataset.graph_data - self.candidate_fraction = candidate_fraction - self.random_seed = random_seed - self.randomize = randomize - - def _get_features(self): - """Backend-agnostic feature getter (DGL or PyG).""" - return self.graph_data.ndata['feat'] if hasattr(self.graph_data, 'ndata') else self.graph_data.x - - def get_candidate_nodes(self): - """Randomly sample a subset of nodes as candidates.""" - all_nodes = torch.arange(self.graph_data.num_nodes()) - num_candidates = max(1, int(len(all_nodes) * self.candidate_fraction)) - - if self.randomize and self.candidate_fraction < 1.0: - generator = 
torch.Generator(device=self.device) - if self.random_seed is not None: - generator.manual_seed(self.random_seed) - idx = torch.randperm(len(all_nodes), generator=generator)[:num_candidates] - return all_nodes[idx] - return all_nodes - - def compute_fingerprint_scores_full(self, candidate_nodes): - """Full-knowledge fingerprint scores (gradient-based).""" - self.model.eval() - scores = [] - x = self._get_features().to(self.device) - logits = self.model(self.graph_data.to(self.device), x) - - for node in candidate_nodes: - self.model.zero_grad() - logit = logits[node] - label = logit.argmax().item() - loss = F.cross_entropy(logit.unsqueeze(0), torch.tensor([label], device=self.device)) - loss.backward(retain_graph=True) - grad_norm = sum((p.grad ** 2).sum().item() for p in self.model.parameters() if p.grad is not None) - scores.append(grad_norm) - - return torch.tensor(scores, device=self.device) - - def compute_fingerprint_scores_limited(self, candidate_nodes): - """Limited-knowledge fingerprint scores (confidence margin).""" - self.model.eval() - x = self._get_features().to(self.device) - with torch.no_grad(): - logits = self.model(self.graph_data.to(self.device), x) - probs = F.softmax(logits, dim=1) - labels = probs.argmax(dim=1) - scores = 1.0 - probs[candidate_nodes, labels[candidate_nodes]] - return scores - - def select_top_fingerprints(self, scores, candidate_nodes, k, method='full'): - """Selects top-k fingerprint nodes after filtering out extreme score outliers.""" - q = 0.99 if method == 'full' else 1.0 - threshold = torch.quantile(scores, q) - mask = scores <= threshold - - filtered_scores = scores[mask] - filtered_candidates = candidate_nodes[mask] - - if filtered_scores.size(0) < k: - k = filtered_scores.size(0) - - topk = torch.topk(filtered_scores, k) - return filtered_candidates[topk.indices], topk.values - - def generate_fingerprints(self, k=5, method='full'): - candidate_nodes = self.get_candidate_nodes().to(self.device) - x = 
self._get_features().to(self.device) - - with torch.no_grad(): - logits = self.model(self.graph_data.to(self.device), x) - labels = logits.argmax(dim=1) - - if method == 'full': - scores = self.compute_fingerprint_scores_full(candidate_nodes) - elif method == 'limited': - scores = self.compute_fingerprint_scores_limited(candidate_nodes) - else: - raise ValueError("method must be 'full' or 'limited'") - - class_to_candidates = {} - for i, node in enumerate(candidate_nodes): - cls = int(labels[node]) - class_to_candidates.setdefault(cls, []).append((node.item(), scores[i].item())) - - rng = random.Random(self.random_seed) - class_list = list(class_to_candidates.keys()) - rng.shuffle(class_list) - - fingerprints = [] - for cls in class_list: - class_nodes = sorted(class_to_candidates[cls], key=lambda x: x[1], reverse=True) - top_node = class_nodes[0][0] - fingerprints.append((top_node, cls)) - if len(fingerprints) >= k: - break - - if len(fingerprints) < k: - fingerprint_nodes, _ = self.select_top_fingerprints(scores, candidate_nodes, k, method=method) - fingerprints = [(int(n), int(labels[n])) for n in fingerprint_nodes] - - return fingerprints - - -class InductiveFingerprintGenerator: - def __init__(self, model, dataset, shadow_graph=None, knowledge='limited', - candidate_fraction=0.3, num_fingerprints=5, - randomize=True, random_seed=None, device='cpu', - perturb_fingerprints=False, perturb_budget=5): - self.device = torch.device(device) - self.model = model.to(self.device) - self.dataset = dataset - self.shadow_graph = shadow_graph if shadow_graph is not None else dataset.graph_data - self.knowledge = knowledge - self.candidate_fraction = candidate_fraction - self.num_fingerprints = num_fingerprints - self.randomize = randomize - self.random_seed = random_seed - self.perturb_fingerprints = perturb_fingerprints - self.perturb_budget = perturb_budget - - if self.random_seed is not None: - torch.manual_seed(self.random_seed) - random.seed(self.random_seed) - - def 
_get_features(self): - return self.shadow_graph.ndata['feat'] if hasattr(self.shadow_graph, 'ndata') else self.shadow_graph.x - - def get_candidate_nodes(self): - all_nodes = torch.arange(self.shadow_graph.num_nodes()) - num_candidates = max(1, int(len(all_nodes) * self.candidate_fraction)) - - if self.randomize and self.candidate_fraction < 1.0: - generator = torch.Generator(device=self.device) - if self.random_seed is not None: - generator.manual_seed(self.random_seed) - idx = torch.randperm(len(all_nodes), generator=generator)[:num_candidates] - candidates = all_nodes[idx] - else: - candidates = all_nodes - - return candidates - - def compute_fingerprint_score(self, node_idx, graph_override=None): - """ - Computes the fingerprint score for a given node according to knowledge mode. - If graph_override is provided, scoring is done on that graph instead of shadow_graph. - """ - graph = graph_override if graph_override is not None else self.shadow_graph - x = (graph.ndata['feat'] if hasattr(graph, 'ndata') else graph.x).to(self.device) - self.model.eval() - - if self.knowledge == 'limited': - with torch.no_grad(): - logits = self.model(graph.to(self.device), x) - probs = torch.softmax(logits[node_idx], dim=0) - pred_class = probs.argmax().item() - return 1 - probs[pred_class].item() - - elif self.knowledge == 'full': - x.requires_grad_(True) - logits = self.model(graph.to(self.device), x) - pred = logits[node_idx] - label = pred.argmax().item() - - self.model.zero_grad() - loss = torch.nn.functional.nll_loss( - torch.log_softmax(pred.unsqueeze(0), dim=1), - torch.tensor([label], device=self.device) - ) - loss.backward(retain_graph=True) - - grad = x.grad[node_idx] - grad_norm_sq = (grad ** 2).sum().item() - x.requires_grad_(False) - x.grad = None - return grad_norm_sq - else: - raise ValueError("knowledge must be 'limited' or 'full'") - - def generate_fingerprint_nodes(self): - """ - Step 3: Identifies and returns the top-k (num_fingerprints) nodes with the highest 
- fingerprint scores from the candidate set. (Section 4.2.2) - - Returns: - List[int]: Indices of selected fingerprint nodes. - """ - candidates = self.get_candidate_nodes() - scores = [] - for idx in candidates: - score = self.compute_fingerprint_score(idx) - scores.append((score, int(idx))) - - scores.sort(reverse=True) - selected = [idx for (_, idx) in scores[:self.num_fingerprints]] - return selected - - def save_fingerprint_tuples(self, node_indices): - self.model.eval() - x = self._get_features().to(self.device) - with torch.no_grad(): - logits = self.model(self.shadow_graph.to(self.device), x) - labels = logits.argmax(dim=1).cpu().numpy() - return [(self.shadow_graph, int(idx), int(labels[idx])) for idx in node_indices] - - def generate_fingerprints(self, method='full'): - """ - Generate inductive fingerprints for model watermarking. - - Parameters: - method (str): 'full' for gradient-based or 'limited' for output-based - - Returns: - List of fingerprints - """ - if method == 'full': - return self._generate_full() - elif method == 'limited': - return self._generate_limited() - else: - raise ValueError(f"Invalid fingerprinting method: '{method}'") - - def _generate_full(self): - """ - Implements full knowledge fingerprint generation (gradient-based). - Based on Section 4.2.1 and 5.2 of Wu et al. (2023). - """ - self.knowledge = 'full' - print("[Fingerprint] Generating FULL knowledge fingerprints...") - fingerprint_nodes = self.generate_fingerprint_nodes() - - if self.perturb_fingerprints: - print("[Fingerprint] Applying greedy feature perturbation (FULL)...") - self.greedy_perturb_fingerprints(fingerprint_nodes) - - return self.save_fingerprint_tuples(fingerprint_nodes) - - def _generate_limited(self): - """ - Implements limited knowledge fingerprint generation (output-based). - Based on Section 4.2.2 and 5.2 of Wu et al. (2023). 
- """ - self.knowledge = 'limited' - print("[Fingerprint] Generating LIMITED knowledge fingerprints...") - fingerprint_nodes = self.generate_fingerprint_nodes() - - if self.perturb_fingerprints: - print("[Fingerprint] Applying greedy feature perturbation (LIMITED)...") - self.greedy_perturb_fingerprints(fingerprint_nodes) - - return self.save_fingerprint_tuples(fingerprint_nodes) - - def greedy_perturb_fingerprints(self, node_indices): - """ - Greedily perturbs each fingerprint node's features (not edges) to increase its - fingerprint score, without changing the predicted label. - - - For each node, for each feature dimension: - - Add or subtract a small epsilon. - - Accept change if predicted label stays the same and fingerprint score increases. - - Stop after perturb_budget attempts or no improvement. - - Returns: - List[int]: Indices of perturbed fingerprint nodes (features in shadow_graph are updated in-place). - """ - epsilon = 0.01 - features = self._get_features().clone().detach().to(self.device) - self.shadow_graph = self.shadow_graph.to(self.device) - - for idx in node_indices: - num_tries = 0 - improved = True - while num_tries < self.perturb_budget and improved: - improved = False - current_score = self.compute_fingerprint_score(idx, graph_override=self.shadow_graph) - - self.model.eval() - with torch.no_grad(): - logits = self.model(self.shadow_graph, features) - pred_label = logits[idx].argmax().item() - - original_features = features[idx].clone() - for dim in range(features.shape[1]): - for direction in [+1, -1]: - features[idx][dim] += direction * epsilon - - self.model.eval() - with torch.no_grad(): - logits_new = self.model(self.shadow_graph, features) - new_pred_label = logits_new[idx].argmax().item() - new_score = self.compute_fingerprint_score(idx, graph_override=self.shadow_graph) - - if new_pred_label == pred_label and new_score > current_score: - current_score = new_score - improved = True - num_tries += 1 - else: - features[idx][dim] = 
original_features[dim] - - if num_tries >= self.perturb_budget: - break - if num_tries >= self.perturb_budget: - break - - if hasattr(self.shadow_graph, 'ndata'): - self.shadow_graph.ndata['feat'] = features - else: - self.shadow_graph.x = features - return node_indices - - def greedy_edge_perturbation(self, node_idx, perturb_budget=5, knowledge='full'): - """ - Dispatch to greedy edge perturbation strategy based on verifier knowledge level. - - Args: - node_idx (int): Fingerprint node index. - perturb_budget (int): Number of edge perturbations allowed. - knowledge (str): 'full' or 'limited' - """ - if knowledge == 'full': - self._greedy_edge_perturbation_f(node_idx, perturb_budget) - elif knowledge == 'limited': - self._greedy_edge_perturbation_l(node_idx, perturb_budget) - else: - raise ValueError("knowledge must be 'full' or 'limited'") - - def _greedy_edge_perturbation_f(self, node_idx, perturb_budget): - """ - Full knowledge edge perturbation (Inductive-F). - Increases fingerprint score using model gradients while preserving prediction. 
- """ - - g_nx = to_networkx(self.shadow_graph.to('cpu'), to_undirected=True) - x = self._get_features().to(self.device) - self.model.eval() - - with torch.no_grad(): - original_pred = self.model(self.shadow_graph.to(self.device), x)[node_idx].argmax().item() - - def score_fn(modified_graph): - return self.compute_fingerprint_score(node_idx, graph_override=modified_graph) - - neighbors = list(g_nx.neighbors(node_idx)) - non_neighbors = list(set(range(self.shadow_graph.num_nodes())) - set(neighbors) - {node_idx}) - - applied = 0 - while applied < perturb_budget: - best_delta = 0 - best_graph = None - best_action = None - - for nbr in non_neighbors: - temp_g = copy.deepcopy(g_nx) - temp_g.add_edge(node_idx, nbr) - g_temp = from_networkx(temp_g).to(self.device) - with torch.no_grad(): - pred = self.model(g_temp, x)[node_idx].argmax().item() - if pred != original_pred: - continue - delta = score_fn(g_temp) - score_fn(self.shadow_graph) - if delta > best_delta: - best_delta = delta - best_graph = g_temp - best_action = ('add', nbr) - - for nbr in neighbors: - temp_g = copy.deepcopy(g_nx) - if temp_g.has_edge(node_idx, nbr): - temp_g.remove_edge(node_idx, nbr) - g_temp = from_networkx(temp_g).to(self.device) - with torch.no_grad(): - pred = self.model(g_temp, x)[node_idx].argmax().item() - if pred != original_pred: - continue - delta = score_fn(g_temp) - score_fn(self.shadow_graph) - if delta > best_delta: - best_delta = delta - best_graph = g_temp - best_action = ('remove', nbr) - - if best_graph is None: - break - self.shadow_graph = best_graph - g_nx = to_networkx(best_graph.to('cpu'), to_undirected=True) - - if best_action[0] == 'add': - non_neighbors.remove(best_action[1]) - neighbors.append(best_action[1]) - else: - neighbors.remove(best_action[1]) - non_neighbors.append(best_action[1]) - - applied += 1 - - def _greedy_edge_perturbation_l(self, node_idx, perturb_budget): - """ - Limited knowledge edge perturbation (Inductive-L). 
- Uses confidence margin (1 - confidence) as proxy for fingerprint sensitivity. - """ - - g_nx = to_networkx(self.shadow_graph.to('cpu'), to_undirected=True) - x = self._get_features().to(self.device) - self.model.eval() - - with torch.no_grad(): - logits = self.model(self.shadow_graph.to(self.device), x) - original_pred = logits[node_idx].argmax().item() - original_conf = F.softmax(logits[node_idx], dim=0)[original_pred].item() - original_score = 1 - original_conf - - def score_fn(modified_graph): - with torch.no_grad(): - logits = self.model(modified_graph.to(self.device), x) - pred = logits[node_idx].argmax().item() - if pred != original_pred: - return -1 - conf = F.softmax(logits[node_idx], dim=0)[pred].item() - return 1 - conf - - neighbors = list(g_nx.neighbors(node_idx)) - non_neighbors = list(set(range(self.shadow_graph.num_nodes())) - set(neighbors) - {node_idx}) - - applied = 0 - while applied < perturb_budget: - best_delta = 0 - best_graph = None - best_action = None - - for nbr in non_neighbors: - temp_g = copy.deepcopy(g_nx) - temp_g.add_edge(node_idx, nbr) - g_temp = from_networkx(temp_g).to(self.device) - new_score = score_fn(g_temp) - delta = new_score - original_score - if new_score >= 0 and delta > best_delta: - best_delta = delta - best_graph = g_temp - best_action = ('add', nbr) - - for nbr in neighbors: - temp_g = copy.deepcopy(g_nx) - if temp_g.has_edge(node_idx, nbr): - temp_g.remove_edge(node_idx, nbr) - g_temp = from_networkx(temp_g).to(self.device) - new_score = score_fn(g_temp) - delta = new_score - original_score - if new_score >= 0 and delta > best_delta: - best_delta = delta - best_graph = g_temp - best_action = ('remove', nbr) - - if best_graph is None: - break - self.shadow_graph = best_graph - g_nx = to_networkx(best_graph.to('cpu'), to_undirected=True) - - if best_action[0] == 'add': - non_neighbors.remove(best_action[1]) - neighbors.append(best_action[1]) - else: - neighbors.remove(best_action[1]) - 
non_neighbors.append(best_action[1]) - - applied += 1 - - -class BitFlipAttack: - def __init__(self, model, attack_type='random', bit=0): - self.model = model - self.attack_type = attack_type - self.bit = bit - - def _get_target_params(self): - params = [p for p in self.model.parameters() if p.requires_grad and p.numel() > 0] - if self.attack_type in ['random', 'BFA']: - return params - elif self.attack_type == 'BFA-F': - return [params[0]] - elif self.attack_type == 'BFA-L': - return [params[-1]] - else: - raise ValueError(f"Unknown attack_type {self.attack_type}") - - def _true_bit_flip(self, tensor, index=None, bit=0): - a = tensor.detach().cpu().numpy().copy() - flat = a.ravel() - if index is None: - index = np.random.randint(0, flat.size) - old_val = flat[index] - int_view = np.frombuffer(flat[index].tobytes(), dtype=np.uint32)[0] - int_view ^= (1 << bit) - new_val = np.frombuffer(np.uint32(int_view).tobytes(), dtype=np.float32)[0] - flat[index] = new_val - a = flat.reshape(a.shape) - tensor.data = torch.from_numpy(a).to(tensor.device) - return old_val, new_val, index - - def apply(self): - params = self._get_target_params() - with torch.no_grad(): - layer_idx = random.randrange(len(params)) - param = params[layer_idx] - idx = random.randrange(param.numel()) - old_val, new_val, actual_idx = self._true_bit_flip(param, index=idx, bit=self.bit) - return { - 'layer': layer_idx, - 'param_idx': actual_idx, - 'old_val': old_val, - 'new_val': new_val, - 'bit': self.bit, - 'attack_type': self.attack_type - } - - -class MettackHelper: - def __init__(self, graph, features, labels, train_mask, val_mask, test_mask, - n_perturbations=5, device='cpu', max_perturbations=50, - surrogate_epochs=30, candidate_sample_size=20): - self.device = device - self.graph = dgl.add_self_loop(graph).to(self.device) - self.features = features.to(self.device) - self.labels = labels.to(self.device) - self.train_mask = train_mask.to(self.device) - self.surrogate_epochs = surrogate_epochs - 
self.candidate_sample_size = candidate_sample_size - if val_mask is not None: - self.val_mask = val_mask.to(self.device) - else: - self.val_mask = self._create_val_mask_from_train(train_mask).to(self.device) - - self.test_mask = test_mask.to(self.device) - - self.n_perturbations = n_perturbations - - in_feats = features.shape[1] - n_classes = int(labels.max().item()) + 1 - self.surrogate = GCN(in_feats, n_classes).to(self.device) - - torch.manual_seed(42) - np.random.seed(42) - - self.modified_edges = set() - - original_graph_no_self_loop = dgl.remove_self_loop(graph) - self.original_edges = set(zip(original_graph_no_self_loop.edges()[0].cpu().numpy(), - original_graph_no_self_loop.edges()[1].cpu().numpy())) - - self.candidate_edges = self._get_candidate_edges() - - def _create_val_mask_from_train(self, train_mask): - """ - Create a validation mask by taking a subset of training nodes. - This is needed when the dataset doesn't provide a validation mask. - """ - train_indices = torch.where(train_mask)[0] - n_val = min(500, len(train_indices) // 4) - - perm = torch.randperm(len(train_indices)) - val_indices = train_indices[perm[:n_val]] - - val_mask = torch.zeros_like(train_mask, dtype=torch.bool) - val_mask[val_indices] = True - - self.train_mask = train_mask.clone() - self.train_mask[val_indices] = False - - return val_mask - - def run(self): - """ - Main entrypoint to run the Mettack algorithm. - Returns: - poisoned_graph (DGLGraph): The perturbed graph with edges changed. - metrics (dict): Metrics for before/after attack, for evaluation. - """ - print("Starting Mettack attack...") - - print("Training surrogate model...") - self._train_surrogate() - - print("Applying structure attack...") - poisoned_graph = self._apply_structure_attack() - - print("Evaluating attack results...") - metrics = self._evaluate(poisoned_graph) - - return poisoned_graph, metrics - - def _train_surrogate(self): - """ - Trains a surrogate GCN on the clean graph. 
- (Matches Wu et al., Section 6.1) - """ - optimizer = optim.Adam(self.surrogate.parameters(), lr=0.01, weight_decay=5e-4) - self.surrogate.train() - - for epoch in range(self.surrogate_epochs): - optimizer.zero_grad() - logits = self.surrogate(self.graph, self.features) - loss = F.cross_entropy(logits[self.train_mask], self.labels[self.train_mask]) - loss.backward() - optimizer.step() - - if epoch % 50 == 0: - self.surrogate.eval() - with torch.no_grad(): - val_logits = self.surrogate(self.graph, self.features) - val_acc = self._compute_accuracy(val_logits[self.val_mask], - self.labels[self.val_mask]) - print(f"Surrogate epoch {epoch}: Val Acc = {val_acc:.4f}") - self.surrogate.train() - - def _apply_structure_attack(self): - """ - Runs the Mettack structure perturbation loop (bi-level optimization). - - At each step, modify the adjacency matrix (add/remove an edge). - - Select the perturbation that maximizes surrogate model loss on the validation nodes. - - Repeat up to n_perturbations times. - Returns a new DGLGraph with edges modified. - (See Appendix A.2 in Wu et al.) 
- """ - current_graph = copy.deepcopy(self.graph) - perturbed_edges = set() - - for step in range(self.n_perturbations): - print(f"Perturbation step {step + 1}/{self.n_perturbations}") - - best_edge = None - best_loss = -float('inf') - best_action = None - - candidate_sample = np.random.choice(len(self.candidate_edges), - min(self.candidate_sample_size, len(self.candidate_edges)), - replace=False) - - for idx in tqdm(candidate_sample, desc="Evaluating candidates"): - edge = self.candidate_edges[idx] - - if edge in perturbed_edges or (edge[1], edge[0]) in perturbed_edges: - continue - - for action in ['add', 'remove']: - if action == 'add' and edge in self.original_edges: - continue - if action == 'remove' and edge not in self.original_edges: - continue - - temp_graph = self._apply_single_perturbation(current_graph, edge, action) - - attack_loss = self._compute_attack_loss(temp_graph) - - if attack_loss > best_loss: - best_loss = attack_loss - best_edge = edge - best_action = action - - if best_edge is not None: - current_graph = self._apply_single_perturbation(current_graph, best_edge, best_action) - perturbed_edges.add(best_edge) - self.modified_edges.add((best_edge, best_action)) - print(f"Applied {best_action} edge {best_edge} with loss increase: {best_loss:.4f}") - else: - print("No beneficial perturbation found, stopping early.") - break - - return current_graph - - def _get_candidate_edges(self): - """ - Generate candidate edges for perturbation. - Includes both existing edges (for removal) and non-existing edges (for addition). - """ - n_nodes = self.graph.num_nodes() - - all_possible_edges = [] - for i in range(n_nodes): - for j in range(i + 1, n_nodes): - all_possible_edges.append((i, j)) - - return all_possible_edges[:min(10000, len(all_possible_edges))] - - def _apply_single_perturbation(self, graph, edge, action): - """ - Apply a single edge perturbation (add or remove) to the graph. 
- """ - temp_graph = copy.deepcopy(graph) - - if action == 'add': - temp_graph.add_edges([edge[0], edge[1]], [edge[1], edge[0]]) - elif action == 'remove': - src, dst = temp_graph.edges() - edge_ids = [] - - for i, (s, d) in enumerate(zip(src.cpu().numpy(), dst.cpu().numpy())): - if (s == edge[0] and d == edge[1]) or (s == edge[1] and d == edge[0]): - edge_ids.append(i) - - if edge_ids: - temp_graph.remove_edges(edge_ids) - - temp_graph = dgl.add_self_loop(temp_graph) - - return temp_graph - - def _compute_attack_loss(self, perturbed_graph): - """ - Compute the attack loss on a perturbed graph. - This measures how much the surrogate model's performance degrades. - Uses proper bi-level optimization as in the original Mettack paper. - """ - - temp_surrogate = copy.deepcopy(self.surrogate) - temp_surrogate.train() - - optimizer = optim.Adam(temp_surrogate.parameters(), lr=0.01) - - for _ in range(5): - optimizer.zero_grad() - logits = temp_surrogate(perturbed_graph, self.features) - loss = F.cross_entropy(logits[self.train_mask], self.labels[self.train_mask]) - loss.backward() - optimizer.step() - - temp_surrogate.eval() - with torch.no_grad(): - val_logits = temp_surrogate(perturbed_graph, self.features) - val_loss = F.cross_entropy(val_logits[self.val_mask], self.labels[self.val_mask]) - - return val_loss.item() - - def _evaluate(self, poisoned_graph): - """ - Evaluates GCN accuracy before/after poisoning, etc. 
- """ - metrics = {} - - self.surrogate.eval() - with torch.no_grad(): - clean_logits = self.surrogate(self.graph, self.features) - clean_acc = self._compute_accuracy(clean_logits[self.test_mask], - self.labels[self.test_mask]) - metrics['clean_test_acc'] = clean_acc - - poisoned_model = GCN(self.features.shape[1], - int(self.labels.max().item()) + 1).to(self.device) - optimizer = optim.Adam(poisoned_model.parameters(), lr=0.01, weight_decay=5e-4) - - poisoned_model.train() - for epoch in range(200): - optimizer.zero_grad() - logits = poisoned_model(poisoned_graph, self.features) - loss = F.cross_entropy(logits[self.train_mask], self.labels[self.train_mask]) - loss.backward() - optimizer.step() - - poisoned_model.eval() - with torch.no_grad(): - poisoned_logits = poisoned_model(poisoned_graph, self.features) - poisoned_acc = self._compute_accuracy(poisoned_logits[self.test_mask], - self.labels[self.test_mask]) - metrics['poisoned_test_acc'] = poisoned_acc - - metrics['accuracy_drop'] = clean_acc - poisoned_acc - metrics['num_perturbations'] = len(self.modified_edges) - - return metrics - - def _compute_accuracy(self, logits, labels): - """Helper function to compute accuracy.""" - _, predicted = torch.max(logits, 1) - correct = (predicted == labels).sum().item() - return correct / len(labels) diff --git a/pygip/models/defense/RandomWM.py b/pygip/models/defense/RandomWM.py deleted file mode 100644 index 16c2f043..00000000 --- a/pygip/models/defense/RandomWM.py +++ /dev/null @@ -1,598 +0,0 @@ -import importlib -import time - -import dgl -import torch -import numpy as np -import torch.nn.functional as F -from tqdm import tqdm -from torch.utils.data import DataLoader -from torch_geometric.utils import erdos_renyi_graph -from dgl.dataloading import NeighborSampler, NodeCollator - -from pygip.models.nn import GraphSAGE -from pygip.models.defense.base import BaseDefense -from pygip.utils.metrics import DefenseCompMetric, DefenseMetric - - -class RandomWM(BaseDefense): - 
""" - A flexible defense implementation using watermarking to protect against - model extraction attacks on graph neural networks. - - This class combines the functionalities from the original watermark.py: - - Generating watermark graphs - - Training models on original and watermark graphs - - Merging graphs for testing - - Evaluating effectiveness against attacks - - Dynamic selection of attack methods - """ - supported_api_types = {"dgl"} - - def __init__(self, dataset, defense_ratio=0.1, wm_node=50, pr=0.2, pg=0.2, attack_name=None): - """ - Initialize the custom defense. - - Parameters - ---------- - defense_ratio : float - Defense strength (0-1): used to determine the number of watermark nodes and the attack node sampling scale - wm_node : Optional[int] - If specified, a fixed number of watermark nodes is used; otherwise, a dynamic calculation is performed based on defense_ratio * num_nodes - pr : float - Bernoulli probability of the watermark feature being 1 - pg : float - Edge probability of the watermark graph - attack_name : Optional[str] - Attack class name (from models.attack) - """ - super().__init__(dataset, defense_ratio) - self.defense_ratio = defense_ratio - self.attack_name = attack_name or "ModelExtractionAttack0" - self.dataset = dataset - self.graph = dataset.graph_data - - # Extract dataset properties - self.node_number = dataset.num_nodes - self.feature_number = dataset.num_features - self.label_number = dataset.num_classes - self.attack_node_number = int(self.node_number * defense_ratio) - - # Watermark parameters - self.wm_node = int(wm_node) if wm_node is not None else max(10, int(dataset.num_nodes * defense_ratio)) - self.pr = pr - self.pg = pg - - # Extract features and labels - self.features = self.graph.ndata['feat'] - self.labels = self.graph.ndata['label'] - - # Extract masks - self.train_mask = self.graph.ndata['train_mask'] - self.test_mask = self.graph.ndata['test_mask'] - - # Move tensors to self.device - if self.device != 'cpu': 
- self.graph = self.graph.to(self.device) - self.features = self.features.to(self.device) - self.labels = self.labels.to(self.device) - self.train_mask = self.train_mask.to(self.device) - self.test_mask = self.test_mask.to(self.device) - - def _get_attack_class(self, attack_name): - """ - Dynamically import and return the specified attack class. - - Parameters - ---------- - attack_name : str - Name of the attack class to import - - Returns - ------- - class - The requested attack class - """ - try: - # Try to import from models.attack module - attack_module = importlib.import_module('models.attack') - attack_class = getattr(attack_module, attack_name) - return attack_class - except (ImportError, AttributeError) as e: - print(f"Error loading attack class '{attack_name}': {e}") - print("Falling back to ModelExtractionAttack0") - # Fallback to ModelExtractionAttack0 - attack_module = importlib.import_module('models.attack') - return getattr(attack_module, "ModelExtractionAttack0") - - def defend(self, attack_name=None): - """ - Execute the random watermark defense. 
- """ - metric_comp = DefenseCompMetric() - metric_comp.start() - print("====================Random Watermark Defense====================") - - # If model wasn't trained yet, train it - if not hasattr(self, 'model_trained'): - self.train_target_model(metric_comp) - - # Evaluate the defended model - preds = self.evaluate_model(self.defense_model, self.dataset) - inference_s = time.time() - wm_preds = self.verify_watermark(self.defense_model) - inference_e = time.time() - - # metric - metric = DefenseMetric() - metric.update(preds, self.labels[self.test_mask]) - wm_labels = self.watermark_graph.ndata['label'] - metric.update_wm(wm_preds, wm_labels) - metric_comp.end() - - print("====================Final Results====================") - res = metric.compute() - metric_comp.update(inference_defense_time=(inference_e - inference_s)) - res_comp = metric_comp.compute() - - return res, res_comp - - def train_target_model(self, metric_comp: DefenseCompMetric): - """Train the target model with watermark injection.""" - defense_s = time.time() - - # Training and watermark generation (defense mechanism) - self.defense_model = self._train_defense_model() - self.model_trained = True - - defense_e = time.time() - metric_comp.update(defense_time=(defense_e - defense_s)) - return self.defense_model - - def evaluate_model(self, model, dataset): - """Evaluate model performance on downstream task""" - model.eval() - - # Setup data loading - sampler = NeighborSampler([5, 5]) - test_nids = self.test_mask.nonzero(as_tuple=True)[0].to(self.device) - test_collator = NodeCollator(self.graph, test_nids, sampler) - test_dataloader = DataLoader( - test_collator.dataset, - batch_size=32, - shuffle=False, - collate_fn=test_collator.collate, - drop_last=False - ) - - all_preds = [] - with torch.no_grad(): - for _, _, blocks in test_dataloader: - blocks = [b.to(self.device) for b in blocks] - input_features = blocks[0].srcdata['feat'] - output_predictions = model(blocks, input_features) - pred = 
output_predictions.argmax(dim=1) - all_preds.append(pred) - - preds = torch.cat(all_preds, dim=0).cpu() - return preds - - def verify_watermark(self, model): - """Verify watermark success rate""" - model.eval() - - # Setup data loading for watermark graph - sampler = NeighborSampler([5, 5]) - wm_nids = torch.arange(self.watermark_graph.number_of_nodes(), device=self.device) - wm_collator = NodeCollator(self.watermark_graph, wm_nids, sampler) - wm_dataloader = DataLoader( - wm_collator.dataset, - batch_size=self.wm_node, - shuffle=False, - collate_fn=wm_collator.collate, - drop_last=False - ) - - all_preds = [] - with torch.no_grad(): - for _, _, blocks in wm_dataloader: - blocks = [b.to(self.device) for b in blocks] - input_features = blocks[0].srcdata['feat'] - output_predictions = model(blocks, input_features) - pred = output_predictions.argmax(dim=1) - all_preds.append(pred) - - wm_preds = torch.cat(all_preds, dim=0).cpu() - return wm_preds - - def _train_target_model(self): - """ - Helper function for training the target model on the original graph. 
- - Returns - ------- - torch.nn.Module - The trained target model - """ - print("Training target model...") - - # Initialize model - model = GraphSAGE(in_channels=self.feature_number, - hidden_channels=128, - out_channels=self.label_number) - model = model.to(self.device) - optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=5e-4) - - # Setup data loading - sampler = NeighborSampler([5, 5]) - train_nids = self.train_mask.nonzero(as_tuple=True)[0].to(self.device) - test_nids = self.test_mask.nonzero(as_tuple=True)[0].to(self.device) - - train_collator = NodeCollator(self.graph, train_nids, sampler) - test_collator = NodeCollator(self.graph, test_nids, sampler) - - train_dataloader = DataLoader( - train_collator.dataset, - batch_size=32, - shuffle=True, - collate_fn=train_collator.collate, - drop_last=False - ) - - test_dataloader = DataLoader( - test_collator.dataset, - batch_size=32, - shuffle=False, - collate_fn=test_collator.collate, - drop_last=False - ) - - # Training loop - best_acc = 0 - for epoch in tqdm(range(1, 51), desc="Target model training"): - # Train - model.train() - total_loss = 0 - for _, _, blocks in train_dataloader: - blocks = [b.to(self.device) for b in blocks] - input_features = blocks[0].srcdata['feat'] - output_labels = blocks[-1].dstdata['label'] - - optimizer.zero_grad() - output_predictions = model(blocks, input_features) - loss = F.cross_entropy(output_predictions, output_labels) - loss.backward() - optimizer.step() - total_loss += loss.item() - - # Test - model.eval() - correct = 0 - total = 0 - with torch.no_grad(): - for _, _, blocks in test_dataloader: - blocks = [b.to(self.device) for b in blocks] - input_features = blocks[0].srcdata['feat'] - output_labels = blocks[-1].dstdata['label'] - output_predictions = model(blocks, input_features) - pred = output_predictions.argmax(dim=1) - correct += (pred == output_labels).sum().item() - total += len(output_labels) - - acc = correct / total - if acc > best_acc: - 
best_acc = acc - - print(f"Target model trained. Test accuracy: {best_acc:.4f}") - return model - - def _train_defense_model(self): - """ - Helper function for training a defense model with watermarking. - - Returns - ------- - torch.nn.Module - The trained defense model with embedded watermark - """ - print("Training defense model with watermarking...") - - # Generate watermark graph - wm_graph = self._generate_watermark_graph() - - # Initialize model - model = GraphSAGE(in_channels=self.feature_number, - hidden_channels=128, - out_channels=self.label_number) - model = model.to(self.device) - optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=5e-4) - - # Setup data loading for original graph - sampler = NeighborSampler([5, 5]) - train_nids = self.train_mask.nonzero(as_tuple=True)[0].to(self.device) - test_nids = self.test_mask.nonzero(as_tuple=True)[0].to(self.device) - - train_collator = NodeCollator(self.graph, train_nids, sampler) - test_collator = NodeCollator(self.graph, test_nids, sampler) - - train_dataloader = DataLoader( - train_collator.dataset, - batch_size=32, - shuffle=True, - collate_fn=train_collator.collate, - drop_last=False - ) - - test_dataloader = DataLoader( - test_collator.dataset, - batch_size=32, - shuffle=False, - collate_fn=test_collator.collate, - drop_last=False - ) - - # Setup data loading for watermark graph - wm_nids = torch.arange(wm_graph.number_of_nodes(), device=self.device) - wm_collator = NodeCollator(wm_graph, wm_nids, sampler) - - wm_dataloader = DataLoader( - wm_collator.dataset, - batch_size=self.wm_node, - shuffle=True, - collate_fn=wm_collator.collate, - drop_last=False - ) - - # First stage: Train on original graph - best_acc = 0 - for epoch in tqdm(range(1, 51), desc="Defense model - stage 1"): - # Train - model.train() - total_loss = 0 - for _, _, blocks in train_dataloader: - blocks = [b.to(self.device) for b in blocks] - input_features = blocks[0].srcdata['feat'] - output_labels = 
blocks[-1].dstdata['label'] - - optimizer.zero_grad() - output_predictions = model(blocks, input_features) - loss = F.cross_entropy(output_predictions, output_labels) - loss.backward() - optimizer.step() - total_loss += loss.item() - - # Test - model.eval() - correct = 0 - total = 0 - with torch.no_grad(): - for _, _, blocks in test_dataloader: - blocks = [b.to(self.device) for b in blocks] - input_features = blocks[0].srcdata['feat'] - output_labels = blocks[-1].dstdata['label'] - output_predictions = model(blocks, input_features) - pred = output_predictions.argmax(dim=1) - correct += (pred == output_labels).sum().item() - total += len(output_labels) - - acc = correct / total - if acc > best_acc: - best_acc = acc - - # Second stage: Fine-tune on watermark graph - for epoch in tqdm(range(1, 11), desc="Defense model - stage 2"): - # Train on watermark - model.train() - total_loss = 0 - for _, _, blocks in wm_dataloader: - blocks = [b.to(self.device) for b in blocks] - input_features = blocks[0].srcdata['feat'] - output_labels = blocks[-1].dstdata['label'] - - optimizer.zero_grad() - output_predictions = model(blocks, input_features) - loss = F.cross_entropy(output_predictions, output_labels) - loss.backward() - optimizer.step() - total_loss += loss.item() - - # Final evaluation - model.eval() - correct = 0 - total = 0 - with torch.no_grad(): - for _, _, blocks in test_dataloader: - blocks = [b.to(self.device) for b in blocks] - input_features = blocks[0].srcdata['feat'] - output_labels = blocks[-1].dstdata['label'] - output_predictions = model(blocks, input_features) - pred = output_predictions.argmax(dim=1) - correct += (pred == output_labels).sum().item() - total += len(output_labels) - - final_acc = correct / total - - # Watermark accuracy - wm_acc = self._test_on_watermark(model, wm_dataloader) - - print(f"Defense model trained.") - print(f"Test accuracy on original data: {final_acc:.4f}") - print(f"Test accuracy on watermark: {wm_acc:.4f}") - - # Store 
watermark graph for later verification - self.watermark_graph = wm_graph - - return model - - def _generate_watermark_graph(self): - """ - Generate a watermark graph using Erdos-Renyi random graph model. - - Returns - ------- - dgl.DGLGraph - The generated watermark graph - """ - # Generate random edges using Erdos-Renyi model - wm_edge_index = erdos_renyi_graph(self.wm_node, self.pg, directed=False) - - # Generate random features with binomial distribution - wm_features = torch.tensor(np.random.binomial( - 1, self.pr, size=(self.wm_node, self.feature_number)), - dtype=torch.float32).to(self.device) - - # Generate random labels - wm_labels = torch.tensor(np.random.randint( - low=0, high=self.label_number, size=self.wm_node), - dtype=torch.long).to(self.device) - - # Create DGL graph - wm_graph = dgl.graph((wm_edge_index[0], wm_edge_index[1]), num_nodes=self.wm_node) - wm_graph = wm_graph.to(self.device) - - # Add node features and labels - wm_graph.ndata['feat'] = wm_features - wm_graph.ndata['label'] = wm_labels - - # Add train and test masks (all True for simplicity) - wm_graph.ndata['train_mask'] = torch.ones(self.wm_node, dtype=torch.bool, device=self.device) - wm_graph.ndata['test_mask'] = torch.ones(self.wm_node, dtype=torch.bool, device=self.device) - - # Add self-loops - wm_graph = dgl.add_self_loop(wm_graph) - - return wm_graph - - def _test_on_watermark(self, model, wm_dataloader): - """ - Test a model's accuracy on the watermark graph. 
- - Parameters - ---------- - model : torch.nn.Module - The model to test - wm_dataloader : DataLoader - DataLoader for the watermark graph - - Returns - ------- - float - Accuracy on the watermark graph - """ - model.eval() - correct = 0 - total = 0 - with torch.no_grad(): - for _, _, blocks in wm_dataloader: - blocks = [b.to(self.device) for b in blocks] - input_features = blocks[0].srcdata['feat'] - output_labels = blocks[-1].dstdata['label'] - output_predictions = model(blocks, input_features) - pred = output_predictions.argmax(dim=1) - correct += (pred == output_labels).sum().item() - total += len(output_labels) - - return correct / total - - def _evaluate_watermark(self, model): - """ - Evaluate watermark detection effectiveness. - - Parameters - ---------- - model : torch.nn.Module - The model to evaluate - - Returns - ------- - float - Watermark detection accuracy - """ - if not hasattr(self, 'watermark_graph'): - print("Warning: No watermark graph found. Generate one first.") - return 0.0 - - # Setup data loading for watermark graph - sampler = NeighborSampler([5, 5]) - wm_nids = torch.arange(self.watermark_graph.number_of_nodes(), device=self.device) - wm_collator = NodeCollator(self.watermark_graph, wm_nids, sampler) - - wm_dataloader = DataLoader( - wm_collator.dataset, - batch_size=self.wm_node, - shuffle=False, - collate_fn=wm_collator.collate, - drop_last=False - ) - - return self._test_on_watermark(model, wm_dataloader) - - def _evaluate_attack_on_watermark(self, attack_model): - """ - Evaluate how well the attack model performs on the watermark graph. - - Parameters - ---------- - attack_model : torch.nn.Module - The model obtained from the attack - - Returns - ------- - float - Attack model's accuracy on the watermark graph - """ - if not hasattr(self, 'watermark_graph'): - print("Warning: No watermark graph found. 
Generate one first.") - return 0.0 - - # Check the model type to determine the correct evaluation approach - model_name = attack_model.__class__.__name__ - - # For GCN models that expect (g, features) input format - if model_name == 'GCN': - # Evaluate using the whole graph at once - attack_model.eval() - with torch.no_grad(): - # Pass the entire graph and features at once - output_predictions = attack_model(self.watermark_graph, self.watermark_graph.ndata['feat']) - pred = output_predictions.argmax(dim=1) - correct = (pred == self.watermark_graph.ndata['label']).sum().item() - total = self.watermark_graph.number_of_nodes() - - return correct / total - - # For GraphSAGE models that expect blocks input format - elif model_name == 'GraphSAGE': - # Setup data loading for watermark graph - sampler = NeighborSampler([5, 5]) - wm_nids = torch.arange(self.watermark_graph.number_of_nodes(), device=self.device) - wm_collator = NodeCollator(self.watermark_graph, wm_nids, sampler) - - wm_dataloader = DataLoader( - wm_collator.dataset, - batch_size=self.wm_node, - shuffle=False, - collate_fn=wm_collator.collate, - drop_last=False - ) - - # Evaluate attack model on watermark - attack_model.eval() - correct = 0 - total = 0 - with torch.no_grad(): - for _, _, blocks in wm_dataloader: - blocks = [b.to(self.device) for b in blocks] - input_features = blocks[0].srcdata['feat'] - output_labels = blocks[-1].dstdata['label'] - output_predictions = attack_model(blocks, input_features) - pred = output_predictions.argmax(dim=1) - correct += (pred == output_labels).sum().item() - total += len(output_labels) - - return correct / total - - # For any other model type, print a warning and return 0 - else: - print(f"Warning: Unsupported model type '{model_name}' for watermark evaluation") - return 0.0 diff --git a/pygip/models/defense/Revisiting.py b/pygip/models/defense/Revisiting.py deleted file mode 100644 index a6d24667..00000000 --- a/pygip/models/defense/Revisiting.py +++ /dev/null @@ 
-1,244 +0,0 @@ -from typing import Any, Dict, Iterable, Tuple - -import dgl -import torch -import torch.nn.functional as F -from dgl.dataloading import NeighborSampler, NodeCollator -from torch.utils.data import DataLoader -from tqdm import tqdm - -from pygip.models.defense.base import BaseDefense -from pygip.models.nn import GraphSAGE -from pygip.utils.metrics import DefenseCompMetric - - -class Revisiting(BaseDefense): - """ - A lightweight defense that 'revisits' node features via neighbor mixing. - - Idea (defense intuition) - ------------------------ - We pick a subset of nodes (size ~ attack_node_fraction * |V|) and *smoothly* - mix their features with their 1-hop / 2-hop neighborhoods using a mixing - factor `alpha`. This keeps utility (accuracy) largely intact while making - local feature structure less extractable for subgraph-based queries. - - API shape follows RandomWM: - - lives under models/defense/ - - inherits BaseDefense - - public entrypoint: .defend() - - Parameters - ---------- - dataset : Any - A dataset object providing a DGLGraph in `dataset.graph_data` and - ndata fields: 'feat', 'label', 'train_mask', 'test_mask'. - attack_node_fraction : float, default=0.2 - Fraction of nodes used as the 'focus set' for our revisiting transform. - alpha : float, default=0.8 - Mixing coefficient in [0,1]. Higher -> stronger neighbor mixing. 
- """ - - supported_api_types = {"dgl"} - - def __init__( - self, - dataset: Any, - attack_node_fraction: float = 0.2, - alpha: float = 0.8, - ) -> None: - super().__init__(dataset, attack_node_fraction) - - # knobs - self.alpha = float(alpha) - - # cache handles similar to RandomWM for consistency - self.dataset = dataset - self.graph: dgl.DGLGraph = dataset.graph_data - - self.num_nodes = dataset.num_nodes - self.num_features = dataset.num_features - self.num_classes = dataset.num_classes - self.num_focus_nodes = max(1, int(self.num_nodes * attack_node_fraction)) - - self.features: torch.Tensor = self.graph.ndata["feat"] - self.labels: torch.Tensor = self.graph.ndata["label"] - self.train_mask: torch.Tensor = self.graph.ndata["train_mask"] - self.test_mask: torch.Tensor = self.graph.ndata["test_mask"] - - if self.device != "cpu": - self.graph = self.graph.to(self.device) - self.features = self.features.to(self.device) - self.labels = self.labels.to(self.device) - self.train_mask = self.train_mask.to(self.device) - self.test_mask = self.test_mask.to(self.device) - - # --------------------------------------------------------------------- # - # Public entrypoint - # --------------------------------------------------------------------- # - def defend(self) -> Tuple[Dict[str, Any], Dict[str, Any]]: - """ - 1) Train a baseline GraphSAGE on the original graph (utility baseline) - 2) Apply revisiting feature-mixing on a subset of nodes - 3) Train a defended GraphSAGE on the transformed features - 4) Return accuracy metrics and basic metadata - """ - metric_comp = DefenseCompMetric() - metric_comp.start() - # ---- Baseline (no transform) ------------------------------------- # - baseline_acc = self._train_and_eval_graphsage(use_transformed_features=False) - - # ---- Build transformed features (revisiting) --------------------- # - feat_defended, picked = self._build_revisiting_features() - - # ---- Train with defended features -------------------------------- # - # 
Temporarily override graph features, then restore - orig_feat = self.graph.ndata["feat"] - try: - self.graph.ndata["feat"] = feat_defended - defense_acc = self._train_and_eval_graphsage(use_transformed_features=True) - finally: - self.graph.ndata["feat"] = orig_feat # restore - - res = { - "ok": True, - "method": "Revisiting", - "alpha": self.alpha, - "focus_nodes": int(self.num_focus_nodes), - "baseline_test_acc": float(baseline_acc), - "defense_test_acc": float(defense_acc), - "acc_delta": float(defense_acc - baseline_acc), - # returning a small sample of picked nodes for debuggability - "sample_picked_nodes": picked[:10].tolist() if isinstance(picked, torch.Tensor) else [], - } - - return res, metric_comp.compute() - - # --------------------------------------------------------------------- # - # Core: feature revisiting (neighbor mixing) - # --------------------------------------------------------------------- # - def _build_revisiting_features(self) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Returns a new feature tensor where a subset of nodes (and optionally - their neighbors) are mixed with neighbor features. - - Mixing rule (simple & stable): - - For each picked node u: - x[u] <- (1 - alpha) * x[u] + alpha * mean(x[N(u)]) - - For each 1-hop neighbor v in N(u) we apply a *lighter* mix - x[v] <- (1 - 0.5*alpha) * x[v] + (0.5*alpha) * mean(x[N(v)]) - - This keeps the transform localized and smooth. 
- """ - g = self.graph - x = self.features.clone() - - # pick focus nodes - picked = torch.randperm(self.num_nodes, device=self.device)[: self.num_focus_nodes] - - # precompute neighbor lists (on CPU tensors if needed) - # we'll use undirected neighborhood by combining predecessors/successors - def neighbors(nodes: Iterable[int]) -> torch.Tensor: - cols = [] - for n in nodes: - # concatenate in- and out-neighbors to emulate undirected - nb = torch.unique( - torch.cat([g.successors(int(n)), g.predecessors(int(n))], dim=0) - ) - if nb.numel() > 0: - cols.append(nb) - if not cols: - return torch.empty(0, dtype=torch.long, device=self.device) - return torch.unique(torch.cat(cols)) - - # 1) mix picked nodes with mean of their neighbors - for u in picked.tolist(): - nb = neighbors([u]) - if nb.numel() == 0: - continue - mean_nb = self.features[nb].mean(dim=0) - x[u] = (1.0 - self.alpha) * self.features[u] + self.alpha * mean_nb - - # 2) lightly mix 1-hop neighbors as well (half strength) - one_hop = neighbors(picked.tolist()) - for v in one_hop.tolist(): - nb = neighbors([v]) - if nb.numel() == 0: - continue - mean_nb = self.features[nb].mean(dim=0) - x[v] = (1.0 - 0.5 * self.alpha) * self.features[v] + (0.5 * self.alpha) * mean_nb - - return x, picked - - # --------------------------------------------------------------------- # - # Training/Eval (same style as RandomWM) - # --------------------------------------------------------------------- # - def _train_and_eval_graphsage(self, use_transformed_features: bool) -> float: - """ - Train a GraphSAGE for a few epochs and return test accuracy. - Uses NeighborSampler + NodeCollator (same pattern as RandomWM). 
- """ - model = GraphSAGE( - in_channels=self.num_features, - hidden_channels=128, - out_channels=self.num_classes, - ).to(self.device) - - optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=5e-4) - sampler = NeighborSampler([5, 5]) - - train_nids = self.train_mask.nonzero(as_tuple=True)[0].to(self.device) - test_nids = self.test_mask.nonzero(as_tuple=True)[0].to(self.device) - - train_collator = NodeCollator(self.graph, train_nids, sampler) - test_collator = NodeCollator(self.graph, test_nids, sampler) - - train_loader = DataLoader( - train_collator.dataset, - batch_size=32, - shuffle=True, - collate_fn=train_collator.collate, - drop_last=False, - ) - test_loader = DataLoader( - test_collator.dataset, - batch_size=32, - shuffle=False, - collate_fn=test_collator.collate, - drop_last=False, - ) - - best_acc = 0.0 - for _ in tqdm(range(1, 51), - desc=("GraphSAGE (defended)" if use_transformed_features else "GraphSAGE (baseline)")): - # ---- Train - model.train() - for _, _, blocks in train_loader: - blocks = [b.to(self.device) for b in blocks] - feats = blocks[0].srcdata["feat"] - labels = blocks[-1].dstdata["label"] - - optimizer.zero_grad() - logits = model(blocks, feats) - loss = F.cross_entropy(logits, labels) - loss.backward() - optimizer.step() - - # ---- Eval - model.eval() - correct = 0 - total = 0 - with torch.no_grad(): - for _, _, blocks in test_loader: - blocks = [b.to(self.device) for b in blocks] - feats = blocks[0].srcdata["feat"] - labels = blocks[-1].dstdata["label"] - logits = model(blocks, feats) - pred = logits.argmax(dim=1) - correct += (pred == labels).sum().item() - total += labels.numel() - - acc = correct / max(1, total) - best_acc = max(best_acc, acc) - - return best_acc diff --git a/pygip/models/defense/SurviveWM.py b/pygip/models/defense/SurviveWM.py deleted file mode 100644 index dc489fa6..00000000 --- a/pygip/models/defense/SurviveWM.py +++ /dev/null @@ -1,301 +0,0 @@ -import time - -import dgl -import torch -import 
networkx as nx -import torch.nn.functional as F -from tqdm import tqdm -from torch_geometric.data import Data -from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score - -from pygip.models.defense.base import BaseDefense -from pygip.models.nn import GCN -from pygip.utils.metrics import GraphNeuralNetworkMetric, DefenseCompMetric, DefenseMetric - - -class SurviveWM(BaseDefense): - supported_api_types = {"dgl"} - - def __init__(self, dataset, defense_ratio: float = 0.1, model_path=None): - super().__init__(dataset, defense_ratio) - # load graph data - self.dataset = dataset - self.graph_dataset = dataset.graph_data - self.graph_data = dataset.graph_data.to(device=self.device) - self.model_path = model_path - self.graph = self.graph_data - self.features = self.graph_data.ndata['feat'] - self.labels = self.graph_data.ndata['label'] - self.train_mask = self.graph_data.ndata['train_mask'] - self.test_mask = self.graph_data.ndata['test_mask'] - - # load meta data - self.feature_number = dataset.num_features - self.label_number = dataset.num_classes - - # params - self.defense_ratio = defense_ratio - - def _load_model(self): - """ - Load a pre-trained model. 
- """ - assert self.model_path is not None, "Please provide a pre-trained model" - - # Create the model - self.net1 = GCN(self.feature_number, self.label_number).to(self.device) - - # Load the saved state dict - self.net1.load_state_dict(torch.load(self.model_path, map_location=self.device)) - - # Set to evaluation mode - self.net1.eval() - - def _to_cpu(self, tensor): - """ - Safely move tensor to CPU for NumPy operations - """ - if tensor.is_cuda: - return tensor.cpu() - return tensor - - # === Soft Nearest Neighbor Loss === - def snn_loss(self, x, y, T=0.5): - x = F.normalize(x, p=2, dim=1) - dist_matrix = torch.cdist(x, x, p=2) ** 2 - eye = torch.eye(len(x), device=self.device).bool() - sim = torch.exp(-dist_matrix / T) - mask_same = y.unsqueeze(1) == y.unsqueeze(0) - sim = sim.masked_fill(eye, 0) - denom = sim.sum(1) - nom = (sim * mask_same.float()).sum(1) - loss = -torch.log(nom / (denom + 1e-10) + 1e-10).mean() - return loss - - # === Trigger Graph Generator === - def generate_key_graph(self, num_nodes=None, edge_prob=None): - if num_nodes is None: - num_nodes = max(5, int(self.dataset.num_nodes * self.defense_ratio)) - if edge_prob is None: - edge_prob = min(0.5, self.defense_ratio * 3) - - trigger = nx.erdos_renyi_graph(num_nodes, edge_prob) - edge_index = torch.tensor(list(trigger.edges), dtype=torch.long).t().contiguous() - if edge_index.numel() == 0: - edge_index = torch.empty((2, 0), dtype=torch.long) - - x = torch.randn((num_nodes, self.feature_number), device=self.device) * 0.1 - label = torch.randint(0, self.label_number, (num_nodes,), device=self.device) - return Data(x=x, edge_index=edge_index.to(self.device), y=label) - - # === Combine base and trigger === - def combine_with_trigger(self, base_graph, base_features, base_labels, trigger_data): - # Convert DGL graph to edge_index format - src, dst = base_graph.edges() - base_edge_index = torch.stack([src, dst], dim=0) - - x = torch.cat([base_features, trigger_data.x], dim=0) - edge_index = 
torch.cat([base_edge_index, trigger_data.edge_index + base_features.size(0)], dim=1) - y = torch.cat([base_labels, trigger_data.y], dim=0) - - # Create DGL graph from combined data - src_combined, dst_combined = edge_index[0], edge_index[1] - combined_graph = dgl.graph((src_combined, dst_combined), num_nodes=x.size(0)).to(self.device) - - # **FIX: Add self-loops to handle zero in-degree nodes** - combined_graph = dgl.add_self_loop(combined_graph) - - combined_graph.ndata['feat'] = x.to(self.device) - - return combined_graph, y.to(self.device) - - def train_with_snnl(self, model, graph, features, labels, train_mask, optimizer, T=0.5, alpha=0.1): - model.train() - optimizer.zero_grad() - out = model(graph, features) - loss_nll = F.nll_loss(F.log_softmax(out, dim=1)[train_mask], labels[train_mask]) - snnl = self.snn_loss(out[train_mask], labels[train_mask], T=T) - loss = loss_nll - alpha * snnl - loss.backward() - optimizer.step() - return loss.item() - - @torch.no_grad() - def verify_watermark(self, model, trigger_graph, trigger_labels): - model.eval() - out = model(trigger_graph, trigger_graph.ndata['feat']) - pred = out.argmax(dim=1) - return (pred == trigger_labels).float().mean().item() - - def compute_metrics(self, y_true, y_pred, y_score=None): - return { - 'accuracy': accuracy_score(y_true, y_pred), - 'f1': f1_score(y_true, y_pred, average='macro'), - 'precision': precision_score(y_true, y_pred, average='macro'), - 'recall': recall_score(y_true, y_pred, average='macro'), - 'auroc': roc_auc_score(y_true, y_score, multi_class='ovo') if y_score is not None else None - } - - def defend(self): - """Execute the SurviveWM defense.""" - metric_comp = DefenseCompMetric() - metric_comp.start() - print("====================SurviveWM Defense====================") - - # If model wasn't trained yet, train it - if not hasattr(self, 'model_trained'): - self.train_target_model(metric_comp) - - # Evaluate the defended model - preds = self.evaluate_model(self.watermarked_model) 
- inference_s = time.time() - wm_preds = self.verify_watermark_model(self.watermarked_model) - inference_e = time.time() - - # metric - metric = DefenseMetric() - metric.update(preds, self.labels[self.test_mask]) - metric.update_wm(wm_preds, self.trigger_data.y) - metric_comp.end() - - print("====================Final Results====================") - res = metric.compute() - metric_comp.update(inference_defense_time=(inference_e - inference_s)) - res_comp = metric_comp.compute() - - return res, res_comp - - def train_target_model(self, metric_comp: DefenseCompMetric): - """Train the target model with watermark injection.""" - defense_s = time.time() - - # Generate trigger and train watermarked model (defense mechanism) - self.trigger_data = self.generate_key_graph().to(self.device) - self.watermarked_model = self._train_watermarked_model() - self.model_trained = True - - defense_e = time.time() - metric_comp.update(defense_time=(defense_e - defense_s)) - return self.watermarked_model - - def evaluate_model(self, model): - """Evaluate model performance on downstream task""" - model.eval() - with torch.no_grad(): - test_graph = dgl.add_self_loop(self.graph) - logits = model(test_graph, self.features) - pred = logits.argmax(dim=1) - preds = pred[self.test_mask].cpu() - return preds - - def verify_watermark_model(self, model): - """Verify watermark success rate""" - model.eval() - with torch.no_grad(): - # Use the stored trigger graph if available - if hasattr(self, 'trigger_graph'): - trigger_graph = self.trigger_graph - else: - # Create trigger graph for verification - trigger_src, trigger_dst = self.trigger_data.edge_index[0], self.trigger_data.edge_index[1] - trigger_graph = dgl.graph((trigger_src, trigger_dst), num_nodes=self.trigger_data.num_nodes).to( - self.device) - trigger_graph = dgl.add_self_loop(trigger_graph) - trigger_graph.ndata['feat'] = self.trigger_data.x.to(self.device) - - out = model(trigger_graph, trigger_graph.ndata['feat']) - wm_preds = 
out.argmax(dim=1).cpu() - return wm_preds - - def _train_watermarked_model(self): - """Helper function to train the watermarked model""" - print("Training watermarked model...") - - # Combine base graph with trigger - combined_graph, combined_labels = self.combine_with_trigger( - self.graph, self.features, self.labels, self.trigger_data) - - # Create train mask for combined data (include trigger nodes in training) - base_train_mask = self.train_mask - trigger_train_mask = torch.ones(self.trigger_data.num_nodes, dtype=torch.bool, device=self.device) - combined_train_mask = torch.cat([base_train_mask, trigger_train_mask]) - - # Create test mask for combined data (exclude trigger nodes from testing) - base_test_mask = self.test_mask - trigger_test_mask = torch.zeros(self.trigger_data.num_nodes, dtype=torch.bool, device=self.device) - combined_test_mask = torch.cat([base_test_mask, trigger_test_mask]) - - # Create watermarked model - watermarked_model = GCN(self.feature_number, self.label_number).to(self.device) - optimizer = torch.optim.Adam(watermarked_model.parameters(), lr=0.01, weight_decay=5e-4) - - # Create trigger graph for watermark verification - trigger_src, trigger_dst = self.trigger_data.edge_index[0], self.trigger_data.edge_index[1] - trigger_graph = dgl.graph((trigger_src, trigger_dst), num_nodes=self.trigger_data.num_nodes).to(self.device) - trigger_graph = dgl.add_self_loop(trigger_graph) - trigger_graph.ndata['feat'] = self.trigger_data.x.to(self.device) - - # Store trigger graph for later use - self.trigger_graph = trigger_graph - - dur = [] - best_performance_metrics = GraphNeuralNetworkMetric() - - for epoch in tqdm(range(200)): - if epoch >= 3: - t0 = time.time() - - # Train with SNNL - loss = self.train_with_snnl( - watermarked_model, combined_graph, combined_graph.ndata['feat'], - combined_labels, combined_train_mask, optimizer) - - if epoch >= 3: - dur.append(time.time() - t0) - - # Evaluate periodically - if epoch % 20 == 0: - 
watermarked_model.eval() - with torch.no_grad(): - # Test on original graph (ensure it has self-loops) - test_graph = dgl.add_self_loop(self.graph) - - logits = watermarked_model(test_graph, self.features) - pred = logits.argmax(dim=1) - test_acc = (pred[self.test_mask] == self.labels[self.test_mask]).float().mean() - - # Verify watermark - wm_acc = self.verify_watermark(watermarked_model, trigger_graph, self.trigger_data.y) - - print(f"Epoch {epoch}, Test Acc: {test_acc.item():.4f}, Watermark Acc: {wm_acc:.4f}") - - # Final evaluation - watermarked_model.eval() - with torch.no_grad(): - # Evaluate on test set (ensure graph has self-loops) - test_graph = dgl.add_self_loop(self.graph) - - logits = watermarked_model(test_graph, self.features) - pred = logits.argmax(dim=1) - probs = F.softmax(logits, dim=1) - - test_metrics = self.compute_metrics( - self._to_cpu(self.labels[self.test_mask]).numpy(), - self._to_cpu(pred[self.test_mask]).numpy(), - self._to_cpu(probs[self.test_mask]).numpy() - ) - - # Verify watermark - wm_acc = self.verify_watermark(watermarked_model, trigger_graph, self.trigger_data.y) - - print(f"Final Test Accuracy: {test_metrics['accuracy']:.4f}") - print(f"Final Test F1: {test_metrics['f1']:.4f}") - print(f"Final Test Precision: {test_metrics['precision']:.4f}") - print(f"Final Test Recall: {test_metrics['recall']:.4f}") - print(f"Final Watermark Accuracy: {wm_acc:.4f}") - - # Store final metrics for later use - self.final_test_metrics = test_metrics - self.final_wm_acc = wm_acc - - return watermarked_model diff --git a/pygip/models/defense/SurviveWM2.py b/pygip/models/defense/SurviveWM2.py deleted file mode 100644 index ad09c6a8..00000000 --- a/pygip/models/defense/SurviveWM2.py +++ /dev/null @@ -1,556 +0,0 @@ -import random -from typing import List, Tuple - -import networkx as nx -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -from torch_geometric.data import Data -from torch_geometric.loader 
import DataLoader -from torch_geometric.nn import SAGEConv, global_mean_pool, BatchNorm -from torch_geometric.utils import to_networkx - -from pygip.models.defense.base import BaseDefense - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - -class SAGEModel(nn.Module): - def __init__(self, input_dim: int, hidden_dim: int = 64, num_classes: int = 10, num_layers: int = 3, - dropout: float = 0.1): - super().__init__() - self.convs = nn.ModuleList() - self.norms = nn.ModuleList() - self.convs.append(SAGEConv(input_dim, hidden_dim)) - self.norms.append(BatchNorm(hidden_dim)) - for _ in range(num_layers - 1): - self.convs.append(SAGEConv(hidden_dim, hidden_dim)) - self.norms.append(BatchNorm(hidden_dim)) - self.classifier = nn.Linear(hidden_dim, num_classes) - self.dropout = nn.Dropout(dropout) - - def forward(self, x, edge_index, batch, return_embedding=False): - for i, conv in enumerate(self.convs): - x = conv(x, edge_index) - x = self.norms[i](x) - x = F.relu(x) - if i < len(self.convs) - 1: - x = self.dropout(x) - embedding = global_mean_pool(x, batch) - if return_embedding: - return embedding - return self.classifier(embedding) - - -class WatermarkGenerator: - def __init__(self, training_dataset: List[Data], num_watermark_samples: int = None): - self.training_dataset = training_dataset - self.num_classes = self._get_num_classes() - self.avg_num_nodes = self._get_avg_num_nodes() - self.feature_dim = training_dataset[0].x.size(1) if training_dataset else 32 - if num_watermark_samples is None: - self.num_watermark_samples = max(5, int(0.05 * len(training_dataset))) - else: - self.num_watermark_samples = num_watermark_samples - - def algorithm_1_key_input_topology_generation(self, N_t: int, N: int = None) -> Data: - if N is None: - N = min(50, self.avg_num_nodes) - x = torch.zeros(N, self.feature_dim) - p = 0.5 - G_r = nx.erdos_renyi_graph(N_t, p) - remaining_nodes = N - N_t - if remaining_nodes > 0: - training_sample = 
random.choice(self.training_dataset) - G_train = to_networkx(training_sample, to_undirected=True) - if G_train.number_of_nodes() >= remaining_nodes: - sampled_nodes = random.sample(list(G_train.nodes()), remaining_nodes) - G_o = G_train.subgraph(sampled_nodes).copy() - for i, node in enumerate(sampled_nodes): - if node < training_sample.x.size(0): - x[N_t + i] = training_sample.x[node] - else: - x[N_t + i] = torch.randn(self.feature_dim) - else: - G_o = G_train.copy() - for i in range(min(remaining_nodes, training_sample.x.size(0))): - x[N_t + i] = training_sample.x[i] - else: - G_o = nx.Graph() - edges = set() - for u, v in G_r.edges(): - if u < N and v < N: - edges.add((min(u, v), max(u, v))) - node_mapping = {old: new + N_t for new, old in enumerate(G_o.nodes())} - for u_old, v_old in G_o.edges(): - u, v = node_mapping[u_old], node_mapping[v_old] - if u < N and v < N: - edges.add((min(u, v), max(u, v))) - if edges: - edge_list = [] - for u, v in edges: - edge_list.extend([[u, v], [v, u]]) - edge_index = torch.tensor(edge_list, dtype=torch.long).t() - else: - edge_index = torch.zeros((2, 0), dtype=torch.long) - watermark_graph = Data(x=x, edge_index=edge_index) - return watermark_graph - - def generate_watermark_set_with_clean_model(self, clean_model) -> List[Tuple[Data, int]]: - watermark_pairs = [] - while len(watermark_pairs) < self.num_watermark_samples: - N_t = random.choice([3, 4]) - watermark_graph = self.algorithm_1_key_input_topology_generation(N_t) - clean_model.eval() - with torch.no_grad(): - batch = torch.zeros(watermark_graph.x.size(0), dtype=torch.long) - pred_logits = clean_model(watermark_graph.x, watermark_graph.edge_index, batch) - clean_pred = pred_logits.argmax().item() - probs = F.softmax(pred_logits, dim=1) - if probs.max().item() < 0.6: - continue - other_classes = [i for i in range(self.num_classes) if i != clean_pred] - watermark_label = random.choice(other_classes) if other_classes else (clean_pred + 1) % self.num_classes - 
watermark_pairs.append((watermark_graph, watermark_label)) - while len(watermark_pairs) < max(5, self.num_watermark_samples // 2): - wg = self.algorithm_1_key_input_topology_generation(random.choice([2, 3, 4])) - watermark_pairs.append((wg, random.randint(0, self.num_classes - 1))) - return watermark_pairs - - def _get_num_classes(self) -> int: - if not self.training_dataset: - return 2 - labels = {int(d.y) for d in self.training_dataset if hasattr(d, 'y')} - return max(labels) + 1 if labels else 2 - - def _get_avg_num_nodes(self) -> int: - if not self.training_dataset: - return 20 - total = sum(d.x.size(0) for d in self.training_dataset) - return total // len(self.training_dataset) - - -class SNNLLoss(nn.Module): - def __init__(self, temperature: float = 1.0): - super().__init__() - self.T = temperature - - def forward(self, embeddings: torch.Tensor, labels: torch.Tensor) -> torch.Tensor: - N = embeddings.size(0) - if N <= 1: - return torch.tensor(0.0, requires_grad=True, device=embeddings.device) - distances = torch.cdist(embeddings, embeddings, p=2).pow(2) - loss = 0.0 - count = 0 - for i in range(N): - same_mask = (labels == labels[i]) & (torch.arange(N, device=labels.device) != i) - diff_mask = torch.arange(N, device=labels.device) != i - if same_mask.sum() == 0 or diff_mask.sum() == 0: - continue - numerator = torch.exp(-distances[i, same_mask] / self.T).sum() - denominator = torch.exp(-distances[i, diff_mask] / self.T).sum() - loss += -torch.log((numerator + 1e-8) / (denominator + 1e-8)) - count += 1 - return loss / max(count, 1) if count > 0 else torch.tensor(0.0, requires_grad=True, device=embeddings.device) - - -def train_clean_model(training_data: List[Data], epochs: int = 200, batch_size: int = 32, - num_layers: int = 3) -> SAGEModel: - num_classes = max([int(d.y) for d in training_data]) + 1 - model = SAGEModel(input_dim=training_data[0].x.size(1), hidden_dim=160, num_classes=num_classes, - num_layers=num_layers) - optimizer = 
optim.Adam(model.parameters(), lr=1e-3) - criterion = nn.CrossEntropyLoss() - loader = DataLoader(training_data, batch_size=batch_size, shuffle=True) - for epoch in range(epochs): - model.train() - total_loss = 0 - correct = 0 - total = 0 - for batch in loader: - optimizer.zero_grad() - out = model(batch.x, batch.edge_index, batch.batch) - loss = criterion(out, batch.y.view(-1)) - loss.backward() - torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0) - optimizer.step() - total_loss += loss.item() * batch.num_graphs - preds = out.argmax(dim=1) - correct += (preds == batch.y.view(-1)).sum().item() - total += batch.num_graphs - - if epoch > 50 and epoch % 20 == 0: - for pg in optimizer.param_groups: - pg['lr'] *= 0.8 - return model - - -def train_watermarked_model_full( - training_data: List[Data], - key_inputs: List[Tuple[Data, int]], - epochs: int = 300, - alpha: float = 0.1, - num_layers: int = 4, - hidden_dim: int = 160, - dropout: float = 0.05, - lr: float = 1e-3, - snnl_temperature: float = 1.0, -): - num_classes = max([int(d.y) for d in training_data] + [label for _, label in key_inputs]) + 1 - model = SAGEModel( - input_dim=training_data[0].x.size(1), - hidden_dim=hidden_dim, - num_classes=num_classes, - num_layers=num_layers, - dropout=dropout, - ) - optimizer = optim.Adam(model.parameters(), lr=lr) - ce_criterion = nn.CrossEntropyLoss() - snnl_criterion = SNNLLoss(temperature=snnl_temperature) - batch_size = 32 - - wm_graphs = [d for d, _ in key_inputs] - wm_labels = [l for _, l in key_inputs] - - loader_clean = DataLoader(training_data, batch_size=batch_size, shuffle=True) - loader_wm = DataLoader([ - Data(x=g.x, edge_index=g.edge_index, y=torch.tensor([l])) for g, l in zip(wm_graphs, wm_labels) - ], batch_size=batch_size, shuffle=True) - - for epoch in range(epochs): - model.train() - total_loss, total_correct, total_count = 0, 0, 0 - for batch in loader_clean: - optimizer.zero_grad() - out = model(batch.x, batch.edge_index, batch.batch) - loss 
= ce_criterion(out, batch.y.view(-1)) - loss.backward() - torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0) - optimizer.step() - total_loss += loss.item() * batch.num_graphs - preds = out.argmax(dim=1) - total_correct += (preds == batch.y.view(-1)).sum().item() - total_count += batch.num_graphs - - wm_loss_total, wm_snnl_total, wm_batches = 0.0, 0.0, 0 - for batch in loader_wm: - optimizer.zero_grad() - out = model(batch.x, batch.edge_index, batch.batch) - loss_ce = ce_criterion(out, batch.y.view(-1)) - batch_embeddings, batch_labels = [], [] - emb_wm = model(batch.x, batch.edge_index, batch.batch, return_embedding=True) - batch_embeddings.append(emb_wm) - batch_labels.extend([1] * emb_wm.size(0)) - try: - clean_batch = next(iter(loader_clean)) - emb_clean = model(clean_batch.x, clean_batch.edge_index, clean_batch.batch, return_embedding=True) - batch_embeddings.append(emb_clean) - batch_labels.extend([0] * emb_clean.size(0)) - except StopIteration: - pass - if len(batch_embeddings) > 1: - emb_t = torch.cat(batch_embeddings, dim=0) - lbl_t = torch.tensor(batch_labels, dtype=torch.long, device=emb_t.device) - snnl_loss = snnl_criterion(emb_t, lbl_t) - else: - snnl_loss = torch.tensor(0.0, device=out.device) - loss = loss_ce + alpha * snnl_loss - loss.backward() - torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0) - optimizer.step() - wm_loss_total += loss_ce.item() - wm_snnl_total += snnl_loss.item() - wm_batches += 1 - - if epoch > 50 and epoch % 20 == 0: - for pg in optimizer.param_groups: - pg['lr'] *= 0.8 - - if epoch % 20 == 0: - avg_clean_loss = total_loss / max(total_count, 1) - avg_wm_loss = wm_loss_total / max(wm_batches, 1) - avg_wm_snnl = wm_snnl_total / max(wm_batches, 1) - model.eval() - c_corr, wm_corr = 0, 0 - with torch.no_grad(): - for d in training_data[:20]: - b = torch.zeros(d.x.size(0), dtype=torch.long) - if model(d.x, d.edge_index, b).argmax() == int(d.y): - c_corr += 1 - for x_exp, lbl in key_inputs[:10]: - b = 
torch.zeros(x_exp.x.size(0), dtype=torch.long) - if model(x_exp.x, x_exp.edge_index, b).argmax() == lbl: - wm_corr += 1 - clean_acc = c_corr / 20 - wm_acc = wm_corr / min(10, len(key_inputs)) - model.train() - return model - - -def evaluate_watermark_effectiveness(model: SAGEModel, key_inputs: List[Tuple[Data, int]]) -> float: - model.eval() - correct = 0 - with torch.no_grad(): - for x, expected_label in key_inputs: - batch = torch.zeros(x.x.size(0), dtype=torch.long) - pred = model(x.x, x.edge_index, batch).argmax(1).item() - if pred == expected_label: - correct += 1 - return correct / len(key_inputs) if key_inputs else 0.0 - - -def evaluate_clean_accuracy(model: SAGEModel, test_data: List[Data], batch_size=32) -> float: - if not test_data: - return 0.0 - loader = DataLoader(test_data, batch_size=batch_size) - model.eval() - correct = 0 - total = 0 - with torch.no_grad(): - for batch in loader: - out = model(batch.x, batch.edge_index, batch.batch) - preds = out.argmax(dim=1) - correct += (preds == batch.y.view(-1)).sum().item() - total += batch.num_graphs - return correct / total if total > 0 else 0.0 - - -class KeyInputOptimizer: - def __init__(self, training_dataset: List[Data], key_inputs: List[Tuple[Data, int]], T_opt: int = 20): - self.training_dataset = training_dataset - self.key_inputs = key_inputs - self.feature_dim = training_dataset[0].x.size(1) - self.num_classes = max([int(d.y) for d in training_dataset] + [label for _, label in key_inputs]) + 1 - self.T_opt = T_opt - - def optimize(self): - class MTopo(nn.Module): - def __init__(self, N_t): - super().__init__() - self.N_t = N_t - self.net = nn.Sequential( - nn.Linear(N_t * N_t, N_t * N_t), - nn.ReLU(), - nn.Linear(N_t * N_t, N_t * N_t), - nn.Sigmoid() - ) - - def forward(self, A): - x = A.flatten().unsqueeze(0) - out = self.net(x).reshape(self.N_t, self.N_t) - return out - - class MFeat(nn.Module): - def __init__(self, feat_dim, N_t): - super().__init__() - self.N_t = N_t - self.feat_dim = feat_dim 
- self.net = nn.Sequential( - nn.Linear(N_t * feat_dim, N_t * feat_dim), - nn.ReLU(), - nn.Linear(N_t * feat_dim, N_t * feat_dim) - ) - - def forward(self, F): - x = F.flatten().unsqueeze(0) - out = self.net(x).reshape(self.N_t, self.feat_dim) - return out - - optimized_pairs = [] - for orig_data, label in self.key_inputs: - N = orig_data.x.size(0) - N_t = min(4, N) - trigger_nodes = torch.arange(N_t) - rest_nodes = torch.arange(N_t, N) - - # Build trigger adjacency - A_trig = torch.zeros(N_t, N_t) - edge_set = set(tuple(edge) for edge in orig_data.edge_index.t().tolist()) - for i, u in enumerate(trigger_nodes): - for j, v in enumerate(trigger_nodes): - if (u.item(), v.item()) in edge_set: - A_trig[i, j] = 1 - F_trig = orig_data.x[:N_t].clone() - - m_topo = MTopo(N_t) - m_feat = MFeat(self.feature_dim, N_t) - opt = torch.optim.Adam(list(m_topo.parameters()) + list(m_feat.parameters()), lr=1e-2) - - for step in range(self.T_opt): - opt.zero_grad() - A_new = m_topo(A_trig) - F_new = m_feat(F_trig) - A_bin = (A_new > 0.5).float() - - # Construct the new key input graph - new_x = orig_data.x.clone() - new_x[:N_t] = F_new.detach().squeeze() - edges = [] - for i in range(N_t): - for j in range(N_t): - if A_bin[i, j] > 0.5: - edges.append([i, j]) - for u, v in orig_data.edge_index.t().tolist(): - if u >= N_t and v >= N_t: - edges.append([u, v]) - elif (u >= N_t and v < N_t) or (u < N_t and v >= N_t): - edges.append([u, v]) - if edges: - edge_index = torch.tensor(edges, dtype=torch.long).t().contiguous() - else: - edge_index = torch.zeros((2, 0), dtype=torch.long) - data_opt = Data(x=new_x, edge_index=edge_index) - - # Train temp SAGE model on this single key input (with train data) - temp_model = SAGEModel(self.feature_dim, hidden_dim=64, num_classes=self.num_classes, num_layers=3) - temp_opt = torch.optim.Adam(temp_model.parameters(), lr=1e-2) - criterion = nn.CrossEntropyLoss() - batch = torch.zeros(data_opt.x.size(0), dtype=torch.long) - for _ in range(2): - rand_data 
= random.choice(self.training_dataset) - temp_opt.zero_grad() - out1 = temp_model(data_opt.x, data_opt.edge_index, batch) - loss1 = criterion(out1, torch.tensor([label])) - batch_rand = torch.zeros(rand_data.x.size(0), dtype=torch.long) - out2 = temp_model(rand_data.x, rand_data.edge_index, batch_rand) - loss2 = criterion(out2, rand_data.y.view(-1)) - loss = loss1 + loss2 - loss.backward() - temp_opt.step() - - with torch.no_grad(): - pred = temp_model(data_opt.x, data_opt.edge_index, batch) - ce_loss = criterion(pred, torch.tensor([label])) - score = -ce_loss - score = score.requires_grad_() - score.backward() - opt.step() - - optimized_pairs.append((data_opt, label)) - return optimized_pairs - - -class SurviveWM2(BaseDefense): - def __init__( - self, - dataset, - attack_node_fraction, - model_path=None, - alpha=0.1, - num_layers=4, - clean_epochs=200, - wm_epochs=200, - **kwargs - ): - super().__init__(dataset, attack_node_fraction) - self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - self.alpha = alpha - self.num_layers = num_layers - self.clean_epochs = clean_epochs - self.wm_epochs = wm_epochs - - # --- Data extraction --- - self.train_data = getattr(dataset, 'train_data', None) - self.test_data = getattr(dataset, 'test_data', None) - - if not (isinstance(self.train_data, list) and isinstance(self.test_data, list)): - raise ValueError( - "This defense only supports graph classification datasets (e.g., ENZYMES). Node-level datasets are not supported.") - - self.model_path = model_path - - def defend(self): - """ - Main defense workflow: - 1. Train a target model (clean) - 2. (optional) Simulate attack on target model (if implemented) - 3. Train defense (watermarked) model - 4. 
Evaluate defense and print detailed metrics - Returns - ------- - dict - Dictionary containing performance metrics - """ - print("=" * 60) - print("Step 1: Train clean (victim) model") - print("-" * 60) - target_model = self._train_target_model() - baseline_acc = evaluate_clean_accuracy(target_model, self.test_data) - print(f"Test accuracy on clean (victim) model: {baseline_acc:.4f}") - - print("\nStep 2: Generate and optimize watermark key inputs") - print("-" * 60) - wm_gen = getattr(self, 'wm_gen', None) - if wm_gen is None: - self.wm_gen = WatermarkGenerator(self.train_data, - num_watermark_samples=int(len(self.train_data) * self.alpha)) - key_pairs = self.wm_gen.generate_watermark_set_with_clean_model(target_model) - optimizer = KeyInputOptimizer(self.train_data, key_pairs, T_opt=20) - key_pairs_optimized = optimizer.optimize() - print(f"Generated and optimized {len(key_pairs_optimized)} watermark key inputs") - - print("\nStep 3: Train defense (watermarked) model") - print("-" * 60) - defense_model = train_watermarked_model_full( - self.train_data, key_pairs_optimized, - epochs=self.wm_epochs, alpha=self.alpha, num_layers=self.num_layers - ) - defense_acc = evaluate_clean_accuracy(defense_model, self.test_data) - print(f"Test accuracy on defense (watermarked) model: {defense_acc:.4f}") - - print("\nStep 4: Evaluate watermark effectiveness") - print("-" * 60) - watermark_success = evaluate_watermark_effectiveness(defense_model, key_pairs_optimized) - print(f"Watermark detection rate (defense model): {watermark_success:.4f}") - - acc_degradation = baseline_acc - defense_acc - - print("\nPerformance metrics:") - print("-" * 60) - print(f"{'Metric':<36} {'Value'}") - print("-" * 60) - print(f"{'Test acc. (clean model)':<36} {baseline_acc:.4f}") - print(f"{'Test acc. 
(defense/watermarked)':<36} {defense_acc:.4f}") - print(f"{'Accuracy degradation':<36} {acc_degradation:.4f}") - print(f"{'Watermark detection (defense)':<36} {watermark_success:.4f}") - print("-" * 60) - - results = { - "baseline_accuracy": baseline_acc, - "defense_accuracy": defense_acc, - "watermark_effectiveness": watermark_success, - "accuracy_degradation": acc_degradation, - } - return results - - def _load_model(self): - if not self.model_path: - raise ValueError("No model_path provided.") - - def _train_target_model(self): - print("[OptimizedWatermarkDefense] Training clean (victim) model...") - model = train_clean_model(self.train_data, epochs=self.clean_epochs, num_layers=self.num_layers) - self.net1 = model - self.wm_gen = WatermarkGenerator(self.train_data, num_watermark_samples=int(len(self.train_data) * self.alpha)) - return model - - def _train_defense_model(self, clean_model=None): - print("[OptimizedWatermarkDefense] Generating and optimizing watermark key inputs...") - if not hasattr(self, 'wm_gen'): - self.wm_gen = WatermarkGenerator(self.train_data, - num_watermark_samples=int(len(self.train_data) * self.alpha)) - key_pairs = self.wm_gen.generate_watermark_set_with_clean_model(clean_model or self.net1) - optimizer = KeyInputOptimizer(self.train_data, key_pairs, T_opt=20) - key_pairs_optimized = optimizer.optimize() - print("[OptimizedWatermarkDefense] Training watermarked model...") - model = train_watermarked_model_full( - self.train_data, key_pairs_optimized, - epochs=self.wm_epochs, alpha=self.alpha, num_layers=self.num_layers - ) - self.net2 = model - self.key_pairs_optimized = key_pairs_optimized - return model, key_pairs_optimized - - def _train_surrogate_model(self): - pass diff --git a/pygip/models/defense/__init__.py b/pygip/models/defense/__init__.py deleted file mode 100644 index 057c1c3e..00000000 --- a/pygip/models/defense/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -from .BackdoorWM import BackdoorWM -from .ImperceptibleWM import 
ImperceptibleWM -from .ImperceptibleWM2 import ImperceptibleWM2 -from .RandomWM import RandomWM -from .SurviveWM import SurviveWM -from .SurviveWM2 import SurviveWM2 -from .atom.ATOM import ATOM -from .Integrity import QueryBasedVerificationDefense as IntegrityVerification -from .GrOVe import GroveDefense -from .Revisiting import Revisiting - -__all__ = [ - 'BackdoorWM', - 'ImperceptibleWM', - 'ImperceptibleWM2', - 'RandomWM', - 'SurviveWM', - 'SurviveWM2', - 'IntegrityVerification' - 'GroveDefense' - 'ATOM', - 'Revisiting' -] diff --git a/pygip/models/defense/atom/ATOM.py b/pygip/models/defense/atom/ATOM.py deleted file mode 100644 index e96d92f3..00000000 --- a/pygip/models/defense/atom/ATOM.py +++ /dev/null @@ -1,995 +0,0 @@ -import ast -import os -import random -from pathlib import Path -from time import time - -import networkx as nx -import numpy as np -import pandas as pd -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -from sklearn.metrics import ( - precision_score, - recall_score, - f1_score, - accuracy_score, - roc_curve, - auc -) -from sklearn.model_selection import train_test_split -from torch.distributions import Categorical -from torch.nn.utils.rnn import pad_sequence -from torch.utils.data import Dataset, DataLoader -from torch_geometric.datasets import Planetoid, CitationFull, WebKB -from torch_geometric.nn import GCNConv -from torch_geometric.seed import seed_everything -from torch_geometric.utils import to_networkx -from tqdm import tqdm - -from pygip.datasets.datasets import Dataset as PyGIPDataset -from pygip.models.defense.base import BaseDefense -from pygip.utils.metrics import DefenseMetric, DefenseCompMetric - - -def set_seed(seed: int): - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - np.random.seed(seed) - random.seed(seed) - - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False - - seed_everything(seed) - - -class 
GCN(nn.Module): - def __init__(self, input_dim, hidden_dim, output_dim): - super(GCN, self).__init__() - self.conv1 = GCNConv(input_dim, hidden_dim) - self.conv2 = GCNConv(hidden_dim, output_dim) - - def forward(self, x, edge_index): - hidden = self.conv1(x, edge_index) - x = F.relu(hidden) - output = self.conv2(x, edge_index) - return F.log_softmax(output, dim=1), output - - -def train_gcn(model, data, optimizer, criterion, epochs=200, verbose=True): - model.train() - for epoch in range(epochs): - optimizer.zero_grad() - output, _ = model(data.x, data.edge_index) - loss = criterion(output[data.train_mask], data.y[data.train_mask]) - loss.backward() - optimizer.step() - - if verbose and epoch % 10 == 0: - print(f"[GCN-Train] Epoch {epoch}, Loss: {loss.item()}") - - -class TargetGCN: - def __init__(self, trained_model, data): - self.model = trained_model - self.data = data - - def predict(self, query_indices): - self.model.eval() - with torch.no_grad(): - output, _ = self.model(self.data.x, self.data.edge_index) - probs = F.softmax(output[query_indices], dim=1).cpu().numpy() - return probs - - def get_embedding(self): - self.model.eval() - with torch.no_grad(): - _, embeddings = self.model(self.data.x, self.data.edge_index) - return embeddings - - -def get_node_embedding(model, data, node_idx): - embeddings = model.get_embedding() - return embeddings[node_idx] - - -def get_one_hop_neighbors(data, node_idx): - edge_index = data.edge_index - neighbors = edge_index[1][edge_index[0] == node_idx].tolist() - return neighbors - - -def average_pooling_with_neighbors(model, data, node_idx): - embeddings = model.get_embedding() - neighbors = get_one_hop_neighbors(data, node_idx) - neighbors.append(node_idx) - neighbor_embeddings = embeddings[neighbors] - pooled_embedding = torch.mean(neighbor_embeddings, dim=0) - return pooled_embedding - - -def k_core_decomposition(graph): - k_core_dict = nx.core_number(graph) - return k_core_dict - - -def 
average_pooling_with_neighbors_batch(model, data, node_indices): - embeddings = model.get_embedding() - neighbors = [get_one_hop_neighbors(data, idx) for idx in node_indices] - - node_and_neighbors = [torch.tensor([idx] + list(neighbors[i])) for i, idx in enumerate(node_indices)] - - pooled_embeddings = torch.stack([ - embeddings[node_idx_list].mean(dim=0) for node_idx_list in node_and_neighbors - ]) - return pooled_embeddings - - -def compute_embedding_batch(target_model, data, k_core_values_graph, max_k_core, node_indices, lamb=1.0): - pooled_embeddings = average_pooling_with_neighbors_batch(target_model, data, node_indices) - k_core_values = torch.tensor([k_core_values_graph[node_idx] for node_idx in node_indices], dtype=torch.float32).to( - pooled_embeddings.device) - - max_k_core_tensor = torch.log(max_k_core) - scaled_k_core = torch.log(k_core_values) / max_k_core_tensor - scaling_function = 1 + lamb * (torch.sigmoid(scaled_k_core) - 0.5) * 2 - final_embeddings = pooled_embeddings * scaling_function.unsqueeze(-1) - return final_embeddings - - -def simple_embedding_batch(target_model, data, node_indices): - pooled_embeddings = average_pooling_with_neighbors_batch(target_model, data, node_indices) - return pooled_embeddings - - -def precompute_all_node_embeddings( - target_model, - data, - k_core_values_graph, - max_k_core, - lamb=1.0 -): - all_node_indices = list(range(data.num_nodes)) - all_embeddings = compute_embedding_batch( - target_model, - data, - k_core_values_graph, - max_k_core, - all_node_indices, - lamb=lamb - ) - return all_embeddings - - -def precompute_simple_embeddings(target_model, data): - all_node_indices = list(range(data.num_nodes)) - return simple_embedding_batch(target_model, data, all_node_indices) - - -def collate_fn_no_pad(batch): - batch_seqs = [item[0] for item in batch] - batch_labels = [item[1] for item in batch] - return batch_seqs, torch.tensor(batch_labels, dtype=torch.long) - - -def preprocess_sequences(df): - def 
convert_to_list(sequence): - if isinstance(sequence, str): - return ast.literal_eval(sequence) - return sequence - - df["Sequence"] = df["Sequence"].apply(convert_to_list) - return df - - -class SequencesDataset(Dataset): - def __init__(self, df): - self.df = df.reset_index(drop=True) - - def __len__(self): - return len(self.df) - - def __getitem__(self, idx): - seq = self.df.loc[idx, "Sequence"] - lbl = self.df.loc[idx, "Label"] - if isinstance(seq, str): - raise TypeError(f"Sequence should be list[int], but received str: {seq}") - return list(seq), int(lbl) - - -def split_and_adjust(dataset_sequences, seed): - train_df, temp_df = train_test_split(dataset_sequences, test_size=0.3, random_state=seed) - val_df, test_df = train_test_split(temp_df, test_size=0.5, random_state=seed) - return train_df, val_df, test_df - - -def build_loaders( - csv_path="attack_CiteSeer.csv", - batch_size=16, - drop_last=True, - seed=42, -): - df = pd.read_csv(csv_path) - df_unique = df.drop_duplicates(subset="Sequence") - df = df_unique - dataset_sequences = df[["Sequence", "Label"]].copy() - dataset_sequences = preprocess_sequences(dataset_sequences) - dataset_sequences["Label"] = dataset_sequences["Label"].astype(int) - dataset_sequences = dataset_sequences[dataset_sequences['Sequence'].apply(len) > 1] - - train_df, val_df, test_df = split_and_adjust(dataset_sequences, seed) - - train_dataset = SequencesDataset(train_df) - val_dataset = SequencesDataset(val_df) - test_dataset = SequencesDataset(test_df) - - train_loader = DataLoader( - train_dataset, - batch_size=batch_size, - shuffle=True, - collate_fn=collate_fn_no_pad, - drop_last=drop_last - ) - val_loader = DataLoader( - val_dataset, - batch_size=batch_size, - shuffle=False, - collate_fn=collate_fn_no_pad, - drop_last=drop_last - ) - test_loader = DataLoader( - test_dataset, - batch_size=batch_size, - shuffle=False, - collate_fn=collate_fn_no_pad, - drop_last=drop_last - ) - - return train_loader, val_loader, test_loader - - -def 
load_data_and_model(csv_path, batch_size, seed, data_path, lamb): - try: - script_dir = Path(__file__).resolve().parent - parent_dir = script_dir.parent - except NameError: - parent_dir = Path.cwd().parent - print( - "If __file__ is not defined, " - "the directory above the current working directory is used as the target directory." - ) - - os.chdir(parent_dir) - - train_loader, val_loader, test_loader = build_loaders( - csv_path=csv_path, - batch_size=batch_size, - drop_last=True, - seed=seed - ) - - # ======== Step 2: target_model, data ========= - if data_path == "CiteSeer": - dataset = Planetoid(root="./data", name=data_path) - data = dataset[0] - elif data_path == "PubMed": - dataset = Planetoid(root="./data", name="PubMed") - data = dataset[0] - elif data_path == "Cora": - dataset = Planetoid(root="./data", name=data_path) - data = dataset[0] - elif data_path == "Cora_ML": - dataset = CitationFull(root="./data", name="Cora_ML") - data = dataset[0] - num_nodes = data.num_nodes - num_train = int(num_nodes * 0.6) - num_val = int(num_nodes * 0.2) - num_test = num_nodes - num_train - num_val - perm = torch.randperm(num_nodes) - data.train_mask = torch.zeros(num_nodes, dtype=torch.bool) - data.val_mask = torch.zeros(num_nodes, dtype=torch.bool) - data.test_mask = torch.zeros(num_nodes, dtype=torch.bool) - - data.train_mask[perm[:num_train]] = True - data.val_mask[perm[num_train:num_train + num_val]] = True - data.test_mask[perm[num_train + num_val:]] = True - elif data_path == "Cornell" or data_path == "Wisconsin": - dataset = WebKB(root="./data", name=data_path) - data = dataset[0] - num_nodes = data.num_nodes - num_train = int(num_nodes * 0.6) - num_val = int(num_nodes * 0.2) - num_test = num_nodes - num_train - num_val - - perm = torch.randperm(num_nodes) - data.train_mask = torch.zeros(num_nodes, dtype=torch.bool) - data.val_mask = torch.zeros(num_nodes, dtype=torch.bool) - data.test_mask = torch.zeros(num_nodes, dtype=torch.bool) - - 
data.train_mask[perm[:num_train]] = True - data.val_mask[perm[num_train:num_train + num_val]] = True - data.test_mask[perm[num_train + num_val:]] = True - - trained_gcn = GCN(dataset.num_features, 16, dataset.num_classes) - target_model = TargetGCN(trained_model=trained_gcn, data=data) - - G = to_networkx(data, to_undirected=True) - G.remove_edges_from(nx.selfloop_edges(G)) - k_core_values_graph = k_core_decomposition(G) - max_k_core = torch.tensor(max(k_core_values_graph.values()), dtype=torch.float32) - - all_embeddings = precompute_all_node_embeddings( - target_model, data, k_core_values_graph, max_k_core, lamb=lamb - ) - - return train_loader, val_loader, test_loader, target_model, max_k_core, all_embeddings, dataset, data - - -class StateTransformMLP(nn.Module): - def __init__(self, input_dim, hidden_dim, output_dim): - super(StateTransformMLP, self).__init__() - self.fc1 = nn.Linear(input_dim, hidden_dim) - self.fc2 = nn.Linear(hidden_dim, output_dim) - - def forward(self, prob_factor): - x = prob_factor - x = F.relu(self.fc1(x)) - x = self.fc2(x) - return x - - -class FusionGRU(nn.Module): - def __init__(self, input_size, hidden_size): - super(FusionGRU, self).__init__() - self.hidden_size = hidden_size - - self.Wz = nn.Linear(input_size + hidden_size, hidden_size) - self.Wr = nn.Linear(input_size + hidden_size, hidden_size) - self.Wh = nn.Linear(input_size + hidden_size, hidden_size) - self.Wg = nn.Linear(input_size * 2, input_size) - self.bg = nn.Parameter(torch.zeros(input_size)) - - def forward(self, h_it, h_it_m1, hidden_state): - - delta_it = h_it - h_it_m1 - - concat_input = torch.cat((delta_it, h_it), dim=-1) - g_t = torch.sigmoid(self.Wg(concat_input) + self.bg) - - x_t = g_t * delta_it + (1 - g_t) * h_it - - combined = torch.cat((x_t, hidden_state), dim=-1) - z_t = torch.sigmoid(self.Wz(combined)) - r_t = torch.sigmoid(self.Wr(combined)) - - r_h_prev = r_t * hidden_state - combined_candidate = torch.cat((x_t, r_h_prev), dim=-1) - h_tilde = 
torch.tanh(self.Wh(combined_candidate)) - - h_next = (1 - z_t) * hidden_state + z_t * h_tilde - return h_next - - def process_sequence(self, inputs, hidden_state=None): - batch_size, seq_len, input_size = inputs.size() - if hidden_state is None: - hidden_state = torch.zeros(batch_size, self.hidden_size, device=inputs.device) - - outputs = [] - h_it_m1 = torch.zeros(batch_size, input_size, device=inputs.device) - - for t in range(seq_len): - h_it = inputs[:, t, :] - hidden_state = self.forward(h_it, h_it_m1, hidden_state) - outputs.append(hidden_state.unsqueeze(1)) - h_it_m1 = h_it - - return torch.cat(outputs, dim=1) - - -def test_model(agent, gru, mlp_transform, test_loader, target_model, data, all_embeddings, hidden_size, device): - agent.eval() - gru.eval() - mlp_transform.eval() - - total_reward = 0.0 - action_dim = 2 - all_true_labels = [] - all_predicted_labels = [] - all_predicted_probs = [] - - with torch.no_grad(): - for batch_seqs, batch_labels in test_loader: - batch_labels = batch_labels.to(device) - - batch_seqs = [torch.tensor(seq, dtype=torch.long, device=device) for seq in batch_seqs] - padded_seqs = pad_sequence(batch_seqs, batch_first=True, padding_value=0) - mask = (padded_seqs != 0).float().to(device) - - max_seq_len = padded_seqs.size(1) - hidden_states = torch.zeros(len(batch_seqs), hidden_size, device=device) - - all_inputs = [] - for t in range(max_seq_len): - node_indices = padded_seqs[:, t].tolist() - cur_inputs = all_embeddings[node_indices] - all_inputs.append(cur_inputs) - - all_inputs = torch.stack(all_inputs, dim=1).to(device) - hidden_states = gru.process_sequence(all_inputs) - masked_hidden_states = hidden_states * mask.unsqueeze(-1) - - prob_factors = torch.ones(len(batch_seqs), max_seq_len, action_dim, device=device) - custom_states = (mlp_transform(prob_factors) * masked_hidden_states).detach() - - actions, probabilities, _, _ = agent.select_action(custom_states.view(-1, hidden_size)) - actions = actions.view(len(batch_seqs), 
max_seq_len) - probabilities = probabilities.view(len(batch_seqs), max_seq_len) - - for i in range(len(batch_seqs)): - last_valid_step = (mask[i].sum().long() - 1).item() - predicted_action = actions[i, last_valid_step].item() - predicted_prob = probabilities[i, last_valid_step].item() - true_label = batch_labels[i].item() - - all_true_labels.append(true_label) - all_predicted_labels.append(predicted_action) - all_predicted_probs.append(predicted_prob) - - reward = custom_reward_function(predicted_action, true_label) - total_reward += reward - - accuracy = accuracy_score(all_true_labels, all_predicted_labels) - precision = precision_score(all_true_labels, all_predicted_labels, average='binary') - recall = recall_score(all_true_labels, all_predicted_labels, average='binary') - f1 = f1_score(all_true_labels, all_predicted_labels, average='binary') - - fpr, tpr, _ = roc_curve(all_true_labels, all_predicted_probs) - auc_value = auc(fpr, tpr) - - return accuracy, precision, recall, f1, auc_value - - -class Memory: - def __init__(self): - self.states = [] - self.actions = [] - self.log_probs = [] - self.rewards = [] - self.dones = [] - self.advantages = [] - self.entropies = [] - self.returns = [] - self.all_probs = {} - self.masks = [] - - def store(self, custom_states, action, log_prob, reward, done, entropy, probs=None, masks=None): - for i in range(custom_states.size(0)): - state_seq = custom_states[i] - action_seq = action[i] - log_prob_seq = log_prob[i] - reward_seq = reward[i] - done_seq = done[i] - mask_seq = masks[i] - - valid_len = int(mask_seq.sum().item()) - - state_seq = torch.cat([state_seq[:valid_len], - torch.zeros(custom_states.size(1) - valid_len, custom_states.size(2), - device=state_seq.device)]) - action_seq = torch.cat( - [action_seq[:valid_len], torch.zeros(action.size(1) - valid_len, device=action_seq.device)]) - log_prob_seq = torch.cat( - [log_prob_seq[:valid_len], torch.zeros(log_prob.size(1) - valid_len, device=log_prob_seq.device)]) - 
reward_seq = torch.cat( - [reward_seq[:valid_len], torch.zeros(reward.size(1) - valid_len, device=reward_seq.device)]) - done_seq = torch.cat([done_seq[:valid_len], torch.zeros(done.size(1) - valid_len, device=done_seq.device)]) - mask_seq = torch.cat([mask_seq[:valid_len], torch.zeros(masks.size(1) - valid_len, device=mask_seq.device)]) - - self.states.append(state_seq) - self.actions.append(action_seq) - self.log_probs.append(log_prob_seq) - self.rewards.append(reward_seq) - self.dones.append(done_seq) - self.masks.append(mask_seq) - - consistent_shape = all(tensor.shape == self.states[0].shape for tensor in self.states) - - def clear(self): - self.states = [] - self.actions = [] - self.log_probs = [] - self.rewards = [] - self.dones = [] - self.advantages = [] - self.entropies = [] - self.returns = [] - self.masks = [] - - -def compute_returns_and_advantages(memory, gamma=0.99, lam=0.95): - rewards = torch.stack(memory.rewards, dim=0) - dones = torch.stack(memory.dones, dim=0) - masks = torch.stack(memory.masks, dim=0) - batch_size, max_seq_len = rewards.size() - - returns = torch.zeros_like(rewards) - advantages = torch.zeros_like(rewards) - - running_return = torch.zeros(batch_size, device=rewards.device) - running_advantage = torch.zeros(batch_size, device=rewards.device) - - for t in reversed(range(max_seq_len)): - mask_t = masks[:, t] - reward_t = rewards[:, t] - done_t = dones[:, t] - - running_return = reward_t + gamma * running_return * (1 - done_t) - td_error = reward_t + gamma * (returns[:, t + 1] if t + 1 < max_seq_len else 0) * (1 - done_t) - reward_t - - running_return *= mask_t - td_error *= mask_t - - returns[:, t] = running_return - running_advantage = td_error + gamma * lam * running_advantage * (1 - done_t) - running_advantage *= mask_t - advantages[:, t] = running_advantage - - memory.returns = returns - memory.advantages = advantages - - -def custom_reward_function(predicted, label, predicted_distribution=None): - reward = 0.0 - if 
predicted_distribution is not None: - if predicted_distribution > 0.90: - reward += -8.0 - if predicted == 1 and label == 0: - return -22.0 - if predicted == 0 and label == 1: - return -18.0 - if predicted == 1 and label == 1: - return 16.0 - if predicted == 0 and label == 0: - return 16.0 - return reward - - -class PolicyNetwork(nn.Module): - def __init__(self, state_dim, action_dim): - super(PolicyNetwork, self).__init__() - self.fc1 = nn.Linear(state_dim, 64) - self.fc2 = nn.Linear(64, 64) - self.action_layer = nn.Linear(64, action_dim) - self.value_layer = nn.Linear(64, 1) - - def forward(self, state): - x = torch.relu(self.fc1(state)) - x = torch.relu(self.fc2(x)) - action_logits = self.action_layer(x) - state_value = self.value_layer(x) - return action_logits, state_value - - -class PPOAgent(nn.Module): - def __init__(self, learning_rate, batch_size, K_epochs, state_dim, action_dim, gru, mlp, clip_epsilon, entropy_coef, - device): - super(PPOAgent, self).__init__() - self.policy = PolicyNetwork(state_dim, action_dim).to(device) - self.optimizer = optim.Adam( - list(self.policy.parameters()) + list(gru.parameters()) + list(mlp.parameters()), - lr=learning_rate - ) - self.policy_old = PolicyNetwork(state_dim, action_dim).to(device) - self.policy_old.load_state_dict(self.policy.state_dict()) - self.mse_loss = nn.MSELoss() - self.batch_size = batch_size - self.K_epochs = K_epochs - self.device = device - self.hidden_size = state_dim - self.clip_epsilon = clip_epsilon - self.entropy_coef = entropy_coef - - def select_action(self, state): - device = next(self.policy.parameters()).device - if isinstance(state, torch.Tensor): - state = state.clone().detach().to(device) - else: - state = torch.tensor(state, dtype=torch.float).to(device) - - with torch.no_grad(): - action_logits, _ = self.policy_old(state) - probs = torch.softmax(action_logits, dim=-1) - dist = Categorical(probs) - actions = dist.sample() - log_probs = dist.log_prob(actions) - entropy = dist.entropy() 
- - return actions, log_probs, entropy, probs - - def update(self, memory): - states = torch.stack(memory.states).view(self.batch_size, -1, self.hidden_size).to(self.device) - actions = torch.cat(memory.actions, dim=0) - actions = actions.view(self.batch_size, -1).to(self.device) - log_probs_old = torch.cat(memory.log_probs, dim=0).view(self.batch_size, -1).to(self.device) - returns = memory.returns.view(self.batch_size, -1).to(self.device) - advantages = memory.advantages.view(self.batch_size, -1).to(self.device) - - for _ in range(self.K_epochs): - action_logits, state_values = self.policy(states) - probs = torch.softmax(action_logits, dim=-1) - dist = Categorical(probs) - - log_probs = dist.log_prob(actions.squeeze()).unsqueeze(1) - entropy = dist.entropy().mean() - - log_probs = log_probs.view_as(advantages) - ratios = torch.exp(log_probs - log_probs_old) - surr1 = ratios * advantages - surr2 = torch.clamp(ratios, 1 - self.clip_epsilon, 1 + self.clip_epsilon) * advantages - - loss = -torch.min(surr1, surr2).mean() + \ - 0.5 * self.mse_loss(state_values.squeeze(), returns) - \ - self.entropy_coef * entropy - - self.optimizer.zero_grad() - loss.backward() - self.optimizer.step() - - self.policy_old.load_state_dict(self.policy.state_dict()) - - -class ATOM(BaseDefense): - supported_api_types = {"pyg"} - supported_datasets = { - "Cora", "CiteSeer", "PubMed", - "Computers", "Photo", - "CS", "Physics" - } - - def __init__(self, dataset: PyGIPDataset, attack_node_fraction: float = 0): - super().__init__(dataset, attack_node_fraction) - self.dataset = dataset - # Load dataset related information, following BackdoorWM pattern - self.graph_dataset = dataset.graph_dataset - self.graph_data = dataset.graph_data - - # Get basic information - self.num_nodes = dataset.num_nodes - self.num_features = dataset.num_features - self.num_classes = dataset.num_classes - - # Set device - self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - self.graph_data = 
self.graph_data.to(device=self.device) - - def _load_data_and_model(self, batch_size=16, seed=0, lamb=0): - """ - Load data and model following BackdoorWM format - Use data from self.dataset directly instead of reloading - """ - # Get dataset name - dataset_name = self.dataset.__class__.__name__ - - # Build CSV path - current_dir = os.path.dirname(os.path.abspath(__file__)) - csv_path = os.path.join(current_dir, 'csv_data', f'attack_{dataset_name}.csv') - - # Load sequence data - train_loader, val_loader, test_loader = build_loaders( - csv_path=csv_path, - batch_size=batch_size, - drop_last=True, - seed=seed - ) - - # Use PyG format directly (since we declared supported_api_types = {"pyg"}) - features = self.graph_data.x - labels = self.graph_data.y - train_mask = self.graph_data.train_mask - test_mask = self.graph_data.test_mask - - # Create and train GCN model - trained_gcn = GCN(self.num_features, 16, self.num_classes).to(self.device) - target_model = TargetGCN(trained_model=trained_gcn, data=self.graph_data) - - # Train model - self._train_gcn_model(trained_gcn, self.graph_data, train_mask, labels) - - # Compute k-core decomposition of the graph - G = to_networkx(self.graph_data, to_undirected=True) - G.remove_edges_from(nx.selfloop_edges(G)) - k_core_values_graph = k_core_decomposition(G) - max_k_core = torch.tensor(max(k_core_values_graph.values()), dtype=torch.float32) - - # Precompute embeddings for all nodes - all_embeddings = precompute_all_node_embeddings( - target_model, self.graph_data, k_core_values_graph, max_k_core, lamb=lamb - ) - - return train_loader, val_loader, test_loader, target_model, max_k_core, all_embeddings - - def _convert_to_networkx(self): - """ - Convert graph data to NetworkX format, compatible with different graph data structures - """ - if hasattr(self.graph_data, 'edge_index'): - # PyG format - return to_networkx(self.graph_data, to_undirected=True) - else: - # DGL format - requires conversion - # This may need adjustment based 
on specific DGL format - edge_list = [] - if hasattr(self.graph_data, 'edges'): - src, dst = self.graph_data.edges() - edge_list = list(zip(src.cpu().numpy(), dst.cpu().numpy())) - - G = nx.Graph() - G.add_nodes_from(range(self.num_nodes)) - G.add_edges_from(edge_list) - return G - - def _create_default_masks(self): - """ - Create default train/test masks for datasets without predefined masks - """ - num_nodes = self.num_nodes - num_train = int(num_nodes * 0.6) - num_test = int(num_nodes * 0.2) - - perm = torch.randperm(num_nodes) - train_mask = torch.zeros(num_nodes, dtype=torch.bool, device=self.device) - test_mask = torch.zeros(num_nodes, dtype=torch.bool, device=self.device) - - train_mask[perm[:num_train]] = True - test_mask[perm[num_train:num_train + num_test]] = True - - return train_mask, test_mask - - @staticmethod - def _train_gcn_model(model, data, train_mask, labels): - """ - Train GCN model for PyG format - """ - model.train() - optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4) - criterion = nn.NLLLoss() - - features = data.x - edge_index = data.edge_index - - for epoch in range(200): - optimizer.zero_grad() - output, _ = model(features, edge_index) - loss = criterion(output[train_mask], labels[train_mask]) - loss.backward() - optimizer.step() - - if epoch % 50 == 0: - print(f" Epoch {epoch}: Loss: {loss.item():.4f}") - - @staticmethod - def _evaluate_downstream_task(model, data, test_mask, labels): - """ - Evaluate model performance on downstream task - """ - model.eval() - with torch.no_grad(): - output, _ = model(data.x, data.edge_index) - preds = output[test_mask].argmax(dim=1).cpu() - true_labels = labels[test_mask].cpu() - - return preds, true_labels - - def defend(self): - """ - Execute ATOM defense, following BackdoorWM's defend method structure - """ - metric_comp = DefenseCompMetric() - metric_comp.start() - - print("====================ATOM Defense====================") - - # Configuration parameters - config = { - 
"seed": 37719, - "K_epochs": 10, - "batch_size": 16, - "hidden_size": 196, - "hidden_action_dim": 16, - "clip_epsilon": 0.30, - "entropy_coef": 0.05, - "lr": 1e-3, - "gamma": 0.99, - "lam": 0.95, - "num_epochs": 2, - "lamb": 0 - } - - # Set random seed - set_seed(config["seed"]) - - # Load data and model - defense_s = time() - train_loader, val_loader, test_loader, target_model, max_k_core, all_embeddings = self._load_data_and_model( - batch_size=config["batch_size"], - seed=config["seed"], - lamb=config["lamb"] - ) - - # Set device and parameters - device = self.device - action_dim = 2 - hidden_size = config["hidden_size"] - - # Initialize model components - input_size = self.num_classes - gru = FusionGRU(input_size=input_size, hidden_size=hidden_size).to(device) - mlp_transform = StateTransformMLP(action_dim, config["hidden_action_dim"], hidden_size).to(device) - agent = PPOAgent( - learning_rate=config["lr"], - batch_size=config["batch_size"], - K_epochs=config["K_epochs"], - state_dim=hidden_size, - action_dim=action_dim, - gru=gru, - mlp=mlp_transform, - clip_epsilon=config["clip_epsilon"], - entropy_coef=config["entropy_coef"], - device=device - ).to(device) - - # Training process - memory = Memory() - accuracy_list = [] - precision_list = [] - recall_list = [] - f1_list = [] - auc_value_list = [] - - for epoch in tqdm(range(config["num_epochs"]), desc="Training Epochs", ncols=100): - # Training logic - episode_reward = 0.0 - for batch_idx, (batch_seqs, batch_labels) in enumerate(train_loader): - batch_labels = batch_labels.to(device) - - # Convert sequences to tensors and pad them - batch_seqs = [torch.tensor(seq, dtype=torch.long, device=device) for seq in batch_seqs] - padded_seqs = pad_sequence(batch_seqs, batch_first=True, padding_value=0) - mask = (padded_seqs != 0).float().to(device) - max_seq_len = padded_seqs.size(1) - - # Extract embeddings for all time steps - all_inputs = [] - for t in range(max_seq_len): - node_indices = padded_seqs[:, 
t].tolist() - cur_inputs = all_embeddings[node_indices] - all_inputs.append(cur_inputs) - all_inputs = torch.stack(all_inputs, dim=1).to(device) - - # Process through GRU - hidden_states = gru.process_sequence(all_inputs) - masked_hidden_states = hidden_states * mask.unsqueeze(-1) - - # Prepare probability factors - prob_factors = torch.ones(len(batch_seqs), max_seq_len, action_dim, device=device) - if memory.all_probs: - prob_factors[:, :-1] = torch.stack([ - torch.tensor(memory.all_probs.get(t, [1.0] * action_dim)) - for t in range(max_seq_len - 1) - ], dim=1).to(device) - - # Transform states - custom_states = (mlp_transform(prob_factors) * masked_hidden_states).detach() - - # Select actions using PPO agent - actions, log_probs, entropies, probs = agent.select_action( - custom_states.view(-1, hidden_size) - ) - - # Reshape outputs - actions = actions.view(len(batch_seqs), max_seq_len) - log_probs = log_probs.view(len(batch_seqs), max_seq_len) - entropies = entropies.view(len(batch_seqs), max_seq_len) - probs = probs.view(len(batch_seqs), max_seq_len, action_dim) - - # Initialize rewards and done flags - rewards = torch.zeros(len(batch_seqs), max_seq_len, device=device) - dones = torch.zeros(len(batch_seqs), max_seq_len, device=device) - - # Compute rewards - batch_predictions = actions.cpu().numpy() - predicted_distribution = (batch_predictions == 1).mean() - last_valid_steps = mask.sum(dim=1).long() - 1 - - for i in range(len(batch_seqs)): - for t in range(last_valid_steps[i] + 1): - if mask[i, t] == 1: - r = custom_reward_function( - actions[i, t].item(), - batch_labels[i].item(), - predicted_distribution - ) - rewards[i, t] = r - episode_reward += r - dones[i, last_valid_steps[i]] = 1.0 - - # Store experience in memory - memory.store(custom_states, actions, log_probs, rewards, dones, - entropy=entropies, masks=mask) - - # Compute returns and advantages - compute_returns_and_advantages(memory, gamma=config["gamma"], lam=config["lam"]) - - # Update agent - 
agent.update(memory) - memory.clear() - - # Testing and evaluation - agent.eval() - gru.eval() - mlp_transform.eval() - with torch.no_grad(): - accuracy, precision, recall, f1, auc_value = test_model( - agent, gru, mlp_transform, test_loader, - target_model, self.graph_data, all_embeddings, hidden_size, device - ) - accuracy_list.append(accuracy) - precision_list.append(precision) - recall_list.append(recall) - f1_list.append(f1) - auc_value_list.append(auc_value) - - defense_e = time() - inference_s = time() - - # Calculate final watermark results - wm_accuracy = np.mean(accuracy_list) - wm_precision = np.mean(precision_list) - wm_recall = np.mean(recall_list) - wm_f1 = np.mean(f1_list) - wm_auc = np.mean(auc_value_list) - - # Evaluate downstream task performance - if hasattr(self.graph_data, 'test_mask'): - test_mask = self.graph_data.test_mask - labels = self.graph_data.y if hasattr(self.graph_data, 'y') else self.graph_data.ndata.get('label') - else: - _, test_mask = self._create_default_masks() - labels = self.graph_data.y if hasattr(self.graph_data, 'y') else self.graph_data.ndata.get('label') - - downstream_preds, downstream_true = self._evaluate_downstream_task( - target_model.model, self.graph_data, test_mask, labels - ) - - # Convert watermark results to tensors for metric computation - wm_predictions = torch.tensor([1 if acc > 0.5 else 0 for acc in accuracy_list[-10:]]) - wm_targets = torch.ones_like(wm_predictions) - - inference_e = time() - - # Compute metrics - metric = DefenseMetric() - metric.update(downstream_preds, downstream_true) - metric.update_wm(wm_predictions, wm_targets) - - metric_comp.update( - defense_time=(defense_e - defense_s), - inference_defense_time=(inference_e - inference_s) - ) - metric_comp.end() - - print("====================Final Results====================") - res = metric.compute() - res_comp = metric_comp.compute() - - # Print detailed results - print(f"Downstream Task - Accuracy: {res['accuracy']:.4f}, F1: 
{res['f1']:.4f}") - print(f"Watermark Detection - Accuracy: {wm_accuracy:.4f}, AUC: {wm_auc:.4f}") - print(f"Defense Time: {res_comp['defense_time']:.4f}s") - print(f"Inference Time: {res_comp['inference_defense_time']:.4f}s") - - return res, res_comp diff --git a/pygip/models/defense/atom/__init__.py b/pygip/models/defense/atom/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/pygip/models/defense/atom/csv_data/attack_CiteSeer.csv b/pygip/models/defense/atom/csv_data/attack_CiteSeer.csv deleted file mode 100644 index 3c8bf4ee..00000000 --- a/pygip/models/defense/atom/csv_data/attack_CiteSeer.csv +++ /dev/null @@ -1,177 +0,0 @@ -Sequence,Label,NCL,Query Budget,Num Sample Nodes,Fidelity -"[3021, 1189, 3140, 1305, 1067, 1487, 666, 9, 2138, 1491, 2060, 1007, 1040, 2877, 1767, 2168, 1208, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9]",0,,,, -"[3021, 1189, 3140, 1305, 1067, 1487, 666, 9, 2138, 1491, 2060, 1007, 1040, 2877, 1767, 2168, 1208, 9, 9, 9]",0,,,, -"[3021, 1189, 3140, 1305, 1067, 1487, 666, 9, 2138, 1491, 2060, 1007, 1040, 2877, 1767, 2168, 1208, 9, 9, 1305, 9, 1305, 9, 1305, 9, 1305, 9, 1305, 9, 1305, 9, 1305, 9, 1305, 9, 1305, 9, 1305, 9, 1305]",0,,,, -"[3021, 1067, 1305, 1487, 1491, 666, 2138, 3140, 1189, 2060, 9, 1040, 2877, 1007, 1767, 2168, 1208, 9, 9, 1305, 9, 1305, 9, 1305, 9, 1305, 9, 1305, 9, 1305, 9, 1305, 9, 1305, 9, 1305, 9, 1305, 9, 1305]",0,,,, -"[1915, 2196, 542, 2295, 1020, 2519, 2507, 450, 1532, 2920, 1004, 1660, 2179, 2299, 234, 154, 106, 1561, 130, 3089, 405, 272, 2332, 1965, 2776, 1849, 2222, 2391, 1671, 1704, 2400, 587, 882, 2249, 2950, 3218, 2993, 1170, 489, 3046, 2840, 1221, 254, 1551, 379, 1772, 3257, 547, 1693, 1387, 879, 3148, 3099, 582, 1545, 2052, 647, 1052, 2269, 3017, 2180, 3107, 3236, 653, 784, 1587, 1701, 2567, 3228, 3217, 155, 110, 676, 244, 2347, 1410, 2921, 1200, 3216, 3215, 721, 412, 2258, 2066, 1363, 49, 2447, 1094, 2312, 2316, 1967, 18, 1778, 1525, 1822, 2153, 3006, 1578, 3009, 746, 2059, 2315, 2313, 
3271, 1353, 567, 2345, 1964, 472, 2900, 508, 285, 2025, 1477, 408, 1307, 1782, 70, 1384, 3052, 3220, 1729, 809, 84, 2557, 1753, 1474, 2054, 28, 95, 1246, 269, 3145, 923, 2420, 2308, 2314, 2498, 2189, 2154, 215, 780, 1678, 33, 397, 906, 1903, 1000, 2952, 2973, 1768, 637, 1239, 2245, 2745, 1542, 2210, 1265, 1002]",1,,,, -"[2280, 1249, 112, 112, 1249, 2280, 112, 1249, 2280, 112, 1249, 2280, 112, 1249, 2280, 112, 1249, 2280, 112, 1249, 2280, 112, 1249, 2280, 112, 1249, 2280, 112, 1249, 2280]",0,14.0,28.0,295.0,0.10760444845205891 -"[1653, 1714, 1869, 341, 1714, 1653, 1869, 341, 1714, 1653, 1869, 341, 1714, 1653, 1869, 341, 1714, 1653, 1869, 341, 1714, 1653, 1869, 341, 1714, 1653, 1869, 341, 1714, 1653, 1869, 341, 1714, 1653, 1869, 341, 1714, 1653, 1869, 341]",0,16.0,28.0,285.0,0.10760444845205891 -"[2016, 2569, 3250, 3250, 2569, 2016, 3250, 2569, 2016, 3250, 2569, 2016, 3250, 2569, 2016, 3250, 2569, 2016, 3250, 2569, 2016, 3250, 2569, 2016, 3250, 2569, 2016, 3250, 2569, 2016]",0,20.0,25.0,285.0,0.10760444845205891 -"[1935, 640, 3214, 640, 3214, 640, 3214, 640, 3214, 640, 3214, 640, 3214, 640, 3214, 640, 3214, 640, 3214, 640]",0,4.0,2.0,50.0,0.1003907424105801 -"[3177, 3176, 789, 3176, 789, 3176, 789, 3176, 789, 3176, 789, 3176, 789, 3176, 789, 3176, 789, 3176, 789, 3176]",0,4.0,2.0,70.0,0.10760444845205891 -"[334, 1692, 334, 1692, 334, 1692, 334, 1692, 334, 1692, 334, 1692, 334, 1692, 334, 1692, 334, 1692, 334, 1692]",0,4.0,2.0,90.0,0.10760444845205891 -"[1416, 1380, 742, 740, 58, 2009, 284, 58, 740, 742, 2009, 58, 284, 740, 742, 2009, 58, 284, 740, 742, 2009, 58, 284, 740, 742, 2009, 58, 284, 740, 742, 2009, 58, 284, 740, 742, 2009, 58, 284, 740, 742, 2009, 58, 284, 740, 742, 2009, 58, 284, 740, 742]",0,3.0,5.0,275.0,0.18004207995190863 -"[2847, 1800, 2847, 1800, 2847, 1800, 2847, 1800, 2847, 1800, 2847, 1800, 2847, 1800, 2847, 1800, 2847, 1800, 2847, 1800]",0,3.0,10.0,200.0,0.18455064622783288 -"[1683, 670, 928, 265, 2578, 573, 1678, 540, 2631, 2120, 1311, 504, 1536, 
2673, 1704, 3009, 32, 203, 950, 2245, 1103, 228, 1116, 822, 777, 1335, 550, 2845, 1836, 703, 2670, 2220, 1220, 3046, 584, 2913, 95, 3050, 2292, 1721, 342, 1865, 110, 2452, 70, 2721, 1464, 1383, 1992, 1033, 3299, 83, 1768, 2448, 2383, 1087, 2451, 2449, 3018, 1551, 455, 541, 472, 1077, 2346, 2441, 2316, 624, 915, 809, 1455, 1326, 1247, 3048, 2819, 1477, 1418, 3049, 2911, 2745, 1842, 1318, 1656, 865, 923, 2450, 2401, 476, 2515, 392, 411, 484, 3317, 80, 2547, 1791, 649, 442, 2252, 2153]",0,3.0,10.0,275.0,0.18094379320709347 -"[2920, 1004, 1967, 2840, 1002, 405, 2672, 2066, 587, 130, 1721, 653, 2179, 1200, 84, 1818, 1220, 2473, 2249, 2734, 1992, 790, 1410, 563, 1387, 647, 1516, 234, 676, 879, 2100, 582, 1701, 2819, 541, 215, 1753, 1921, 18, 1660, 783, 205, 637, 1498, 2391, 1683, 379, 419, 615, 515, 2296, 2220, 2448, 92, 392, 80, 2726, 3299, 1170, 1383, 3284, 974, 819, 2450, 3018, 2673, 1536, 2515, 1311, 1842, 3317, 654, 1615, 3039, 771, 2316, 1945, 1247, 638, 2646, 2670, 1525, 923, 1116, 3017, 1891, 2189, 1836, 1225, 1039, 1256, 624, 1693, 455, 2452, 83, 540, 489, 2911, 2283, 2258, 2278, 674, 1307, 547, 1684, 1656, 294, 2134, 2259, 2495, 1939, 2203, 462, 2671, 1103, 319, 670, 2447, 2451, 1865, 2449, 1813, 2547, 476, 2383, 2218, 194, 921, 610, 516, 2531, 442, 3318, 3099, 1077, 555, 911, 3060, 1678, 28, 584, 3046, 649, 1472, 1932, 2910, 1727, 2210, 478]",0,3.0,15.0,150.0,0.17553351367598438 -"[3088, 2014, 2172, 2172, 2014, 3088, 2172, 2014, 3088, 2172, 2014, 3088, 2172, 2014, 3088, 2172, 2014, 3088, 2172, 2014, 3088, 2172, 2014, 3088, 2172, 2014, 3088, 2172, 2014, 3088]",0,3.0,15.0,275.0,0.17222723174030657 -"[1720, 1730, 1173, 2795, 2794, 2796, 659, 867, 2516, 2517, 2751, 329, 2618, 1889, 2617, 2618, 329, 2617, 1889, 2517, 2618, 329, 2617, 1889, 2517, 2618, 329, 2617, 1889, 2517, 2618, 329, 2617, 1889, 2517, 2618, 329, 2617, 1889, 2517, 2618, 329, 2617, 1889, 2517, 2618, 329, 2617, 1889, 2517]",0,5.0,5.0,150.0,0.17523294259092276 -"[278, 1530, 2015, 488, 1187, 760, 2469, 
1099, 1179, 2470, 667, 1555, 1707, 3101, 1950, 166, 1243, 1079, 1950, 3101, 1950, 3101, 1530, 1243, 1187, 1950, 3101, 1530, 1243, 1187, 1950, 3101, 1530, 1243, 1187, 1950, 3101, 1530, 1243, 1187, 1950, 3101, 1530, 1243, 1187, 1950, 3101, 1530, 1243, 1187]",0,5.0,5.0,275.0,0.189960925758942 -"[2164, 1327, 2164, 1327, 2164, 1327, 2164, 1327, 2164, 1327, 2164, 1327, 2164, 1327, 2164, 1327, 2164, 1327, 2164, 1327]",0,5.0,15.0,200.0,0.17222723174030657 -"[1465, 2832, 2833, 2833, 2832, 1465, 2833, 2832, 1465, 2833, 2832, 1465, 2833, 2832, 1465, 2833, 2832, 1465, 2833, 2832, 1465, 2833, 2832, 1465, 2833, 2832, 1465, 2833, 2832, 1465]",0,5.0,15.0,275.0,0.12413585813044785 -"[1108, 891, 2087, 2471, 1108, 891, 2087, 2471, 1108, 891, 2087, 2471, 1108, 891, 2087, 2471, 1108, 891, 2087, 2471, 1108, 891, 2087, 2471, 1108, 891, 2087, 2471, 1108, 891, 2087, 2471, 1108, 891, 2087, 2471, 1108, 891, 2087, 2471]",0,5.0,25.0,275.0,0.19056206792906522 -"[2396, 2021, 2166, 616, 691, 549, 147, 314, 528, 2585, 27, 1731, 2404, 2444, 158, 1, 2774, 1157, 2650, 2254, 2234, 1439, 1219, 1663, 2230, 2257, 2649, 2398, 1943, 2181, 1071, 2782, 965, 2282, 351, 1605, 16, 2919, 1567, 2829, 2264, 180, 835, 303, 2641, 1013, 527, 220, 1343, 894]",0,7.0,5.0,275.0,0.19987977156597536 -"[425, 171, 425, 171, 425, 171, 425, 171, 425, 171, 425, 171, 425, 171, 425, 171, 425, 171, 425, 171]",0,7.0,10.0,150.0,0.18004207995190863 -"[2755, 2086, 1159, 2393, 710, 2754, 1297, 2392, 275, 1297, 275, 2393, 710, 2392, 1159, 2086, 2754, 2755, 1297, 275, 2393, 710, 2392, 1159, 2086, 2754, 2755, 1297, 275, 2393, 710, 2392, 1159, 2086, 2754, 2755, 1297, 275, 2393, 710, 2392, 1159, 2086, 2754, 2755, 1297, 275, 2393, 710, 2392, 1159, 2086, 2754, 2755, 1297, 275, 2393, 710, 2392, 1159, 2086, 2754, 2755, 1297, 275, 2393, 710, 2392, 1159, 2086, 2754, 2755, 1297, 275, 2393, 710, 2392, 1159, 2086, 2754, 2755, 1297, 275, 2393, 710, 2392, 1159, 2086, 2754, 2755]",0,7.0,10.0,275.0,0.19777577397054402 -"[405, 1660, 130, 1004, 2066, 
2132, 647, 234, 1516, 1387, 18, 2899, 653, 1620, 1525, 84, 1836, 2578, 865, 924, 587, 582, 637, 1967, 563, 194, 1039, 2531, 489, 2631, 584, 3009, 3017, 1701, 92, 1225, 2515, 974, 215, 379, 1932, 649, 654, 2910, 1753, 676, 610, 790, 670, 1170, 1891, 1103, 2495, 2134, 879, 1693, 771, 1472, 1002, 83, 540, 1256, 2911, 2278, 142, 1410, 3046, 1455, 294, 2588, 2844, 421, 471, 1945, 911, 2819, 2734, 573, 1842, 2646, 2726, 878, 1565, 2771, 555, 3299, 3018, 1383, 2218, 713, 1200, 3318, 595, 516, 638, 476, 2080, 1077, 3099, 920, 2163, 135, 2811, 1865, 2059, 2547, 751, 319, 478, 2314, 923, 1615, 1566, 442, 2312, 136, 3317, 2313, 2450, 1247, 1921, 1447, 2296, 2339, 1423, 1654, 2401, 2560, 2673, 2739, 2315, 547, 641, 819, 1578, 1912, 3284, 2670, 615, 1311, 28, 1678, 1895, 359, 3029, 2316, 2383, 80, 2607, 392]",0,7.0,15.0,150.0,0.17493237150586113 -"[2026, 3279, 2026, 3279, 2026, 3279, 2026, 3279, 2026, 3279, 2026, 3279, 2026, 3279, 2026, 3279, 2026, 3279, 2026, 3279]",0,7.0,20.0,200.0,0.17222723174030657 -"[902, 97, 1201, 2572, 902, 97, 1201, 2572, 902, 97, 1201, 2572, 902, 97, 1201, 2572, 902, 97, 1201, 2572, 902, 97, 1201, 2572, 902, 97, 1201, 2572, 902, 97, 1201, 2572, 902, 97, 1201, 2572, 902, 97, 1201, 2572]",0,7.0,20.0,225.0,0.16561466786895102 -"[1425, 919, 1425, 919, 1425, 919, 1425, 919, 1425, 919, 1425, 919, 1425, 919, 1425, 919, 1425, 919, 1425, 919]",0,7.0,25.0,275.0,0.10970844604749024 -"[2078, 1374, 2078, 1374, 2078, 1374, 2078, 1374, 2078, 1374, 2078, 1374, 2078, 1374, 2078, 1374, 2078, 1374, 2078, 1374]",0,9.0,5.0,150.0,0.18184550646227832 -"[403, 3185, 1662, 1092, 1579, 3185, 403, 1092, 1662, 1579, 3185, 403, 1092, 1662, 1579, 3185, 403, 1092, 1662, 1579, 3185, 403, 1092, 1662, 1579, 3185, 403, 1092, 1662, 1579, 3185, 403, 1092, 1662, 1579, 3185, 403, 1092, 1662, 1579, 3185, 403, 1092, 1662, 1579, 3185, 403, 1092, 1662, 1579]",0,9.0,5.0,225.0,0.12744214006612564 -"[2802, 2801, 2802, 2801, 2802, 2801, 2802, 2801, 2802, 2801, 2802, 2801, 2802, 2801, 2802, 2801, 
2802, 2801, 2802, 2801]",0,9.0,10.0,225.0,0.19056206792906522 -"[1137, 2544, 1137, 2544, 1137, 2544, 1137, 2544, 1137, 2544, 1137, 2544, 1137, 2544, 1137, 2544, 1137, 2544, 1137, 2544]",0,9.0,10.0,275.0,0.16501352569882777 -"[1104, 2871, 1104, 2871, 1104, 2871, 1104, 2871, 1104, 2871, 1104, 2871, 1104, 2871, 1104, 2871, 1104, 2871, 1104, 2871]",0,9.0,15.0,150.0,0.17222723174030657 -"[2009, 742, 284, 58, 740, 1380, 1416, 1416, 58, 740, 1380, 284, 742, 2009, 1416, 58, 740, 1380, 284, 742, 2009, 1416, 58, 740, 1380, 284, 742, 2009, 1416, 58, 740, 1380, 284, 742, 2009, 1416, 58, 740, 1380, 284, 742, 2009, 1416, 58, 740, 1380, 284, 742, 2009, 1416, 58, 740, 1380, 284, 742, 2009, 1416, 58, 740, 1380, 284, 742, 2009, 1416, 58, 740, 1380, 284, 742, 2009]",0,9.0,15.0,200.0,0.18004207995190863 -"[1660, 92, 405, 2179, 2189, 104, 1964, 2316, 2332, 379, 1704, 84, 1002, 564, 1170, 3215, 2866, 2100, 440, 2993, 254, 3046, 790, 130, 2052, 1967, 1545, 1561, 1239, 1303, 2283, 1307, 676, 547, 1516, 1572, 2900, 1384, 923, 2899, 1200, 1544, 215, 1094, 489, 1387, 3047, 582, 1678, 2066, 450, 2312, 1693, 653, 2984, 1004, 234, 2222, 822, 1510, 1119, 2861, 70, 2400, 2498, 2629, 491, 1551, 95, 1865, 3048, 1410, 1522, 1477, 2347, 2567, 2134, 3009, 1915, 1043, 2895, 2059, 2455, 1353, 2364, 148, 2994, 170, 1727, 1772, 2558, 1464, 3220, 2315, 2269, 1577, 3052, 674, 1587, 2745, 1525, 2154, 1087, 1921, 1542, 1768, 746, 508, 922, 3274, 110, 1782, 2228, 3049, 2095, 2028, 1320, 2568, 2153, 1578, 784, 2363, 3234, 921, 1408, 421, 1701, 3026, 1235, 397, 2054, 707, 465, 587, 1253, 563, 809, 2335, 2345, 729, 2245, 28, 3051, 2631, 472, 647, 879, 2420, 38, 18, 1753, 194, 249, 906, 49, 1822, 1001, 339, 2835, 1729, 682, 2132, 541, 269, 1363, 3107, 2180, 808, 637, 2840, 703, 3050, 780, 2218, 2025, 2931, 2159, 928, 2636, 1817, 3235, 1326, 311, 2977, 429, 3236, 3006, 3207, 3228, 2314, 2721, 2562, 1328, 494, 1447, 304, 1903, 244, 1429, 638, 2313, 3194, 33, 2440, 3271, 1620, 2441, 1797, 2346, 1912, 408, 2341, 135, 
412, 2894, 1771, 600, 2493, 3045, 1826, 2308, 2342, 3144, 2080, 3145, 1778, 3187, 1042, 62, 1027, 2698, 56, 54, 1823, 2609, 955, 2105, 1785, 2608, 2661, 1379, 66, 577, 728, 2442, 2662, 1566, 2878, 1672, 390, 1948, 1373, 2103, 195, 871, 2738, 1407, 1155, 1409, 357, 2737, 877, 177, 1885, 2350, 1448, 2687, 2479, 2353, 1468, 2467, 1867, 2378, 999, 781, 1916, 940, 1971, 2351, 2357, 1754, 2062, 776, 2375, 3160, 1062, 2358, 2354, 1030, 458]",1,15.0,29.0,302.0,0.5016531409678389 -"[95, 3107, 70, 1545, 2899, 3236, 2180, 790, 1303, 3228, 674, 440, 2558, 3046, 2568, 1964, 1043, 1551, 3009, 104, 1363, 3274, 587, 1094, 2269, 1410, 809, 3047, 1768, 1921, 3050, 3235, 2222, 600, 1865, 1464, 1307, 1967, 582, 1772, 2498, 2283, 784, 2840, 928, 3234, 3144, 2313, 244, 269, 1542, 2052, 2567, 254, 1704, 2332, 2059, 3215, 3026, 1447, 921, 822, 1408, 2189, 110, 1782, 1727, 2132, 3194, 3271, 3049, 906, 1384, 2993, 2315, 1822, 2400, 498, 339, 2636, 2316, 1239, 2745, 49, 33, 2314, 472, 2900, 564, 2105, 1778, 2312, 3048, 3006, 2066, 28, 2347, 1729, 746, 2631, 2420, 703, 541, 923, 2308, 2345, 465, 249, 508, 2341, 1587, 2245, 2335, 780, 3052, 1578, 66, 421, 1678, 2652, 2153, 808, 1235, 2154, 1771, 2095, 637, 1326, 1353, 3051, 1477, 2931, 1087, 729, 2218, 450, 1903, 1429, 3220, 2977, 1620, 194, 1522, 2562, 1817, 2721, 2054, 408, 494, 638, 1797, 429, 2342, 2440, 304, 1885, 3207, 2028, 1373, 397]",1,21.0,16.0,240.0,0.5142771265404268 -"[808, 1094, 2901, 2900, 927, 2977, 1410, 70, 1660, 1946, 84, 790, 241, 2845, 2345, 2562, 396, 2899, 2217, 653, 3017, 930, 637, 3026, 936, 130, 2855, 624, 1162, 550, 1387, 2911, 2973, 2085, 2721, 587, 203, 31, 2507, 1135, 882, 2902, 1002, 3018, 83, 1340, 1170, 2275, 2413, 32, 2720, 405, 1004, 1771, 634, 1759, 879, 1446, 421, 2363, 3099, 1576, 2196, 2380, 379, 2245, 1574, 582, 2921, 3009, 101, 2120, 450, 2335, 489, 1068, 755, 2066, 2440, 541, 194, 3215, 2520, 2261, 620, 540, 1691, 2776, 3046, 304, 1087, 2861, 1802, 2646, 494, 2903, 1455, 2718, 2439, 2017, 1620, 333, 
670, 1027, 777, 98, 2924, 709, 1787, 429, 923, 2519, 2734, 1326, 2132, 106, 2636, 2607, 1445, 2189, 1895, 1348, 18, 153, 2598, 1848, 2670, 825, 2174, 618, 170, 1200, 1921, 1311, 491, 1817, 647, 2346, 1961, 1561, 43, 1525, 2631, 466, 1516, 676, 215, 1080, 142, 2218, 2163, 2959, 136, 234, 236, 2673, 2401, 2228, 1967, 2925, 563, 615, 751, 2383, 2672, 316, 2719, 148, 92, 530, 1418, 1359, 285, 1987, 1877, 2509, 577, 250, 1247, 819, 547, 2364, 2209, 2547, 2584, 946, 535, 62, 1785, 49, 359, 1621, 1826, 2595, 641, 3029, 1264, 80, 682, 595]",1,7.0,20.0,300.0,0.5229936880072137 -"[221, 150, 2632, 1791, 468, 847, 2633, 2549, 559, 1875, 954, 1097, 1625, 1467, 144, 1398, 138, 1362, 1759, 1130, 1123, 2427, 1666, 2422, 1084, 2367, 1399, 12, 597, 3152, 2110, 979, 2812, 3174, 3244, 2356, 2819, 43, 2501, 716, 2366, 2948, 1, 1855, 1989, 136, 2939, 3139, 1731, 3068, 2574, 1742, 2647, 651, 604, 1879, 441, 690, 1085, 3287, 2541, 1656, 1570, 1033, 3293, 1613, 1853, 158, 2729, 2520, 509, 1238, 1218, 253, 1401, 578, 3160, 2243, 486, 1170, 447, 2685, 1684, 797, 2335, 1115, 1738, 2043, 1202, 2343, 1429, 1031, 2943, 2919, 1030, 2949, 455, 1971, 1125, 2891, 1096, 1804, 2147, 2458, 1868, 1587, 887, 152, 648, 669, 2546, 2331, 1726, 3286, 3017, 2030, 2062, 2790, 3042, 3134, 2573, 2944, 352, 1411, 2368, 2461, 78, 2305, 1737, 988, 1992, 1818, 525, 2460, 3011, 2425, 1156, 2575, 615, 118, 2491, 2157, 3156, 1235, 3007, 2421, 2306, 2579, 3133, 1072, 2459, 2686, 3028, 672, 91, 191, 1685, 123, 1143, 1782, 374, 2345, 370, 3228, 1286, 119, 2347, 2245, 2492, 1244, 2627, 1279, 1927, 556, 5, 843, 1863, 1186, 798, 1667, 969, 3053, 2884, 1048, 2628, 942, 2378, 473, 2202, 2065, 1687, 1808, 1677, 949, 2499, 884, 227, 1890, 1728, 266]",1,9.0,20.0,275.0,0.5001502855425308 -"[2196, 1877, 2673, 615, 1111, 3012, 1280, 1895, 1683, 2383, 178, 3284, 1818, 1033, 2184, 2730, 1474, 2727, 2316, 2275, 1813, 228, 1196, 8, 2058, 1234, 359, 417, 1447, 2313, 1654, 1311, 2252, 2761, 2005, 1233, 771, 1521, 2314, 1613, 2450, 1094, 
80, 1418, 2212, 2519, 542, 1220, 1227, 915, 853, 2132, 921, 654, 3231, 253, 2507, 854, 3139, 711, 2763, 2150, 989, 3232, 119, 1684, 383, 3029, 1971, 3120, 670, 2671, 1322, 2451, 2412, 584, 2898, 1426, 882, 1102, 1435, 2464, 1020, 2902, 3051, 2900, 2312, 2643, 610, 777, 2670, 1256, 1437, 2578, 285, 920, 419, 1353, 927, 1423, 2052, 393, 552, 1335, 484, 455, 70, 2607, 2903, 2095, 2557, 3294, 1039, 28, 2814, 2620, 1944, 516, 392, 3018, 1107, 91, 2547, 2452, 778, 1629, 411, 642, 2959, 865, 2496, 3133, 649, 2401, 819, 2025, 2899, 1687, 2472, 1985, 2868, 1939, 1656, 2295, 333, 3317, 1472, 1791, 2259, 2606, 974, 950, 998, 567, 2952, 1135, 2910, 1992, 316, 2346, 2515, 566, 2449, 2059, 634, 672, 2534, 2609, 2739, 1531, 3207, 1721, 17, 774, 317, 2080, 2203, 2495, 135, 2817, 1156, 1677, 118, 1792, 1836, 2646, 1147, 2816, 2924, 332, 3257, 2448, 1445, 624, 1247, 1912, 2911, 2815, 1678, 132, 1848, 3296, 2220, 3134, 1103, 2909, 450, 442, 286, 2776, 577, 2837, 106, 3221, 476, 342, 1449, 203, 1485, 2845, 745, 573, 1959, 155, 2473, 1536, 2153, 83, 1210, 1685, 2819, 1785, 2112, 2844, 2913, 2932, 2683, 300, 205, 1946, 3299, 2612, 3223, 729, 1384, 462, 2055, 1842, 2672, 3153, 2210, 2292, 540, 721, 2345, 700, 2531, 421, 265, 3147, 2726, 56, 319, 1116, 167, 911, 1324, 2486, 1239, 2951, 1578, 1562, 465, 1068, 923, 2177, 2497, 1385, 222, 1000, 2931, 96, 1532, 1408, 2380, 1265, 1545, 2315, 2085, 201, 884, 707, 569, 32, 2734, 1272, 3215, 1052, 1778, 1046]",1,,,, -"[1835, 3151, 61, 2113, 2592, 502, 453, 1944, 2148, 2598, 2664, 825, 1112, 1940, 2073, 55, 2324, 542, 918, 1313, 818, 1973, 1568, 2650, 1341, 287, 16, 2330, 1020, 2122, 186, 1996, 2405, 912, 517, 1668, 1011, 755, 2200, 2647, 3072, 1743, 1795, 1292, 1215, 2632, 1644, 2234, 2597, 2883, 2435, 220, 2927, 2564, 2775, 1961, 1647, 993, 2719, 588, 746, 203, 428, 1013, 2239, 2226, 894, 1350, 550, 627, 2829, 2886, 1157, 1308, 3009, 616, 641, 2404, 2709, 31, 2666, 1080, 2667, 435, 27, 2882, 2254, 2230, 715, 994, 2063, 137, 10, 1620, 2631, 1012, 
1483, 1447, 2681, 2680, 719, 2912, 2097, 70, 1070, 2925, 2665, 622, 1484, 2295, 847, 1135, 887, 2558, 162, 1925, 705, 2663, 1877, 3207, 872, 136, 1670, 492, 501, 147, 2257, 1825, 1517, 1575, 1943, 416, 2132, 816, 2166, 1591, 2595, 1922, 1399, 875, 1732, 548, 2444, 2519, 2782, 927, 197, 2759, 398, 1533, 464, 3028, 144, 1605, 2585, 2649, 2032, 2774, 2420, 1817, 2902, 1214, 2361, 351, 2401, 882, 2264, 2639, 2675, 2422, 880, 2899, 2128, 2275, 1176, 2596, 1275, 1946, 549, 2399, 1918, 1212, 1761, 79, 1295, 1655, 2507, 1561, 634, 1128, 445, 1642, 1422, 2398, 2891, 2033, 985, 1567, 1725, 1821, 3086, 2218, 322, 1202, 413, 1542, 2651, 1009, 3080, 450, 347, 1584, 1230, 770, 2379, 231, 506, 2057, 1353, 2331, 2387, 154, 153, 1731, 2360, 1758, 725, 1580, 2797, 972, 76, 180, 1257, 2638, 1019, 2119, 2426, 1665, 2362, 2443, 1130, 3042, 272, 2520, 247, 968, 25, 1558, 2506, 2967, 2429, 661, 1522, 1582, 1068, 2633, 2183, 1583, 1811, 796, 2355, 194, 350, 397, 946, 2227, 1439, 1903, 2102, 826, 2224, 2903, 34, 198, 242, 1956, 3081, 1015, 1587, 1995, 2565, 20, 1915, 1532, 1759, 744, 3199, 975, 409, 1490, 1965, 43, 2901, 3027, 691, 1131, 2622, 2488, 2720, 2427]",1,,,, -"[304, 637, 429, 2440, 494, 2562, 2759, 62, 541, 1633, 2475, 2435, 2097, 1826, 373, 2444, 220, 2230, 2421, 1275, 622, 131, 268, 1157, 2113, 2162, 2829, 1647, 3138, 1422, 1736, 2882, 677, 2622, 982, 1620, 808, 2912, 1587, 1567, 1635, 2147, 2257, 2234, 1802, 2649, 2650, 1130, 1013, 1708, 2887, 1605, 2631, 351, 2166, 398, 2977, 361, 1517, 2331, 16, 466, 59, 2586, 377, 1956, 1112, 965, 616, 1473, 2466, 2323, 2522, 396, 1009, 1399, 2564, 2927, 1097, 1771, 2721, 627, 2021, 2636, 1044, 138, 2264, 968, 468, 2422, 237, 2681, 691, 1484, 2371, 1699, 912, 1398, 1415, 314, 2110, 2645, 2774, 2401, 2324, 2417, 1028, 1251, 1989, 1591, 1117, 2967, 2398, 1630, 1835, 194, 1879, 755, 2782, 2405, 1666, 2443, 3026, 2597, 1214, 55, 2429, 1578, 889, 1064, 147, 2245, 2638, 144, 1027, 2820, 10, 1351, 2089, 1012, 2226, 2361, 1973, 428, 1087, 680, 796, 
1843, 273, 1034, 1943, 3086, 298, 2483, 2718, 2883, 1853, 247, 414, 770, 1330, 2362, 1875, 1912, 1326, 1644, 768, 2335, 2626, 2250, 453, 3014, 2545, 152, 1668, 2830, 661, 3144, 1678, 1725, 1453, 2217, 864, 894, 1821, 2312, 2481, 1480, 1698, 61, 1492, 1070, 904, 153, 1580, 1576, 2919, 1417, 1731, 2239, 1008, 2360, 1362, 2063, 2205, 1655, 930, 1429, 114, 2339, 347, 2651, 744, 2637, 2487, 307, 684, 2506, 2314, 186, 2964, 850, 847, 2488, 492, 2596, 98, 2316, 2313, 2509, 2605, 2032, 1877, 2599, 2527, 2122, 2632, 1758, 2404, 2420, 3227, 242, 887, 715, 1918, 2204, 2799, 1015, 28, 2224, 2399, 1625, 964, 1665, 395, 2783, 1812, 151, 2633, 2282, 2369, 1238, 2017, 2119, 1131, 1557, 2321, 1582, 1019, 936]",1,,,, -"[2067, 2275, 1543, 3029, 2222, 154, 2732, 1309, 1912, 927, 2733, 2519, 80, 2903, 359, 272, 1135, 1965, 882, 2299, 32, 2932, 1020, 450, 1447, 949, 2507, 915, 1445, 2132, 28, 1822, 2579, 2120, 421, 876, 3231, 1772, 1183, 690, 3009, 2312, 550, 417, 2492, 2401, 1363, 2551, 178, 484, 1072, 729, 254, 2314, 2763, 2196, 2761, 1778, 703, 1654, 2899, 1704, 265, 2607, 2512, 2088, 1848, 1220, 3288, 1033, 110, 3147, 1536, 2252, 542, 1210, 3076, 286, 716, 212, 2730, 2025, 1629, 2269, 1221, 95, 2901, 1678, 419, 2835, 2058, 900, 504, 1366, 2868, 316, 1311, 2844, 2771, 1613, 2315, 2335, 392, 1180, 1971, 2497, 383, 3207, 3249, 247, 8, 711, 2726, 227, 928, 3202, 1561, 2366, 472, 1531, 3257, 2631, 2578, 2059, 300, 2547, 1408, 1578, 2383, 266, 2295, 2095, 920, 3047, 2313, 2745, 2332, 634, 2212, 2028, 1233, 2683, 1058, 1895, 3146, 3051, 3232, 1785, 2316, 1836, 1435, 778, 2816, 2819, 70, 1326, 1102, 135, 2973, 774, 2449, 1946, 577, 624, 1817, 1052, 3265, 2721, 2845, 989, 2055, 1335, 3089, 2643, 2836, 860, 853, 2727, 228, 1915, 1107, 1683, 1094, 541, 974, 3012, 203, 2606, 2080, 1485, 2163, 1532, 2672, 2153, 2054, 2977, 865, 253, 3052, 1303, 1318, 2902, 3148, 2052, 2898, 777, 1384, 2486, 1172, 569, 411, 670, 1068, 1562, 1234, 2112, 2557, 319, 1477, 843, 1085, 884, 440, 2346, 485, 2837, 2619, 
1565, 3294, 2612, 194, 2900, 342, 2741, 1162, 854, 1771, 2498, 3134, 2657, 2400, 1791, 1939, 429, 2448, 476, 1426, 783, 2608, 2581, 1239, 1423, 2464, 2245, 707, 561, 1265, 2177, 3223, 556, 205, 1522, 1027, 2451, 924, 721, 1247, 1455, 3274, 1043]",1,,,, -"[2683, 1522, 203, 383, 2245, 1746, 2952, 2610, 1235, 2770, 2335, 70, 561, 641, 1449, 721, 3223, 2612, 808, 2931, 989, 1445, 2095, 2512, 3071, 2498, 2899, 1970, 1085, 2932, 1180, 2595, 1080, 535, 316, 711, 2562, 1566, 194, 825, 2020, 2132, 1210, 813, 778, 228, 1485, 2636, 96, 3009, 3147, 1877, 2817, 2950, 2058, 618, 884, 1676, 286, 2588, 822, 2215, 2900, 1322, 1324, 371, 526, 2580, 698, 201, 2219, 860, 135, 2662, 1111, 1778, 1498, 595, 1654, 2836, 397, 253, 1094, 1791, 566, 95, 3244, 3139, 2314, 1172, 494, 1562, 2240, 515, 2025, 2163, 2819, 3288, 87, 550, 2727, 1326, 2579, 1961, 2055, 3146, 1464, 411, 2631, 2815, 876, 2252, 1027, 2401, 1613, 304, 2339, 798, 1469, 2789, 2672, 1701, 178, 2120, 471, 2177, 1620, 2292, 2977, 1832, 2463, 2581, 504, 2951, 920, 3120, 713, 2924, 1196, 3051, 159, 2554, 2608, 1572, 1107, 519, 485, 1387, 577, 1043, 2497, 2811, 393, 1837, 3049, 56, 1710, 2462, 1033, 1771, 843, 18, 3052, 1917, 1585, 32, 1049, 1335, 3218, 1419, 1366, 484, 339, 8, 2380, 2752, 119, 2080, 3217, 1999, 2052, 615, 3282, 2473, 2346, 3026, 3207, 1447, 2604, 2400, 1629, 3220, 2449, 1874, 2059, 1246, 2345, 1000, 969, 483]",1,,,, -"[1472, 2902, 3317, 670, 2903, 442, 1307, 573, 1683, 1836, 1318, 2911, 542, 2295, 649, 28, 1112, 2354, 2346, 32, 1030, 2901, 2899, 3046, 2210, 2452, 1865, 159, 465, 1077, 265, 1727, 332, 1891, 3299, 2451, 1251, 2449, 2345, 2473, 2952, 2515, 154, 865, 1303, 2845, 1131, 624, 1246, 1956, 3099, 2495, 922, 550, 2886, 923, 1768, 2752, 2531, 2578, 135, 3296, 2222, 2740, 927, 1039, 1366, 450, 1210, 540, 2427, 1964, 2835, 1536, 2973, 1946, 3215, 1654, 104, 2739, 1625, 3276, 1068, 2448, 1912, 96, 2299, 3318, 1813, 247, 1772, 2816, 2815, 2312, 2673, 2065, 3076, 2112, 833, 155, 1103, 584, 1020, 3018, 882, 
3009, 1256, 2154, 674, 83, 1116, 2062, 1965, 2575, 504, 1455, 342, 771, 2259, 468, 2670, 2931, 2283, 2643, 2347]",1,,,, -"[1094, 3010, 253, 2770, 1107, 2497, 1286, 410, 2579, 1422, 798, 843, 2366, 1871, 2202, 1808, 3051, 1566, 519, 2584, 1102, 2602, 2580, 473, 1097, 2112, 3011, 1033, 550, 1210, 2368, 30, 2819, 860, 228, 203, 135, 3284, 2581, 535, 1004, 2790, 1582, 2149, 1469, 1543, 1180, 1890, 485, 1992, 123, 1366, 2266, 920, 158, 2462, 884, 1772, 449, 865, 813, 1903, 1418, 1052, 2222, 1246, 2080, 946, 2153, 1449, 78, 3294, 2050, 508, 2643, 716, 1, 2692, 876, 1358, 2815, 878, 812, 2951, 2817, 2347, 1719, 1874, 2367, 1235, 1789, 1545, 2132, 2245, 2025, 421, 1999, 3288, 2899, 2732, 1183, 3029, 2240, 1531, 1515, 2478, 698, 1318, 3146, 2313, 18, 561, 2789, 1478, 1585, 2666, 469, 2752, 2906, 333, 950, 1221, 484, 3139, 247, 2150, 1172, 316, 751, 397, 573, 2088, 1272, 33, 2963, 3223, 2512, 577, 91, 2741, 1939, 407, 2977, 254, 2589, 401, 3147, 1485, 1058, 2524]",1,,,, -"[1768, 1704, 3324, 2000, 2269, 2745, 1865, 549, 1522, 434, 3209, 350, 397, 595, 878, 2926, 2673, 471, 2490, 2339, 2828, 2416, 472, 2250, 464, 2588, 2122, 2840, 713, 2585, 80, 2415, 3051, 1655, 302, 650, 131, 835, 912, 137, 2399, 25, 2314, 2942, 923, 2218, 541, 1921, 935, 2655, 2640, 2822, 1464, 2245, 110, 1644, 2362, 527, 2418, 808, 1617, 1997, 1678, 962, 1477, 703, 79, 2868, 2783, 2663, 2639, 1575, 2547, 2264, 2715, 1542, 706, 1761, 2316, 637, 965, 691, 122, 2912, 142, 1961, 373, 263, 1470, 289, 1533, 2927, 2097, 1725, 1647, 351, 2739, 1281, 3013, 1034, 2650, 705, 2775, 27, 931, 1758, 2597, 194, 1064, 1708, 1494, 2181, 585, 1071, 428, 2641, 347, 1853, 1423, 1157, 1013, 755, 3138, 2562, 1, 2166, 2226, 633, 2880, 2666, 2254, 2648, 1967, 2383, 16, 2116, 392, 2665, 827, 2247, 2064, 59, 2281, 2565, 559, 307, 1590, 773, 2638, 3300, 1311, 1821, 2331, 2667, 3242, 1480, 2059, 2440, 2933, 2023, 2417, 298, 641, 2631, 1894, 2527, 268, 770, 2396, 2478, 2560, 1015, 2021, 1202, 1117, 216, 2398, 886, 2651, 416, 1567, 
1128, 1834, 2113, 2182, 2919, 2925, 1665, 1319, 153, 1943, 1414, 819, 2057, 2625, 1879, 2428, 2475, 2637, 1447, 2827, 494, 1257, 2311, 2128, 1230, 1740, 2444, 303, 197, 1290, 751, 822, 2422, 985, 928, 2028, 872, 3054, 311, 2025, 2721, 2824, 1605, 2820, 180, 809, 616, 1351, 1599, 29, 993, 39, 895, 2313, 1301, 304, 1825, 2421, 2481]",1,,,, -"[1848, 186, 1143, 2488, 2507, 2506, 2959, 1009, 157, 2520, 2032, 2217, 2598, 2883, 927, 1625, 1877, 253, 2741, 138, 1582, 2910, 856, 1964, 156, 136, 1677, 1965, 177, 610, 1737, 242, 2886, 3284, 1220, 1251, 2203, 485, 44, 151, 2844, 1851, 2401, 884, 2196, 1227, 1135, 2901, 2153, 226, 1147, 1785, 2868, 104, 771, 2580, 1448, 2902, 2163, 774, 2581, 3039, 968, 2763, 2354, 2035, 3202, 930, 205, 2479, 24, 468, 1393, 744, 1068, 1256, 472, 1472, 401, 654, 991, 135, 2899, 2771, 2557, 1352, 2210, 2662, 430, 2346, 3232, 504, 2315, 173, 1818, 95, 1112, 3294, 882, 1992, 1813, 3068, 1829, 950, 2739, 2752, 544, 339, 638, 2713, 822, 703, 2642, 1116, 1654, 1687, 2673, 2184, 2112, 798, 1562, 2472, 26, 2643, 989, 119, 802, 1845, 2670, 476, 1721, 2313, 393, 1030, 519, 1180, 2119, 1046, 2063, 1704, 1971, 2672, 2519, 3125, 2515, 1727, 2269, 396, 2608, 338, 286, 417, 2609, 1307, 106, 1768, 1131, 1246, 545, 1854, 2761, 1445, 1578, 1218, 1684, 924, 2547, 1449, 1946, 462, 1418, 2903, 96, 118, 2299, 300, 555, 1111, 1868, 2059, 2900, 566, 91, 2606, 2789, 1366, 2450, 2924, 28, 2080, 561, 392, 2292, 1485, 1912, 569, 1136, 383, 1956, 670, 1573, 3139, 1447]",1,,,, -"[1064, 347, 2527, 2312, 1678, 1853, 2316, 2416, 1758, 2314, 1666, 2059, 847, 307, 1417, 2097, 2633, 889, 1647, 2239, 153, 268, 2925, 2398, 1605, 1117, 229, 2785, 1619, 1009, 2422, 351, 2545, 541, 886, 2631, 661, 1343, 2339, 1130, 2322, 172, 28, 850, 1517, 2443, 1708, 2490, 677, 2475, 2637, 1031, 1989, 2632, 627, 152, 2585, 1012, 2427, 453, 2564, 770, 2645, 2361, 2839, 2825, 1484, 941, 1087, 611, 1015, 2401, 395, 273, 2113, 807, 1157, 1097, 2481, 549, 2649, 2597, 2506, 2331, 303, 2323, 1399, 1875, 
2369, 2360, 1429, 2396, 1447, 2217, 2774, 1658, 2428, 2064, 2822, 930, 361, 220, 2370, 162, 1275, 1008, 600, 2638, 1578, 1424, 243, 1987, 2622, 2244, 1609, 10, 2655, 51, 2110, 527, 2404, 768, 1019, 2105, 1251, 1864, 2313, 246, 1663, 1044, 69, 157, 2824, 1398, 186, 880, 1731, 138, 1566, 2205, 429, 269, 236, 1330, 2328, 931, 875, 2912, 1034, 691, 864, 1178, 2782, 1214, 2282, 709, 1943, 113, 2721, 2875, 2927, 414, 2224, 1567, 2196, 2115, 1655, 314, 396, 428, 887, 1879, 2417, 2444, 2435, 99, 2783, 2362, 2405, 764, 755, 2254, 2230, 61, 2520, 147, 962, 2204, 144, 1918, 1580, 1835, 2063, 2823, 1415, 1587, 1131, 2053, 2488, 1071, 2421, 1591, 135, 498, 1668, 2493, 2507, 982, 3266, 2264, 2797, 137, 2977, 3026, 1725, 2420, 2509, 2418, 2759, 1633, 1822, 55, 1644, 237]",1,,,, -"[1214, 3026, 887, 2488, 1230, 17, 2112, 1545, 1943, 3274, 1736, 2568, 2649, 249, 2153, 1356, 1762, 2296, 2226, 1709, 2650, 158, 414, 138, 2062, 2574, 1128, 3277, 2964, 31, 205, 3018, 2464, 790, 379, 1083, 59, 2734, 1956, 2378, 1731, 567, 2329, 3265, 8, 2189, 1234, 1441, 1093, 2398, 1965, 906, 879, 2345, 783, 411, 2573, 2234, 339, 1410, 180, 1176, 2622, 2857, 405, 2447, 1666, 1426, 2633, 691, 1033, 142, 2424, 1000, 1610, 525, 1398, 43, 2669, 1625, 1097, 1019, 351, 2110, 1439, 1894, 96, 1310, 627, 854, 2567, 1620, 1989, 2977, 1309, 150, 2774, 2531, 2005, 465, 1112, 1392, 222, 921, 212, 1729, 1200, 1435, 1759, 136, 2621, 2400, 2512, 2179, 178, 1399, 1031, 2486, 647, 1131, 2487, 2328, 2639, 1043, 2404, 2940, 2249, 2948, 1567, 2065, 83, 1422, 2391, 2435, 2073, 2681, 1072, 468, 2425, 90, 3212, 900, 3076, 3306, 1821, 884, 1070, 2933, 2646, 2347, 1387, 1853, 2166, 2919, 2264, 2202, 2245, 2685, 1587, 1039, 486, 2100, 3206, 54, 656, 2458, 331, 2577, 3221, 3305, 1761, 2565, 244, 2346, 1251, 1012, 1287, 1002, 1733, 304, 1, 2200, 1225, 2493, 92, 2427, 2673, 2097, 1429, 689, 398, 2638, 1932, 26, 637, 661, 1196, 540]",1,,,, -"[2588, 713, 471, 2314, 158, 2473, 1097, 2670, 2584, 411, 2339, 535, 1, 2939, 485, 566, 1843, 
155, 922, 2657, 1335, 824, 2739, 2948, 526, 1818, 577, 690, 300, 397, 1052, 2541, 3326, 194, 1721, 2671, 2903, 1583, 946, 813, 1531, 2531, 2212, 2752, 778, 2884, 615, 3052, 1469, 2562, 1033, 1582, 1832, 417, 33, 2901, 1515, 2906, 729, 287, 722, 1522, 920, 698, 1162, 1410, 407, 2643, 2977, 808, 637, 865, 1170, 2952, 1315, 3213, 2240, 3215, 1303, 2811, 517, 2815, 1087, 1039, 1874, 2770, 2512, 3152, 998, 641, 1220, 2931, 30, 1286, 2628, 408, 2608, 2085, 236, 2920, 1587, 2814, 1585, 2330, 1011, 304, 2425, 2366, 1422, 3197, 1364, 135, 2949, 2163, 2672, 2080, 1246, 3011, 1613, 1447, 515, 1890, 2387, 506, 2050, 2095, 2899, 247, 3314, 3080, 1221, 106, 1836, 751, 1027, 1946, 1363, 3051, 927, 2391, 3202, 1235, 1211, 2790, 2919, 1094, 159, 1068, 564, 2245, 532, 2692, 3193, 991, 1426, 915, 860, 2157, 2345, 3296, 2054, 1401, 579, 2589, 2367, 2578, 18, 2154, 1999, 2025, 2673, 1566, 2148, 2066, 123, 190, 745, 2612, 2153, 3007, 2900, 136, 1797, 293, 3299, 1116, 2440, 2560, 3194, 1225, 2043, 1477, 634, 2631, 3006, 265, 2130, 504, 3066, 1021, 253, 1383, 2993, 2779, 1474, 234, 2819, 2732, 465, 2112, 924, 370, 2462, 3107, 1002, 3047, 969, 1239, 412, 56, 1004, 2441, 2771, 1921, 954, 2420, 2315, 1296, 1992, 450, 2400, 2266, 2401, 440, 28, 2497, 1000, 784, 501, 2776, 354, 1967, 2902, 1575, 1719, 812, 483, 1584, 2950, 1565, 2607, 2020, 1687, 1498, 884, 78]",1,,,, -"[70, 3071, 1474, 469, 1080, 2245, 1101, 2557, 2380, 2463, 2507, 3282, 2338, 31, 1917, 1478, 2058, 2770, 1759, 2624, 2244, 3296, 550, 1685, 2095, 2335, 43, 2219, 843, 2339, 371, 1924, 1419, 2595, 1049, 253, 2869, 2924, 774, 1613, 3010, 1235, 1562, 2464, 1746, 825, 483, 2534, 607, 3147, 1620, 2497, 2606, 2900, 564, 1447, 2520, 3133, 567, 119, 2598, 2899, 135, 2662, 2870, 136, 618, 342, 1043, 2512, 577, 2080, 2868, 641, 1393, 3207, 815, 884, 1561, 2608, 2580, 406, 1296, 1789, 989, 1107, 2902, 1423, 1322, 2959, 1326, 1220, 1585, 860, 3288, 1033, 2903, 2631, 3120, 3244, 280, 1027, 2346, 1817, 132, 1156, 1778, 900, 634, 1849, 2112, 
401, 1848, 1172, 1959, 2177, 808, 2752, 96, 118, 2721, 78, 2579, 1522, 751, 254, 1566, 2811, 813, 1246, 1288, 1072, 2920, 362, 2025, 1309, 1877, 3009, 1702, 736, 1085, 1408, 2206, 2315, 1721, 2020, 1826, 2610, 1676, 2401, 1771, 707, 3274, 1999, 678, 3257, 1572, 703, 2740, 3220, 2059, 1180, 1944, 2672, 485, 286, 2400, 2819, 2212, 1687, 300, 1710, 2132, 2816, 62, 1657, 505, 1792, 450, 2963, 2779, 2932, 1353, 2837, 1725, 2745, 472, 1234, 2612, 1785, 2496, 3048, 779, 2780, 1485, 316, 1211, 1477, 87, 383, 1874, 3051, 672, 922, 2560, 1162, 494, 3134, 1551, 228, 1829, 2030, 1324, 1135, 590, 642, 339, 780, 2789, 429, 1946, 2052, 1671, 721, 1837, 2554, 1449, 3146, 1000, 2739, 2120, 407, 2314, 2815, 2845, 3265, 2028, 1648, 2581, 32]",1,,,, -"[2196, 1052, 2519, 359, 540, 3029, 2507, 138, 882, 542, 2588, 136, 468, 3215, 641, 1046, 338, 3284, 1333, 80, 713, 1, 1221, 923, 486, 1020, 1879, 1247, 155, 751, 1265, 316, 110, 2295, 1849, 595, 1845, 2054, 2048, 450, 12, 1000, 2919, 1239, 2642, 1654, 2560, 44, 2952, 2498, 1965, 471, 440, 2332, 2931, 394, 2673, 135, 1447, 1625, 1566, 2835, 56, 922, 2315, 878, 2712, 1671, 2646, 2911, 2776, 2609, 2400, 819, 2401, 2314, 1986, 154, 1704, 1836, 2450, 2299, 2283, 272, 1561, 1039, 106, 156, 1915, 2531, 95, 83, 300, 1246, 2670, 1932, 2710, 158, 1532, 3046, 2080, 822, 1227, 2312, 535, 2933, 567, 2711, 3018, 142, 670, 3274, 615, 2163, 649, 2059, 1551, 1156, 2154, 24, 2739, 1097, 1901, 579, 2313, 2210, 150, 3089, 1043, 1939, 2740, 624, 928, 2815, 1531, 1023, 1875, 1423, 920, 1737, 674, 2220, 1105, 2448, 3047, 285, 1167, 2734, 1429, 2339, 1545, 3216, 1971, 2547, 2608, 1685, 778, 2672, 1477, 577, 1865, 104, 409, 707, 2993, 2811, 1311, 221, 564, 1493, 1677, 1912, 2028, 1727, 411, 2607, 13, 3050, 455, 2447, 2292, 638, 2427, 2269, 1087, 2189, 1768, 860, 3202, 3099]",1,20.0,19.0,376.0,0.5548542230237451 -"[1915, 1532, 595, 1660, 3089, 3218, 272, 641, 489, 1849, 405, 2179, 547, 927, 1965, 2993, 106, 1200, 1135, 2776, 2249, 1387, 450, 2316, 1052, 3017, 
2295, 879, 3215, 1004, 2811, 379, 1693, 2400, 2299, 1280, 1701, 2332, 2950, 3148, 2920, 3216, 567, 1447, 1170, 3236, 1046, 582, 2840, 234, 2269, 2498, 1753, 154, 882, 1704, 130, 1964, 587, 215, 1002, 2154, 542, 3228, 421, 1043, 1516, 1671, 1561, 2210, 155, 2519, 2132, 2560, 1227, 1239, 1020, 95, 2180, 2275, 653, 676, 2054, 84, 2189, 1677, 2153, 921, 923, 92, 1162, 2973, 2196, 647, 1551, 1221, 2134, 780, 3257, 3099, 3046, 1921, 2507, 3217, 1545, 2120, 563, 751, 1768, 465, 564, 2222, 2391, 1678, 1778, 3145, 2740, 104, 3207, 634, 1246, 2745, 110, 822, 1000, 472, 2312, 203, 1303, 3107, 1353, 2921, 784, 2902, 2258, 3051, 1235, 1265, 2447, 2100, 1522, 550, 1094, 412, 3274, 3050, 2347, 3009, 2025, 18, 1525, 49, 2899, 1587, 244, 2952, 2095, 1578, 1068, 397]",1,22.0,16.0,355.0,0.5807033363390441 -"[1677, 1964, 2217, 104, 2316, 2519, 138, 1135, 2903, 1437, 1116, 28, 2931, 1768, 2212, 2510, 561, 2682, 927, 2196, 1532, 2902, 316, 2515, 98, 2952, 3257, 242, 1235, 336, 610, 468, 1946, 2580, 1678, 3266, 3292, 923, 540, 1311, 2607, 421, 2266, 2275, 2295, 2899, 2041, 634, 2329, 1474, 2119, 2132, 2614, 440, 2299, 654, 2113, 1227, 882, 450, 812, 1992, 2815, 1965, 1103, 930, 1478, 1684, 2912, 419, 95, 1915, 1572, 465, 1031, 2507, 974, 2819, 3018, 1836, 2292, 3025, 1999, 1259, 154, 1020, 2451, 1953, 2218, 865, 476, 822, 1561, 542, 1877, 2814, 2531, 3089, 1891, 1484, 412, 3047, 905, 2498, 1136, 1704, 1778, 2578, 1384, 1551, 1162, 504, 1920, 3049, 1335, 2345, 2452, 1000, 1835, 924, 1303, 1734, 771, 2332, 649, 968, 3294, 1280, 2673, 142, 2901, 573, 2120, 272, 1939, 876, 1545, 1415, 2557, 2122, 3050, 201, 3207, 2312, 203, 2269, 1180, 716, 286, 1068, 1477, 2581, 638, 1078, 61, 32, 1600, 928, 3296, 3274]",1,22.0,16.0,402.0,0.5043582807333934 -"[2282, 2565, 2354, 2491, 492, 1448, 2264, 229, 414, 59, 1943, 2774, 3286, 1214, 2052, 1422, 2639, 1034, 2373, 2686, 1940, 2196, 1493, 2097, 2347, 1567, 1740, 661, 220, 1956, 3174, 1702, 351, 968, 1275, 2021, 453, 2366, 2640, 994, 1009, 13, 180, 2592, 
2372, 2429, 1492, 2353, 91, 374, 473, 3132, 887, 2632, 1357, 1894, 2217, 2247, 2405, 1758, 1971, 669, 1128, 1012, 794, 2492, 1927, 2633, 677, 322, 1230, 691, 1157, 394, 2627, 1238, 147, 1561, 993, 839, 2239, 2641, 627, 1672, 2819, 450, 2315, 2519, 1167, 1731, 2718, 330, 2782, 2435, 2404, 1965, 2675, 1666, 1808, 2790, 2638, 2550, 2650, 2368, 2651, 1030, 1013, 227, 2649, 755, 1986, 3011, 798, 843, 1811, 253, 2711, 2719, 776, 1350, 3184, 2230, 1096, 930, 2507, 1668, 136, 347, 1279, 2506, 2202, 2584, 2479, 1605, 2299, 2073, 1809, 796, 2089, 2628, 177, 672, 2360, 2944, 1992, 1105, 2205, 1401, 1439, 1286, 1143, 695, 2443, 1777, 1879, 2166, 1677, 2710, 2546, 156, 500, 990, 237, 1131, 370, 2224, 2306, 154, 2797, 2621, 2573, 1064, 122, 577, 2655, 648, 3133, 3053, 1112, 847, 1048, 75, 113, 1244, 2579, 1726, 2728, 1609, 690, 2598]",1,22.0,19.0,415.0,0.5004508566275925 -"[2354, 3286, 2052, 2373, 1493, 1940, 2196, 1448, 13, 2347, 2366, 374, 473, 1702, 1894, 2353, 414, 1357, 2491, 1956, 1672, 2686, 1971, 3174, 794, 1758, 2368, 2372, 2627, 2633, 968, 91, 2217, 1965, 394, 3011, 839, 1808, 2519, 1167, 1561, 2819, 2315, 322, 2718, 2202, 2628, 2719, 227, 2711, 1279, 798, 1986, 2429, 930, 843, 3132, 253, 1030, 2790, 661, 2404, 1666, 1927, 2944, 2632, 330, 347, 2479, 2584, 136, 2710, 1811, 695, 2507, 229, 776, 1105, 177, 1992, 3184, 2089, 1096, 2360, 672, 1064, 2550, 156, 1009, 669, 1809, 1131, 1286, 2621, 887, 2492, 677, 1401, 1677, 1244, 1492, 1609, 1879, 2546, 2306, 2598, 122, 113, 577, 990, 2638, 1726, 2886, 370, 1143, 2426, 3133, 352, 75, 1125, 2527, 1350, 2205, 396, 2675, 31, 1665, 1358, 1818, 1048, 2579, 2073, 557, 2648, 1728, 884, 216, 2728, 2487, 455, 847, 1411, 1575, 1334, 298, 607, 2355, 2573, 500, 690, 123, 680, 648, 1238, 1652, 3053, 2524, 154, 2299, 3007, 1901, 2428, 982, 325, 3139, 188, 2478, 1019, 827, 3298, 949, 435, 12, 1517, 1655, 1558, 846, 2367, 1738, 152, 1251, 2681, 43, 78, 150, 464, 1723, 3249, 2488, 2224, 1868, 1875, 1186, 1731, 1625, 468, 803, 2655, 1622, 
1453, 25, 1333, 3019, 1399, 138, 2499, 2057, 2680, 993, 2551, 1591, 1759, 2884, 1853, 3027, 1736, 263, 190, 410, 2622, 875, 2619, 273, 556, 770, 2520, 2394, 2062, 1130, 266, 2919, 1989, 2427, 2331, 1473, 2399, 966, 2512, 1172, 2733]",1,22.0,24.0,355.0,0.5650736399158401 -"[1660, 405, 2447, 84, 130, 1753, 1004, 541, 2840, 1965, 1326, 2488, 547, 2335, 3218, 879, 429, 3046, 624, 1525, 2134, 3217, 2179, 2245, 1387, 2258, 3216, 2950, 489, 653, 582, 2973, 2920, 2234, 3026, 2782, 921, 1027, 83, 2399, 2646, 1131, 1170, 1430, 154, 1701, 2299, 768, 2439, 2911, 18, 2487, 808, 2622, 587, 616, 1002, 2089, 1609, 569, 2596, 1625, 483, 1516, 563, 466, 1668, 1620, 1426, 186, 1221, 2391, 2462, 1585, 273, 1472, 2597, 215, 1813, 1200, 1665, 2721, 790, 1826, 2100, 1422, 1654, 3153, 1097, 3148, 2413, 2562, 62, 1822, 2861, 3138, 676, 647, 2829, 1693, 2032, 1157, 1973, 620, 2774, 1251, 3099, 2166, 1020, 59, 2799, 1009, 680, 234, 691, 1410, 2632, 2977, 3017, 1987, 455, 55, 2645, 494, 1279, 2295, 1117, 157, 542, 1561, 2883, 709, 1817, 1853, 1214, 2440, 1253, 2434, 882, 2017, 1915, 1921, 1864, 2931, 1943, 408, 2631, 2054, 670, 1771, 304, 2636, 1967, 395, 2875, 138, 2154, 194, 1848, 3086, 2174, 450, 1736, 972, 2066, 2598, 1165, 254, 1582, 637, 485, 2097, 931, 115, 1417, 1802, 2312, 2627, 745, 982, 136, 2921, 241, 2882, 468, 220, 2020, 968, 1567, 379, 2122, 1046, 3224, 1532, 936, 2226, 28, 92, 2674, 1605, 151, 2507, 2564, 1473, 2633, 1264, 2249, 242, 1195, 353, 2993, 2509, 396, 2752, 1359, 2896, 1576, 2210, 1445, 332, 880, 1877, 2204, 2222, 2506, 2217, 2657, 2967, 2628, 1772, 2519, 311, 2803, 2067, 1087, 2041, 2739, 2520, 2401, 2924, 1797, 2897, 1415, 1758, 1052, 2740, 930, 749, 744, 286, 272, 3215, 2718, 2189, 2510, 2196, 2119, 2864, 2013, 2719, 236, 986, 267, 2863, 1025, 2720, 1259, 2511, 1362, 805, 298, 1466, 551, 98, 1734, 1443, 1239, 2113, 2912, 1136, 3266, 1835, 2063, 1484, 796, 413, 548, 243, 1920, 599]",1,22.0,29.0,340.0,0.525097685602645 -"[634, 1162, 2275, 1103, 2150, 771, 3317, 
2776, 2901, 3120, 2203, 2259, 2132, 1068, 2252, 2727, 566, 203, 3288, 2515, 2899, 1046, 2519, 654, 2210, 3318, 421, 882, 2911, 1946, 1791, 2080, 1246, 119, 17, 2278, 3223, 1915, 649, 3133, 3299, 154, 1992, 476, 2450, 419, 854, 698, 1000, 778, 2412, 2814, 2646, 542, 132, 1813, 272, 3009, 1329, 2497, 2815, 550, 106, 70, 1225, 642, 2902, 1842, 1437, 1039, 83, 2059, 1247, 1561, 1532, 1384, 779, 2726, 700, 3215, 1094, 3134, 1671, 1107, 1135, 584, 2495, 552, 1472, 1932, 1324, 2557, 316, 577, 3018, 1971, 974, 450, 2819, 1836, 1545, 670, 1474, 155, 3294, 2534, 923, 32, 2952, 2052, 2740, 462, 1531, 442, 2295, 28, 2903, 1020, 1052, 3089, 228, 2120, 1077, 2299, 2900, 2608, 2196, 2218, 3052, 205, 1265, 1256, 610, 1239, 142, 2609, 1891, 540, 1710, 819, 927, 455, 2816, 2837, 1792, 638, 2931, 579, 2734, 2035, 3296, 332, 2220, 2452, 1455, 860, 408, 2400, 300, 2189, 2296, 2316, 317, 1383, 922, 2631, 3207, 411, 1678, 2507, 1965, 2177, 1227, 294, 3146, 2993, 2963, 1410, 2910, 2025, 624, 2531, 2312, 911, 2673, 1033, 1322, 3046, 2292, 3017, 465, 1353, 3265, 1170, 412, 2054, 812, 1004, 1352, 729, 1221, 879, 2447, 2973, 96, 2950, 2112, 1945, 921, 3099, 721, 1280, 1789, 555, 2909, 3051, 790, 2345, 2134, 2763, 3257, 2441, 18, 2951, 2449, 1829, 2154, 2451, 1027, 567, 1970, 2095, 194, 1200, 56, 201, 811, 1087, 2977, 3148, 707, 547, 3026, 1677, 1849, 2920, 1002, 1967, 1817, 2789, 2672, 2149, 2440, 2335, 2245, 2066, 783, 285, 1408, 2921, 1235, 379, 1522, 2249, 808, 1921, 494, 397, 1701, 304, 92, 234, 1326, 637, 2448, 1778, 1939, 3218, 3076, 212, 2085, 2028, 1307, 2836, 478, 745, 130, 653, 2562, 582, 2391, 1771, 1516, 84, 215, 647, 429, 489, 1387, 1043, 1620, 1693, 2283, 440, 587, 3047, 3050, 1865, 2636, 1303, 3216, 1797, 822, 2498, 3217, 1464, 676, 1525, 2100, 928]",1,22.0,32.0,355.0,0.5284039675383229 -"[649, 2292, 300, 1836, 624, 3296, 1324, 2779, 2497, 1531, 670, 1410, 3046, 2560, 83, 860, 455, 2816, 3018, 3294, 777, 2815, 745, 2646, 3017, 411, 878, 641, 595, 2911, 2220, 201, 579, 2920, 
1002, 1384, 535, 2951, 3284, 3099, 2814, 2080, 2919, 2085, 1200, 1932, 2673, 1225, 359, 106, 142, 154, 158, 2507, 2451, 2950, 1366, 2932, 1170, 394, 2819, 135, 542, 2447, 2726, 1561, 136, 1654, 2921, 540, 1039, 2473, 2670, 3215, 615, 1004, 2448, 1052, 138, 156, 2450, 1845, 2179, 1311, 3257, 1043, 1265, 1939, 1429, 2196, 3029, 332, 1, 2531, 2295, 17, 2643, 1701, 3217, 920, 44, 1447, 2776, 1721, 1532, 2734, 2059, 1879, 2315, 440, 2993, 2740, 1566, 2210, 1246, 2134, 130, 2345, 2283, 316, 1020, 2449, 1625, 2441, 234, 1220, 155, 24, 1303, 2672, 486, 471, 468, 3039, 564, 2739, 2452, 778, 1895, 409, 790, 1578, 2811, 923, 1551, 2519, 1333, 2313, 1247, 272, 2401, 80, 819, 1423, 1000, 1912, 2588, 1387, 2642, 2607, 2933, 638, 879, 2312, 1915, 2383, 698, 1239, 2952, 2391, 2153, 2314, 567, 2187, 1583, 450, 2712, 379, 1865, 412, 1727, 3047, 2584, 1143, 713, 1693, 90, 751, 419, 1525, 3216, 2189, 405, 1280, 921, 577, 1545, 1046, 1227, 1964, 2332, 28, 1753, 882, 1778, 1971, 2547, 2339, 338, 2299, 2498, 1875, 1986, 2154, 582, 1307, 2249, 676, 1477, 2427, 1097, 674, 1677, 3052, 3048, 1965, 18, 3218, 922, 1464, 104, 2711, 3274, 587, 2609, 2835, 2316, 822, 3089, 1901, 1516, 2269, 1493, 2400, 1023, 559, 118, 1871, 1418, 2218, 84, 2163, 3146, 2440, 3133, 729, 3148, 465, 285, 2931, 3026, 2054, 110, 1572, 1704, 489, 1768, 339, 812, 1967, 653, 1660, 472, 13, 2648, 95, 2048, 56, 2608, 1467, 92, 707, 392, 159, 1221, 2840, 780, 1167, 1087, 2710, 3049, 2977, 541, 547, 1849, 1894, 1671, 703, 1921, 2973, 12, 1620, 1771, 2745, 221, 1826, 647, 1817, 2636, 429, 563, 2028, 1027, 194, 1737, 3050, 397, 1678, 150, 2100]",1,22.0,32.0,376.0,0.5858130447850917 -"[2401, 2373, 1836, 1435, 1654, 1939, 2730, 177, 2312, 624, 2220, 2450, 3076, 3146, 2898, 1448, 1971, 3153, 1030, 1, 2752, 2780, 2661, 860, 234, 819, 2292, 44, 138, 2815, 136, 1845, 2447, 455, 1901, 2259, 468, 486, 1879, 1211, 485, 1333, 1131, 783, 2479, 2062, 2354, 1324, 2814, 2662, 1912, 698, 2643, 547, 2451, 1200, 649, 142, 1052, 2080, 2734, 
3148, 584, 419, 2497, 1964, 332, 1672, 2449, 2299, 2919, 2816, 1167, 2486, 3017, 778, 1004, 247, 2993, 2391, 545, 567, 2507, 2712, 1002, 476, 2196, 154, 2464, 3120, 2005, 394, 409, 3296, 411, 2346, 1531, 2448, 2452, 2920, 1225, 1585, 135, 300, 212, 2672, 544, 1112, 28, 1625, 2495, 3221, 2519, 392, 2932, 579, 1986, 2657, 12, 1532, 2531, 2134, 1418, 1039, 1891, 104, 201, 882, 2345, 2648, 158, 1410, 478, 1932, 2048, 1246, 647, 2642, 2776, 2646, 792, 2670, 2278, 1721, 2933, 777, 2210, 654, 1227, 642, 130, 610, 555, 3089, 1668, 1915, 2085, 338, 80, 2473]",1,24.0,16.0,415.0,0.5755936278929967 -"[2911, 777, 624, 201, 1030, 1448, 525, 2670, 17, 540, 1836, 2574, 3197, 1721, 96, 2573, 1971, 2816, 1225, 745, 212, 2292, 106, 649, 2643, 597, 2575, 33, 2495, 651, 1894, 177, 28, 1039, 2729, 2932, 1701, 2519, 1366, 2473, 783, 2734, 879, 2835, 3174, 155, 1392, 2255, 3017, 2497, 689, 1939, 542, 142, 83, 2672, 2673, 1324, 812, 2836, 294, 1052, 923, 392, 1829, 1002, 2345, 1170, 130, 1020, 1220, 455, 648, 2646, 316, 339, 1435, 2196, 478, 2134, 1654, 2921, 1043, 2819, 421, 3294, 2312, 2452, 2634, 2814, 3076, 18, 778, 2486, 419, 2343, 3133, 1551, 2052, 547, 2531, 2547, 2507, 860, 1525, 2295, 2776, 3215, 1932, 2789, 107, 405, 300, 2498, 819, 1418, 2153, 2401, 159, 1244, 332, 1200, 1660, 2857, 2085, 3296, 1004, 2316, 2726, 3194, 2815, 2730, 2779, 3195, 1854, 3046, 3018, 2278, 489, 846, 2305, 1162, 2476, 2080, 2332, 476, 2647, 1727, 3167, 1472, 1694, 2447, 3120, 1410, 1964, 882, 26, 707, 1915, 2558, 222, 2950, 92, 1384, 2464, 2451, 5, 2952, 104, 3210, 1353, 1572, 942, 3053, 3010, 465, 1863, 2951, 285, 2100, 3257, 577, 2898, 1531, 2993, 118, 1256, 790, 722, 647]",1,24.0,19.0,402.0,0.5217914036669672 -"[2898, 1435, 2730, 356, 2769, 2110, 1551, 3110, 2202, 1989, 1130, 1768, 3207, 506, 582, 3234, 1704, 1921, 1964, 104, 43, 405, 1094, 1666, 2186, 88, 567, 850, 18, 2729, 3046, 1398, 2052, 843, 3051, 472, 2923, 2498, 2745, 1853, 1307, 421, 1759, 2277, 3236, 144, 2275, 1464, 3011, 669, 1625, 2524, 
3080, 886, 1865, 473, 1011, 790, 26, 780, 2283, 2332, 689, 269, 203, 1583, 2584, 1111, 2993, 1429, 674, 1097, 587, 1843, 703, 2721, 2642, 222, 2132, 110, 1678, 638, 3156, 3215, 339, 3221, 2066, 2884, 24, 3274, 2631, 1239, 2840, 3108, 2182, 1184, 3235, 1353, 3089, 2628, 637, 1477, 1796, 84, 2421, 1808, 2980, 1967, 2269, 2422, 2387, 3228, 1135, 1215, 499, 2900, 1545, 1191, 564, 1992, 227, 1875, 444, 2464, 1399, 1567, 338, 44, 1727, 923, 2491, 1476, 2655, 2331, 3009, 2931, 2567, 2218, 2054, 118, 2768, 2154, 2427, 1782, 708, 2400, 2973, 541, 2685, 412, 2486, 1043, 1983, 2440, 1162, 2005, 2140, 2453, 244, 32, 482, 822, 3109, 1218, 152, 3120, 681, 1652, 1890, 304, 2120, 1587, 969, 2189, 2441, 1778, 3050, 1000, 2492, 1227, 1441, 906, 1849, 2548, 2520, 2288, 1474, 550, 2899, 1980, 408, 2147, 613, 3133, 3026, 1738, 1687, 2608, 3048, 2683, 921, 3049, 1485, 2579, 249, 2157, 1156, 1872, 634, 1797, 1845, 884, 194, 3068, 494, 2919, 1087, 922, 1172, 3010, 397, 42, 3206, 2225, 642, 2551, 2568, 2065, 263, 465, 577, 440, 2028, 809, 95, 1068, 2669, 737, 56, 2901]",1,24.0,24.0,415.0,0.5037571385632702 -"[1130, 1580, 268, 2110, 2316, 2422, 2830, 1567, 1666, 1034, 1758, 144, 2362, 492, 2586, 2443, 347, 1097, 1591, 2444, 1517, 2360, 1875, 2235, 1117, 69, 1678, 2887, 361, 10, 904, 314, 2331, 1582, 466, 1708, 1453, 744, 622, 2622, 2361, 3275, 1417, 847, 138, 755, 2323, 1044, 2488, 2089, 453, 1989, 152, 2527, 2314, 2650, 627, 2782, 1853, 1699, 1178, 3138, 1019, 1587, 1070, 1644, 2506, 135, 661, 1399, 1605, 2783, 2859, 1698, 2645, 153, 2466, 1578, 377, 864, 1422, 677, 131, 1943, 229, 1630, 1330, 1028, 1802, 1398, 1351, 1012, 1214, 1774, 1812, 2649, 468, 1633, 2413, 2651, 2205, 1821, 273, 2420, 565, 28, 2765, 396, 1655, 2059, 2370, 2371, 2405, 2324, 2681, 889, 2032, 2919, 2230, 3187, 2217, 242, 1009, 414, 98, 2204, 2720, 2404, 2162, 197, 307, 2421, 186, 2264, 2774, 2509, 764, 1251, 796, 2224, 2017, 1492, 1429, 1275, 1447, 1995, 2250, 1665, 1725, 2638, 2282, 2247, 930, 887, 1112, 395, 770, 
2632, 1877, 2313, 55, 875, 2605, 2637, 2596, 968, 1918, 151, 2239, 2147, 2021, 2417, 1711, 1131, 2964, 2166, 1609, 1668, 428, 2839, 1625, 2339, 2621, 1015, 2680, 2399, 1064, 1042, 147, 1823, 1731, 936, 2545, 715, 1973, 2369, 3266, 1359, 2883, 2633, 3045, 2797, 1912, 2174, 2718, 157, 611, 2299, 1238, 3144, 1557, 1395, 3227, 237, 272, 2321, 2119, 2312, 114, 1424, 2320, 413, 1096, 1956, 620, 2196, 2439, 2799, 1264, 1965, 350, 2519, 1419, 2322, 575, 1702, 2063, 172, 2429, 2874, 684, 1759, 2719, 154, 2886, 1346, 66, 2507, 1940, 609, 2520, 512, 43, 1576, 805, 1414, 2315, 709, 241, 1062, 2861, 1136, 1634, 31, 2866, 1355, 2342, 1987, 2122, 603, 1471, 2984, 3128, 874, 2698, 313, 2308, 2208, 2598, 450, 964, 2494, 402, 1091, 2803, 439, 311, 2341, 136, 1328]",1,24.0,29.0,340.0,0.5085662759242561 -"[2919, 1, 1130, 147, 2912, 1943, 492, 158, 113, 2925, 2782, 1471, 2494, 2829, 2360, 2967, 1054, 2282, 1731, 2774, 1517, 1034, 1238, 1355, 549, 982, 1439, 1214, 1012, 1484, 144, 1091, 1533, 994, 273, 453, 229, 2984, 2110, 2113, 2420, 486, 1630, 1580, 1275, 2097, 2174, 1178, 1128, 2234, 2421, 1096, 2926, 428, 2404, 2361, 2147, 61, 2331, 1761, 2675, 1157, 12, 241, 1617, 1665, 2089, 2362, 398, 1257, 2398, 2264, 847, 1362, 1230, 2426, 1398, 1605, 2797, 2226, 59, 637, 682, 1414, 773, 2427, 3086, 2200, 2564, 2596, 2933, 2405, 3054, 2021, 2422, 55, 150, 768, 2205, 2565, 2443, 1395, 2363, 2977, 2633, 1795, 755, 1989, 494, 1879, 1587, 691, 1758, 1634, 152, 2475, 1826, 677, 1736, 1467, 559, 1301, 220, 1417, 1399, 2308, 1422, 1607, 402, 194, 69, 1835, 2073, 1843, 1308, 1483, 2887, 2631, 2401, 1429, 303, 1644, 2859, 153, 609, 808, 2861, 1386, 1831, 887, 2626, 2645, 2636, 1567, 2583, 796, 1655, 1480, 1097, 1918, 2866, 2483, 2063, 2435, 627, 1326, 1821, 2562, 1875, 1176, 231, 1777, 1853, 2648, 2324, 1666, 968, 2204, 1647, 2638, 2440, 2245, 548, 2224, 1027, 661, 198, 965, 1346, 2927, 2639, 2506, 429, 680, 2622, 2820, 304, 43, 1894, 541, 2715, 1620, 875, 3026, 2119, 180, 2335, 2487, 414, 2217, 2166, 
1009, 2466, 62, 3324, 435, 2429, 2341, 2640, 2342, 1771, 157, 2428, 162, 2641, 3013, 1087, 1019, 3275, 2632, 2994, 468, 1940, 1635, 2621, 2681, 2759, 1973, 1877, 1492, 170, 2882, 298, 2680, 99, 1759, 1251, 1453, 186, 2721, 1812, 1740, 3014, 1568, 874, 1415, 247, 1956, 351, 2598, 805, 744, 2488, 242, 138, 1070, 396, 1351, 2523, 2520, 491, 930, 2676, 2399, 413, 880, 2228, 221, 151, 1625, 2247, 2839, 2586, 2522, 2964, 2235, 706, 1112, 1582, 2718, 2803, 1131, 2720, 31, 1609, 1473, 2455, 395, 894, 2719, 1817, 136, 2364, 2886, 1702, 49, 311, 2196, 2032, 1965, 2519, 1668, 2299, 38, 148, 154, 2507, 236, 2315, 1510]",1,26.0,32.0,340.0,0.5025548542230237 -"[1666, 2110, 847, 1587, 924, 2356, 865, 1517, 1989, 43, 321, 144, 2360, 1202, 2399, 887, 3153, 582, 2205, 1836, 1609, 2578, 794, 1399, 2650, 154, 2886, 1200, 414, 569, 1013, 2664, 2089, 504, 1759, 1422, 152, 2227, 547, 2718, 2398, 2292, 3099, 298, 2919, 1335, 1575, 2891, 1130, 1002, 2429, 2147, 1570, 2670, 875, 616, 2141, 1622, 3282, 1398, 1070, 2632, 1019, 768, 2422, 113, 2421, 839, 1917, 2474, 1665, 2672, 2478, 1131, 2331, 670, 468, 930, 2488, 2681, 158, 2487, 1429, 1853, 968, 2734, 2621, 2219, 371, 982, 220, 2646, 557, 2598, 396, 2217, 509, 1625, 1492, 573, 1400, 31, 138, 627, 2924, 2420, 486, 2680, 12, 1112, 1357, 1875, 1049, 401, 680, 719, 137, 1115, 2196, 2622, 1965, 2299, 2520, 2854, 2134, 492, 637, 2712, 1170, 273, 150, 1473, 2224, 394, 559, 2585, 549, 1419, 3028, 1455, 1760, 1009, 1362, 2463, 3071, 435, 2427, 2257, 2426, 1877, 2911, 1736, 1582, 285, 1467, 2740, 1620, 197, 1871, 532, 912, 624, 2254, 736, 153, 1746, 359, 540, 2519, 1561, 1986, 1647, 3046, 677, 1453, 594, 2163, 2535, 2604, 577, 819, 2933, 2931, 2719, 2066, 2187, 2607, 1, 2507, 232, 3228, 1811, 362, 142, 2339, 755, 1921, 966, 1821, 1879, 2667, 1251, 1247, 3018, 136, 292, 1445, 607, 721, 27, 1967, 2554, 815, 2663, 83, 501, 1956, 2739, 1944, 3009, 2845, 2837, 2741, 3029, 661, 1272, 1097, 286, 2338, 1023, 2128, 648, 2638, 2673, 2547, 1423, 1676, 920, 
2959, 2900, 2401, 615, 1493, 856, 1940, 923, 2899, 1318, 2932, 333, 1311, 2428, 713, 75, 915, 3081, 2483, 2631, 3080, 918, 2534, 80, 2132, 316, 2647, 322, 2923, 221, 2711, 3284, 2661, 2218, 2346, 2665, 993, 3072, 2080, 1215, 65, 471, 1333, 236, 2666, 1094, 13, 2052, 1843, 1925, 421, 517, 2316, 1341, 506, 2048, 1167, 398, 3151, 1583, 2588, 1702, 1678, 2315, 1895, 2383, 409, 2314, 1848, 2906, 777, 2380, 618, 1529, 2710, 1961, 878, 20, 1011, 502, 1901, 2059]",1,26.0,32.0,376.0,0.5037571385632702 -"[2911, 777, 624, 2670, 201, 1448, 525, 542, 540, 1020, 17, 2519, 1030, 1836, 1721, 2574, 2816, 649, 1225, 2295, 106, 745, 96, 212, 2573, 2643, 2495, 2299, 2292, 2729, 1039, 2507, 3197, 28, 1894, 1971, 450, 651, 1366, 2932, 2196, 2473, 1701, 33, 783, 2734, 2575, 155, 879, 2835, 3017, 1392, 2255, 177, 689, 1939, 2497, 1965, 597, 83, 2673, 142, 2672, 392, 1324, 812, 294, 154, 1170, 2836, 923, 1052, 130, 1002, 1829, 2345, 1220, 2646, 316, 455, 1915, 3174, 1435, 882, 648, 2819, 339, 2134, 2921, 1654, 3294, 2312, 18, 1043, 778, 2452, 2814, 2634, 272, 3076, 419, 2486, 478, 2052, 547, 2531, 1525, 3133, 2547, 2776, 860, 1532, 107, 3215, 1932, 1561, 2789, 300, 405, 1551, 819, 1418, 2153, 2401, 2343, 159, 1660, 790, 1200, 332, 2085, 1244, 2498, 2857, 2726, 3296, 1004, 2316, 3194, 2815, 2779, 3195, 2730, 707, 489, 1854, 3018, 3046, 846, 2278, 2476, 2332, 476, 2647, 1472, 3257, 2447, 1694, 26, 1410, 1572, 2952, 3120, 92, 222, 2950, 1964, 1727, 2464, 2451, 1384, 5, 3010, 3210, 647, 2100, 104, 1353, 465, 285, 1863, 2951, 421, 1531, 2898, 2558, 118, 3099, 2993, 1256, 3053, 1753, 722, 2383, 1162, 2609, 3216, 2450, 1311, 3193, 90, 2160, 2189, 3218, 3221, 440, 921, 1303, 3084, 1678, 942, 577, 1265, 1402, 2920, 2080, 2740, 1143, 582, 2739, 676, 780, 2179, 215, 2305, 1778, 3274, 3196, 579, 653, 1239, 2449, 1817, 411, 80, 2243, 2931, 2220, 1280, 1246, 3047, 1310, 2112, 2448, 3148, 234, 308, 792, 587, 698, 3132, 567, 2283, 1334, 2391, 1464, 3146, 379, 721, 2900, 922, 822, 84, 1307, 1967, 1221, 
2210, 2683, 2269, 412, 3217, 1485, 168, 2154, 1728, 2608, 56, 563, 1704, 1865, 1516, 2258, 3049, 3026, 1186, 1000, 3052, 642, 2028, 1545, 472, 2249, 746, 2218, 156, 2441, 1921, 2054, 3207, 1046, 1768, 110, 1737, 1135, 550, 2275, 372, 2731, 2005, 2840, 1891, 2713, 1094, 3039, 1907, 729, 2973, 1693, 638, 32]",1,26.0,32.0,402.0,0.5891193267207695 -"[2461, 982, 468, 2460, 604, 2622, 582, 1591, 1666, 2488, 1202, 136, 1742, 887, 637, 2427, 2458, 2066, 979, 1875, 2459, 923, 2404, 2346, 2153, 1853, 263, 1251, 847, 347, 1759, 1398, 1410, 1191, 3017, 2305, 525, 1918, 43, 2575, 330, 1729, 2726, 138, 2655, 988, 2345, 1989, 2574, 2646, 465, 1399, 1453, 1170, 1235, 2527, 1804, 2573, 486, 2886, 413, 331, 2090, 1019, 989, 2224, 3110, 1570, 2633, 2057, 2501, 1180, 1238, 243, 2638, 2343, 1039, 247, 336, 96, 106, 1965, 1731, 1492, 1064, 2378, 942, 875, 414, 3108, 411, 2422, 1130, 1184, 2356, 1426, 3206, 1429, 2848, 2112, 2729, 2675, 2891, 3293, 2911, 2426, 2549, 1362, 2507, 2110, 132, 1309, 648, 2548, 3109, 2643, 900, 383, 1111, 447, 1033, 3027, 2580, 2949, 2727, 2520, 2948, 1196, 705, 1737, 3184, 93, 31, 430, 83, 1812, 1709, 1072, 92, 2169, 1225, 3156, 921, 352, 1070, 968, 2621, 1983, 2933, 509, 379, 3152, 3028, 1600, 144, 2065, 25, 1558, 2478, 1684, 2776, 2331, 2062, 178, 559, 155, 3244, 2561, 2581, 1009, 2647, 2088, 2189, 1, 2686, 8, 3084, 1677, 2531, 441, 464, 1879, 152, 2681, 2055, 1625, 2685, 3025, 1587, 651, 2140, 5, 1097, 1096, 1350, 156, 3068, 2673, 2919, 3053, 922, 2421, 2944, 1655, 3046, 2425, 661, 2939, 32, 2603, 1656, 1234, 158, 2632, 122, 3134, 191, 2812, 455, 876, 2648, 1575, 1791, 1782, 1868, 1048, 1894, 597, 615, 3228, 2306, 1358, 1030, 1971, 1115, 325, 561, 2072, 3174, 1087, 827, 1218, 3287, 1085, 119, 1143, 205, 2943, 2147, 2424, 2429, 1667, 3160, 2613, 1093, 2546, 3234, 3011, 2567, 2058, 12, 3292, 221, 954, 2728, 244, 2819, 1863, 2541, 118, 2157, 1613, 1685, 1244, 2347, 1728, 3139, 1366, 1818, 3132, 2288, 3236, 1334, 54, 3286, 2740, 1183, 1467, 154, 90, 2614, 690, 
190, 1821, 906, 1542, 1829, 2367, 150, 3042, 2420, 1186, 2680, 3133, 269, 370, 683, 2568, 1609, 1927, 2329, 2328, 2558, 728, 746, 249, 2225, 578, 2368, 2030]",1,26.0,32.0,415.0,0.5846107604448452 -"[234, 2993, 3017, 2316, 2249, 1660, 2179, 2332, 3236, 3218, 1964, 379, 1004, 405, 2159, 3215, 2498, 1693, 1170, 2920, 3228, 1387, 879, 1200, 1704, 547, 3046, 2894, 923, 130, 95, 2180, 489, 2840, 2222, 3216, 2269, 587, 104, 2153, 1768, 784, 1239, 1551, 2189, 465, 2950, 1701, 1043, 2154, 2391, 2400, 2973, 3148, 582, 1002, 1678, 780, 564, 2025, 92, 1753, 84, 3099, 676, 653, 110, 215, 822, 921, 3107, 2312, 1921, 1578, 106, 1545, 2567, 472, 647, 1516, 2347, 2054, 1303, 3274, 1522, 1094, 1587, 3050, 285, 2313, 2134, 2921, 1353, 421, 3217, 3048, 2052, 397, 2132, 563, 2745, 1772, 254, 1363, 244, 3051, 269, 1778, 2900, 703, 450, 28, 809, 1235, 339, 1447, 2059, 674, 408, 412, 2258, 2315, 2899, 1384, 3009, 1087, 1967, 2095, 3006, 638, 2345, 2100, 3234, 3235, 1477, 1822, 790, 707, 49, 928, 2066, 2218, 922, 2447, 18, 1525, 2631, 1410, 906, 2568, 3052, 1865, 746, 1782, 194, 3049, 2245, 2636, 541, 2335, 1464, 494, 2977, 3047, 2283, 2420, 70, 2028, 3207, 508, 304, 721, 3144, 3271, 1727, 1408, 1729, 3026, 2558, 729, 1542, 637, 1771, 1817, 1326, 2341, 1429, 429, 2835, 1797, 2314, 955, 2308, 1307, 2440, 3220, 3194, 2672, 808, 440, 1903, 2721, 2562, 1572, 249, 33, 1912, 2342, 1620, 3145, 2931, 135, 2346, 2080, 1379, 2878, 66, 1823, 3045, 2737, 2441, 1672, 2738, 577, 2353, 2662, 2493, 54, 2661, 1409, 1027, 2442, 1448, 458, 1042, 3187, 877, 1971, 1155, 2378, 1826, 195, 1566, 62, 1030, 1785, 2354, 177, 2358, 2608, 56, 1407, 2431, 2687, 2357, 1062, 2062, 2373, 2375, 728, 2609, 2479, 2652, 1887, 2233, 940, 2467, 2232, 498, 1885, 1867, 1468, 776, 781, 2372, 598, 576, 390, 1373, 1001, 2105, 761, 2103, 1577, 871, 999, 1948, 2350, 3160, 2351, 600, 1882, 1916, 357, 2552, 1754, 1119, 2374, 260, 2749]",1,28.0,30.0,300.0,0.5079651337541329 -"[887, 1290, 1660, 2023, 962, 2782, 405, 2650, 528, 51, 492, 
16, 27, 2435, 541, 2785, 2447, 2840, 2335, 2064, 807, 429, 1480, 246, 1753, 2632, 549, 493, 1607, 2475, 130, 59, 2488, 2234, 2230, 137, 1326, 547, 3026, 1965, 303, 2829, 2826, 1525, 3217, 965, 1034, 84, 3218, 43, 586, 2245, 879, 2116, 921, 3046, 1012, 1004, 2396, 624, 2258, 2664, 585, 2021, 835, 755, 1027, 2181, 2179, 1708, 2134, 2950, 2668, 2651, 1013, 453, 2622, 1430, 2257, 2166, 1230, 2596, 616, 2882, 1414, 2089, 2828, 2784, 1387, 2053, 2527, 3138, 2399, 2398, 2585, 2299, 2439, 3216, 1131, 582, 1343, 1644, 1426, 2827, 466, 1170, 1759, 1214, 489, 1834, 2597, 314, 2413, 808, 1620, 1625, 2487, 2462, 83, 483, 1157, 2646, 62, 2973, 2786, 2663, 18, 1609, 653, 569, 847, 587, 1422, 1009, 495, 768, 1516, 1668, 1630, 2649, 2774, 1701, 2681, 154, 2887, 2799, 2226, 1273, 1238, 1973, 2920, 2911, 790, 1221, 1654, 2861, 563, 1826, 2824, 1585, 691, 2823, 1219, 186, 2721, 941, 1002, 2254, 3153, 680, 2417, 2783, 2564, 1822, 978, 3148, 2830, 2032, 2860, 1665, 1813, 1472, 1020, 2097, 2391, 2821, 215, 2562, 886, 2100, 2444, 2115, 1064, 2977, 2481, 2434, 1853, 1864, 620, 157, 1200, 775, 455, 485, 3099, 1663, 153, 1071, 1253, 2295, 347, 676, 1693, 273, 1943, 1015, 1567, 1117, 1251, 1281, 1561, 1410, 1987, 647, 972, 2822, 1097, 55, 542, 494, 2631, 2017, 2633, 882, 29, 3017, 1817, 2047, 234, 1915, 220, 633, 1771, 1279, 289, 1658, 1921, 1736, 304, 2931, 2645, 770, 1165, 1605, 2820, 745, 2154, 2244, 2440, 395, 709, 2883, 2875, 408, 2896, 194, 241, 2636, 181, 2490, 2174, 527, 1470, 2054, 2752, 1802, 670, 2139, 982, 396, 136, 1758, 1848, 138, 115, 450, 968, 2598, 931, 1590, 3086, 3224, 637, 2921, 2312, 2520, 1532, 1967, 2066, 2657, 1724, 2825, 1582, 2020, 28, 2506, 353, 151, 1264, 936, 30, 254, 799, 2204, 1576, 2122, 2509, 2627, 2507, 2967, 1046, 468, 1359, 1417, 744, 242, 92, 2674, 379, 2897, 1195, 880, 2993, 2249, 1054, 332, 311, 2217, 1052, 2041, 2222, 1445, 2210, 2803, 1877, 1415, 1473, 1772, 2628, 1087, 2739, 1025, 1797, 2519, 749, 2067, 2924, 2401, 2740, 930, 2119, 2863, 2864, 272, 
286, 267, 3215, 986, 551, 2511, 2013, 1259, 2189, 2196, 805, 2510, 2718, 98, 1484, 1443, 2284, 2719, 1362, 905, 2297, 1920, 2720, 599, 1466, 796, 413, 236, 1734, 298, 2113, 386, 1239, 1406, 2063, 1890]",1,28.0,40.0,450.0,0.5025548542230237 -"[155, 2295, 2993, 922, 1915, 1532, 1246, 2776, 882, 1965, 234, 2210, 2578, 106, 2080, 2299, 2196, 542, 2519, 450, 1046, 3215, 1004, 1000, 2507, 624, 2332, 2973, 1221, 272, 1162, 634, 2609, 670, 1020, 1849, 653, 1002, 790, 135, 1472, 865, 316, 359, 721, 1671, 285, 2054, 1572, 587, 2100, 84, 1265, 2391, 1561, 3029, 1200, 771, 1043, 550, 1256, 2441, 2059, 3009, 2498, 1891, 1525, 319, 3218, 2132, 2258, 1912, 154, 2835, 1677, 18, 676, 1227, 1052, 516, 3217, 1964, 1516, 2179, 1946, 1551, 1545, 1971, 547, 1239, 1303, 2920, 2734, 2163, 2903, 92, 3060, 577, 2400, 2608, 2952, 130, 2557, 2950, 2296, 421, 1693, 1247, 2154, 647, 2495, 2672, 563, 1836, 3257, 455, 1408, 440, 379, 405, 1727, 1967, 2189, 95, 104, 3148, 2931, 1135, 2313, 1660, 1992, 582, 2315, 1447, 3274, 56, 3089, 2283, 1683, 489, 2447, 1474, 2727, 1087, 2269, 3047, 822, 1778, 920, 80, 1701, 1753, 2450, 567, 2740, 2278, 1895, 2275, 1615, 1704, 28, 819, 1068, 2547, 2646, 1033, 412, 674, 2840, 3049, 110, 476, 1578, 2312, 707, 1410, 1865, 911, 2899, 203, 1235, 2670, 1768, 70, 2095, 3026, 564, 780, 2134, 515, 1307, 729, 879, 2631, 615, 2249, 1311, 2901, 215, 3046]",1,30.0,20.0,300.0,0.5278028253681996 -"[113, 755, 220, 150, 83, 2421, 347, 12, 2239, 887, 2147, 2527, 1471, 186, 1422, 1, 1064, 1214, 405, 3046, 1112, 2650, 236, 2964, 158, 627, 351, 3128, 839, 180, 1130, 2331, 2651, 2179, 1398, 1758, 2435, 2649, 1965, 2883, 2399, 1004, 1943, 2933, 2073, 1630, 1157, 55, 994, 2565, 2639, 1009, 492, 3026, 2427, 402, 1582, 2234, 2205, 847, 2632, 549, 2488, 2675, 2633, 2028, 790, 59, 1002, 2782, 1417, 1568, 982, 1989, 2226, 2404, 1956, 2405, 1921, 2020, 2920, 2598, 2420, 2487, 2641, 2645, 2919, 1761, 1031, 1257, 130, 2774, 464, 1230, 2398, 486, 1795, 1170, 1853, 603, 1915, 84, 1483, 1660, 
1525, 2674, 1200, 547, 1731, 144, 2882, 2391, 1012, 2258, 1176, 1918, 1668, 2426, 2422, 2510, 2545, 1221, 653, 2973, 396, 2721, 1238, 1473, 2648, 332, 2911, 921, 2739, 2200, 1587, 2110, 2950, 1693, 1517, 1567, 1605, 1128, 1399, 894, 1771, 691, 2631, 2829, 194, 2638, 1429, 879, 1813, 1020, 889, 569, 152, 2797, 2562, 808, 157, 1736, 2640, 1879, 241, 2447, 661, 2166, 455, 2967, 559, 1797, 1561, 2429, 2360, 1654, 1097, 1665, 1096, 1894, 1027, 1666, 136, 1326, 3017, 2682, 2886, 2564, 744, 2520, 3086, 1701, 1821, 768, 2622, 1362, 2720, 1777, 882, 1967]",1,30.0,20.0,400.0,0.5019537120529005 -"[3139, 2283, 2588, 471, 555, 2452, 1220, 1307, 253, 123, 974, 3039, 928, 3049, 674, 1721, 2245, 205, 1865, 3274, 2339, 2314, 1683, 783, 2449, 2448, 798, 80, 1437, 2497, 3050, 2657, 2863, 1992, 1235, 624, 670, 1046, 545, 2269, 3296, 472, 1704, 713, 1531, 610, 1727, 28, 95, 884, 2095, 1919, 2547, 2819, 1536, 1836, 2383, 2450, 2670, 812, 300, 577, 1326, 777, 2316, 2447, 1654, 465, 2259, 1939, 1116, 2220, 110, 860, 923, 2910, 2451, 392, 1678, 319, 1103, 70, 822, 1324, 2278, 540, 2292, 3257, 3009, 2608, 2683, 1311, 2222, 1891, 1687, 1818, 3099, 3153, 91, 2400, 2296, 2721, 2673, 3146, 2519, 1778, 2816, 1621, 1474, 851, 615, 856, 2347, 2249, 1027, 2203, 3284, 579, 1165, 516, 1768, 2531, 515, 771, 468, 1920, 1545, 419, 2900, 3052, 3018, 2028, 2473, 421, 3047, 1813, 1734, 2434, 430, 1383, 1877, 1772, 2217, 2085, 911, 1945, 1684, 2726, 267, 541, 2122, 649, 2020, 2345, 1000, 1408, 3294, 2899, 2312, 1485, 294, 3299, 2672, 56, 1615, 2557, 1472, 721, 2631, 1418, 2973, 1256, 1522, 930, 778, 3318, 1443, 401, 2335, 2462, 142, 353, 2920, 1445, 2210, 703, 2752, 2950, 3266, 968, 2745, 2739, 2495, 476, 2734, 3148, 1247, 3317, 450, 2815, 2119, 1170, 2509, 698, 2218, 2741, 2646, 455, 2951, 1004, 285, 53, 1817, 1410, 138, 2628, 194, 2174, 2924, 2507, 819, 3051, 18, 2515, 809, 1656, 1039, 654, 2196, 2284, 462, 2132, 1477, 3224, 1890, 729, 2911, 2636, 83, 3216, 749, 3046, 929, 2134, 1842, 2297, 1002, 548, 61, 
584, 599, 3048, 2054, 2401, 411, 405, 2952, 412, 397, 485, 2441, 796, 1052, 2041, 1932, 2897, 1848, 1136, 429, 790, 316, 2511, 2025, 1771, 1826, 2814, 1225, 17, 1200, 1221, 1259, 442, 386, 3026, 3217, 3218, 1464, 2510, 1620, 2391, 1239, 1484, 1514, 2179, 98, 1835, 1921, 1812, 62, 2932]",1,30.0,30.0,400.0,0.507063420498948 -"[1964, 2519, 2515, 104, 2316, 1677, 3257, 1532, 316, 2196, 540, 2931, 1768, 2295, 654, 1678, 1684, 2531, 1992, 2507, 882, 1474, 2819, 561, 2952, 1915, 1572, 3018, 923, 1235, 2113, 2815, 1877, 2329, 465, 450, 2899, 822, 542, 95, 2614, 412, 2498, 440, 1311, 2292, 2580, 1836, 2912, 2345, 1551, 1020, 1891, 1561, 1965, 419, 2332, 154, 2452, 504, 974, 1039, 1000, 2673, 1778, 610, 649, 1484, 924, 865, 2814, 1303, 3047, 2218, 1939, 272, 1545, 3089, 1835, 2578, 3049, 2132, 2299, 1384, 2557, 1103, 2845, 1043, 455, 1335, 300, 1280, 584, 142, 777, 1094, 1180, 3296, 2269, 1531, 716, 3294, 3274, 771, 1971, 1033, 2734, 1704, 61, 411, 819, 3318, 339, 421, 110, 2441, 2911, 3299, 1849, 638, 476, 1142, 17, 1477, 921, 397, 1183, 928, 2080, 319, 1256, 1383, 70, 579, 2581, 2400, 2497, 83, 2900, 1600, 1656, 1932, 3146, 2449, 1058, 2085, 2741, 1727, 2052, 2259, 2220, 2608, 294, 2672, 2447, 2448, 577, 201, 3052, 3050, 472, 1366, 860, 573, 812, 2609, 2745, 1410, 1170, 564, 1671, 332, 876, 2631, 3317, 1225, 1324, 1953, 2450, 3051, 2816, 2613, 2835, 2278, 3099, 745, 780, 1522, 2950, 698, 1307, 2848, 1353, 2063, 624, 567, 2495, 2451, 2646, 3207]",1,32.0,20.0,350.0,0.5037571385632702 -"[319, 624, 136, 3153, 2217, 2734, 545, 2296, 2789, 777, 2719, 1046, 2220, 2259, 2646, 1366, 2932, 1472, 2062, 468, 2598, 819, 332, 2657, 96, 2519, 1251, 1578, 242, 2718, 1615, 1493, 2278, 930, 2643, 783, 1992, 1112, 106, 3076, 3296, 135, 339, 247, 151, 3029, 670, 1654, 212, 1672, 1167, 1131, 2662, 316, 2315, 396, 1030, 882, 1871, 3017, 778, 1912, 395, 2450, 2354, 455, 142, 3120, 3060, 1418, 2451, 155, 516, 138, 2401, 2720, 1842, 2776, 2134, 154, 2727, 2292, 411, 359, 1211, 1485, 2312, 13, 
2964, 2080, 1877, 1956, 721, 1895, 1836, 911, 222, 1097, 1136, 771, 201, 2032, 2196, 28, 2898, 2063, 2670, 2314, 1052, 2672, 2210, 1033, 1247, 2447, 1585, 2119, 2730, 2313, 2179, 2819, 2449, 744, 1000, 1625, 18, 2059, 2383, 2473, 1448, 2661, 285, 2495, 1116, 792, 1939, 991, 80, 2520, 44, 1220, 2920, 2488, 1105, 2584, 1932, 2299, 1971, 1668, 812, 1582, 234, 2452, 1561, 84, 2711, 968, 338, 1693, 653, 1, 1435, 2780, 515, 555, 3146, 1265, 1721, 2448, 1737, 1103, 3046, 2921, 1531, 1965, 1447, 2836, 2642, 1256, 584, 24, 2726, 1002, 1701, 294, 610, 2346, 157, 860, 2302, 2910, 544, 649, 2835, 478, 1253, 579, 1324, 642, 2815, 2814, 615, 2497, 1753, 2752, 118, 674, 3266, 1671, 2464, 2673, 2507, 2163, 186, 1200, 485, 177, 2739, 2085, 3148, 547, 1791, 2258, 1383, 2100, 563, 2683, 2765, 1020, 1915, 745, 2648, 272, 1845, 1894, 1246, 2479, 1945, 3318, 476, 2498, 1532, 1311, 3299, 1221, 1280, 1410, 1009, 2547, 2531, 540, 567, 300, 1225, 1227, 2228, 2883, 2054, 440, 921, 1891, 130, 920, 2952, 2779, 2886, 156, 3218, 3284, 405, 2911, 2816, 2950, 1660, 104, 2391, 1039, 1474, 450, 2112, 2373, 419, 2316, 2345, 2557, 2951, 3010, 465, 1967, 1727, 83, 2931, 1525, 3294, 2249, 3018, 32, 1004, 676, 1043, 1387]",1,34.0,30.0,450.0,0.5467388037270815 -"[2054, 850, 307, 1290, 895, 2182, 2230, 886, 2860, 622, 2269, 472, 292, 314, 110, 2828, 493, 1708, 851, 268, 1567, 616, 1678, 302, 1605, 941, 59, 2527, 1281, 2396, 2899, 1430, 703, 51, 27, 95, 2418, 2585, 2651, 691, 2427, 2784, 937, 3009, 2282, 2023, 1943, 3050, 2631, 1591, 993, 2665, 1087, 3162, 2597, 2637, 1214, 2417, 2324, 1522, 2331, 70, 2783, 2443, 1333, 131, 2021, 2361, 2416, 1590, 2667, 2064, 75, 16, 3274, 931, 2655, 2128, 3049, 303, 150, 2968, 373, 770, 2785, 1967, 1239, 790, 412, 1012, 2360, 2786, 10, 99, 2264, 2774, 1704, 2822, 2254, 2583, 586, 2162, 2132, 1013, 2565, 922, 2622, 2866, 847, 2933, 1986, 2840, 912, 2489, 1157, 527, 627, 2648, 2002, 2166, 113, 839, 1417, 1117, 2650, 2398, 1670, 1894, 1761, 2782, 1973, 2426, 872, 2415, 1630, 
1015, 773, 2926, 144, 962, 397, 2663, 2799, 421, 1617, 1989, 548, 3047, 2401, 1658, 585, 1219, 1328, 147, 2487, 1843, 2599, 2110, 2226, 1607, 1470, 928, 2571, 2967, 921, 347, 2664, 1128, 1517, 1054, 677, 2234, 3324, 695, 1484, 835, 55, 582, 2645, 2116, 428, 799, 2803, 2490, 1343, 1834, 715, 2440, 972, 880, 229, 2676, 2925, 2775, 1399, 2441, 1130, 2388, 1568, 1583]",1,10.0,20.0,550.0,0.5422302374511572 -"[1660, 405, 2179, 2993, 1387, 3017, 2920, 1004, 3218, 2950, 2332, 1170, 234, 3236, 489, 1693, 379, 1200, 3215, 2249, 2316, 2153, 547, 3228, 2840, 1704, 106, 2269, 3216, 879, 1701, 130, 3148, 3046, 582, 1753, 923, 2154, 2498, 1964, 95, 465, 587, 676, 2180, 647, 2189, 1002, 2159, 1516, 1043, 215, 1353, 84, 2391, 1551, 92, 2222, 1239, 653, 2894, 3099, 2567, 2973, 1545, 1678, 2313, 2400, 2134, 1768, 921, 780, 1235, 104, 564, 822, 2921, 809, 3107, 110, 3274, 2025, 2347, 1921, 472, 1303, 563, 2054, 1587, 3217, 2745, 784, 2312, 244, 3050, 3051, 1363, 1094, 2258, 408, 2447, 1578, 2315, 2100, 2132, 18, 674, 1525, 49, 1522, 3048, 790, 703, 2900, 1772, 638, 285, 450, 254, 269, 2095, 1778, 28, 412, 2059, 1087, 2899, 3207, 3006, 2052, 1410, 2066, 2345, 3235, 1967, 2218, 1447, 1477, 1865, 339, 541, 2245, 397, 906, 922, 1822, 2283, 3009, 2636, 928, 2335, 3047, 2631, 421, 707, 3234, 194, 494, 70, 3049, 2977, 3026, 1771, 2420, 304, 2341, 1817, 1464, 508, 1727, 2558, 637, 1384, 1542, 429, 1307, 1408, 3271, 1326, 729, 3052, 2568, 746, 721, 3144, 1782, 2028, 2440, 2835, 808, 440, 1729, 2672, 1903, 2308, 1797, 1429, 3220, 2562, 1620, 2342, 33, 249, 2314, 3194, 2721, 955, 66, 1912, 1572, 1027, 62, 2878, 2931, 3045, 135, 577, 2080, 3187, 3145, 2441, 2493, 1826, 2346, 1823, 54, 56, 1672, 2738, 2737, 2608, 1062, 1155, 1409, 2661, 2353, 1379, 1448, 2375, 2378, 2609, 1971, 1042, 2062, 877, 2431, 2442, 940, 195, 2232, 1030, 2467, 2662, 177, 2354, 2373, 2372, 2358, 2687, 1887, 761, 728, 1407, 1566, 2357, 2479, 2233, 2652, 3160, 458, 781, 2350, 2374, 1468, 1916, 498, 776, 1785, 598, 1867, 2351, 
576, 1885, 390, 357, 260, 2749, 871, 2103, 1373, 1119, 999, 1948, 2105, 1001, 1754, 1577, 2552, 600, 1882]",1,10.0,30.0,300.0,0.504658851818455 -"[2448, 2451, 579, 396, 2217, 1324, 2452, 2450, 1531, 2495, 2816, 2447, 372, 540, 2345, 1939, 294, 1410, 1220, 819, 2085, 2519, 168, 3046, 2346, 136, 3210, 1625, 2646, 3294, 2032, 1932, 2643, 411, 1863, 476, 1002, 142, 2302, 846, 2734, 1728, 3296, 2558, 332, 746, 468, 3018, 3196, 1112, 2488, 2153, 2670, 2920, 2449, 642, 2220, 968, 247, 1694, 2574, 2911, 1472, 212, 3266, 300, 882, 923, 649, 930, 3193, 547, 2752, 597, 3017, 177, 2836, 26, 154, 2062, 1244, 2598, 2497, 1964, 2950, 860, 2228, 2729, 2343, 1251, 308, 1004, 33, 242, 1912, 138, 1561, 186, 17, 83, 1836, 395, 744, 544, 3153, 2835, 2119, 2196, 2647, 1721, 201, 2112, 2531, 2479, 1448, 3146, 106, 2673, 942, 2575, 3076, 2259, 2383, 2312, 653, 777, 1485, 1311, 2255, 151, 1200, 1701, 2964, 1891, 2278, 2464, 1039, 2951, 2373, 419, 2134, 525, 2815, 3099, 1136, 1582, 2506, 1307, 1854, 2886, 879, 1265, 1532, 689, 478, 2573, 2486, 2210, 1030, 2292, 316, 485, 1256, 3197, 1052, 2520, 2401, 1131, 1965, 2609, 3132, 542, 2634, 2299, 1971, 1186, 1654, 2657, 3195, 2063, 2814, 1956, 2662, 2547, 2952, 2305, 104, 624, 455, 107, 3194, 5, 2883, 783, 92, 28, 2932, 1280, 3221, 2765, 587, 3084, 2316, 272, 2507, 648, 2473, 2080, 465, 2776, 3174, 1366, 1915, 130, 2672, 1672, 159, 2726, 563, 2283, 1877, 2295, 405, 234, 3148, 379, 1225, 2993, 1753, 1668, 2898, 2354, 155, 135, 1727, 698, 1387, 1170, 3257, 707, 1392, 647, 545, 1907, 157, 2739, 1435, 921, 1660, 222, 1865, 2332, 2789, 3133, 84, 450, 440, 1310, 1046, 1525, 1048, 2391, 489, 1009, 2730, 2179, 582, 1020, 2661, 3215, 2857, 2921, 2538, 1043, 721, 1693, 1353, 722, 1572, 2189, 1246, 2608, 392, 2476, 2160, 56, 80, 156, 339, 2931, 2740, 1829, 792, 1239, 412, 1143, 1253, 567, 1551, 2840]",1,10.0,30.0,450.0,0.5608656447249775 -"[1068, 136, 1660, 405, 2179, 2519, 2973, 2920, 2447, 634, 18, 647, 1162, 1946, 3017, 1693, 2899, 1532, 2132, 550, 1525, 
2283, 2902, 1915, 2903, 3218, 713, 2835, 882, 3099, 676, 1004, 1654, 3046, 582, 3148, 1423, 2275, 465, 2588, 2739, 1265, 2258, 2134, 2059, 3274, 92, 1387, 130, 316, 790, 595, 1671, 421, 2391, 567, 1677, 1000, 1516, 1727, 1135, 471, 2498, 2315, 2507, 2316, 489, 927, 3215, 1865, 142, 1753, 3217, 95, 547, 1307, 1170, 2921, 1545, 155, 1701, 1678, 203, 2314, 1200, 2100, 2153, 2901, 1246, 822, 1384, 923, 1043, 1410, 3009, 2400, 2740, 879, 921, 1572, 906, 2196, 3052, 1052, 641, 1474, 563, 2312, 2931, 3216, 2401, 2984, 2066, 1002, 379, 285, 215, 2249, 878, 234, 2295, 1921, 70, 2332, 412, 2364, 170, 1280, 674, 703, 1239, 2776, 2154, 1464, 2269, 1885, 2210, 922, 2631, 1849, 2950, 542, 587, 1551, 84, 2339, 1020, 1235, 1094, 269, 3048, 38, 1303, 106, 2120, 2189, 450, 1729, 3049, 564, 751, 1561, 472, 1967, 653, 2993, 1447, 3089, 2952, 1768, 491, 1704, 638, 3207, 32, 2557, 339, 3257, 110, 498, 2159, 2861, 2866, 2095, 2560, 2345, 2840, 1965, 1227, 1964, 28, 928, 3026, 104, 2218, 2994, 1328, 1822, 3047, 2900, 2745, 1903, 1477, 541, 3051, 2228, 1373, 1253, 440, 707, 721, 2811, 2028, 2636, 311, 2335, 2652, 408, 2493, 2894, 2363, 2245, 1046, 780, 2313, 3050, 2440, 808, 2052, 2299, 2455, 637, 154, 194, 429, 2672, 272, 1353, 1087, 2441, 600, 2562, 2054, 1221, 1578, 809, 2347, 1326, 2721, 2420]",1,20.0,25.0,350.0,0.5581605049594229 -"[373, 1605, 1708, 975, 501, 1011, 1758, 677, 2299, 2361, 2230, 622, 966, 1123, 912, 2923, 123, 2166, 2443, 2639, 1570, 1843, 1818, 1275, 2711, 1580, 1567, 1167, 1117, 450, 486, 1105, 1034, 691, 965, 2356, 499, 1940, 1308, 1855, 1112, 1343, 1986, 557, 307, 1731, 2483, 42, 434, 2183, 1956, 1427, 2564, 1996, 1290, 2200, 186, 2478, 492, 1894, 1795, 2217, 1901, 509, 1214, 3074, 1575, 2247, 1944, 2648, 1872, 1215, 394, 2507, 396, 2444, 1012, 1084, 2596, 2925, 273, 1251, 2343, 2435, 2649, 298, 1796, 1483, 180, 887, 2819, 3086, 680, 1561, 875, 2769, 2130, 1761, 2968, 839, 2202, 2398, 1460, 2487, 398, 506, 2052, 468, 220, 150, 12, 1943, 3221, 1333, 347, 2882, 2506, 
2964, 850, 2475, 2710, 1010, 1493, 1927, 351, 886, 13, 2680, 2598, 1476, 1633, 2839, 768, 2565, 968, 2655, 229, 797, 1644, 2282, 162, 1279, 2360, 138, 3011, 361, 3209, 794, 253, 2315, 2387, 982, 2196, 2097, 1740, 2650, 2782, 1070, 231, 2362, 1583, 1808, 2347, 1202, 153, 528, 930, 2967, 1738, 1992, 567, 1965, 2405, 1877, 230, 2883, 237, 2550, 2226, 1013, 2182, 1, 1725, 1257, 2641, 1402, 3228, 1362, 26, 2820, 715, 796, 2891, 2857, 661, 2790, 502, 464, 1357, 2000, 435, 1009, 843, 2366, 2634, 2861, 2859, 1157, 549, 268, 2277, 1422, 1854, 136, 2458, 2829, 737, 244, 962, 816, 833, 1871, 2257, 25, 1736, 2797, 356, 2567, 755, 1568, 410, 91, 75, 158, 59, 1760, 453, 1589, 2021, 1517, 157, 2474, 1622, 1176, 1115, 2404, 1655, 2622, 2519, 1435, 1980, 2234, 1230, 2632, 1131, 559, 2886, 473, 3236, 2584, 249, 70, 325, 2305, 1522, 2293, 1351, 2089, 2399, 1630, 1473, 2728, 1879, 3139, 830, 2057, 2980, 1910, 2264, 3206, 607, 2095, 1401, 1350, 1821, 1411, 3080, 455, 1591, 2122, 2774, 213, 556, 1918, 3234, 147, 2568, 650, 1782, 1125]",1,20.0,30.0,550.0,0.5350165314096784 -"[540, 2486, 649, 2646, 2815, 396, 1932, 1225, 300, 1582, 2495, 2067, 157, 2814, 242, 1001, 468, 2179, 3017, 2507, 2920, 777, 1693, 2506, 1324, 83, 2734, 2883, 3296, 3099, 2719, 1039, 92, 43, 2739, 567, 2292, 3221, 405, 316, 2816, 2299, 1280, 1660, 136, 819, 138, 3018, 1097, 2886, 968, 142, 930, 1531, 2451, 1525, 1759, 2217, 3193, 1251, 2898, 2911, 222, 2222, 1265, 3218, 2032, 647, 676, 545, 1836, 921, 2531, 3028, 783, 1485, 1256, 186, 3248, 151, 2339, 18, 1965, 1625, 2732, 2447, 2726, 2258, 2964, 600, 2588, 294, 579, 1303, 698, 156, 1418, 508, 2314, 1561, 1410, 3046, 3197, 1170, 582, 550, 3153, 624, 2560, 595, 2448, 812, 2740, 746, 3076, 722, 2733, 489, 450, 1387, 1532, 882, 2278, 1516, 2210, 2005, 1143, 3215, 2598, 2473, 2228, 1939, 713, 3148, 1753, 2450, 3249, 2973, 254, 1435, 923, 1131, 1721, 1472, 2488, 2497, 1004, 2180, 2673, 2331, 2730, 190, 471, 3120, 878, 2683, 1200, 285, 3107, 2189, 2059, 2835, 3217, 2302, 
860, 1956, 1246, 155, 1701, 130, 1853, 212, 1046, 2134, 411, 547, 544, 2811, 1543, 2054, 744, 49, 478, 1423, 2921, 2932, 1227, 1239, 2931, 2464, 379, 1668, 879, 1009, 2100, 2519, 3047, 927, 634, 778]",1,30.0,20.0,550.0,0.5157799819657349 -"[3215, 234, 2776, 676, 3017, 2391, 3046, 2275, 2993, 1246, 1701, 1551, 2447, 923, 3099, 203, 1200, 379, 3218, 1772, 2179, 2745, 721, 2249, 1967, 1170, 1587, 2921, 2208, 2180, 2132, 587, 1387, 489, 2973, 582, 2519, 1660, 421, 2189, 1227, 1319, 1004, 2154, 339, 2100, 703, 927, 3220, 2920, 92, 2720, 1094, 2196, 936, 3207, 1044, 2369, 2066, 1264, 98, 106, 709, 1135, 1002, 1464, 2250, 1363, 805, 2439, 1516, 2840, 155, 465, 2312, 1753, 446, 3148, 921, 2867, 1395, 746, 1812, 2631, 1307, 1052, 3128, 2314, 2719, 2740, 2598, 2420, 547, 1768, 2313, 2258, 254, 1577, 2903, 472, 1822, 3107, 2283, 1693, 350, 2222, 154, 3051, 1000, 2637, 508, 2315, 1326, 564, 3257, 634, 1303, 1770, 620, 130, 790, 413, 405, 2672, 879, 2874, 784, 84, 2204, 684, 1778, 1545, 2899, 3164, 285, 1429, 2558, 1346, 1964, 1091, 2122, 1410, 2718, 1239, 2095, 1474, 3194, 3216, 565, 2507, 1408, 2059, 1542, 3235, 3009, 3006, 2399, 2134, 402, 1532, 1525, 1671, 18, 1668, 1353, 1384, 1865, 3217, 2052, 1678, 1265, 653, 3048, 647, 2218, 2900, 33, 2950, 215, 563, 1070, 1634, 2269, 134, 136, 2388, 1162, 1704, 550, 3052, 2389, 2332, 1477, 2316, 2323, 2498, 922, 104, 28, 1001, 1677, 1946, 1414, 2803, 1119, 1875, 244, 1359, 241, 1046, 110, 2028, 1802, 609, 603, 1987, 3271, 864, 2400, 2520, 2509, 874, 3026, 2299, 707, 882, 2153, 780, 1221, 450, 3274, 2054, 2721, 440, 1759, 43, 2174, 31, 1020, 3047, 638, 1578, 2952, 2441, 1043, 316, 2335, 2025, 412, 2017, 1447, 1422, 928, 2931, 2210, 1087, 2413, 2902, 2861, 1471, 2440, 70, 1727, 1921, 32, 466, 3089, 541, 1940, 674, 2977, 194, 1782, 2954, 2568, 1797, 637, 2345, 1702, 311, 3228, 542, 1849, 1826, 269, 822, 2295, 2494, 1068, 1235, 2984, 2866, 1566, 397, 2245, 2562, 95, 729, 2901, 2347]",1,30.0,30.0,450.0,0.5987376014427412 -"[2335, 2632, 
1835, 541, 2721, 936, 2404, 715, 1625, 1326, 709, 1340, 3026, 236, 429, 2275, 930, 624, 3025, 2380, 241, 1574, 1817, 618, 2674, 1609, 408, 2855, 98, 2621, 1660, 2633, 2584, 1940, 887, 1826, 2196, 2959, 1774, 1131, 286, 548, 2440, 250, 101, 3018, 2473, 1620, 1429, 1027, 405, 2245, 1087, 1162, 1702, 2134, 825, 1711, 1691, 790, 3046, 70, 83, 1135, 1348, 2439, 1485, 396, 2861, 1785, 1004, 2217, 1094, 634, 620, 450, 540, 333, 2977, 880, 1877, 808, 2900, 2845, 2840, 2261, 1721, 946, 130, 421, 1987, 2911, 3017, 3099, 2562, 2776, 1466, 1418, 43, 2132, 106, 1220, 535, 2921, 587, 194, 2903, 777, 2622, 2899, 1446, 1753, 170, 2218, 203, 2507, 1973, 84, 2020, 2720, 2950, 1445, 491, 653, 1918, 2646, 1971, 1759, 2683, 379, 637, 927, 2179, 1946, 882, 62, 2931, 1771, 1080, 2120, 2901, 755, 2363, 2672, 923, 304, 2100, 676, 285, 31, 3218, 2718, 863, 1731, 2670, 1693, 494, 489, 1096, 2520, 2607, 3009, 1837, 615, 18, 2364, 1264, 2189, 577, 2636, 2719, 550, 2346, 2519, 670, 316, 2413, 2920, 547, 2066, 2401, 682, 2174, 92, 721, 2163, 2391, 2345, 2902, 582, 234, 1561, 2017, 466, 2598, 1170, 647, 2228, 2631, 395]",1,40.0,20.0,350.0,0.5019537120529005 -"[1587, 2110, 1131, 2621, 649, 517, 1472, 2779, 2752, 1759, 3153, 670, 1399, 1836, 144, 485, 2662, 2331, 3053, 2292, 2027, 2210, 1843, 2421, 624, 1829, 1435, 2147, 1609, 1398, 1011, 2730, 476, 1583, 2495, 2661, 300, 2387, 1989, 119, 31, 875, 1130, 1251, 3046, 2681, 455, 3076, 1341, 1384, 2680, 1575, 509, 506, 1429, 3221, 2891, 2497, 2422, 138, 1324, 1362, 2080, 783, 154, 2217, 24, 1853, 3080, 1334, 2354, 1410, 3296, 2746, 2898, 394, 2622, 1052, 942, 2632, 83, 745, 478, 212, 607, 1002, 1531, 2919, 2085, 641, 2479, 2594, 3017, 567, 1247, 486, 2005, 3084, 860, 1200, 2560, 2816, 2420, 1666, 2464, 1448, 2646, 1030, 3099, 156, 878, 1256, 2346, 595, 409, 294, 1845, 1473, 411, 2451, 2224, 1529, 1838, 1070, 2112, 3277, 1891, 1654, 2220, 273, 43, 2835, 2815, 3294, 1202, 2911, 3120, 2718, 930, 177, 2474, 96, 847, 2356, 777, 201, 106, 2448, 1366, 322, 
1517, 3284, 968, 2507, 359, 2819, 1013, 1570, 2836, 1170, 2951, 2932, 3215, 2950, 680, 222, 3018, 2920, 1971, 2488, 642, 2726, 2814, 1115, 1625, 2278, 2427, 2447, 545, 1039, 535, 468, 1156, 396, 2062, 1043, 1932, 579, 1357, 1143, 1225, 2657, 2429, 982, 1311, 142, 1333, 3193, 2921, 2538]",1,40.0,20.0,550.0,0.5154794108806733 -"[2643, 3145, 155, 2196, 3296, 1932, 266, 106, 3153, 624, 2883, 1001, 2506, 968, 186, 2292, 1136, 2734, 157, 751, 783, 1582, 3266, 2779, 2776, 2063, 2507, 1324, 332, 468, 1172, 2275, 190, 2497, 2032, 2495, 1474, 2898, 2619, 142, 2730, 1225, 242, 1251, 2951, 138, 2488, 83, 1668, 2835, 1366, 2220, 154, 2486, 722, 2950, 294, 2920, 70, 478, 3046, 300, 43, 2134, 2210, 2752, 2302, 2816, 812, 698, 1131, 17, 1625, 2067, 930, 3076, 2458, 2119, 2952, 1135, 540, 1447, 3107, 2339, 1115, 1435, 2902, 136, 1200, 2903, 2921, 2683, 2450, 1965, 2512, 544, 600, 641, 1363, 713, 2520, 1004, 860, 2464, 778, 1829, 2732, 1384, 2558, 927, 486, 2911, 1572, 683, 3018, 419, 95, 2259, 2299, 579, 2819, 1112, 2560, 649, 3120, 471, 3144, 1423, 2894, 595, 2217, 1256, 2059, 2278, 1002, 2180, 476, 3228, 545, 1877, 2733, 18, 1510, 2765, 567, 2899, 2422, 3106, 2258, 721, 777, 2886, 2646, 2811, 2314, 440, 1891, 550, 2598, 1246, 634, 790, 3028, 485, 1915, 1836, 1485, 222, 247, 3236, 542, 1531, 421, 1170, 2519, 1715, 1472, 1545, 212, 1253, 2249, 824, 1009, 2901, 921, 2557, 878, 819, 1759, 450, 1097, 2973, 2448, 272, 1853, 2455, 587, 2588, 1130, 3221, 2315, 1043, 32, 1967, 879, 1532, 3009, 1782, 1046, 1410, 1890, 1543, 642, 2085, 1894, 2147, 784, 3017, 1516, 3248, 411, 3193, 2269, 745, 1280, 49, 1956, 1989, 3216, 991, 1587, 2189, 1753, 96, 2112, 3249, 2452, 674, 285, 1398, 3049, 3148, 882, 955, 1387, 1307, 159, 2222, 1303, 2447]",1,40.0,25.0,550.0,0.5527502254283138 -"[136, 2217, 396, 2560, 774, 24, 544, 968, 2730, 2496, 468, 2779, 2495, 2606, 2302, 573, 2520, 3221, 1196, 1561, 1233, 228, 151, 2299, 2119, 178, 2844, 642, 1818, 504, 2062, 2913, 8, 484, 222, 1447, 1135, 1791, 615, 1000, 
3029, 566, 950, 1478, 1683, 1107, 2032, 1531, 865, 3223, 1227, 2153, 567, 333, 2898, 2519, 1393, 649, 1737, 411, 2228, 2924, 2292, 2212, 2718, 989, 2662, 561, 1112, 159, 930, 670, 777, 1562, 927, 610, 1335, 1582, 338, 1956, 1785, 1848, 1912, 2055, 1435, 383, 634, 1625, 1030, 1691, 157, 2005, 465, 442, 203, 519, 1721, 3147, 1836, 2473, 2868, 342, 2345, 2479, 2817, 186, 1472, 2486, 1915, 393, 2964, 2642, 920, 1020, 2346, 1455, 1384, 1251, 2580, 1383, 1116, 1687, 624, 1729, 1532, 242, 1180, 3012, 2765, 1536, 1965, 505, 2719, 1668, 3146, 2671, 3202, 1046, 3296, 2315, 1449, 1672, 2598, 882, 3076, 2080, 2819, 2196, 2296, 1307, 2261, 2815, 1094, 550, 2347, 3294, 1677, 2959, 286, 300, 1959, 154, 417, 319, 1615, 778, 1033, 2902, 2112, 1147, 798, 2464, 2401, 2278, 545, 83, 1324, 2910, 2451, 1656, 1992, 698, 2259, 1448, 2058, 2661, 3120, 542, 2734, 1247, 2452, 1253, 1426, 1162, 2911, 1103, 3153, 212, 2163, 2845, 1971, 2670, 1613, 1578, 317, 1629, 2507, 2814, 1845, 2673, 401, 2150, 2547, 1877, 359, 138, 123, 2901, 1272, 744, 2354, 2283, 44, 118, 2752, 2578, 1545, 1654, 1842, 2450, 1318, 3010, 884, 783, 2220, 711, 2720, 2498, 3284, 2741, 2313, 2488, 450, 1183, 1210, 253, 2132, 1077, 1813, 272, 3231, 1111, 3207, 2506, 455, 2883, 1311, 3046, 1303, 156, 2683, 2448, 2581, 177, 2497, 80, 1143, 3139, 569, 28, 2899, 3318, 2886, 2646, 2727, 395, 421, 3039, 462, 911, 1964, 265, 2177, 2763, 555, 3232, 2252, 2836, 1234, 56, 135, 860, 2607, 332, 2052, 579, 3299, 1445, 1894, 1385]",1,40.0,30.0,550.0,0.5341148181544936 -"[472, 3274, 582, 2269, 1967, 2312, 95, 1704, 1915, 2899, 2316, 2132, 2066, 2400, 1768, 1678, 1921, 1545, 2153, 790, 1410, 110, 465, 703, 2314, 1822, 70, 450, 906, 822, 928, 2315, 269, 2840, 3009, 2347, 2745, 2631, 2493, 1903, 28, 2159, 1464, 2420, 2059, 311, 2345, 1253, 1578, 2189, 3051, 921, 38, 1328, 3026, 1729, 421, 412, 2900, 600, 809, 1885, 2028, 498, 541, 3052, 2894, 922, 3049, 2228, 808, 1239, 1235, 637, 2931, 2455, 2562, 1447, 2105, 2095, 2636, 2313, 2245, 2441, 
2440, 3048, 1326, 2335, 1865, 1087, 1373, 2652, 194, 1510, 3050, 429, 3047, 1522, 408, 49, 2025, 2154, 2721, 1817, 999, 2977, 397, 304, 135, 2218, 1754, 3144, 2054, 1948, 1620, 1477, 494, 1912, 3145, 2994, 955, 390, 54, 729, 2341, 2103, 871, 2346, 1797, 2358, 2342, 66, 1408, 1778, 1771, 1042, 2698, 2308, 2878, 2357, 2350, 1887, 728, 1916, 2431, 1566, 2351, 2080, 357, 598]",1,50.0,15.0,200.0,0.500751427712654 -"[2903, 927, 468, 2899, 2275, 1946, 3018, 1135, 924, 2122, 1932, 1437, 2510, 138, 2901, 865, 1919, 968, 974, 2902, 1094, 1256, 1225, 654, 550, 1415, 1835, 634, 1068, 1842, 2259, 286, 1660, 405, 203, 856, 930, 1514, 242, 2299, 670, 1259, 2447, 2450, 1891, 2401, 234, 1532, 1162, 319, 2391, 905, 2515, 98, 2119, 2217, 1280, 2519, 3299, 401, 1915, 1000, 2132, 573, 624, 2950, 796, 83, 771, 294, 2312, 1103, 1849, 130, 2196, 1443, 2952, 540, 2864, 1002, 3207, 584, 1551, 1877, 1920, 2134, 2278, 1734, 2041, 61, 1992, 2646, 882, 3292, 1004, 2052, 819, 442, 1200, 332, 421, 2673, 649, 1684, 2614, 547, 28, 1516, 707, 2314, 53, 2080, 316, 2739, 567, 2911, 1484, 1136, 2332, 3046, 851, 1545, 923, 1953, 653, 1387, 2179, 2609, 1078, 674, 542, 3099, 2884, 2316, 1077, 610, 2495, 3266, 1303, 2507, 455, 516, 3318, 1410, 3148, 478, 84, 2400, 2113, 921, 545, 1383, 32, 1020, 489, 1753, 3216, 3218, 2920, 1971, 729, 2328, 18, 2835, 450, 2863, 2973, 1227, 3089, 2672, 1170, 1142, 2100, 2296, 1677, 1656, 2931, 587, 3009, 790, 569, 577, 2741, 1615, 2295, 1125, 647, 2959, 582, 3274, 1031, 95, 1693, 339, 2900, 548, 2063, 440, 3051, 2912, 1671, 3317, 1727, 215, 3047, 285, 70, 154, 272, 3017, 1474, 2498, 1572, 1678, 2557, 2608, 1043, 56, 2269, 104, 1464, 638, 1525, 1426, 110, 1967, 1704, 1865, 1472, 476, 1965, 564, 1046, 2993, 2258, 3002, 2631, 3153, 2028, 780, 745, 1701, 3257, 1964, 2613, 3026, 2840, 822, 929, 2329, 879, 1217, 1921, 2218, 2095, 1561, 2120, 1235, 2921, 1768, 472, 1817, 2682, 2636, 721, 3049, 1477, 1307, 412, 2740, 2848, 3048, 2977, 1778, 928, 2283, 563, 676, 1353, 3217, 2657, 
336, 2066, 2580, 169, 3025, 716, 561, 2745, 2335, 1384, 2581, 379, 541, 3050, 2245, 1813, 1027, 1180, 703]",1,50.0,30.0,350.0,0.5220919747520288 -"[2110, 1130, 2527, 1821, 549, 1012, 2580, 347, 1644, 1587, 1448, 1853, 2361, 2420, 2745, 1064, 411, 144, 1492, 2230, 1399, 1704, 968, 703, 2299, 1415, 1430, 242, 2650, 152, 2815, 2295, 2479, 110, 1097, 1625, 2220, 812, 1965, 542, 232, 2292, 1453, 1410, 1591, 2062, 851, 2608, 1019, 2819, 1778, 860, 2113, 2224, 1971, 1251, 2830, 2621, 1666, 1582, 677, 2276, 928, 55, 1966, 131, 2638, 579, 3047, 2269, 27, 1272, 1531, 561, 1214, 2585, 1580, 300, 2429, 1474, 1239, 1030, 2373, 1835, 2799, 2451, 491, 2632, 1429, 1318, 875, 1836, 2581, 1131, 1665, 1875, 1464, 2726, 138, 1275, 319, 56, 1672, 2663, 492, 2622, 2814, 2886, 2681, 1180, 177, 2450, 10, 2797, 3050, 2422, 1989, 541, 170, 2362, 414, 3207, 2405, 17, 419, 197, 2771, 1162, 115, 1609, 472, 2249, 2448, 1335, 624, 915, 229, 654, 3049, 2443, 1004, 1973, 2162, 1943, 2134, 2844, 1768, 468, 2421, 1630, 2782, 2447, 1422, 3294, 1771, 2245, 1398, 2331, 2666, 2497, 2083, 2063, 3153, 634, 2452, 2095, 1052, 2488, 1959, 661, 548, 2122, 1522, 1585, 584, 2444, 2951, 745, 333, 2649]",1,,,, -"[851, 396, 865, 661, 2598, 31, 2068, 2774, 1759, 2264, 1565, 2638, 468, 1492, 529, 1563, 2388, 2863, 1958, 1625, 2114, 2621, 930, 2217, 1331, 229, 351, 2520, 2674, 280, 136, 1731, 2422, 924, 2507, 237, 3002, 1770, 2559, 1836, 1655, 1096, 427, 2128, 1190, 154, 2404, 2389, 3162, 850, 1052, 2597, 1472, 388, 807, 1399, 450, 2519, 2919, 1517, 1580, 670, 148, 2361, 2667, 2925, 2166, 1034, 2224, 1, 1725, 2417, 1087, 2599, 3215, 796, 2416, 158, 548, 38, 2868, 2196, 912, 460, 493, 1997, 286, 2933, 2418, 2844, 886, 1342, 2073, 1214, 2429, 2596, 2578, 1019, 1587, 492, 314, 2556, 2151, 3263, 2655, 1328, 1398, 1466, 3000, 847, 2592, 585, 941, 1281, 1429, 2254, 1178, 2331, 1724, 1075, 1561, 3030, 43, 1835, 138, 586, 1605, 2063, 2822, 303, 3045, 1853, 772, 1009, 2339, 236, 2210, 1654, 2396, 268, 28, 2397, 835, 3186, 
2719, 2676, 157, 1071, 993, 2649]",1,,,, -"[2406, 1299, 443, 1828, 857, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077, 2077]",0,,,, -"[2631, 885, 268, 1864, 1633, 1771, 1580, 611, 1591, 429, 213, 2880, 1678, 2622, 2427, 2291, 1290, 3324, 2927, 541, 895, 1647, 675, 1587, 719, 2838, 1961, 2422, 2410, 2144, 2125, 351, 2128, 2488, 2205, 2585, 307, 2721, 770, 2250, 2677, 2074, 2645, 1399, 548, 1222, 529, 1835, 2097, 2421, 12, 1480, 2122, 2239, 2362, 2654, 2759, 2649, 2487, 2650, 2017, 755, 61, 395, 2664, 298, 2605, 1326, 2245, 1736, 1958, 2434, 880, 1230, 2509, 2313, 2264, 2828, 2230, 27, 2183, 1879, 2510, 2782, 428, 1130, 2059, 3093, 2435, 353, 1049, 131, 2511, 2217, 3187, 1620, 2875, 468, 1665, 2361, 2676, 575, 976, 1538, 2977, 2633, 303, 1617, 2797, 2147, 1877, 10, 1097, 2638, 2626, 2321, 492, 1415, 2868, 3186, 42, 532, 982, 1012, 2846, 3144, 1176, 2537, 1666, 2316, 151, 1261, 1190, 764, 2110, 2023, 768, 1980, 2331, 55, 2655, 1918, 1219, 894, 1255, 2399, 414, 2667, 2113, 454]",1,,,, -"[1611, 2071, 1611, 2071, 1611, 2071, 1611, 2071, 1611, 2071, 1611, 2071]",0,,,, -"[919, 1425, 919, 1425, 919, 1425, 919, 1425, 919, 1425, 919, 1425, 919, 1425, 919, 1425, 919, 1425, 
919, 1425]",0,,,, -"[3095, 3094, 1314, 1314, 3094, 3095, 1314, 3094, 3095, 1314, 3094, 3095, 1314, 3094, 3095, 1314, 3094, 3095, 1314, 3094, 3095, 1314, 3094, 3095, 1314, 3094, 3095, 1314, 3094, 3095]",0,,,, -"[1625, 2062, 1721, 1030, 2354, 860, 2734, 2259, 1836, 1531, 2450, 1097, 2497, 3296, 2452, 2752, 2473, 485, 2451, 142, 2479, 300, 177, 624, 579, 2670, 1167, 212, 332, 2449, 2920, 783, 1654, 2447, 1493, 465, 544, 2657, 1200, 3076, 135, 1912, 1220, 2661, 2448, 1672, 1971, 3294, 1932, 1002, 1253, 1225, 28, 2085, 1004, 819, 201, 1112, 1052, 234, 2898, 2648, 1410, 2228, 2345, 155, 1131, 1435, 2836, 1939, 411, 942, 1701, 3084, 13, 1366, 419, 698, 1211, 156, 2391, 745, 3146, 2711, 1334, 2662, 106, 2573, 2814, 2278, 2373, 3174, 1105, 2243, 455, 1915, 154, 2951, 247, 2815, 3148, 649, 542, 777, 1265, 547, 1046, 1256, 2950, 721, 2910, 2952, 1246, 2080, 2312, 2816, 3153, 2609, 2776, 3193, 1894, 1485, 2835, 2495, 2005, 1965, 1077, 2973, 1039, 2383, 1737, 2547, 2464, 2584, 1048, 222, 3017, 2538, 2780, 2643, 2739, 2249, 1311, 2921, 2027, 2292, 563, 587, 545, 2531, 3217, 2746, 642, 1143, 80, 2100, 1020, 2283, 3221, 610, 676, 2683, 2515, 1561, 1383, 2519, 922, 3257, 1585, 567, 647, 1677, 1525, 83, 476, 2316, 1693, 582, 2196, 2134, 440, 1221, 1516, 1678, 3218, 56, 648, 489, 3039, 2608, 2295, 3216, 2258, 654, 1668, 2054, 3318, 1945, 2346, 2730]",1,,,, -"[1003, 1801, 1216, 1216, 1801, 1003, 1216, 1801, 1003, 1216, 1801, 1003, 1216, 1801, 1003, 1216, 1801, 1003, 1216, 1801, 1003, 1216, 1801, 1003, 1216, 1801, 1003, 1216, 1801, 1003]",0,,,, -"[665, 2256, 665, 2256, 665, 2256, 665, 2256, 665, 2256, 665, 2256, 665, 2256, 665, 2256, 665, 2256, 665, 2256]",0,,,, -"[1131, 2299, 242, 2032, 2418, 622, 2564, 930, 2196, 2527, 468, 2665, 2416, 1122, 1605, 796, 2860, 347, 154, 1875, 2415, 847, 875, 1281, 307, 2991, 1176, 1666, 213, 719, 1918, 1453, 1943, 1130, 691, 1708, 3073, 931, 1587, 2925, 2226, 237, 627, 1230, 2200, 131, 2597, 2097, 2281, 2166, 2861, 2422, 1958, 1190, 2598, 1853, 2217, 
1064, 1019, 1025, 2774, 2859, 1821, 388, 152, 2421, 395, 2519, 2637, 912, 2621, 2596, 2429, 2147, 1567, 1940, 1214, 414, 2404, 686, 2882, 1398, 529, 2786, 1591, 402, 241, 1617, 1301, 2107, 2162, 1609, 2680, 2435, 373, 1633, 1997, 1582, 2110, 1427]",0,,,, -"[730, 2986, 2987, 2985, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730, 2986, 730]",0,,,, -"[229, 622, 1708, 895, 2416, 912, 2282, 2597, 616, 314, 1012, 307, 2637, 27, 2023, 2021, 1013, 16, 2415, 1117, 886, 2110, 2775, 887, 1587, 1290, 2331, 237, 1230, 1644, 2182, 850, 1176, 303, 937, 2490, 2417, 549, 794, 492, 755, 1357, 2785, 1666, 2585, 2418, 2598, 2361, 373, 144, 1480, 1731, 113, 1605, 1343, 453, 1301, 2166, 2651, 993, 931, 1567, 2435, 528, 2655, 2596, 965, 2650, 2925, 2257, 2665, 2254, 1219, 1943, 2204, 414, 150, 2782, 2396, 962, 2073, 2786, 2827, 2427, 1956, 2784, 2057, 1019, 847, 770, 2728, 2230, 2860, 2621, 2398, 2649, 1232, 1214, 1760, 557]",0,,,, -"[1992, 1087, 1046, 1620, 584, 293, 1256, 1874, 3215, 545, 3026, 2259, 2153, 2817, 541, 1052, 638, 808, 2218, 2313, 2020, 1919, 878, 1789, 2562, 2347, 1562, 2189, 465, 483, 83, 1865, 294, 2531, 921, 455, 1211, 3318, 1449, 2278, 2312, 2977, 860, 2734, 1039, 1077, 135, 1383, 540, 1545, 2770, 3046, 2252, 1585, 1324, 332, 2819, 618, 825, 923, 2462, 1307, 595, 3018, 1961, 819, 1296, 3299, 3274, 2314, 1791, 494, 812, 1945, 535, 1536, 2184, 2495, 2440, 1239, 1225, 304, 3017, 1813, 2602, 2816, 641, 3039, 2779, 2315, 2066, 28, 1265, 1000, 1912, 2497, 911, 1446, 649, 654, 3294, 1437, 2910, 2921, 1102, 1170, 637, 1080, 555, 771, 1678]",0,,,, -"[1214, 2782, 887, 180, 351, 2639, 220, 136, 1238, 2655, 1758, 2640, 2641, 
2230, 1422, 2398, 2797, 2405, 2519, 347, 2435, 622, 2829, 59, 2527, 894, 1918, 1903, 2507, 1112, 1567, 2558, 1257, 1542, 796, 2720, 395, 2596, 2638, 2264, 1096, 2097, 263, 416, 1157, 1009, 1605, 1439, 122, 2032, 450, 2506, 242, 661, 715, 2234, 269, 1591, 2565, 827, 16, 2073, 1064, 2443, 25, 2057, 10, 2063, 1582, 2564, 2597, 2967, 2119, 2166, 691, 2420, 1668, 2226, 875, 413, 2478, 2882, 464, 1670, 1275, 968, 770, 755]",0,,,, -"[694, 2965, 2286, 694, 694, 694, 694, 694, 694, 694, 694]",0,,,, -"[2021, 1644, 835, 453, 807, 492, 2023, 307, 549, 3197, 27, 1219, 941, 1009, 1131, 2782, 33, 2404, 585, 1343, 2205, 2398]",0,,,, -"[1940, 1725, 715, 886, 993, 1647, 2182, 147, 2774, 850, 1655, 661, 2506, 2925, 2621, 1759, 2097, 2655, 1731, 2481, 691, 1230]",0,,,, -"[2488, 887, 2681]",0,,,, -"[2829, 2633]",0,,,, -"[2919, 1, 486, 158, 1625, 1845, 44, 468, 3215, 138, 1836, 1429, 1875, 624, 83, 2292, 1039, 1531, 860, 923, 3296, 2531, 2048, 300, 2059, 2497, 1097, 2673, 1239, 2952, 338, 156, 579, 106, 1879, 24, 1849, 1737, 95, 2993, 12, 150, 1671, 819, 2450, 1227, 567, 221, 1939, 2776, 2710, 142, 2220, 2400, 2642, 316, 559, 1467, 110, 3089, 2931, 1932, 2448, 1156, 822, 1685, 455, 285, 1677, 2189, 2449, 778, 411, 2154, 1704, 2933, 3294, 812, 3202, 928, 3017, 2447, 745, 2269, 17, 3277, 56, 2427, 649, 1000, 2313, 2920, 2646, 2819, 3050]",1,,,, -"[887, 1731, 2527, 1214, 180, 1238, 2597, 1275, 622, 351, 1655, 2398, 755, 2435, 1112, 1350, 2234, 1758, 136, 1843, 2728, 2443, 2506, 1721, 1587, 770, 2166, 2405, 1417, 2404, 2882, 691, 2355, 827, 242, 16, 1422, 395, 2782, 154, 2226, 2097, 972, 464, 2633, 398, 1666, 1558, 220, 2558, 3027, 1009, 2519, 43, 2829, 1633, 2230, 2110, 2675, 1943, 985, 1668, 1918, 705, 1759, 10, 2331, 1096, 2426, 1399, 2670, 1130, 2032, 1605, 2596, 2421, 968, 715, 1064, 59, 268, 1989, 2422, 2420, 2672, 2774, 347, 122, 25, 661]",0,,,, -"[2398, 2829, 1943, 755, 2234, 1157, 2097, 1009, 153, 2774, 416, 993, 2645, 1417, 2967, 2226, 796, 1605, 398, 347, 2564, 1275, 1214, 2435, 
691, 1064, 2633, 2483, 1422, 2478, 2119, 1843, 1575, 79, 2166, 2519, 59, 220, 2882, 435, 725, 1251, 2489, 2782, 985, 2527, 930, 1130, 2711, 1965, 3086, 1666, 1357, 677, 982, 557, 1112, 1041, 1013, 2718, 450, 2063, 1019, 273, 2360, 113, 509, 2506, 1567, 559, 152, 2205, 242, 839, 968, 154, 2710, 2399, 1591, 2507, 144, 1696, 1570, 1875, 2331, 1736, 150, 1922, 12, 2964]",0,,,, -"[2899, 1587, 203, 847, 1666, 194, 2331, 1399, 2110, 3051, 2886, 1130, 144, 31, 1989, 2404, 2422, 1517, 2681, 43, 1398, 1956, 2205, 2621, 2360, 517, 1011, 2638, 1965, 1759, 641, 2426, 2420, 506, 2900, 1094, 2632, 2520, 875, 1583, 435, 2147, 661, 2399, 2427, 2421, 152, 1070, 1341, 1429, 946, 1529, 414, 1875, 2680, 535]",0,,,, -"[1097, 486, 2331, 43, 1759, 1625, 1115, 1130, 2421, 1989, 2110, 2147, 1429, 1587, 144, 648, 2573, 1399, 3134, 1613, 3236, 152, 1656, 2062, 1398, 441, 3228, 3174, 325, 1048, 2944, 3244]",0,,,, -"[1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215, 1836, 215]",0,,,, -"[1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703, 1097, 2703]",0,,,, -"[2016, 2569, 3250]",0,,,, -"[637, 1836, 1678, 624, 582, 1447, 2266, 670, 1335, 3046, 751, 865, 777, 573, 2844, 333, 304, 215, 915, 2448, 2245, 808, 2066, 379, 1387, 1660, 1002, 729, 2220, 2921, 2447, 1932, 1939, 2672, 469, 1891, 135, 1478, 1620, 2973]",0,,,, -"[968, 1625, 1647, 2404, 2639, 428, 2625, 2264, 1429, 1157, 12, 2416, 585, 661, 414, 2475, 1131, 43, 2097, 912, 1668, 2774, 2564, 796, 40, 1918, 1965, 1480, 2488, 2427, 1351, 231, 450, 1467, 1567, 2942, 719, 2107, 
2415, 361, 559, 158, 1940, 2621, 2522, 2585, 453, 2478, 162, 1439, 1214, 2675, 220, 627, 2667, 229, 2839, 340, 691, 2073, 2483, 1398, 715, 549, 2282, 2782, 622, 2596, 1257, 2797, 2147, 2444, 2626, 1070, 1635, 137, 1731, 138, 486, 1350, 965, 2128, 240, 1012, 1, 2565, 894, 398, 152, 1453, 2681, 2181, 2021, 3086, 435, 2933, 2597, 2820, 2377, 27]",0,,,, -"[968, 755, 1647, 1117, 2416, 2115, 1157, 2664, 147, 2023, 2398, 807, 495, 886, 59, 1034, 1834, 2967, 2404, 1943, 2166, 2655, 2598, 2625, 2651, 559, 2097, 1351, 289, 1630, 2564, 1591, 396, 912, 2488]",0,,,, -"[977, 2699]",0,,,, -"[2197, 2084]",0,,,, -"[3017, 582, 1531, 624, 1585, 2316, 3046, 28, 1410, 865, 1426, 2447, 1046, 1447, 2462, 2296, 2249, 531, 1033, 777, 3039, 1478, 1210, 670, 638, 1939, 1288, 469, 1472, 393, 333, 1813, 1383, 304, 555, 294, 920, 915, 1116, 406, 92, 265, 808, 1387, 2066, 1002, 2921, 2497, 2672, 790, 1660, 1932, 1891, 921, 1620, 2531, 541, 462, 1384, 2817, 476, 2932, 2952, 3274, 1578, 783, 1562, 2347, 3153, 2085, 280, 2041, 2020, 879, 547, 647, 653, 1239, 2811, 2993, 911, 504, 2844, 430, 2973, 2673, 2184, 1211, 1233, 778, 135, 489, 83, 2259, 2218, 1445, 1791, 569, 573, 317, 2100, 3294, 1684, 751, 2977, 3026, 419, 721, 745, 3215, 359, 3284, 1865, 1683, 2646, 405, 587, 1256, 2770, 142, 332, 3009, 2059]",0,,,, -"[1518, 2267, 2049, 1109, 574]",0,,,, -"[3316, 1181]",0,,,, -"[1422, 2639, 968, 154, 1112, 2622, 1599, 2925, 1117, 1157, 1429, 1176, 2047, 2405, 2023, 2416, 1131, 2121, 495, 2299, 1275, 147, 585, 2182, 661, 454, 2475, 59, 807, 2967, 43, 2522, 3030, 2097, 542, 912, 1915, 2282, 131, 796, 2564, 1918, 2244, 2204, 2295, 1480, 1351, 237, 1958, 2488, 2427, 2651, 241, 414, 29, 2230, 450, 1668, 1965, 2415, 559, 346, 748, 2655, 1582, 1943, 3324, 2585, 2926, 402, 2542, 2842, 1532, 220, 1630, 2435, 2783, 993, 2782, 1301, 2483, 1439, 2667, 302, 691, 2507, 1591, 2625, 549, 775, 1414, 1607, 622, 229, 1214, 2839, 719, 2596, 2506, 2861, 361, 627, 1020, 137, 2821, 1910, 2361, 2253, 1025, 1070, 2444, 965, 
1635, 453, 633, 1658, 1724, 2116, 398, 1290, 2525, 1262, 236, 895, 1731, 2107, 395, 138, 894, 1567, 181, 1071, 2021, 2064, 2181, 2823, 2827, 2828, 1012, 493, 1230, 2128, 686, 1122, 1342]",0,,,, -"[3042, 1429, 254, 2819, 1143, 1097, 1822, 1782, 2367, 3193, 1992, 1030, 3011, 3133, 3046, 1868, 690, 2366, 1677, 672, 2347, 2306, 1903, 1411, 2628, 1125, 325, 1613, 1093, 3249, 900, 1031, 776, 441, 1927, 3286, 3132, 12, 1186, 648, 2550, 1808, 556, 430, 2368, 2157, 3293, 2067, 2686, 2567, 3234, 168, 1804, 661, 1772, 2245, 1202, 222, 582, 1334, 43, 1363, 1625, 509, 266, 2404, 921, 188, 1965, 2619, 1989, 1918, 3220, 2939, 2488, 2655, 2531, 577, 1172, 3298, 2427, 2030, 559, 414, 1426, 2541, 1235, 119, 2356, 1467, 83, 3107, 824, 1809, 3174, 1818, 152, 1791, 2812, 1655, 2732, 2551, 107, 2520, 651, 3068, 1684, 716, 1971, 3160, 2546, 158, 2621, 263, 2507, 2478, 993, 26, 2673, 1309, 1039, 2354, 2733, 722, 846, 2558, 969, 3228, 669, 486, 1398, 1667, 1863, 2675, 347, 1435, 708, 132, 2646, 2058, 3271, 2579, 227, 884, 2884, 3235, 3028, 2043, 798, 2062, 1853, 2147, 2422, 604, 2730, 2911, 465, 2421, 33]",1,,,, -"[1531, 3017, 582, 1620, 624, 1447, 2819, 541, 2771, 28, 2646, 2643, 1410, 1967, 1437, 865, 1239, 2973, 87, 860, 3039, 2447, 642, 879, 670, 3046, 304, 3223, 531, 2269, 1383, 777, 1170, 1478, 1210, 1002, 638, 2497, 1939, 469, 1288, 1472, 1789, 393, 1046, 555, 519, 1485, 2863, 333, 294, 915, 1116, 1813, 379, 1220, 265, 2245, 3318, 808, 1387, 2066, 2921, 729, 2672, 790, 1660, 1656, 2020, 32, 472, 106, 1932, 2752, 1891, 921, 169, 2547, 2531, 95, 476, 1426, 2932, 2952, 2296, 3274, 2347, 2063, 2613, 783, 1578, 1562, 1551, 3153, 2154, 2085, 2817, 2249, 547, 653, 2811, 911, 1818, 3049, 504, 2844, 2184, 1233, 778, 2673, 1211, 1455, 1418, 516, 135, 489, 1791, 922, 247, 2259, 2218, 62, 569, 573, 3215, 317, 2993, 3294, 83, 1684, 751, 2977, 3026, 419, 721, 822, 1307, 1683, 405, 587, 2100, 2054, 1256, 2316, 142, 745, 3009, 2088, 205, 2283, 2770, 2059, 1449, 2835, 3048, 3050, 2134, 2950, 2449, 
647, 2391, 2515, 3018, 17]",0,,,, -"[1422, 2489, 968, 1625, 2519, 1429, 887, 2506, 1112, 361, 1097, 136, 2622, 1034, 460, 1447, 2264, 1747, 616, 2633, 2925, 755, 428, 1275, 12, 1176, 1157, 2664, 1214, 894, 274, 167, 2632, 2416, 2676, 2211, 1517, 1128, 495, 585, 1853, 886, 1668, 930, 1117, 661, 2404, 27, 454, 2097, 59, 3030, 2295, 1591, 2522, 43, 2598, 2564, 1958, 2205, 1915, 2775, 1965, 237, 839, 1351, 1630, 2651, 1989, 1918, 2565, 1758, 768, 807, 2488, 1480, 2230, 677, 2655, 2964, 2204, 622, 559, 2483, 3324, 2427, 231, 1417, 302, 2774, 1910, 450, 2196, 241, 2842, 1580, 1467, 162, 1473, 542, 1020, 2390, 895, 719, 29, 1414, 2783, 715, 152, 2415, 1301, 691, 2107, 402, 346, 748, 157, 2883, 1655, 2073, 1532, 912, 131, 2782, 2585, 775, 1343, 2435, 1940, 2507, 2520, 2621, 2625, 993, 1439, 2542, 158, 2478, 2546, 263, 1257, 2797, 1568, 2311, 229, 2839, 2667, 137, 220, 627, 2823, 1398, 347, 2967, 1262, 2861, 236, 2362, 486, 2675, 2558, 2592, 151, 2586, 340, 453, 2282, 549, 1607, 965, 1724, 2226, 2185, 2147, 2422, 2361, 2596, 398, 2253, 3308, 588, 872, 2421, 590, 1731]",0,,,, -"[1518, 2267, 2049, 1109, 574]",0,,,, -"[590, 1822, 1758, 2900, 818, 2522, 2506, 2645, 1903, 2091, 906, 2639, 1362, 2264, 269, 2206, 1657, 180, 525, 509, 725, 3274, 1910, 872, 1483, 445, 413, 3042, 2428, 1795, 680, 464, 768, 1821, 1665, 2200, 510, 2546, 1875, 3236, 2494, 2640, 2217, 468, 2183, 54, 1655, 136, 197, 2000, 650, 2638, 394, 3209, 2125, 2119, 2868, 2565, 152, 2324, 1625, 347, 2527, 2052, 2493, 1996, 435, 651, 850, 213, 3073, 930, 2861, 1417, 2626, 1825, 263, 1130, 2835, 2719, 1275, 1202, 1220, 79, 1064, 2574, 1580, 2097, 2429, 691, 2870, 1094, 2836, 428, 2927, 3076, 298, 1634, 2420, 373, 31, 2564, 1740, 1761, 2205, 186, 1300, 2360, 661, 1965, 2455, 416, 2399, 3308, 2519, 2775, 966, 268, 414, 3014]",1,,,, -"[590, 1822, 1758, 2900, 818, 2522, 2506, 2645, 1903, 2091, 906, 2639, 1362, 2264, 269, 2206, 1657, 180, 525, 509, 725, 3274, 1910, 872, 1483, 445, 413, 3042, 2428, 1795, 680, 464, 768, 
1821, 1665, 2200, 510, 2546, 1875, 3236, 2494, 2640, 2217, 468, 2183, 54, 1655, 136, 197, 2000, 650, 2638, 394, 3209, 2125, 2119, 2868, 2565, 152, 2324, 1625, 347, 2527, 2052, 2493, 1996, 435, 651, 850, 213, 3073, 930, 2861, 1417, 2626, 1825, 263, 1130, 2835, 2719, 1275, 1202, 1220, 79, 1064, 2574, 1580, 2097, 2429, 691, 2870, 1094, 2836, 428, 2927, 3076, 298, 1634, 2420, 373, 31, 2564, 1740, 1761, 2205, 186, 1300, 2360, 661, 1965, 2455, 416, 2399, 3308, 2519, 2775, 966, 268, 414, 3014, 1112, 796, 2416, 2473, 1471, 2244, 2089, 2199, 2520, 1128, 2281, 1492, 1105, 1346, 1054, 2967, 533, 2122, 1395, 2718, 2886, 3030, 1070, 76, 1091, 935, 2869, 2720, 1019, 1635, 2641, 755, 2507, 2226, 557, 839, 2854, 395, 351, 2311]",1,,,, -"[1625, 2416, 2396, 59, 2605, 691, 2597, 968, 2103, 1708, 1517, 931, 611, 904, 2828, 2166, 962, 43, 2651, 154, 2321, 1096, 2398, 2371, 2632, 1567, 2737, 528, 871, 575, 2427, 887, 1251, 2650, 138, 2181, 1013, 2827, 2428, 54, 616, 390, 1157, 2826, 1027, 2481, 2784, 1759, 16, 1699, 2439, 1605, 453, 2510, 2655, 2417, 620, 314, 1918, 1875, 2649, 1731, 1097, 2064, 2418, 2254, 622, 2105, 1965, 468, 113, 715, 1415, 2320, 1131, 51, 2444, 2405, 2369, 978, 1117, 2925, 2586, 303, 1008, 512, 2475, 2234, 1453, 2378, 965, 1019, 744, 2413, 27, 2507, 137, 941, 2829, 1070, 1112, 157, 2484, 439, 1330, 2621, 1009, 1412, 1668, 577, 2370, 353, 1989, 1238, 1429, 835, 2738, 2360, 565, 1034, 2596, 435, 3138, 2598, 1663, 807, 1015, 1373, 2419, 1343, 1219, 2023, 2295, 2021, 2426, 1557, 289, 2681, 2237, 2115, 2144, 2783, 1136, 395, 2964, 886, 2886, 422, 1576, 2017, 2230, 3128, 2434, 1319, 2622, 2080, 1956, 1658, 2585, 2633, 1078, 2410, 894, 1409, 2821, 684, 2874, 396, 864, 31, 2822, 2637, 2506, 1273, 850, 2284, 2196, 2053, 847, 220, 2205, 1987, 2509, 242, 1031, 1835, 775, 492, 30, 2208, 1644, 2479, 162, 246, 936, 1724, 2488, 2182, 527, 2490, 986, 1607, 882, 1754, 1620, 2883, 2404, 1422, 2519, 2257, 2825, 3160, 1774, 2765, 1702, 2063, 2785, 709, 2217, 1916, 217, 1619, 2599, 
2867, 1711, 2297, 1044, 466, 2820, 2250, 98, 2954, 1802, 189, 2174, 186, 1877, 542, 181, 1071, 2663, 2204, 3266, 930, 2299, 1603, 99, 1940, 2328, 2520, 386, 2116, 1359, 1419, 2511, 1948, 2875, 243, 1582, 1864, 136, 151, 1834, 2389, 495, 2323, 2062, 115, 2399, 2830, 2719, 2799, 609, 2139, 2824, 2718, 450, 1424, 999, 1264, 2122, 2032, 603, 272, 28, 2720, 2315, 446, 2823, 2664, 549, 1770, 1630, 134, 521, 1471, 1346, 1448, 2388, 2494, 2119, 799, 1971, 1817, 29, 781, 1091, 1406, 586, 805, 2244, 964, 241, 1590, 585, 1634, 2668, 2435, 874, 1020, 357, 2047, 1470, 2350, 1812, 1395, 633, 1414, 402, 313, 2861, 1672, 2373, 3164, 1355, 1155, 413, 682, 2351, 2984, 2866, 2375, 350, 177, 1030, 2571, 2455, 2354, 38, 2372, 2353, 2442, 2994, 1253, 1468, 1561, 49, 776, 881, 458, 877, 1915, 544, 2467, 1379, 2363, 2583, 2228, 2687, 940, 1867, 2358, 2233, 1540, 148, 1510, 2374, 761, 311, 2698, 170, 491, 1532, 2302, 2364, 1755, 1328, 2308, 2357, 2342, 2878, 2431, 1407, 2068, 3162, 2341, 2803, 2652, 1887, 576]",1,20.0,4.0,419.0,0.5013525698827773 -"[1453, 1875, 1956, 847, 2399, 347, 2217, 1736, 1625, 1817, 298, 1965, 2429, 2680, 2621, 2239, 1666, 414, 2519, 2633, 1940, 1877, 242, 2119, 273, 1429, 2299, 2488, 2422, 1609, 1096, 1153, 982, 1915, 31, 2714, 1591, 1532, 1064, 1759, 2527, 1492, 1019, 1097, 770, 1725, 2089, 268, 272, 2638, 2196, 499, 3324, 1821, 2200, 2718, 2625, 43, 1740, 2640, 882, 1708, 2632, 1587, 850, 796, 1131, 2667, 450, 2481, 136, 1944, 930, 1070, 2415, 2596, 2185, 2377, 2665, 2032, 147, 2964, 2598, 138, 2361, 2641, 2405, 1362, 937, 434, 1275, 2224, 695, 1232, 1665, 2715, 398, 2427, 2919, 113, 3266, 2886, 532, 263, 468, 395, 396, 875, 2768, 219, 800, 719, 715, 361, 2264, 1350, 2861, 1010, 1439, 2564, 1853, 895, 1176, 2483, 768, 2226, 1136, 2507, 2786, 2968, 1843, 2860, 2506, 241, 773, 2719, 2063, 559, 622, 229, 680, 2416, 3093, 529, 2281, 1561, 1918, 1961, 10, 1230, 2599, 1633, 1583, 1025, 627, 292, 2655, 661, 1020, 454, 413, 153, 886, 2597, 1605, 1205, 2967, 542, 131, 
2211, 1473, 693, 706, 2121, 1731, 340, 1415, 3086, 675, 2769, 2045, 2295, 302, 755, 2404, 748, 2926, 1668, 1872, 2435, 1980, 258, 1290, 1476, 237, 197, 2443, 2842, 894, 42]",1,22.0,2.0,405.0,0.5073639915840096 -"[1130, 616, 1567, 982, 1635, 2597, 268, 1580, 1034, 691, 1326, 965, 2977, 1758, 492, 494, 2110, 220, 622, 1647, 347, 1666, 2335, 144, 2362, 2316, 1097, 2235, 2245, 1708, 2626, 2422, 1415, 2830, 1480, 2564, 1605, 1117, 1112, 1835, 1736, 1070, 2097, 2401, 2331, 894, 2887, 1875, 2829, 1591, 2527, 314, 1989, 2506, 2586, 2417, 10, 2481, 2487, 2360, 637, 1879, 2421, 1453, 627, 680, 466, 2113, 1517, 2089, 429, 755, 2443, 453, 2444, 3275, 1362, 1473, 69, 904, 361, 2488, 2405, 744, 152, 548, 2323, 1399, 304, 847, 2475, 377, 1417, 1678, 2234, 1484, 2782, 661, 2774, 1699, 1044, 194, 2622, 1157, 138, 1582, 2562, 2398, 2483, 864, 3014, 2435, 1587, 3138, 1853, 135, 2859, 2592, 1943, 2522, 2820, 351, 2361, 1774, 2650, 1877, 1655, 1178, 2324, 768, 2645, 237, 1644, 1019, 1630, 2440, 1422, 2764, 2205, 1330, 2783, 2964, 2314, 2927, 428, 677, 2765, 1578, 153, 229, 1398, 1214, 1698, 131, 62, 2413, 1802, 1251, 151, 468, 1351, 147, 1821, 1812, 2224, 2312, 1012, 541, 2466, 2651, 273, 2017, 2166, 1609, 1843, 850, 396, 808, 2282, 2420, 2204, 1633, 1668, 28, 2649, 565, 2257, 1620, 1009, 1429, 2059, 242, 2032, 2147, 1492, 414, 2371, 2247, 2919, 2370, 2509, 186, 2596, 2681, 2637, 398, 3187, 197, 2404, 98, 2162, 968, 59, 2882, 2638, 2063, 2217, 2264, 887, 1711, 2631, 61, 2720, 2230, 930, 2250, 1028, 1973, 307, 395, 1823, 1995, 2967, 1956, 796, 2632, 2226, 880, 764, 55, 1131, 2399, 2122, 875, 1625, 2313, 2239, 2839, 1447, 2621, 1275, 1042, 1665, 770, 1064, 2721, 2883, 715, 889, 1725, 1238, 1087, 2021, 3045, 1918, 2680, 1359, 3266, 2759, 1731, 2369, 350, 2339, 2633, 1008, 2174, 936, 2299, 272, 114, 157, 2605, 2797, 611, 298, 1015, 1395, 2718, 1096, 3144, 3026, 154, 2439, 3227, 1912, 2545, 413, 620, 1702, 2321]",1,22.0,3.0,419.0,0.5139765554553652 -"[95, 3046, 819, 83, 2745, 1422, 1768, 
2622, 649, 924, 2829, 1165, 928, 1842, 395, 2774, 2495, 273, 1567, 540, 3274, 2509, 2450, 397, 1157, 1522, 2097, 865, 2132, 136, 1410, 1865, 2174, 2234, 1736, 1853, 1200, 2284, 59, 2259, 2269, 768, 1615, 691, 1665, 2632, 2578, 542, 2462, 2488, 332, 2739, 2720, 2645, 3099, 1417, 110, 2166, 1251, 545, 2089, 2520, 2911, 2278, 2900, 2511, 2899, 2718, 3050, 1020, 220, 1836, 3294, 3217, 1009, 2297, 2633, 1965, 2028, 516, 130, 2598, 2844, 151, 1002, 3009, 1704, 1943, 1004, 1605, 2025, 2646, 599, 472, 1609, 703, 547, 1693, 2564, 408, 2950, 974, 982, 1678, 771, 3148, 1561, 3216, 1625, 624, 2295, 2218, 551, 822, 2840, 749, 2973, 1406, 2032, 2920, 1582, 396, 2447, 1214, 1778, 680, 2897, 1921, 670, 587, 489, 1545, 194, 2400, 880, 1477, 2631, 70, 2782, 319, 267, 234, 573, 3049, 298, 1311, 2864, 3047, 3017, 2596, 968, 412, 1466, 2636, 541, 1387, 1131, 1362, 3052, 2740, 3224, 468, 1408, 386, 98, 405, 1753, 1426, 2184, 921, 2217, 455, 2020, 1235, 2041, 2299, 2967, 936, 1046, 2226, 882, 1259, 2771, 115, 2519, 1973, 809, 2959, 1668, 138, 421, 2245, 154, 2335, 2134, 2399, 1967, 186, 2487, 582]",1,24.0,2.0,356.0,0.5488428013225127 -"[1875, 1453, 827, 2681, 1666, 1965, 2886, 468, 122, 2090, 2633, 1558, 1097, 414, 138, 1591, 2680, 1971, 1251, 347, 1625, 2331, 2424, 509, 221, 1093, 1030, 1202, 25, 2933, 2356, 1759, 1655, 988, 2427, 1804, 2460, 2426, 2057, 3042, 2622, 2461, 2488, 2638, 661, 1989, 1467, 2225, 486, 1130, 1492, 847, 982, 2421, 2425, 43, 2575, 2329, 1115, 968, 1821, 1, 604, 2949, 1613, 2621, 154, 1362, 8, 2429, 1019, 83, 2531, 2891, 2458, 178, 1879, 1829, 152, 2520, 2527, 3293, 2058, 1709, 158, 2062, 1235, 3206, 2561, 331, 1894, 2573, 12, 1064, 136, 411, 2224, 875, 1070, 923, 150, 1570, 3068, 2948, 31, 1737, 2065, 1096, 651, 525, 582, 900, 2459, 1853, 2648, 921, 1742, 578, 2685, 2404, 1609, 106, 2110, 2673, 3046, 1429, 3134, 2727, 2919, 3017, 1039, 1410, 1366, 2632, 1399, 648, 3028, 205, 1918, 597, 2647, 979, 559, 1170, 1587, 2189, 352, 1731, 3236, 3084, 2729, 1031, 3156, 
3027, 1575, 5, 2153, 2686, 2546, 2288, 2574, 2346, 144, 3160, 716, 1238, 119, 2147, 2328, 1358, 1677, 1218, 887, 1225, 1729, 3174, 465, 1072, 1196, 2507, 1309, 2812, 2501, 1350, 1398, 441, 93, 464, 2478, 1868, 132, 2728, 330, 2911, 2944, 3228, 989, 263, 2422, 2675, 2072, 3011, 2345, 2943, 615, 325, 3053, 705, 1656, 2646, 2939, 1033, 156, 1048, 2726, 2306, 1782, 1143, 2541, 383, 3244, 2169, 191, 1334, 1009, 96, 32, 2157, 2567, 2548, 690, 2112, 1542, 1983, 1184, 3184, 2655, 1791, 90, 3152, 2378, 118, 3133, 455, 1244, 954, 1667, 1085, 1685, 2603, 2549, 1818, 3132, 1426, 3108, 2055, 2819, 1728, 430, 249, 447, 1927, 1125, 2420, 942, 3139, 2030, 2367, 1186, 1234, 190, 370, 746, 1684, 2558, 269, 906, 2347, 3286, 2368, 722, 1863, 2366, 3235, 683, 3287, 1992, 244, 2568, 253, 374, 473, 846, 843, 884, 3193, 798, 1286, 1156, 123]",1,24.0,3.0,378.0,0.5611662158100391 -"[615, 1167, 1965, 2673, 2711, 13, 3029, 641, 2299, 2163, 670, 920, 2670, 2059, 83, 1423, 272, 3257, 1493, 1020, 2911, 155, 3018, 713, 471, 2646, 450, 540, 359, 751, 1915, 142, 2547, 106, 104, 2588, 1551, 542, 1303, 3046, 2734, 2811, 2519, 1768, 1566, 1561, 1105, 136, 3284, 1845, 2080, 80, 1785, 2648, 2710, 154, 2346, 2295, 1447, 1932, 3215, 2314, 1311, 1239, 412, 135, 595, 1097, 2507, 923, 2952, 819, 1849, 822, 1894, 2531, 1532, 2196, 1964, 882, 2339, 339, 1836, 1778, 2607, 2210, 505, 649, 1247, 2745, 2269, 1265, 1971, 44, 1738, 1052, 95, 110, 721, 2931, 3274, 2776, 2154, 878, 2816, 1671, 2315, 1039, 1485, 96, 316, 567, 2401, 922, 2993, 1545, 703, 17, 638, 2441, 2560, 2450, 2951, 2283, 201, 132, 338, 1727, 2498, 2400, 1654, 408, 674, 2332, 2312, 1000, 3047, 1043, 624, 1227, 577, 564, 1729, 1324, 1246, 2451, 2739, 2292, 392, 2218, 1225, 24, 2642, 1046, 1704, 1865, 300, 440, 1143, 3017, 2347, 2189, 1531, 745, 1572, 2835, 2726, 2497, 1895, 3296, 2054, 1737, 1221, 472, 2740, 1912, 2316, 3202, 812, 2672, 2950, 2452, 2683, 2608, 3089, 707, 397, 1829, 3048, 1170, 156, 729, 2819, 1408, 1087, 1477, 2249, 2584, 2134, 
2609]",1,26.0,2.0,356.0,0.5722873459573189 -"[2295, 542, 649, 1307, 2903, 2902, 882, 2911, 1246, 2222, 1836, 2299, 2901, 3046, 922, 923, 154, 450, 927, 83, 2899, 28, 3215, 550, 155, 2507, 465, 1772, 1303, 2196, 1964, 2275, 2519, 104, 634, 1561, 2210, 32, 3272, 2776, 2316, 1727, 1589, 1020, 2345, 2952, 1076, 2609, 3276, 2497, 860, 3006, 2642, 1865, 1768, 254, 2740, 2973, 2531, 2816, 1532, 332, 624, 106, 203, 1946, 338, 3009, 1039, 1068, 2646, 1932, 2312, 272, 3099, 2931, 833, 1135, 2132, 3107, 2815, 421, 1965, 1162, 2451, 1239, 1915, 2449, 1572, 1363, 674, 2085, 2189, 780, 1822, 3296, 56, 482, 1094, 2154, 3051, 1704, 1004, 1052, 1678, 2608, 3049, 2453, 3047, 412, 88, 2180, 156, 2447, 2840, 411, 472, 1474, 316, 1921, 698, 2283, 2249, 1265, 2452, 419, 142, 2921, 830, 1939, 991, 708, 2734, 2120, 3257, 1324, 2220, 2993, 809, 1542, 2498, 2448, 1225, 2951, 2745, 638, 2450, 1002, 300, 579, 3207, 819, 2672, 676, 703, 1903, 1477, 2835, 3053, 3050, 745, 2134, 440, 1737, 3017, 2218, 2292, 1551, 784, 339, 230, 1410, 2080, 2900, 721, 285, 356, 2814, 1221, 508, 1227, 1531, 44, 1967, 2631, 1170, 2920, 2950, 1845, 3156, 822, 1043, 2179, 1200, 564, 2420, 3274, 1087, 1677, 2332]",1,26.0,2.0,378.0,0.5145776976254884 -"[468, 138, 2383, 1311, 1251, 2752, 930, 545, 642, 2547, 392, 485, 2739, 1435, 2670, 3265, 968, 2951, 1912, 2354, 2316, 2464, 135, 2932, 2964, 2646, 2005, 1625, 2196, 1131, 1971, 2488, 2259, 2299, 2819, 2062, 3120, 2643, 777, 80, 670, 1721, 17, 300, 991, 465, 106, 212, 579, 136, 2373, 2950, 2673, 1956, 1672, 771, 177, 2479, 2450, 567, 1448, 2898, 2815, 923, 1030, 316, 2816, 1894, 1136, 332, 242, 2080, 2683, 2814, 1324, 654, 927, 3221, 812, 222, 2779, 1000, 205, 2740, 3039, 1225, 28, 1965, 3134, 3018, 610, 2346, 1170, 1052, 2661, 1220, 3133, 2730, 2531, 2292, 2734, 1033, 1531, 1112, 2451, 2765, 1103, 1046, 3294, 974, 247, 1002, 2473, 1384, 1366, 2278, 3099, 1135, 2672, 2486, 721, 879, 1561, 1253, 1891, 1322, 2495, 1485, 860, 2515, 411, 2519, 1387, 1813, 2296, 142, 3076, 
2228, 2203, 2449, 1162, 1946, 1353, 1654, 1472, 3146, 203, 2657, 2507, 1020, 3148, 2836, 2886, 2920, 2345, 18, 778, 1246, 700, 2052, 1039, 154, 2902, 1247, 1329, 1265, 2662, 819, 1068, 462, 2312, 2776, 2952, 1849, 1836, 624, 1410, 1200, 2035, 2973, 3296, 201, 1877, 792, 272, 2726, 3089, 634, 2557, 2302, 2412, 132, 547, 1474, 2085, 2921, 1970, 419, 155, 442]",1,26.0,2.0,405.0,0.5130748422001803 -"[2404, 2228, 151, 847, 2483, 2919, 542, 895, 2964, 491, 1561, 2638, 2021, 1290, 2645, 428, 1875, 1843, 1131, 1417, 398, 2023, 2032, 2933, 2230, 2181, 2295, 43, 351, 1190, 1609, 661, 307, 2887, 2585, 2667, 1965, 303, 835, 1647, 1591, 1176, 396, 347, 1666, 532, 2860, 881, 2299, 3093, 147, 2680, 2783, 158, 2866, 31, 10, 416, 1230, 2828, 1758, 27, 1262, 1015, 1587, 1020, 1894, 137, 1, 1453, 1570, 1012, 1940, 2506, 492, 1473, 1599, 1251, 2556, 554, 1821, 2361, 1997, 2651, 1064, 2254, 1019, 2822, 2583, 464, 2329, 930, 1219, 2626, 1644, 1009, 2064, 229, 150, 1853, 2466, 2784, 941, 1877, 2968, 2527, 486, 2786, 2217, 3030, 495, 887, 719, 1736, 2363, 2649, 131, 2861, 1070, 2315, 493, 255, 30, 3186, 2107, 937, 1429, 2520, 972, 1467, 2128, 2621, 1414, 2681, 770, 242, 768, 2586, 2002, 627, 509, 434, 1097, 2282, 2274, 1759, 799, 737, 1724, 2648, 2622, 1480, 978, 1670, 1590, 268, 1130, 875, 1212, 1582, 1071, 2435, 51, 2341, 2053, 2994, 2821, 453, 2427, 882, 2487, 544, 414, 2116, 807, 2886, 1343, 12, 2525, 450, 170, 2239, 525, 2444, 2302, 3275, 2633, 1703, 2397, 79, 1625, 1731, 1125, 586, 2488, 154, 2782, 2827, 1607, 2927, 2665, 1492, 3324, 2224, 1755, 273, 136, 1238, 744, 2632, 3162, 2825, 1031, 1665, 1658, 1663, 2089, 549, 982, 2235, 2883, 1214, 186, 1281, 181, 2119, 2455, 157, 49, 2115, 1834, 1362, 1617, 1034, 705, 138, 1376, 1817, 2507, 1273, 237, 2564, 2162, 755, 1050, 585, 1633, 221, 1995, 2398, 1879, 499, 2666, 1422, 1112, 298, 2830, 2823, 1532, 2650, 2429, 1915, 2226, 1025, 559, 468, 2625, 796, 528, 2785, 246, 2422, 2396, 1342, 872, 153, 2663, 1010, 2519, 302, 1328, 395, 775, 272, 
263, 197, 2824, 2277, 2196, 1630, 2967, 1427, 433, 3129, 1054, 340, 527, 2139, 1122, 213]",1,26.0,3.0,405.0,0.5341148181544936 -"[1417, 27, 2626, 2398, 1644, 3014, 2720, 542, 1020, 2829, 413, 416, 2645, 2775, 13, 1647, 2925, 428, 2032, 2230, 2782, 2718, 965, 1617, 157, 818, 1532, 241, 827, 1238, 1167, 2443, 1290, 272, 1414, 2174, 2711, 1480, 1758, 122, 1012, 2475, 755, 273, 1915, 2597, 1835, 3198, 2967, 2774, 464, 1731, 1112, 16, 2102, 2166, 2830, 2217, 535, 1292, 34, 887, 2506, 395, 1918, 2797, 2299, 2444, 153, 1415, 1105, 1943, 2507, 1583, 2759, 1157, 1871, 1591, 1333, 1582, 2048, 1986, 1493, 2315, 450, 1202, 1655, 2234, 1642, 1940, 691, 2404, 2650, 2584, 1214, 307, 1843, 1091, 1903, 268, 715, 1034, 2622, 1725, 2558, 1634, 501, 398, 2295, 2594, 2519, 147, 1094, 1635, 1257, 2649, 409, 1471, 1422, 453, 361, 1956, 220, 242, 151, 1696, 1811, 2097, 622, 2355, 1567, 2926, 124, 882, 1013, 2356, 29, 930, 2063, 2820, 796, 394, 2639, 1439, 1136, 2187, 263, 872, 2712, 2488, 1817, 10, 2196, 2598, 1944, 79, 402, 2859, 3266, 1023, 492, 2883, 1346, 229, 2710, 269, 1777, 2676, 1568, 2964, 1575, 2522, 2264, 2421, 1350, 186, 2596, 3114, 2162, 1633, 972, 162, 770, 993, 2882, 1995, 1473, 1308, 2119, 725, 2651, 115, 1542, 136, 2311, 30, 975, 1605, 2861, 2868, 2091, 1922, 1362, 1665, 3074, 1640, 224, 768, 1799, 131, 396, 1670, 2405, 1008, 298, 985, 2200, 3236, 2122, 1894, 1275, 2891, 3228, 1212, 1702, 1736, 351, 1064, 627, 1009, 2377, 434, 2226, 2435, 2478, 549, 1561, 2052, 347, 2033, 445, 59, 2927, 509, 2719, 2564, 2420, 968, 1128, 2494, 2455, 2655, 75, 2641, 1230, 1251, 231, 2199, 1761, 607, 2474, 2089, 982, 894, 1115, 875, 2527, 1570, 1879, 677, 1965, 1901, 1630, 2483, 2565, 1668, 180, 486, 1795, 2057, 468, 2680, 2900, 1558, 2239, 1760, 2361, 3042, 2887, 2632, 2638, 661, 154, 43, 1923, 2001, 1483, 1740, 557, 1070]",1,26.0,3.0,419.0,0.5788999098286745 -"[1251, 1358, 449, 2360, 737, 2632, 637, 2655, 938, 1131, 194, 1817, 330, 1096, 43, 1965, 968, 1875, 1731, 263, 486, 304, 494, 2919, 
2224, 843, 227, 1215, 808, 705, 2633, 2205, 2426, 661, 2681, 1666, 2680, 1609, 2631, 2104, 1522, 2562, 1543, 70, 582, 1009, 1087, 1826, 2728, 1238, 2829, 1771, 3184, 1, 1620, 886, 1453, 2550, 2488, 1230, 1151, 3026, 3206, 1130, 1350, 3027, 2428, 541, 928, 1019, 414, 3249, 2422, 2245, 2884, 2551, 158, 1863, 2440, 113, 1558, 3228, 1517, 2252, 2507, 847, 2675, 517, 2636, 2429, 1591, 1821, 3007, 722, 1625, 1399, 1583, 31, 1809, 347, 790, 188, 2057, 1334, 1856, 2732, 502, 982, 2774, 1286, 154, 2524, 1027, 3234, 136, 1156, 3236, 95, 2404, 2886, 2638, 62, 1467, 138, 887, 1575, 2527, 429, 468, 2577, 2097, 2305, 435, 2622, 2435, 2579, 1808, 325, 266, 1064, 1853, 2925, 1759, 2520, 3232, 410, 2619, 2621, 2546, 152, 993, 2790, 2478, 2933, 2399, 1847, 150, 500, 1879, 1141, 2492, 875, 969, 3298, 1097, 2733, 681, 2775, 1989, 2067, 1652, 2493, 1655, 191, 884, 25, 942, 672, 1070, 2095, 3106, 2335, 269, 1408, 1326, 2568, 1851, 2648, 1992, 2979, 525, 2331, 1890, 12, 221, 2147, 1411, 1402, 2928, 2378]",1,29.0,20.0,356.0,0.547039374812143 -"[2740, 2739, 1660, 2619, 2110, 2302, 266, 3017, 2814, 144, 1000, 405, 155, 1130, 3248, 544, 411, 1311, 2519, 254, 92, 2732, 2815, 1399, 2383, 1687, 2179, 2189, 2331, 3046, 2120, 32, 721, 392, 1671, 1398, 2733, 2507, 1545, 508, 1410, 922, 1587, 2902, 2422, 547, 3249, 2749, 1964, 1265, 819, 2401, 542, 1654, 1770, 2547, 2316, 2400, 683, 1992, 1965, 136, 882, 106, 1849, 1246, 413, 1474, 2196, 2100, 3009, 2210, 1002, 790, 441, 1704, 104, 2388, 379, 550, 3202, 421, 2776, 2551, 2332, 567, 130, 1915, 2067, 489, 300, 285, 440, 1119, 2347, 1625, 1324, 1162, 579, 1516, 505, 1677, 2312, 1693, 2295, 1384, 80, 2919, 2552, 3294, 486, 3220, 1543, 43, 2901, 1097, 1239, 2950, 1115, 1068, 2275, 822, 1967, 2315, 1561, 2629, 1387, 148, 1510, 70, 1532, 3193, 450, 2052, 2866, 2512, 2567, 3089, 2993, 2973, 142, 215, 2673, 2059, 471, 2558, 1170, 2899, 1303, 84, 2903, 1853, 1200, 159, 2147, 1531, 2249, 921, 1782, 3215, 582, 2066, 676, 3216, 1020, 3047, 2520, 2900, 2921, 
2299, 1759, 2861, 1989, 95, 38, 2153, 3099, 1544, 1429, 1423, 203, 1307, 154, 923, 1094, 2314, 2557, 2498, 1135, 311, 2984, 272, 2134, 2364, 1729, 2222, 2421, 1172, 2952, 316]",1,31.0,20.0,459.0,0.5296062518785692 -"[2898, 1435, 489, 1693, 405, 3216, 2179, 1660, 2950, 2100, 2769, 2316, 2730, 1387, 3099, 1921, 356, 2258, 88, 130, 1551, 647, 215, 582, 1989, 2110, 506, 1525, 1094, 18, 1130, 923, 2447, 1964, 1704, 3110, 3046, 104, 2202, 1666, 2729, 1678, 3207, 3218, 43, 2277, 2249, 1011, 2186, 563, 669, 1004, 676, 1398, 850, 3011, 2132, 1768, 3234, 2993, 1307, 2923, 234, 3080, 3017, 567, 843, 2745, 3051, 203, 2134, 1410, 886, 3236, 1002, 1464, 790, 1516, 1200, 1111, 2498, 2275, 1865, 472, 144, 879, 1759, 689, 1170, 721, 1583, 473, 2052, 2283, 587, 26, 1796, 2584, 1625, 110, 2332, 2524, 1701, 674, 1239, 637, 1353, 1967, 2920, 2840, 444, 3215, 2066, 1429, 1843, 1853, 2921, 269, 3156, 2931, 2391, 3148, 3217, 3274, 2422, 339, 1753, 638, 547, 2884, 421, 285, 703, 2721, 2980, 1476, 1097, 3235, 222, 499, 2631, 2218, 412, 1808, 780, 1477, 3221, 1184, 2655, 44, 653, 2628, 2440, 1215, 2182, 3108, 3228, 1983, 2768, 92, 2491, 2154, 2387, 921, 2269, 84, 2900, 1485, 3089, 1567, 1992, 2683, 1727, 2567, 1135, 541, 379, 227, 2642, 2421, 2973, 3050, 1875, 2427, 708, 564, 482, 3009, 1399, 1191, 244, 681, 2464, 2453, 1545, 2331, 1778, 338, 822, 2520, 1782, 3120, 304, 1474, 1218, 1043, 2400, 2685, 2441, 3109, 1652, 152, 118, 408, 1890, 2486, 1980, 2492, 2189, 3026, 32, 2312, 2120, 2005, 613, 2899, 1162, 1797, 2140, 1587, 2054, 3049, 550, 906, 1738, 1849, 1000, 1441, 2157, 1156, 1872, 634, 2608, 3133, 42, 494, 3048, 1845, 884, 2147, 2579, 263, 737, 969, 3068, 56, 577, 1172, 465, 809, 1460, 397, 1227, 429, 2548, 249, 194, 1068, 2568, 3010, 2225, 2065, 1027, 2901, 1087, 1384, 2551, 3206, 2025, 642, 1687, 808, 370, 2028, 2919, 927, 922, 2043, 95, 3052, 1826, 1868, 2669, 440, 1771, 517, 3047, 1671, 2562, 24, 2603, 3249, 2686, 2835, 2288, 2619, 991, 1572, 2903, 993, 2306, 2902, 2977, 954, 1715, 
449, 3293, 1903, 2550, 1946, 1620, 78, 447, 672, 792, 354, 2636, 578, 2378, 70, 54, 2733, 450, 1303, 410, 728, 2577, 1809, 705, 928, 188, 330, 2420, 3152, 3248, 3007, 502, 1817, 352, 62, 2549, 3028, 784, 1363, 1522, 2541, 1408, 3006, 2499, 2512, 500, 707, 266, 254, 1326, 2222, 729, 3220, 1772, 1543, 2493, 508, 2095, 1667, 2367, 1542, 3184, 3132, 3298, 525, 2557, 2647, 746, 1728, 2732, 2245, 2558, 1186, 2067, 2366, 1822, 325, 1244, 2573, 3194, 1286, 2575, 3286, 2944, 2335]",1,31.0,40.0,459.0,0.5121731289449954 -"[649, 2670, 3133, 83, 2646, 2473, 2911, 1721, 476, 2519, 1220, 3018, 2789, 882, 2734, 1247, 920, 2163, 1167, 2345, 540, 3039, 2196, 2153, 118, 3029, 80, 2134, 359, 923, 3017, 2547, 3099, 713, 2952, 2584, 90, 615, 2672, 159, 878, 2711, 2920, 638, 1829, 155, 142, 2507, 2950, 136, 2673, 13, 3046, 819, 1170, 2179, 1493, 92, 2710, 1753, 339, 1410, 2779, 1043, 1105, 234, 379, 1200, 542, 1894, 1671, 2295, 465, 2607, 641, 595, 3148, 1660, 96, 1566, 1551, 2080, 653, 547, 272, 564, 1002, 84, 674, 489, 3216, 2588, 106, 670, 2498, 478, 1418, 2447, 2609, 1280, 3284, 2776, 1727, 1004, 3218, 1227, 2249, 3089, 1246, 285, 2391, 1701, 294, 2739, 879, 1516, 2648, 563, 1693, 1020, 751, 405, 1311, 1097, 1915, 1384, 135, 450, 1525, 18, 676, 215, 582, 2283, 1677, 1561, 1532, 1965, 1052, 130, 2314, 922, 1423, 1256, 2313, 1865, 921, 32, 2218, 1447, 2315, 2931, 2189, 2740, 1046, 471, 2993, 2059, 2400, 1572, 316, 1912, 3048, 2299, 1654, 2608, 1895, 1477, 3215, 577, 56, 1464, 822, 1000, 154, 1239, 2921, 2316, 2811, 1221, 2332, 2383, 2112, 3274, 2054, 2339, 2095, 2560, 567, 991, 1307, 110, 104, 587, 2269, 1967, 2312, 1235, 1704, 2401, 2210, 1849, 1545, 2100, 790, 1265, 472, 3257, 1964, 3049, 2066, 2052, 2258, 647, 2973, 1387, 95, 1971, 2840, 2154, 28, 3217, 3052, 1474, 412, 2557, 1768, 1921, 1087, 3026, 1778, 2835, 1303, 1326, 2335, 3047, 707, 928, 1522, 2245, 780, 541, 2028, 637, 1027, 440, 429, 808, 392, 703, 3050, 2562, 1578, 1817, 2745, 2440, 2636, 2721, 1408, 2977, 194, 2025, 2900, 
397, 494, 70, 1678, 1620, 809, 1771, 408, 304, 3009, 1797, 1353, 1094, 2441, 2120, 729, 203, 3051, 2899, 2631, 2903, 721, 2901, 1068, 1946, 2902, 62]",1,12.0,29.0,303.0,0.5458370904718967 -"[2638, 13, 1167, 3079, 2264, 180, 1825, 1894, 1493, 1558, 311, 2710, 1333, 675, 1439, 2584, 2224, 2443, 2711, 416, 2428, 2639, 695, 1580, 1015, 2293, 1670, 1575, 274, 1705, 2675, 1275, 1105, 394, 43, 1587, 31, 25, 509, 2506, 607, 351, 2360, 1261, 1561, 1386, 875, 147, 3186, 113, 2228, 2964, 2147, 618, 1811, 2331, 292, 1625, 1591, 2474, 3266, 557, 773, 2861, 2838, 2429, 2183, 2426, 150, 2052, 1533, 1831, 794, 847, 1997, 3228, 1070, 1153, 2174, 542, 770, 413, 2880, 2633, 138, 273, 1958, 1517, 1136, 1570, 2626, 2480, 1965, 1483, 2542, 3027, 1956, 495, 680, 588, 75, 1995, 677, 2404, 839, 1901, 1532, 2715, 2868, 2478, 1875, 1755, 2621, 878, 1357, 12, 1419, 1238, 10, 1879, 1622, 2483, 3242, 661, 1412, 631, 1362, 2891, 402, 450, 152, 1758, 805, 1262, 2648, 1473, 29, 39, 2444, 881, 2632, 2827, 2151, 1635, 1759, 1115, 2089, 559, 3138, 2196, 255, 303, 2121, 2718, 1760, 686, 2645, 1986, 966, 52, 529, 1096, 395, 1020, 1725, 1989, 3078, 2527, 428, 2362, 1480, 2295, 985, 1398, 1202, 1122, 158, 2919, 1399, 3308, 1702, 937, 2421, 595, 1583, 466, 236, 486, 1764, 1665, 1698, 2680, 242, 2399, 2405, 2356, 554, 241, 2487, 2860, 2282, 2649, 154, 1843, 1131, 2927, 1915, 2363, 1447, 2785, 1414, 347, 2361, 510, 1668, 1976, 1724, 1453, 136, 1212, 1429, 895, 2786, 144, 2253, 1492, 872, 768, 2205, 2116, 2021, 2311, 493, 2064, 1417, 1064, 79, 3054, 2676, 1350, 2420, 1817, 2239, 1795, 1130, 1540, 2200, 99, 2230, 982, 1812, 2842, 2185, 151, 1663, 706, 2162, 346, 2045, 1351, 644, 40, 972, 2520, 2110, 167, 2886, 1736, 2933, 3000, 2824, 2377, 272, 1376, 1019, 2063, 2556, 2507, 715, 1655, 1834, 1877, 3086, 298, 1198, 2783, 1821, 2119, 1609, 978]",1,12.0,29.0,558.0,0.5100691313495642 -"[2329, 2035, 1202, 1865, 2460, 879, 2461, 1531, 2815, 771, 988, 2685, 300, 150, 2331, 2891, 2134, 144, 1352, 1335, 3053, 317, 
1570, 138, 156, 489, 1097, 411, 228, 2110, 2950, 1855, 1238, 2921, 2058, 847, 2670, 1103, 2459, 2495, 486, 212, 743, 2632, 1693, 1613, 93, 12, 716, 2356, 2458, 405, 159, 2686, 1002, 2973, 465, 783, 1701, 1115, 2727, 1307, 2515, 3076, 1677, 1587, 2673, 2179, 979, 3265, 1334, 1829, 516, 2258, 587, 1123, 900, 2345, 3132, 1660, 319, 43, 331, 3277, 2574, 584, 2346, 2169, 2561, 654, 2427, 3216, 676, 1971, 84, 582, 1130, 1200, 1087, 2920, 2343, 649, 2080, 1218, 2646, 215, 1625, 921, 136, 3084, 1742, 1804, 1143, 2296, 2520, 2090, 3215, 1322, 3284, 579, 1853, 1031, 1785, 1759, 779, 2633, 1753, 2948, 8, 647, 3026, 2391, 1410, 1429, 1666, 2100, 2840, 2328, 3318, 604, 1387, 683, 3217, 2939, 3156, 509, 1030, 1111, 2573, 205, 2949, 2919, 1709, 191, 2669, 563, 1782, 2776, 1280, 1362, 1398, 1256, 3228, 1, 1170, 1196, 3028, 2140, 2065, 2450, 1737, 2911, 1096, 1039, 18, 90, 3046, 1731, 178, 653, 3068, 2814, 1399, 1043, 1366, 2627, 2647, 1728, 1084, 1235, 3317, 2316, 1072, 1989, 2422, 1048, 540, 790, 2066, 352, 1093, 92, 1729, 3042, 887, 3174, 1571, 2249, 3148, 1921, 819, 1472, 2306, 294, 2447, 3099, 1525, 2531, 1516, 3218, 2558, 234, 379, 3009, 441, 637, 1868, 330, 2278, 2189, 1967, 2062, 3184, 597, 3294, 2726, 22, 1004, 797, 1441, 2501, 648, 107, 83, 2424, 1615, 106, 3110, 2812, 989, 1542, 2153, 2603, 5, 1309, 1329, 1186, 615, 3017, 1225, 3018, 547, 132, 1842, 1891, 1234, 923, 2421, 2575, 2055]",1,12.0,27.0,458.0,0.5287045386233844 -"[158, 464, 620, 1973, 2413, 2422, 1054, 2919, 2784, 2887, 453, 1758, 43, 466, 2520, 2527, 1429, 483, 1, 2439, 1759, 1853, 1064, 151, 1130, 936, 272, 1417, 2368, 444, 2633, 2230, 268, 542, 1817, 241, 2315, 1633, 1009, 2488, 1071, 2398, 1561, 881, 1020, 2860, 1219, 2427, 2681, 1131, 1731, 2254, 2994, 1195, 138, 509, 1214, 709, 2435, 2507, 2115, 843, 627, 2866, 154, 2622, 1987, 586, 1585, 491, 835, 1609, 2822, 1012, 2162, 347, 136, 450, 2564, 2775, 528, 2455, 1582, 2277, 3138, 887, 2053, 1872, 2020, 1010, 1877, 51, 3014, 2418, 2786, 2896, 2366, 2861, 396, 
930, 847, 3275, 768, 2830, 31, 680, 1726, 1644, 2295, 181, 1943, 2562, 137, 2487, 978, 2397, 30, 303, 227, 374, 1480, 27, 1620, 2302, 1251, 131, 2483, 2897, 150, 2977, 2925, 622, 1605, 2181, 2226, 2579, 242, 486, 2655, 880, 2299, 808, 499, 78, 1301, 2097, 2228, 2586, 42, 2785, 147, 263, 1647, 2598, 1965, 982, 1992, 1476, 1013, 1015, 1112, 1625, 3326, 2583, 28, 2827, 993, 1591, 882, 2341, 395, 170, 2769, 2664, 2466, 1034, 2645, 2571, 744, 2032, 492, 1875, 3197, 1362, 3046, 2475, 500, 2444, 2825, 2821, 749, 2783, 2875, 2964, 2828, 2524, 2511, 2782, 2632, 29, 775, 3164, 2774, 1910, 2650, 49, 807, 941, 2933, 1809, 493, 153, 1630, 2440, 585, 2882, 719, 1286, 551, 2826, 2651, 1576, 2174, 1967, 1940, 3162, 1663, 2116, 2021, 2665, 2829, 2924, 1414, 221, 1658, 693, 3007, 2396, 416, 1808, 2128, 314, 1343, 2416, 2235, 2993, 2667, 2649, 549, 307, 273, 1097, 1467, 2017, 10, 1702, 1736, 2666, 2217, 1422, 2023, 1510, 544, 2585, 12, 2739, 722, 1466, 1848, 2967]",1,12.0,27.0,558.0,0.5632702134054703 -"[1746, 3071, 2338, 545, 371, 2339, 1917, 2598, 736, 1778, 1532, 2581, 1419, 1049, 1180, 2869, 2741, 561, 2844, 1565, 924, 1687, 3139, 401, 1477, 3025, 989, 3282, 1999, 1915, 2463, 253, 43, 1046, 2578, 670, 2932, 430, 1818, 878, 2219, 853, 1426, 2554, 2580, 3076, 1303, 2845, 8, 2739, 564, 212, 713, 618, 783, 1478, 1894, 136, 1472, 1895, 856, 2259, 2588, 1702, 3047, 1992, 2383, 2692, 865, 595, 1771, 2299, 1227, 2261, 1111, 1246, 478, 711, 815, 1033, 3257, 2547, 362, 573, 1727, 247, 2507, 123, 333, 2520, 1671, 2606, 2314, 1256, 2776, 1965, 383, 504, 928, 2163, 3153, 2811, 876, 607, 3231, 2515, 1836, 1311, 2252, 2595, 173, 1572, 1676, 450, 1837, 2313, 1103, 2095, 3029, 1326, 2332, 2643, 2727, 1759, 316, 1551, 1466, 56, 798, 194, 1116, 884, 590, 2112, 1221, 1447, 1307, 2120, 1704, 2054, 205, 1080, 272, 2269, 777, 392, 178, 2210, 2212, 1813, 505, 3232, 641, 3089, 1052, 2184, 2301, 1940, 915, 1561, 1684, 3049, 2519, 2661, 440, 1657, 2218, 2868, 1449, 2400, 155, 1971, 3048, 91, 2316, 1183, 
825, 2028, 920, 300, 577, 567, 1423, 1729, 2052, 1366, 2721, 3052, 822, 1691, 2464, 1817, 2088, 471, 31, 1234, 1961, 1408, 462, 485, 1087, 541, 2347, 1437, 1865, 1945, 2604, 417, 519, 80, 703, 1613, 1027, 2278, 2335, 3296, 465, 359, 2472, 922, 2283, 1919, 1335, 2763, 2870, 1609, 2612, 882, 2206, 1678, 555, 2662, 2771, 624, 2058, 159, 3046, 2761, 95, 2657, 1318, 2080, 771, 3317, 1464, 2752, 1725, 2835, 1848, 2196, 339, 2295, 774, 2557, 1247, 542, 1020, 1077, 3294, 2266, 2244, 584, 484, 1849, 2450, 228, 393, 2560, 476, 707, 154, 135, 2495, 2452, 2836, 1265, 569, 1280, 2609, 1147, 319, 716, 2959, 442, 654, 1239, 615, 2607, 540, 3050, 2059, 2449, 2203, 642, 3215, 110, 991, 1445, 1522, 3009, 2911, 3018, 1924, 472, 535, 104, 751, 3299, 1324, 2819, 2020, 2814, 429, 566, 2153, 397, 1683, 1385, 638, 342, 1415, 1233, 3147, 974, 1566, 2636, 2296, 106, 455, 2608, 3223, 1562, 1829, 3012, 674, 1235, 1384, 2924, 1826, 286, 2345, 1210, 1835, 880, 778, 2177, 1107, 2496, 809, 3284, 2815, 515, 1578, 294, 2740, 2025, 2951, 2745, 1058, 1536]",1,12.0,36.0,558.0,0.5828073339344755 -"[526, 1234, 2898, 2519, 1180, 3244, 2674, 1435, 471, 1162, 924, 555, 716, 1196, 561, 1710, 876, 1085, 595, 2557, 2730, 3139, 2339, 991, 421, 1469, 634, 1366, 2581, 2163, 2120, 878, 2112, 1671, 1426, 2275, 1033, 2606, 1364, 2631, 843, 2579, 920, 2196, 2588, 3010, 2314, 1915, 3009, 792, 2512, 2088, 751, 1423, 1919, 2212, 1233, 462, 3265, 1227, 2901, 2132, 2578, 286, 2080, 1727, 371, 1691, 2580, 178, 1287, 567, 911, 1907, 1835, 2844, 3221, 1474, 1172, 2811, 2313, 1058, 550, 3153, 989, 136, 3052, 104, 884, 56, 1578, 2836, 2052, 780, 2661, 2900, 1183, 96, 2266, 540, 430, 1280, 70, 2210, 774, 1832, 2184, 1447, 2296, 2005, 3102, 1678, 1027, 2020, 783, 2058, 1352, 1999, 1466, 2662, 711, 690, 1561, 32, 135, 1791, 2059, 3012, 476, 865, 1353, 3103, 882, 713, 2868, 212, 2498, 678, 1445, 2095, 472, 922, 564, 2547, 203, 577, 3048, 1068, 1849, 173, 1877, 3299, 194, 1924, 2902, 1094, 2752, 2899, 1964, 1912, 205, 28, 1247, 
2400, 2671, 519, 2610, 8, 1532, 1842, 2315, 2608, 383, 2507, 1945, 440, 504, 1970, 272, 2464, 1684, 641, 1384, 118, 1629, 228, 1256, 1971, 2203, 3029, 3060, 2278, 253, 2643, 110, 2731, 2283, 1116, 638, 2602, 342, 450, 2560, 119, 2259, 515, 1613, 339, 397, 2301, 2727, 1865, 1221, 247, 1307, 649, 2762, 1562, 505, 2835, 2496, 2486, 1449, 2609, 2670, 2761, 2346, 2055, 2745, 1729, 2692, 1210, 1135, 2269, 1072, 1246, 2612, 2473, 969, 2837, 3202, 777, 62, 2054, 2025, 1043, 3039, 2845, 222, 485, 703, 771, 1052, 3089, 1946, 670, 566, 3051, 642, 707, 2910, 3147, 1551, 2657, 915, 2913, 2771, 569, 1848, 3018, 1046, 856, 921, 1829, 1571, 1818, 316, 928, 3232, 542, 2401, 3220, 78, 822, 674, 3076, 1485, 3120, 1687, 2028]",1,22.0,29.0,558.0,0.5097685602645026 -"[3062, 611, 603, 2321, 402, 2320, 2410, 2605, 2494, 1471, 1625, 2212, 1557, 1335, 1422, 1965, 136, 2967, 1999, 880, 2017, 624, 573, 1563, 2632, 1251, 379, 575, 2586, 115, 2166, 2066, 1044, 2993, 2323, 2622, 1414, 3153, 1214, 2829, 466, 1618, 189, 2142, 2399, 1157, 2154, 2782, 2299, 59, 1264, 2633, 1802, 2578, 1478, 885, 2520, 1395, 186, 2097, 542, 1466, 2984, 1486, 2718, 1736, 2861, 620, 691, 1609, 2924, 2369, 2596, 744, 1817, 3138, 1561, 976, 1091, 273, 157, 582, 2488, 1620, 1864, 2598, 1222, 1362, 1634, 2506, 1331, 2032, 1097, 1417, 982, 637, 1665, 1020, 1009, 2720, 1567, 2013, 1311, 2434, 220, 2774, 2645, 865, 565, 2883, 1605, 3017, 154, 2439, 1582, 241, 1943, 1836, 2234, 709, 2487, 395, 675, 2226, 2295, 1131, 2564, 2654, 968, 1973, 2507, 2896, 1424, 3086, 924, 680, 1412, 151, 768, 2122, 1075, 2674, 1346, 1565, 1853, 2312, 2719, 1668, 468, 1848, 512, 242, 286, 55, 2455, 2519, 2864, 138, 450, 1812, 1877, 2174, 92, 1734, 1443, 1473, 1915, 2413, 670, 2921, 2054, 2401, 2189, 2217, 3215, 396, 2884, 1546, 2266, 1758, 1195, 930, 936, 2210, 2882, 280, 2089, 1087, 772, 1987, 2875, 2328, 1532, 2897, 1576, 2844, 2249, 2863, 298, 2740, 882, 2780, 1125, 2657, 206, 749, 2509, 2752, 974, 485, 1052, 2196, 1472, 1031, 61, 1514, 2959, 
1835, 551, 1239, 272, 1445, 353, 1359, 796, 53, 1415, 2113, 851, 2623, 716, 2776, 106, 2624, 548, 922, 2063, 1384, 1296, 483, 267, 3052, 2020, 2329, 2119, 1259, 929, 155, 2510, 1406, 98, 406, 2779, 3292, 733, 2771, 2614, 813, 2643, 1211, 1600, 2511, 1265, 599, 2912, 1246, 1585, 2770, 2284, 386]",1,22.0,27.0,303.0,0.5229936880072137 -"[550, 2704, 404, 2925, 2706, 773, 2170, 162, 2330, 2380, 1824, 622, 588, 2444, 2774, 2775, 2650, 518, 2148, 3009, 1580, 2626, 70, 2379, 993, 76, 1145, 2705, 2703, 2181, 1146, 2829, 2558, 2647, 1731, 1896, 2901, 20, 3072, 2859, 1215, 1275, 1534, 2902, 492, 2264, 994, 1542, 2625, 2362, 16, 1070, 2595, 1605, 2597, 1725, 1011, 287, 2649, 350, 3080, 3198, 1584, 1080, 2218, 2039, 72, 3199, 2227, 1230, 450, 3081, 438, 2708, 2651, 2861, 153, 2632, 1197, 34, 997, 825, 2720, 1883, 1647, 347, 1034, 2399, 1462, 918, 2324, 128, 377, 1825, 218, 2097, 2132, 2707, 2234, 2903, 2428, 2899, 2596, 2226, 2702, 1943, 2073, 1668, 1965, 2631, 2360, 203, 242, 220, 2639, 2429, 2361, 2183, 1422, 453, 517, 2886, 1761, 2199, 211, 1567, 1089, 2564, 715, 1644, 818, 717, 3213, 2633, 1940, 2443, 2868, 2534, 894, 136, 1351, 2678, 1176, 1946, 151, 770, 2250, 2675, 395, 1633, 1758, 2275, 2507, 2927, 1777, 1529, 1696, 229, 428, 2239, 2091, 2230, 1015, 2405, 2355, 634, 2063, 1112, 3246, 2506, 2122, 503, 2435, 1400, 3207, 2398, 75, 2119, 124, 2967, 1214, 322, 2592, 2519, 2830, 1582, 2021, 705, 1135, 2483, 2247, 1795, 1533, 2782, 1697, 1157, 1630, 2837, 796, 1012, 627, 2527, 1925, 2282, 1918, 3027, 1238, 269, 2032, 502, 131, 927, 1811, 413, 549, 1308, 755, 746, 1300, 65, 197, 506, 268, 1068, 3151, 946, 2645, 816, 2681, 2546, 2638, 147, 194, 2906, 872, 2565, 224, 2655, 416, 292, 3042, 122, 1922, 1903, 887, 2882, 1517, 641, 59, 985, 1013, 3114, 2224, 2299, 398, 351, 1439, 1620, 2387, 3051, 1642, 1483, 691, 1843, 2640, 2166, 1041, 1821, 1583, 1799, 1522, 464, 1591, 1292, 3052, 10, 2420, 1944, 397, 501, 1558, 1609, 2535, 1128, 1136, 2797, 2200, 968, 1568, 180, 2718, 725, 1417, 
2622, 154, 1009, 2680, 1996, 43, 2102, 30, 2048, 231, 827, 1341, 930, 677, 1019, 1313, 1665, 661, 2196, 557, 2404, 1202, 1094, 1956, 2839, 1251, 3236, 2641, 435, 2426, 2217, 847, 2489, 2964, 1064, 1492, 2520, 1670, 3028, 2621, 607, 2719, 414, 1759, 1655, 1666, 263, 2033, 1736, 1740, 1257, 1625, 1096, 2488, 2356, 1877, 2162, 875, 2422, 2900, 1760, 152, 25, 2676, 396, 2057, 2474]",1,22.0,36.0,458.0,0.547039374812143 -"[2959, 31, 2088, 95, 1543, 2067, 1561, 1551, 1094, 136, 2636, 3274, 1759, 1940, 729, 247, 2063, 1702, 106, 2835, 1746, 2613, 2245, 3051, 595, 43, 2868, 1629, 155, 1961, 1832, 1217, 1704, 1768, 2901, 1162, 3026, 254, 2977, 927, 1107, 2055, 3071, 2315, 1220, 110, 2401, 78, 641, 2269, 1687, 1791, 1531, 2366, 825, 2520, 1545, 421, 1469, 286, 878, 2902, 618, 2025, 1445, 809, 2043, 1196, 201, 1366, 2554, 1135, 2732, 2745, 169, 3076, 1917, 969, 920, 2741, 2662, 2733, 3060, 2498, 2507, 2598, 526, 1043, 2189, 2671, 1221, 2763, 2643, 579, 2312, 2963, 2066, 2837, 1447, 2619, 545, 3282, 2519, 2670, 484, 1522, 745, 1485, 266, 2345, 1309, 465, 2920, 690, 2595, 2740, 535, 1464, 3288, 783, 1039, 2899, 2222, 1449, 300, 3296, 2604, 550, 371, 408, 2492, 2163, 1246, 1877, 3039, 642, 3120, 1080, 638, 2275, 1967, 1410, 471, 2739, 2054, 2817, 1654, 3223, 822, 1772, 379, 1848, 2776, 2752, 203, 2924, 3249, 1822, 1946, 2497, 304, 1676, 2814, 2900, 736, 634, 1912, 1701, 2473, 2863, 1234, 2202, 2463, 2611, 1789, 721, 713, 654, 397, 3220, 1000, 3048, 637, 32, 751, 911, 70, 856, 1265, 472, 2672, 928, 1324, 485, 1331, 1165, 2512, 2339, 1363, 922, 1326, 541, 401, 900, 2400, 778, 556, 2903, 2314, 2338, 2080, 1072, 2154, 2515, 194, 2819, 1729, 1046, 3146, 2551, 2346, 159, 3049, 494, 2588, 2150, 2177, 2921, 227, 2219, 3050, 3052, 2661, 2153, 2472, 92, 2335, 212, 860, 1233, 206, 1049, 1721, 3148, 407, 2683, 2816, 1027, 2252, 1516, 2132, 812, 1753, 2579, 98, 62, 2085, 1419, 1259, 1620, 584, 843, 3147, 519, 2623, 1002, 3215, 1727, 1693, 228, 342, 285, 1455, 808, 3244, 1075, 854, 205, 1078, 
1307, 2562, 2657, 703, 921, 383, 2815, 577, 2313, 429, 419, 2811, 790, 17, 2509, 359, 2112, 1172, 252, 2283, 1385, 215, 1660, 772, 1953, 142, 3009, 1033, 1384, 2734, 1087, 1393, 515, 1068, 2531, 1247, 1418, 2993, 1280, 3284, 1566, 1200, 1578, 1771, 1563, 393, 405, 1210, 1446, 1364, 1477, 84, 1101, 2041, 2510, 1797, 2347, 2631, 2179, 2932, 1052, 3299, 674, 3047, 2240, 563, 815, 884, 949, 473, 135, 1865, 3217, 1004, 1546, 280, 2440, 1085, 3218, 1710, 2602]",1,22.0,36.0,558.0,0.6053501653140968 -"[2110, 80, 860, 1915, 1225, 2598, 624, 294, 2495, 540, 2730, 2646, 2032, 2752, 3089, 1253, 1932, 1836, 106, 3146, 1472, 1965, 1435, 2898, 1039, 136, 2473, 2643, 1532, 1115, 486, 1956, 1366, 3018, 1130, 1759, 1891, 3228, 471, 2278, 242, 2588, 144, 1052, 2776, 1398, 2726, 3076, 783, 247, 1251, 930, 83, 3221, 3148, 3236, 2455, 2299, 1135, 2920, 419, 3153, 1170, 3039, 567, 3218, 2331, 2486, 1829, 1004, 201, 641, 927, 2292, 1587, 2886, 595, 485, 1849, 450, 1677, 266, 2401, 2911, 1877, 505, 1701, 478, 212, 2249, 547, 151, 544, 3046, 879, 405, 2259, 1660, 2673, 1387, 3017, 1200, 2400, 1693, 2179, 17, 2657, 968, 2134, 222, 2811, 1136, 1046, 545, 2964, 2497, 1964, 2993, 2464, 2302, 272, 2210, 2498, 1625, 3216, 2950, 1485, 332, 2316, 3296, 1715, 1227, 2332, 468, 2840, 923, 1582, 95, 579, 1043, 3249, 1510, 2547, 1256, 2894, 3215, 649, 2932, 1447, 2733]",1,32.0,15.0,558.0,0.5205891193267208 -"[1677, 191, 2169, 541, 637, 2686, 1620, 1410, 1729, 411, 1097, 921, 3017, 2147, 2269, 1235, 582, 3053, 2345, 2153, 2189, 1191, 2346, 1927, 156, 711, 383, 2055, 1039, 1196, 3132, 900, 2495, 110, 3228, 543, 1083, 1399, 2558, 3046, 2343, 3109, 703, 2562, 822, 2531, 2112, 465, 2335, 486, 1542, 430, 2727, 1033, 3108, 1474, 1989, 2095, 1115, 1143, 1791, 2573, 1551, 1587, 2422, 331, 808, 1278, 441, 2331, 3236, 2278, 991, 2066, 2673, 1031, 989, 95, 2911, 1072, 1704, 2567, 1768, 2631, 1111, 1990, 2245, 2919, 1625, 2534, 3110, 1678, 83, 683, 3174, 2973, 942, 2948, 405, 178, 2726, 1170, 2423, 1309, 1326, 2110, 
648, 2561, 2424, 1130, 1983, 1709, 1426, 3274, 2161, 421, 70, 3234, 2421, 144, 8, 649, 2548, 1093, 540, 1234, 1782, 1684, 1522, 2132, 552, 1759, 1472, 1429, 819, 2305, 1186, 1842, 1048, 2072, 2458, 2627, 3210, 1868, 923, 2943, 2306, 2899, 2557, 2450, 1225, 3152, 1728, 2165, 2288, 1279, 2140, 472, 3018, 352, 1398, 3028, 2425, 2549, 954, 2789, 1244, 152, 1613, 2058, 615, 2944, 2030, 2837, 2520, 1334, 780, 1184, 2090, 2568, 2646, 43, 269, 578, 3196, 2225, 2819, 700, 168, 2949, 1125, 2062, 447, 3244, 2836, 2420, 1615, 2149, 516, 3193, 1818, 455, 906, 1970, 1287, 2541, 716, 728, 771, 2347, 244, 3010, 372, 2498, 1656, 846, 550, 2378, 1863, 746, 2603, 2412, 2550, 330, 249, 1853, 2628, 1094, 3235, 203, 3009, 2835, 1085, 2939, 1043, 690, 3184, 93, 2035, 54, 2367, 1667, 1521, 3195, 107, 3139, 2620, 3011, 932, 339, 325, 3106, 2157, 854, 190, 722, 319, 2368, 3286, 212, 3287, 783, 779, 2366, 1992, 1903, 2884, 2120, 370, 798, 3197, 3107, 123, 2296, 1352, 410, 3076, 884, 253, 3265, 91, 3194, 33, 205, 374, 2524, 1792, 1172, 2512, 2579]",1,32.0,29.0,359.0,0.5058611361587015 -"[2110, 80, 860, 1915, 1225, 2598, 624, 294, 2495, 540, 2730, 2646, 2032, 2752, 3089, 1253, 1932, 1836, 106, 3146, 1472, 1965, 1435, 2898, 1039, 136, 2473, 2643, 1532, 1115, 486, 1956, 1366, 3018, 1130, 1759, 1891, 3228, 471, 2278, 242, 2588, 144, 1052, 2776, 1398, 2726, 3076, 783, 247, 1251, 930, 83, 3221, 3148, 3236, 2455, 2299, 1135, 2920, 419, 3153, 1170, 3039, 567, 3218, 2331, 2486, 1829, 1004, 201, 641, 927, 2292, 1587, 2886, 595, 485, 1849, 450, 1677, 266, 2401, 2911, 1877, 505, 1701, 478, 212, 2249, 547, 151, 544, 3046, 879, 405, 2259, 1660, 2673, 1387, 3017, 1200, 2400, 1693, 2179, 17, 2657, 968, 2134, 222, 2811, 1136, 1046, 545, 2964, 2497, 1964, 2993, 2464, 2302, 272, 2210, 2498, 1625, 3216, 2950, 1485, 332, 2316, 3296, 1715, 1227, 2332, 468, 2840, 923, 1582, 95, 579, 1043, 3249, 1510, 2547, 1256, 2894, 3215, 649, 2932, 1447, 2733, 2315, 1235, 2698, 379, 819, 2531, 2452, 2119, 2347, 2994, 2422, 1704, 
489, 2180, 2519, 2619, 2520, 2054, 2819, 1671, 777, 642, 155, 2951, 3266, 1561, 2063, 1239, 2153, 882, 638, 421, 2670, 49, 1531, 1280, 2739, 1768, 2488, 1989, 792, 2295, 2196, 1265, 2269, 1112, 142, 3120, 2740, 1721, 784, 2732, 2189, 1890, 587, 2450, 1097, 2512, 3202, 43, 3197, 2222, 1399, 582, 1992, 476, 138, 2217, 1131, 2067, 2507, 2154, 1221, 104, 234, 2765, 154, 130, 698, 822, 1753, 2816, 780, 1543, 878, 647, 676, 2005, 1545, 2683, 411, 2147, 2421, 921, 2312, 2883, 1353, 2560, 2314, 2383, 392, 2835, 1429, 2159, 1246, 1311, 1729, 1303, 713, 1516, 441, 3028, 215, 2779, 2120, 92, 2448, 465, 1000, 2919]",1,32.0,27.0,558.0,0.5365193868349865 -"[2711, 1789, 1167, 1097, 396, 468, 2719, 1493, 1392, 2584, 689, 2648, 1685, 1311, 1894, 1565, 2857, 670, 1615, 930, 2752, 1992, 2215, 2729, 812, 1919, 1562, 13, 2963, 136, 138, 2951, 2844, 2315, 1437, 1585, 2634, 222, 300, 228, 865, 485, 1251, 2819, 465, 3133, 1435, 2932, 2763, 26, 2005, 2547, 3029, 392, 1455, 642, 2607, 1912, 573, 2950, 1156, 2316, 516, 1536, 3265, 2731, 3284, 2713, 1625, 2217, 2520, 2383, 567, 87, 504, 2771, 17, 2964, 159, 545, 8, 968, 777, 2163, 2816, 2259, 672, 2646, 2598, 2160, 2578, 2059, 2196, 1845, 2815, 2734, 515, 1211, 286, 1324, 2670, 1105, 383, 135, 2710, 3221, 316, 2119, 2739, 212, 610, 2814, 924, 771, 319, 3134, 920, 569, 80, 2779, 2354, 2464, 1136, 308, 854, 1103, 1907, 991, 2032, 2683, 1792, 332, 1956, 1854, 2643, 579, 3288, 879, 927, 178, 1033, 3039, 2718, 2299, 1721, 3147, 2452, 2080, 2476, 1578, 1656, 205, 2292, 3294, 2661, 2153, 2898, 242, 974, 2673]",1,42.0,16.0,558.0,0.5127742711151188 -"[807, 2782, 2398, 1071, 220, 2110, 2475, 27, 586, 229, 2401, 2829, 2564, 2650, 1064, 2527, 144, 1678, 2626, 2586, 1214, 1989, 30, 2097, 428, 2977, 962, 181, 1877, 237, 1429, 152, 528, 1130, 1087, 880, 627, 1758, 1480, 2435, 29, 2335, 1647, 429, 978, 3086, 1417, 1580, 2681, 677, 2312, 3138, 941, 2021, 1725, 1736, 3275, 16, 2483, 304, 2820, 151, 2440, 2360, 2415, 2645, 10, 1034, 1698, 982, 541, 2636, 1398, 
1009, 1973, 1853, 588, 755, 1699, 1582, 236, 2089, 2638, 2316, 2631, 1028, 1912, 2282, 2605, 1826, 2651, 268, 246, 1422, 307, 1012, 616, 2113, 1415, 768, 930, 904, 453, 2774, 1326, 2421, 1835, 495, 637, 2622, 968, 1620, 2162, 2720, 289, 744, 2404, 775, 494, 1591, 347, 680, 2562, 3026, 2235, 1644, 1447, 2633, 2119, 585, 2721, 147, 985, 2649, 549, 2466, 273, 2230, 2362, 2234, 2239, 492, 796, 1484, 2443, 55, 2245, 2264, 137, 1587, 303, 2882, 466, 835, 2416, 965, 153, 2370, 548, 1665, 2063, 1666, 138, 2226, 2405, 2147, 51, 889, 1668, 2967, 61, 2925, 377, 2597, 1251, 2912, 69, 1042, 1013, 661, 2420, 808, 28, 59, 2444, 2331, 2919, 1157, 247, 2632, 2361, 1399, 131, 575, 2422, 1825, 2257, 1275, 2522, 1771, 2247, 2439, 2887, 2488, 2506, 2314, 565, 2339, 611, 398, 1609, 3266, 2313, 1633, 1008, 2599, 2322, 1875, 2032, 1238, 197, 2759, 715, 1918, 2371, 1517, 633, 2204, 2487, 1112, 2718, 468, 1879, 2324, 351, 1821, 964, 887, 1995, 912, 2429, 1027, 361, 1097, 2217, 172, 2596, 1943, 2182, 2418, 1330, 1359, 62, 2205, 194, 894, 135, 2122, 1812, 1655, 886, 1062, 2545, 1578, 1453, 850, 2783, 1843, 2224, 691, 770, 847, 1419, 864, 512, 396, 114, 186, 242, 799, 2166, 2839, 1567, 2481, 1096, 1731, 298, 1044, 1956, 527]",1,42.0,29.0,458.0,0.5145776976254884 -"[1238, 1096, 3293, 3069, 2815, 43, 2844, 106, 579, 979, 2776, 3294, 2316, 1202, 228, 1531, 159, 1322, 2035, 887, 1570, 352, 2147, 2574, 300, 465, 779, 1191, 2501, 1366, 1921, 294, 2447, 1782, 1971, 2673, 2891, 2573, 1083, 1742, 430, 1410, 3026, 1235, 1125, 132, 5, 212, 1759, 2329, 711, 136, 1225, 3174, 3160, 339, 1329, 716, 1989, 2459, 1031, 2450, 2110, 3103, 2727, 2670, 819, 1587, 96, 2740, 2461, 1030, 2072, 2458, 1625, 921, 441, 405, 2814, 2153, 3102, 597, 1234, 714, 2495, 790, 1324, 3156, 587, 1352, 1143, 2973, 3110, 2840, 1868, 647, 2356, 83, 2575, 1932, 2726, 2306, 2062, 1170, 1842, 3076, 3017, 1829, 3009, 3053, 3236, 3028, 509, 2421, 3276, 1891, 1287, 1677, 1218, 1087, 1398, 3108, 648, 2729, 2940, 2919, 2189, 486, 637, 900, 
3018, 215, 3299, 2427, 1256, 1804, 1967, 3284, 540, 988, 2065, 1709, 1200, 3046, 3068, 525, 1429, 653, 411, 3202, 2134, 2391, 1307, 130, 2520, 651, 2603, 2305, 746, 1737, 1865, 1667, 1186, 1853, 3109, 244, 1399, 3228, 2647, 2345, 2066, 2531, 2179, 1278, 2112, 1097, 1387, 1660, 152, 2943, 2288, 2058, 2328, 547, 942, 2422, 2548, 32, 613, 1043, 1516, 1039, 783, 1785, 1729, 1472, 2669, 1111, 1048, 2460, 1362, 331, 2331, 1710, 505, 3152, 1441, 90, 2169, 3235, 1, 1115, 3277, 604, 1280, 3318, 1728, 649, 3218, 2258, 2549, 92, 3265, 2343, 923, 1004, 615, 1033, 2567, 1077, 2950, 1196, 1072, 2346, 1983, 269, 379, 3206, 3234, 584, 2378, 582, 1383, 3153, 156, 2541, 205, 2812, 2921, 1684, 447, 119, 2100, 1335, 3216, 234, 2157, 2646, 478, 3084, 1426, 2080, 476, 18, 1002, 84, 2686, 771, 1130, 2911, 383, 178, 1525, 1309, 654, 2685, 8, 144, 442, 2515, 3134, 1791, 249, 3099, 3195, 1279, 118, 2055, 1863, 879, 2249, 728, 2884, 906, 2627, 2296, 2278, 2920, 3215, 683, 1542, 1753, 610, 2420, 1184, 989, 1571, 1701, 2944, 1613, 2819, 1244, 954, 563, 1103, 317, 54, 2948, 191, 1656, 455, 2558, 489, 1818, 168, 2030, 854, 370, 2367, 3193, 3148, 3042, 3286, 2186, 2218, 2368, 676, 1085, 3317, 2628, 330, 516, 3244, 2225, 3132, 2347, 372, 3217, 319, 1693, 578, 1615, 3210, 3139, 2568, 3196, 846, 1411, 374, 1334]",1,42.0,36.0,458.0,0.5722873459573189 -"[2579, 205, 1020, 3244, 2932, 1710, 542, 690, 2868, 1654, 809, 2903, 421, 1915, 1085, 634, 272, 843, 253, 118, 2132, 792, 2472, 485, 3139, 2900, 920, 1532, 2512, 1227, 450, 1353, 1684, 2473, 2088, 1818, 2899, 3010, 3076, 1877, 2112, 1477, 1162, 1629, 2930, 1522, 882, 2030, 397, 32, 1946, 2275, 2740, 2295, 1970, 1704, 110, 2312, 2631, 1135, 615, 550, 3048, 927, 564, 969, 2602, 2507, 2739, 2519, 247, 1220, 2902, 3265, 1727, 2314, 2095, 1561, 2727, 2752, 316, 212, 2196, 3103, 974, 711, 2789, 541, 674, 1691, 3029, 265, 455, 1052, 2762, 1072, 308, 2255, 8, 3049, 2266, 2052, 1842, 154, 2671, 884, 2578, 2581, 822, 339, 1865, 1426, 1033, 2212, 2299, 2670, 
1965, 96, 1671, 1964, 707, 1077, 2335, 2836, 2401, 95, 3212, 1536, 1565, 1366, 1999, 716, 2210, 654, 1919, 1272, 876, 2154, 2910, 2761, 1246, 2901, 1408, 429, 3215, 2283, 472, 91, 703, 1265, 401, 3220, 2218, 1613, 1043, 1848, 777, 194, 2580, 1992, 2560, 989, 2269, 2301, 2184, 56, 856, 2741, 2515, 1571, 1817, 1485, 2924, 1094, 155, 3299, 2028, 1307, 1102, 1068, 3207, 178, 649, 3257, 3317, 430, 2609, 106, 1464, 286, 3050, 2634, 2058, 294, 3047, 1234, 2606, 2683, 1287, 173, 1687, 1392, 1303, 104, 2547, 774, 1326, 1256, 1180, 2498, 2857, 1572, 70, 670, 2959, 783, 3089, 1172, 1768, 1891, 203, 1447, 383, 2313, 1945, 2220, 2911, 2452, 333, 359, 638, 2776, 922, 2608, 1027, 3009, 2771, 2055, 1385, 2259, 865, 1656, 2054, 442, 566, 561, 2612, 3102, 802, 798, 924, 2296, 78, 119, 1895, 3012, 2346, 83, 1478, 2025, 2332, 516, 540, 2643, 1683, 610, 1678, 928, 2245, 2844, 3018, 2673, 1721, 2657, 1445, 28, 2059, 1813, 1046, 1111, 515, 1335, 2636, 2745, 371, 1352, 1116, 1196, 80, 573, 1423, 1103, 1868, 1785, 3051, 2261, 1778, 3202, 2080, 915, 642, 2383, 1472, 1829, 624, 2163, 3153, 577, 1836, 2845, 2817, 1239, 2486, 1087, 3052, 1677, 1235, 1854, 1455, 2450, 2729, 771, 2557, 319, 1912, 393, 1383, 123, 412, 1058, 2347, 1791, 1924, 3147, 2730, 519, 780, 1474, 2646, 2692, 1551, 2672, 1221, 2449, 1311, 222, 254, 2835, 1183, 2315, 417, 2952, 1384, 440, 2120, 3046, 2721, 721, 3284, 1210]",1,42.0,36.0,558.0,0.5305079651337541 -"[2217, 396, 627, 2650, 1251, 1422, 1358, 1817, 755, 2681, 1771, 2651, 494, 968, 2360, 2632, 194, 1087, 2224, 1009, 1863, 1125, 304, 637, 43, 2631, 2919, 768, 1131, 1568, 2728, 1350, 1609, 938, 486, 2655, 1620, 1238, 2368, 1736, 2633, 2519, 1826, 273, 2485, 492, 2680, 1034, 737, 263, 2104, 1875, 1096, 1215, 2487, 3139, 1821, 2782, 2884, 1965, 1214, 2829, 705, 2506, 2649, 1731, 2230, 2247, 253, 2977, 1, 1666, 808, 1769, 1130, 1019, 62, 1760, 2507, 2488, 1151, 1027, 3251, 449, 2252, 930, 3026, 227, 2562, 557, 1399, 2440, 669, 1275, 2196, 1726, 1453, 770, 982, 2245, 453, 
843, 2205, 1625, 2428, 2422, 2675, 2627, 921, 886, 70, 330, 113, 1543, 1655, 123, 188, 1230, 2057, 541, 1286, 661, 1522, 2426, 677, 502, 517, 2546, 2476, 158, 1591, 2021, 3027, 2062, 1583, 582, 1115, 887, 1943, 928, 2767, 414, 2839, 468, 1558, 2622, 794, 1012, 2550, 473, 3286, 150, 429, 2598, 1351, 2443, 347, 2315, 31, 136]",1,52.0,16.0,458.0,0.5719867748722572 -"[70, 2335, 2245, 1220, 1340, 3026, 2900, 927, 2440, 2672, 203, 808, 1326, 2120, 624, 1620, 1721, 3009, 194, 2855, 2473, 1771, 1826, 1609, 1946, 2134, 429, 1817, 1131, 880, 2562, 2901, 2636, 1835, 2670, 1574, 550, 101, 1135, 790, 2899, 32, 1485, 1625, 2977, 2674, 1759, 840, 2196, 548, 1410, 2721, 2840, 541, 316, 1348, 494, 2275, 3046, 1877, 2845, 62, 1418, 1094, 2931, 3025, 1027, 634, 2020, 31, 1162, 1068, 2902, 2477, 2345, 587, 286, 92, 637, 234, 304, 421, 755, 83, 2174, 2391, 2447, 153, 2066, 1004, 2921, 215, 1466, 2519, 1445, 653, 43, 18, 466, 106, 2776, 721, 615, 2959, 2683, 2903, 1412, 379, 84, 3217, 1787, 1247, 2584, 1087, 2380, 540, 3215, 408, 1660, 3017, 2911, 777, 879, 1525, 1691, 2598, 676, 2132, 825, 2631, 1973, 1693, 2189, 1971, 285, 2249, 3018, 2973, 3284, 920, 2507, 1080, 2673, 1940, 3138, 2607, 3216, 2228, 1446, 333, 2920, 946, 582, 2258, 2100, 535, 130, 405, 2950, 1797, 1967, 2179, 2401, 2163, 250, 2646, 577, 1561, 1753, 2520, 1848, 136, 2218, 427, 1387, 3148, 2559, 563, 1961, 1895, 2547, 2346, 1785, 2924, 819, 618, 39, 3029, 744, 1516, 489, 923, 1921, 547, 1170, 2261, 3275, 2209, 2235, 1311, 38, 2383, 641, 359, 1701, 236, 670, 530, 1702, 80, 2595, 1678, 2114, 335, 3099, 595, 1621, 2554, 2560, 1560, 2555, 2080, 2586, 3218, 647, 863, 2604, 2316, 1676, 1705, 1200, 2662, 1088, 713, 2740, 856, 2661, 1002, 2734, 1447, 2312, 2466, 1912, 723, 878, 2811, 751, 2388, 1837, 2953, 553, 2588, 2314, 2085, 1419, 471, 1698, 2739, 2389, 446, 1654, 114, 135, 2315, 28, 2074, 1538, 401, 2313, 142]",1,52.0,27.0,303.0,0.521190261496844 -"[396, 930, 138, 468, 2450, 2661, 1251, 83, 2662, 2346, 1746, 300, 2719, 
1676, 1446, 2604, 1845, 641, 2670, 2789, 1131, 535, 1917, 2217, 624, 1455, 3120, 649, 2080, 2519, 1836, 3146, 201, 3133, 736, 1167, 2646, 332, 2964, 2734, 2951, 2464, 825, 371, 2488, 476, 1227, 1105, 2196, 80, 2718, 2292, 2259, 411, 3018, 3029, 2345, 2765, 2032, 1097, 1956, 338, 44, 242, 2451, 2153, 212, 24, 968, 2911, 698, 419, 713, 1961, 3296, 1225, 1721, 455, 2520, 2119, 2642, 878, 751, 2554, 3294, 156, 359, 1080, 1625, 2598, 2134, 1701, 1531, 1136, 2815, 2950, 1324, 1965, 923, 920, 2447, 2920, 2354, 159, 1247, 540, 2449, 3039, 2163, 3148, 1410, 3017, 1220, 2448, 2836, 286, 2779, 92, 2473, 1112, 1737, 1418, 2595, 151, 1419, 2720, 339, 545, 1253, 2339, 812, 3266, 136, 272, 2932, 2816, 395, 792, 1829, 186, 2299, 1677, 3099, 1143, 2005, 882, 234, 1932, 1939, 3076, 155, 2730, 2452, 2063, 2726, 595, 777, 2883, 2672, 485, 1671, 1582, 118, 96, 142, 2507, 744, 1493, 2547, 2497, 1668, 2673, 465, 638, 819, 489, 2952, 379, 1256, 2278, 860, 670, 1004, 154, 783, 1049, 1170, 1848, 1366, 2495, 1660, 2710, 1915, 2302, 2220, 3153, 13, 1894, 18, 1030, 2584, 2479, 1046, 1877, 106, 1200, 1472, 2886, 3046, 316, 2898, 2711, 1672, 642, 2643, 564, 1435, 2486, 542, 567, 2295, 2506, 2752, 2588, 563, 778, 1043, 2609, 1423, 544, 2921, 674, 1516, 2814, 90, 285, 2401, 2531, 215, 2959, 478, 618, 1052, 3218, 879, 2835, 2391, 1551, 2819, 1039, 17, 471, 84, 579, 1384, 1837, 745, 547, 3284, 1566, 1280, 2607, 2498, 1727, 1009, 1002, 130, 653, 2648, 135]",1,52.0,27.0,458.0,0.5058611361587015 -"[590, 1822, 1758, 2900, 818, 2522, 2506, 2645, 1903, 2091, 906, 2639]",0,,,, -"[1822, 906, 1758, 590, 1903, 3209, 2000, 650, 2206, 2565, 2645, 2900, 2119, 269, 1362, 1910, 1655, 3042, 818, 725, 2217, 525, 54, 2494]",0,,,, -"[43, 1429, 136, 1918, 2829, 2475, 2444, 2234, 2828, 2649, 1644, 1130, 2621, 2681, 2396, 31, 2784, 528, 453, 1630, 1131, 2181, 2651, 1663, 2064, 2116, 2053, 2650, 1759, 2023, 2490, 965, 627, 807, 941, 2585, 962, 303, 2398, 2667, 586, 1853, 314, 2257, 51, 2254, 887, 2488, 2665, 220, 492, 16, 
2021, 2435, 2115, 2821, 1009, 982, 2785, 2822, 27, 2128, 2519, 2782, 770, 2196, 968, 549, 719, 1724, 912, 398, 1843, 1591, 2230, 2217, 930, 1097, 2139, 2823, 396, 1736, 2783, 616, 138, 1013, 1665, 2668, 1965, 2824, 273, 246, 1422, 2718, 2293, 755, 468, 2719, 2825, 2826]",0,,,, -"[43, 2234, 1644, 1918, 2621, 1429, 2475, 2829, 136, 2053, 2681, 31, 2651, 2021, 1630, 2023, 2181, 2444, 1759, 1131, 2828, 2649, 1625, 965, 2823, 1130, 2784, 2821, 2396, 2490, 453, 1034, 1663, 2116, 1724, 1607, 835, 1238, 528, 1112, 2115, 2064, 492, 627, 2650, 1343, 1071, 1157, 2827, 2824, 2398, 962, 2668, 887, 2293, 2826, 51, 303, 2257, 968, 2822, 2139, 220, 1097, 27, 2667, 2665, 912, 586, 1853, 59, 941, 807, 1219, 2783, 2782, 2585, 1422, 2128, 2435, 2254, 314, 2488, 16, 2825, 2244, 1015, 2785, 1965, 2047, 1591, 1009, 2519, 1470, 1117, 1582, 633, 2230, 137, 982]",0,,,, -"[307, 2097, 2226, 373, 2786, 2782, 719, 2667, 2361, 213, 434, 1190, 2632, 1480, 302, 2859, 912, 493, 43, 2435, 2556, 937, 1290, 2247, 931, 2230, 1214, 1943, 2564, 229, 2416, 1708, 1112, 1605, 2585, 622, 2490, 2418, 755, 2637, 2829, 2282, 2398, 968, 2596, 1219, 2115, 2785, 2475, 1238, 807, 2784, 962, 2651, 941, 1012, 2597, 586, 1117, 246, 2622, 1644, 2650, 2828, 2830, 2023, 887, 237, 2649, 2783, 549, 1343, 1567, 2839, 585, 3093, 2862, 616, 2166, 137, 1262, 2257, 303, 314, 220, 27, 258, 2234, 3186, 1013, 1034, 2444, 965, 453, 2825, 835, 554, 2415, 2820, 2254]",0,,,, -"[2649, 1853, 1795, 2361, 2435, 2250, 2651, 2861, 1877, 994, 1483, 3027, 2488, 549, 1630, 1580, 1176, 2331, 2362, 2422, 2183, 2650, 1761, 2675, 2200, 1130, 1012, 2226, 2122, 1422, 2592, 1399, 138, 492, 2399, 2676, 1214, 2147, 1777, 1308, 627, 2398, 350, 1821, 2564, 2110, 2882, 2421, 2224, 468, 1918, 2360, 2196, 2097, 1517, 1956, 2782, 2427, 2829, 2622, 2632, 144, 1398, 2919, 1, 1128, 1587, 2964, 2520, 2073, 2426, 1609, 1230, 847, 1943, 1429, 887, 1157, 2681, 2234, 158, 220, 2404, 1568, 1879, 2565, 1666, 2774, 2680, 1989, 2933, 2621, 486, 2429, 59, 1238, 1668, 
1965, 351, 2967, 894, 1251, 1097, 1894, 2639, 968, 2648, 1605, 2797, 2239, 1131, 1731]",0,,,, -"[2435, 1580, 2649, 1853, 2361, 2488, 2183, 549, 2675, 1176, 2650, 2362, 138, 2226, 492, 994, 1399, 1630, 1422, 1761, 1012, 1795, 2200, 1483, 3027, 2097, 2564, 1918, 1517, 2861, 2651, 1877, 2964, 2196, 2122, 2250, 468, 2882, 627, 2829, 350, 2360, 2592, 2774, 1157, 2427, 1821, 2399, 2782, 1956, 2331, 2224, 486, 2681, 1128, 1943]",0,,,, -"[1034, 51, 453, 528, 962, 220, 59, 968, 137, 1009, 807, 835, 492, 941, 1015, 1013, 616, 314, 1422, 1117, 27, 1071, 181, 775, 30, 29, 527, 799, 495, 77, 303, 586, 16, 99, 887, 69, 585, 965, 246, 450, 289, 105, 633, 1532, 2284, 2297, 2586, 549, 2302, 544, 2799, 2363, 978, 1561, 976, 2370, 603, 682, 2684, 1406, 2364, 881, 353, 2228, 2371, 1222, 2076, 2439, 189, 1699, 2434, 1915, 2369, 3128, 1471, 2174, 148, 1987]",0,,,, -"[83, 2606, 774, 649, 3296, 2497, 860, 1932, 2911, 505, 1147, 3026, 1620, 2451, 921, 1225, 332, 808, 159, 2562, 1836, 923, 1449, 1545, 2316, 3046, 2189, 2931, 2977, 2220, 2440, 1729, 1562, 494, 2449, 465, 3017, 419, 2672, 3009, 1000, 1939, 3146, 142, 2312, 2447, 541, 2920, 2952, 2452, 2066, 1039, 745, 2646, 2734, 2531, 2921, 2292, 455, 637, 2450, 1004, 790, 2973, 865, 1426, 1967, 2578, 778, 1921, 17, 2112, 1797, 408, 2816, 1324, 2153, 2817, 2819, 579, 2345, 721, 383, 3148, 2814, 1033, 285, 1200, 1410, 819, 2815, 92, 379, 304, 624, 3274, 130, 201, 653, 1170, 879, 2448, 84, 1531, 2726, 1660, 300, 18, 3099, 411, 2950, 2610, 1701, 484]",0,,,, -"[649, 2292, 159, 774, 2497, 2606, 505, 1225, 2672, 1147, 3046, 2911, 332, 2646, 83, 2450, 2451, 2316, 860, 1939, 2726, 419, 3296, 1932, 921, 923, 541, 2449, 2220, 2951, 1449, 2950, 819, 3146, 1729, 1836, 2447, 2952, 1039, 2734, 2531, 624, 3026, 3017, 1620, 2312, 721, 1170, 465, 285, 1967, 1562, 3099, 2921, 3009, 142, 3274, 1004, 808, 2562, 2578, 2931, 865, 2920, 2189, 1545, 3148, 2134, 2977, 1921, 201, 2440, 304, 2973, 2066, 494, 2452, 2840, 2448, 1000, 745, 1002, 637, 1239, 790, 1200, 
2153, 1701, 698, 2819, 547, 455, 379, 92, 2249, 2816, 178, 8, 989, 383, 1335, 2345, 405, 812, 17, 2391, 778, 2179, 1660, 2762, 531, 879, 587, 18, 2085, 1324, 408, 1033, 2347, 582, 130, 234, 2993, 2611, 2112, 2346, 3294, 2610, 1426, 1797, 489, 1234, 1813, 653, 2817, 2761, 417, 3218, 676, 84, 2612, 2100]",0,,,, -"[1853, 847, 2520, 1112, 529, 1958, 241, 2488, 1666, 2861, 2527, 2404, 1940, 1429, 138, 2506, 1190, 154, 2277, 2299, 2200, 347, 1097, 2839, 450, 2655, 1064, 3014, 428, 2828, 1122, 2435, 1012, 805, 414, 147, 1702, 2443, 396, 1724, 2860, 2542, 2247, 346, 1480, 1230, 1414, 1198, 1351, 2556, 2253, 2824, 2786, 311, 1644, 2444, 2023, 2282, 2667, 2785, 2397, 2230, 2769, 2991, 451, 2021, 242, 1290, 2128, 1599, 2638, 402, 1663, 2475, 887, 2822, 1342, 2859, 493, 3093, 2429, 2315, 2649, 1582, 2651, 1630, 1812, 2585, 453, 2089, 388, 719, 2868, 2032, 2053, 1131, 229, 2064, 1821, 1176, 1214, 559, 1736, 2244, 2820, 807, 2664, 2768, 680, 2047, 1219, 413, 434, 2361, 1647, 1019, 2116, 136, 1238, 2519, 1877, 27]",0,,,, -"[101, 2855, 333, 1574, 1340, 2515, 1686, 1865, 825, 541, 1256, 1162, 2210, 634, 1077, 771, 1103, 319, 70, 2670, 2845, 2911, 294, 1004, 3157, 2450, 777, 1472, 3018, 2920, 2672, 654, 649, 1026, 610, 3046, 624, 2646, 1348, 641, 3017, 1691, 540, 1027, 1265, 2840, 808, 1326, 863, 3029, 429, 1046, 2335, 2259, 100, 2158, 2245, 2218, 2388, 1981, 250, 1932, 2066, 83, 2495, 3026, 584, 3071, 114, 1620, 516, 1817, 2278, 2595, 3284, 923, 2721, 1746, 2389, 359, 1771, 1135, 1260, 2977, 2673, 2132, 2261, 2899, 62, 494, 1891, 2588, 84, 442, 476, 2074, 3318, 130, 2346, 1193, 1225, 3020, 2952, 1446, 1693, 3299, 1842, 921, 1239, 1992, 247, 2924, 2312, 194, 1785, 2677, 471, 2275, 2604, 2562, 1080, 2134, 80, 2345, 1170, 670, 455, 2154, 421, 2447, 615]",0,,,, -"[101, 2855, 333, 1574, 2515, 1340, 1865, 1077, 825, 2210, 1256, 541, 1162, 634, 1004, 1686, 771, 1103, 319, 2920, 2845, 777, 3157, 2450, 2911, 294, 3018, 2670, 3071, 1472, 70, 1691, 2646, 3046, 654, 1265, 2388, 540, 2672, 624, 
610, 1026, 2066, 3029, 1348, 429, 808, 649, 1326, 1981, 2840, 3017, 1027, 2218, 863, 2389, 2259, 2495, 923, 1746, 516, 250, 100, 2158, 2335, 584, 2245, 83, 130, 3284, 641, 1260, 3026, 1046, 2261, 1817, 1932, 2977, 1620, 1891, 2278, 2673, 2132, 476, 1135, 1693, 359, 1771, 2595, 114, 2721, 1842, 2588, 1239, 615, 494, 1193, 62, 2154, 2952, 84, 3318, 2899, 3020, 442, 1170, 1446, 80, 729, 247, 1225, 2275, 2562, 1419, 1992, 921, 716, 194, 1247, 3299, 2677, 2312, 1080, 2134, 2074, 2088, 1615, 234, 316, 670, 2447, 2924, 455, 2604, 1967, 2296, 379, 2209, 421, 2950, 2100, 471, 1369, 2258, 2249, 2345, 2380, 92, 2025, 1701, 236, 2346, 553, 1538, 3317, 587, 1200, 2315, 1180, 653, 2662, 2560, 1676, 18, 478, 2054, 203, 371, 332, 2586, 2993, 1826, 2607, 43, 1837, 2580, 819, 1785, 2636, 1017, 3099, 405, 2734, 1971, 1002, 2581, 713, 1656]",0,,,, -"[2059, 1052, 1307, 1046, 465, 638, 95, 110, 1384, 2776, 2779, 2345, 2218, 2531, 3274, 2993, 462, 2252, 342, 2346, 1239, 1087, 2496, 2816, 545, 2283, 2973, 790, 3215, 923, 1211, 2631, 674, 1437, 1865, 2780, 2249, 406]",0,,,, diff --git a/pygip/models/defense/atom/csv_data/attack_Cora.csv b/pygip/models/defense/atom/csv_data/attack_Cora.csv deleted file mode 100644 index f13bac5b..00000000 --- a/pygip/models/defense/atom/csv_data/attack_Cora.csv +++ /dev/null @@ -1,542 +0,0 @@ -Sequence,Label,NCL,Query Budget,Num Sample Nodes,Fidelity,Gamma,Alpha -"[611, 2690, 611, 2690, 611, 2690, 611, 2690, 611, 2690, 611, 2690, 611, 2690, 611, 2690, 611, 2690, 611, 2690]",0,3.0,5.0,100.0,0.087149187592319,, -"[1224, 1583, 1848, 2291, 2335, 2268, 1120, 1377, 69, 1840, 1729, 2045, 1062, 1661, 1519, 91, 839, 794, 1039, 1293, 586, 1682, 1481, 2123, 1330, 1675, 1192, 1625, 1265, 2301, 436, 1680, 2380, 2576, 2008, 1817, 2044, 2233, 429, 2419, 1926, 1403, 181, 1845, 1464, 373, 45, 1503, 1844, 1329, 133, 1925, 1851, 1810, 2155, 65, 2589, 1395, 2304, 836, 2386, 1842, 1351, 1013, 908, 1025, 1463, 94, 2379, 109, 1992, 1846, 2198, 2267, 1294, 2485, 878, 733, 759, 
2131, 506, 2068, 81, 979, 1416, 965, 435, 552, 485, 239, 2035, 2021, 434, 707, 118, 2133, 2056, 2378, 1139, 2055, 2232, 280, 276, 2001, 885, 2121, 471, 2025, 1160, 1117, 6, 2293, 858, 2053, 1185, 524, 1841, 2051, 1535, 2178, 433, 1847, 1635, 383, 1285, 347, 1644, 624, 1279, 1198, 1460, 1042, 964, 2182, 52, 1628, 1677, 1515, 1916, 1818, 423, 2277, 2303, 2266, 901, 278, 597, 2180, 2196, 2203, 2072, 1631, 377, 1908, 2163, 1695, 1026, 315, 2057, 1669, 80, 74, 408, 1388, 731, 2074, 1303, 1204, 2073, 1540, 763, 256, 631, 2054, 1527, 2002, 2201, 2358, 1986, 2311, 751, 2108, 1843, 2333, 736, 100, 257, 2349, 525, 2181, 2003, 2186, 2321, 1889, 530, 2014, 2024, 1118, 2364, 2381, 862, 95, 756, 1309, 1892, 1472, 2219, 88, 510, 2302, 2049, 527, 1498, 574, 2348, 1849, 401, 1521, 678, 2187, 880, 300, 1302, 512, 218, 614, 1475, 888, 2346, 1602, 2469, 2231, 1047, 1580, 204, 593, 2011, 651, 1517, 1623, 2006, 1838, 2183, 1089, 2172, 1125, 1397, 415, 2197, 2052, 693, 737, 1514, 576, 734, 861, 1850, 1078, 291, 642, 1006, 2202, 2470, 334, 680, 1423, 142, 2329, 2330, 1467, 746, 2590, 399, 2199, 1556, 1469, 1692, 1174, 851, 665, 172, 2200, 240, 591, 503, 1769, 456, 966, 281, 544, 2107, 667, 365, 1017, 2472, 818, 2347, 192, 2345, 260]",1,3.0,30.0,300.0,0.6683899556868538,, -"[1743, 969, 389, 359, 1735, 742, 704, 1620, 905, 1412, 562, 2407, 36, 1282, 2252, 1592, 2251, 571, 980, 318, 484, 1313, 2143, 505, 589, 2083, 121, 754, 1370, 1258, 643, 303, 779, 1483, 2107, 387, 1570, 1248, 1773, 1670, 2009, 459, 745, 1358, 1769, 1777, 1337, 2081, 598, 2382, 199, 573, 1726, 1789, 836, 1616, 1144, 563, 2071, 644, 2096, 696, 10, 1810, 2047, 2155, 1224, 1640, 1775, 1623, 1013, 2045, 2046, 542, 2025, 88, 420, 2327, 1249, 1787, 1448, 1289, 1776, 682, 1785, 1158, 2478, 2050, 1622, 416, 2010, 232, 1077, 671, 1635, 1525, 153, 2094, 504, 1120, 2078, 1070, 1245, 519, 1127, 1009, 2079, 551, 2485, 1701, 2113, 1856, 1779, 1045, 1772, 1251, 854, 539, 2048, 1771, 1140, 1725, 1770, 743, 406, 1346, 1651, 112, 308, 
1661, 880, 1490, 814, 1792, 2034, 2080, 2095, 115, 1097, 203, 610, 109, 1778, 1624, 244, 1975, 1782, 231, 2026, 1868]",1,5.0,15.0,300.0,0.7319054652880355,, -"[1540, 2054, 2182, 1042, 525, 2172, 95, 1139, 74, 2055, 415, 1131, 142, 1847, 1332, 2015, 2057, 2419, 1410, 1926, 61, 2014, 1396, 2178, 1882, 1732, 621, 1039, 1234, 851, 1635, 910, 1628, 887, 1224, 1655, 2016, 724, 1309, 1121, 2467, 1848, 22, 737, 2010, 2394, 436, 324, 1986, 1013, 2315, 878, 2162, 1274, 1759, 2056, 1741, 789, 2155, 1713, 405, 1152, 454, 619, 696, 963, 1676, 88, 1701, 1675, 1917, 2012, 239, 759, 69, 1464, 1702, 604, 1417, 2386, 1930, 1677, 1351, 1174, 1501, 572, 2053, 1344, 463, 596, 1916, 1043, 1918, 21, 778, 743, 2059, 300, 1117, 331, 1592, 1399, 1362, 2021, 769, 506, 1812, 1908, 1652, 1674, 1359, 1435, 579, 1849, 779, 885, 1008, 1915, 1965, 2418, 203, 1665, 1474, 2217, 65, 175, 1928, 1894, 420, 162, 41, 2305, 1376, 921, 2022, 1539, 718, 1485, 697, 1914, 841, 2156, 1703, 2309, 437, 897, 1923, 461, 1891, 2017, 566, 158, 1889, 1268, 738, 1293, 1288, 2190, 146, 1968, 1002, 1494, 1220, 2295, 1203, 94, 2011, 756, 141, 1529, 586, 2116, 130, 2344, 25, 1966, 543, 1984, 1394, 1909, 1069, 644, 994, 76, 164, 2238, 1527, 1266, 1971, 60, 478, 1097, 1365, 2274, 323, 1512, 305, 2024, 1920, 1927, 1671, 2136, 205, 1919, 1487, 1080, 1178, 1525, 1468, 86, 2013, 791, 2427, 2130, 691, 57, 1970, 277, 199, 2019, 1259, 149, 1218, 1810, 210, 2227, 1912, 860, 597, 1171, 498, 55, 1869, 2450, 1021, 1319, 2388, 1270, 1448, 1497, 955, 1668, 1026, 1273, 2036, 1219, 674, 1329, 830, 465, 2023, 1905, 1985, 1079, 1658, 37, 815, 1964, 1967, 1924, 325, 673, 2018, 2020, 2137, 1156, 1626, 211, 1969, 651, 2039, 1929, 1336, 356, 1906, 2034, 771, 14, 1614, 1907, 438, 1667, 1983, 382, 2042, 787, 224, 248, 1190, 2035, 617, 1654, 49, 2040, 1807, 2037, 417, 2038, 2041, 717]",1,5.0,30.0,300.0,0.7149187592319055,, -"[563, 1070, 109, 318, 1358, 1799, 728, 417, 1918, 1675, 496, 1676, 2553, 1464, 1592, 1368, 992, 1864, 1483, 869, 2082, 506, 
1851, 519, 958, 190, 1725, 306, 335, 1539, 604, 773, 1986, 1975, 1740, 1359, 69, 236, 1805, 201, 143, 1733, 244, 1296, 1482, 830, 416, 644, 1505, 1802, 24, 137, 232, 1732, 1917, 2329, 779, 1798, 215, 2331, 1920, 261, 1701, 2412, 2034, 176, 1804, 1370, 1911, 1919, 1485, 973, 1914, 2504, 2095, 1072, 596, 1848, 1377, 778, 139, 1801, 135, 158, 2153, 205, 2144, 2154, 2135, 13, 2388, 1849, 157, 547, 216, 2155, 415, 1660, 1797, 467, 226, 1478, 1526, 18, 1525, 1587, 88, 235, 1908, 633, 343, 1309, 1583, 737, 1291, 325, 2152, 1692, 532, 179, 1224, 682, 1262, 1674, 1131, 1362, 1978, 1916, 1174, 387, 586, 2178, 41, 816, 1472, 65, 172, 197, 955, 512, 240, 1915, 189, 2350, 621, 1527, 2187, 1977, 1981, 76, 1980, 1800, 2017, 1868, 1628, 1469, 921, 1178, 2282, 756, 498, 1417, 1529, 1803, 514, 175, 2021, 1983, 2418, 2186, 162, 1082, 1156, 1494, 815, 1560, 478, 2151, 945, 231, 897, 2018, 239, 299, 1979, 1784, 1786, 787, 2191, 433, 1218, 2386, 1614, 1905, 2145, 55, 483, 1332, 323, 20]",1,7.0,20.0,250.0,0.7559084194977843,, -"[1476, 404, 1170, 1476, 404, 1170, 1476, 404, 1170, 1476, 404, 1170, 1476, 404, 1170, 1476, 404, 1170, 1476, 404, 1170, 1476, 404, 1170, 1476, 404, 1170, 1476, 404, 1170]",0,9.0,5.0,300.0,0.087149187592319,, -"[179, 197, 91, 1913, 484, 215, 1560, 303, 118, 2143, 1773, 1270, 1780, 2123, 18, 406, 189, 1852, 2025, 1851, 2022, 181, 244, 232, 1258, 416, 1830, 895, 1986, 1803, 119, 1296, 603, 1583, 1120, 1144, 2034, 1849, 2425, 1725, 699, 317, 1768, 1262, 505, 1403, 1441, 1464, 1740, 1786, 2327, 1800, 660, 314, 589, 836, 2504, 471, 1699, 980, 2190, 2323, 384, 1701, 510, 1065, 330, 1622, 2329, 2145, 2188, 20, 231, 2026, 341, 506, 2179, 1729, 2189, 1842, 854, 1035, 1070, 1843, 1625, 440, 387, 252, 2096, 139, 1733, 1841, 2082, 554, 1358, 389, 831, 454, 69, 1248]",1,9.0,10.0,300.0,0.7374446085672083,, -"[2269, 1415, 392, 1884, 687, 1295, 1370, 735, 113, 676, 2527, 1917, 1909, 1092, 1414, 2359, 1354, 2176, 739, 1881, 1237, 281, 2247, 1549, 747, 1619, 118, 2022, 2190, 11, 
2286, 581, 119, 1987, 2189, 1358, 1655, 645, 1171, 746, 441, 2274, 1958, 1348, 598, 540, 1810, 1073, 2162, 2379, 454, 1169, 552, 371, 2001, 1955, 2002, 2188, 1901, 2284, 2009, 1382, 2006, 510, 1875, 720, 1870, 745, 1553, 1812, 2163, 968, 201, 519, 649, 2424, 2004, 1709, 2003, 1839, 1527, 1095, 67, 2244, 160, 1149, 1995, 2453, 2476, 973, 1661, 1107, 743, 1855, 1998, 2375, 681, 1989, 2000, 68, 1127, 524, 1026, 95, 218, 366, 1303, 1204, 1574, 505, 460, 2283, 2542, 548, 2246, 843, 215, 1986, 1595, 391, 45, 1994, 2, 2555, 1956, 476, 566, 792, 151, 1128, 335, 1023, 1999, 899, 2275, 1441, 232, 1997, 71, 1434, 1873, 2271, 1991, 979, 1697, 1990, 179, 1704, 2338, 1993]",1,9.0,15.0,200.0,0.6680206794682423,, -"[1309, 1072, 109, 2094, 1505, 645, 65, 1358, 1435, 1927, 415, 2418, 519, 1568, 408, 734, 2112, 2132, 2228, 581, 1127, 490, 1882, 1671, 1368, 2395, 1163, 1045, 993, 1881, 736, 807, 1526, 1070, 2403, 60, 2021, 2015, 414, 118, 2458, 2113, 1614, 2317, 2025, 2020, 499, 2056, 1883, 32, 441, 331, 1879, 95, 2394, 2553, 973, 772, 553, 530, 708, 1293, 1566, 42, 1343, 1732, 771, 116, 1900, 1660, 2045, 1156, 2070, 252, 1911, 1203, 1792, 2050, 1637, 693, 711, 1285, 816, 2111, 1983, 1626, 2137, 1171, 1884, 1841, 2371, 2638, 1847, 1644, 1843, 1850, 1399, 120, 2367, 1396, 836, 1131, 323, 2022, 2116, 2155, 1341, 682, 1995, 815, 935, 2078, 1583, 1501, 539, 945, 1729, 661, 1403, 454, 702, 1110, 908, 1661, 1625, 1885, 1842, 1218, 2016, 2457, 2151, 366, 55, 1907, 1616, 891, 1372, 2190, 164, 2309, 1291, 210, 504, 2017, 510, 2154, 1120, 1851, 2069, 2153, 2071, 344, 2485, 1296, 2480, 1683, 334, 2177, 1906, 1974, 1482, 2194, 1421, 376, 1377, 1224, 728, 1353, 1028, 651, 2282, 1973, 2274, 465, 880, 586, 471, 1085, 1880, 1572, 97, 1441, 1402, 1535, 447, 2068, 865, 2188, 280, 2101, 1370, 2156, 2023, 2293, 2653, 1512, 452, 1487, 1587, 1222]",1,9.0,20.0,300.0,0.6713441654357459,, -"[1624, 109, 459, 1337, 1878, 1346, 18, 317, 124, 1158, 1624, 109, 459, 1337, 1878, 1346, 18, 317, 124, 1158, 1624, 109, 
459, 1337, 1878, 1346, 18, 317, 124, 1158, 1624, 109, 459, 1337, 1878, 1346, 18, 317, 124, 1158, 1624, 109, 459, 1337, 1878, 1346, 18, 317, 124, 1158, 1624, 109, 459, 1337, 1878, 1346, 18, 317, 124, 1158, 1624, 109, 459, 1337, 1878, 1346, 18, 317, 124, 1158, 1624, 109, 459, 1337, 1878, 1346, 18, 317, 124, 1158, 1624, 109, 459, 1337, 1878, 1346, 18, 317, 124, 1158, 1624, 109, 459, 1337, 1878, 1346, 18, 317, 124, 1158]",0,,10.0,100.0,0.0960118168389955,0.1, -"[490, 1879, 2227, 809, 2228, 490, 1879, 2227, 809, 2228, 490, 1879, 2227, 809, 2228, 490, 1879, 2227, 809, 2228, 490, 1879, 2227, 809, 2228, 490, 1879, 2227, 809, 2228, 490, 1879, 2227, 809, 2228, 490, 1879, 2227, 809, 2228, 490, 1879, 2227, 809, 2228, 490, 1879, 2227, 809, 2228]",0,,5.0,100.0,0.1107828655834564,0.5, -"[306, 109, 656, 476, 1337, 1878, 1490, 1346, 1483, 18, 306, 109, 656, 476, 1337, 1878, 1490, 1346, 1483, 18, 306, 109, 656, 476, 1337, 1878, 1490, 1346, 1483, 18, 306, 109, 656, 476, 1337, 1878, 1490, 1346, 1483, 18, 306, 109, 656, 476, 1337, 1878, 1490, 1346, 1483, 18, 306, 109, 656, 476, 1337, 1878, 1490, 1346, 1483, 18, 306, 109, 656, 476, 1337, 1878, 1490, 1346, 1483, 18, 306, 109, 656, 476, 1337, 1878, 1490, 1346, 1483, 18, 306, 109, 656, 476, 1337, 1878, 1490, 1346, 1483, 18, 306, 109, 656, 476, 1337, 1878, 1490, 1346, 1483, 18]",0,,10.0,250.0,0.0993353028064992,0.5, -"[2112, 2436, 2286, 118, 539, 2112, 2436, 2286, 118, 539, 2112, 2436, 2286, 118, 539, 2112, 2436, 2286, 118, 539, 2112, 2436, 2286, 118, 539, 2112, 2436, 2286, 118, 539, 2112, 2436, 2286, 118, 539, 2112, 2436, 2286, 118, 539, 2112, 2436, 2286, 118, 539, 2112, 2436, 2286, 118, 539]",0,,5.0,50.0,0.0901033973412112,1.0, -"[1787, 306, 1572, 656, 1337, 1787, 306, 1572, 656, 1337, 1787, 306, 1572, 656, 1337, 1787, 306, 1572, 656, 1337, 1787, 306, 1572, 656, 1337, 1787, 306, 1572, 656, 1337, 1787, 306, 1572, 656, 1337, 1787, 306, 1572, 656, 1337, 1787, 306, 1572, 656, 1337, 1787, 306, 1572, 656, 
1337]",0,,5.0,150.0,0.0904726735598227,1.0, -"[753, 2009, 366, 45, 68, 1403, 179, 201, 232, 519, 673, 1848, 681, 1849, 1023, 32, 2315, 1074, 1043, 1869, 347, 2054, 2057, 2180, 239, 1139, 2232, 1981, 1985, 553, 1995, 71, 1464, 335, 391, 2045, 252, 666, 1434, 822, 1453, 1558, 1637, 1704, 1021, 548, 2020, 2022, 456, 1540, 2183, 1999, 1285, 1838, 2072, 2133, 424, 483, 885, 1978, 890, 578, 1121, 1671, 1127, 2027, 2611, 1521, 1661, 476, 1333, 1482, 1525, 1976, 604, 1296, 994, 651, 142, 408, 52, 95, 1259, 2116, 878, 1039, 1980, 950, 1495, 1972, 2000, 1293, 151, 1583, 215, 1729, 1841, 566, 792, 899, 968, 1149, 779, 1574, 435, 1609, 2404, 1676, 210, 461, 1370, 2019, 1628, 858, 2233, 433, 668, 816, 718, 725, 1227, 530, 1421, 880, 2, 908, 1120, 1675, 1840, 460, 1844, 1845, 1846, 1288, 1095, 1166, 1975, 1115, 2229, 621, 1929, 438, 2182, 423, 2181, 2131, 2198, 2231, 1131, 1977, 1314, 504, 1655, 1399, 2228, 1842, 1394, 1658, 1847, 1683, 1928, 1013, 1973, 1912, 1501, 266, 2309, 1982, 323, 818, 966, 1850, 1652, 1570, 2172, 1239, 1515, 1332, 516, 118, 600, 1174, 510, 2395, 76, 743, 841, 1713, 1732, 1882, 2013, 2014, 2017, 1268, 1667, 593, 1644, 356, 436, 277, 571, 1986, 55, 490, 2102, 2137, 331, 2450, 1566, 25, 842, 2467, 724, 69, 1420, 836, 162, 415, 498, 1527, 1741, 2016, 2178, 2109, 809, 634, 204, 437, 61, 995, 211, 897, 860, 1668, 191, 1358, 1224, 661, 1178, 161, 272, 2071, 973, 1487, 1027, 181, 334, 1396, 2396, 130, 300, 737, 815, 851, 1309, 1851, 2015, 116, 2110, 2018, 1843, 1630, 1974, 160, 1338, 2103, 60, 1898, 1474, 1529, 2397, 2394, 88, 471, 1085, 696, 1494, 1677, 1908, 2010, 2011, 2012, 1344, 2227, 525, 1269, 2136, 745, 2034, 1625, 2274, 1696, 748, 2593, 514, 733, 1979, 2024, 506, 1880]",1,,30.0,300.0,0.6070901033973413,,0.4 -"[2123, 1733, 2591, 1065, 656, 2451, 859, 13, 1775, 27, 481, 1798, 2091, 1571, 2026, 2047, 831, 660, 1909, 91, 342, 1478, 1800, 1803, 719, 1180, 1258, 1782, 482, 511, 1804, 576, 716, 598, 1618, 556, 2329, 2504, 1144, 1012, 958, 303, 1304, 230, 351, 
1799, 1802, 2080, 114, 1013, 215, 341, 314, 1682, 948, 2320, 1635, 65, 1274, 2418, 189, 945, 1740, 2203, 1009, 2048, 2085, 2087, 2089, 2481, 2326, 699, 1852, 741, 1774, 2251, 619, 1220, 1376, 2021, 244, 1070, 2254, 1035, 1245, 406, 843, 2083, 2090, 1701, 387, 836, 826, 695, 2106, 2145, 2319, 802, 2182, 783, 1262, 1725, 655, 1483, 1551, 549, 790, 1574, 1986, 234, 2288, 454, 1851, 544, 441, 1767, 814, 2382, 1069, 1759, 542, 573, 1089, 1490, 1584, 1651, 1770, 224, 568, 2287, 1197, 603, 1248, 1791, 135, 1560, 1768, 1783, 2252, 2376, 20, 1779, 554, 1506, 608, 2328, 1640, 426, 1656, 1781, 1797, 2088, 416, 1158, 2300, 661, 1699, 1801, 2322, 1564, 2046, 384, 1780, 519, 1045, 1772, 2045, 2084, 943, 1061, 773, 1072, 1289, 2081, 1771, 139, 1810, 203, 1910, 231, 409, 1778, 1787, 1337, 1769, 1856, 2093, 330, 530, 2107, 487, 2143, 505, 2318, 29, 43, 706, 1157, 1703, 1448]",1,,20.0,300.0,0.6358936484490398,,0.6 -"[563, 318, 109, 1023, 1070, 1358, 1592, 496, 728, 417, 992, 201, 1918, 779, 1675, 1370, 874, 1676, 1986, 176, 2553, 778, 1799, 1464, 1864, 215, 1368, 1483, 869, 973, 729, 1975, 2154, 255, 519, 506, 1291, 1692, 135, 2095, 335, 2082, 958, 240, 172, 306, 1725, 190, 604, 682, 1539, 158, 24, 596, 1266, 773, 1740, 1851, 1469, 1359, 236, 69, 1805, 1908, 1919, 143, 644, 664, 1733, 1529, 2350, 1905, 226, 586, 2034, 244, 633, 137, 1296, 1482, 65, 1309, 1100, 76, 737, 1614, 1660, 416, 830, 2329, 514, 1701, 897, 767, 1802, 1505, 210, 621, 1977, 784]",1,6.0,10.0,260.0,0.6580502215657311,, -"[2002, 2001, 2003, 2162, 1080, 1127, 1314, 1259, 52, 1812, 890, 1628, 1880, 792, 1121, 391, 1558, 2054, 816, 68, 651, 1859, 2172, 1655, 1420, 1540, 456, 1139, 1085, 2309, 2396, 1434, 2228, 1421, 1980, 1697, 2027, 1992, 142, 1993, 668, 2232, 2057, 347, 504, 1848, 366, 471, 1979, 1416, 1131, 61, 1027, 423, 45, 2397, 519, 553, 2394, 438, 753, 1991, 2227, 1998, 1463, 826, 950, 1869, 878, 1013, 356, 2133, 408, 885, 718, 55, 215, 733, 476, 1704, 1709, 1994, 724, 95, 2024, 2116, 1529, 1990, 566, 1574, 
1399, 1671, 1495, 483, 2, 1453, 2009, 335, 973, 1269]",1,6.0,10.0,275.0,0.6185376661742984,, -"[1624, 2093, 88, 2316, 563, 1013, 2436, 289, 711, 1785, 279, 1778, 318, 384, 1645, 133, 1224, 484, 1705, 56, 643, 2050, 2095, 610, 2112, 1773, 693, 989, 1776, 1343, 102, 1131, 176, 2090, 1790, 1851, 836, 1337, 109, 2165, 656, 1842, 1507, 2016, 1849, 1781, 1193, 1789, 1538, 1133, 539, 2185, 1777, 1500, 747, 1780, 161, 573, 910, 1616, 556, 1767, 1998, 2030, 1661, 417, 255, 412, 2094, 660, 2045, 447, 2485, 153, 958, 1787, 1640, 1622, 1784, 2010, 1899, 1045, 1774, 608, 126, 2078, 887, 112, 1779, 2497, 388, 1805, 2092, 581, 719, 1561, 1367, 1572, 103, 459, 2046, 903, 476, 1783, 1266, 1402, 2080, 487, 1771, 139, 1775, 973, 1798, 1072, 2085, 1792, 842, 519, 350, 1772]",1,6.0,12.0,180.0,0.6292466765140325,, -"[2252, 1784, 2251, 1725, 2407, 945, 121, 2382, 562, 1358, 773, 2096, 643, 505, 1740, 1803, 1801, 1070, 682, 935, 1483, 1144, 1313, 1505, 1856, 406, 1258, 1623, 1777, 1158, 1998, 1448, 359, 814, 1733, 2598, 1583, 191, 1072, 980, 2086, 189, 2019, 2045, 1804, 244, 660, 2034, 563, 389, 1262, 661, 880, 18, 459, 1786, 1616, 1771, 958, 2094, 133, 608, 317, 20, 1871, 519, 484, 329, 2143, 2079, 2085, 573, 1785, 1986, 1142, 1572, 1574, 1776, 426, 1478, 836, 1781, 102, 118, 342, 2026, 1253, 589, 1635, 1251, 1337, 454, 2478, 416, 318, 973, 2093, 303, 2092, 387, 598, 1802, 1670, 2095, 581, 1772, 2153, 452, 1009, 610, 1778, 289, 1800, 2047, 859, 542, 417, 1773, 384, 126]",1,6.0,12.0,200.0,0.7259970457902511,, -"[1097, 2052, 470, 544, 1042, 1846, 2490, 1927, 1517, 1529, 1416, 593, 1115, 2116, 809, 880, 1152, 1218, 471, 74, 1850, 2263, 1841, 2357, 2024, 334, 651, 506, 1849, 1845, 1118, 1013, 1729, 1842, 2045, 1625, 242, 2485, 733, 675, 1649, 304, 1655, 1079, 203, 2025, 2110, 973, 1370, 778, 1525, 124, 2280, 2185, 1919, 2355, 94, 1289, 502, 1583, 1843, 779, 1847, 661, 1675, 838, 195, 1840, 2419, 1981, 1309, 530, 1464, 1966, 1844, 1848, 181, 908, 1293, 1403, 1521, 1644, 1930, 32, 666, 1914, 
1110, 1120, 325, 88, 1732, 1358, 1652, 1592, 376, 693, 1526, 69, 1376, 2423, 2385, 1402, 2274, 2152, 1538, 2295, 279, 1566, 1622, 1421, 1973, 1203, 2109, 925, 1222, 1127, 1995, 2156, 366, 1341, 934, 2282, 510, 2276, 553, 1661, 748, 1121, 1879, 586, 1500, 836, 1133, 1909, 1676, 1535, 270, 1851, 1975, 280, 490, 2281, 682, 2155, 2293, 702, 826, 1224, 1977, 2165, 118, 2153, 1131, 868, 2105, 1463, 436, 1270, 2425, 2151, 993, 539, 2344, 1143, 1215, 2099, 749, 93, 578, 2294, 324, 2154, 1195, 1587, 1660, 1291, 1058, 2283, 988, 2426]",1,6.0,18.0,180.0,0.6048744460856721,, -"[2077, 2075, 14, 2210, 22, 1809, 1618, 2238, 1558, 790, 673, 1968, 864, 151, 1365, 1808, 1015, 1812, 995, 1993, 1870, 1889, 743, 2004, 2002, 160, 1806, 1234, 1697, 1967, 1574, 2555, 215, 2, 1023, 179, 71, 2605, 201, 232, 88, 2404, 2199, 901, 1669, 104, 2385, 2091, 39, 1702, 180, 1174, 1502, 1709, 443, 1875, 2163, 2155, 2041, 1814, 2000, 1488, 74, 256, 885, 586, 1846, 1906, 544, 1986, 2034, 576, 2003, 1417, 566, 1224, 1987, 2009, 1996, 1969, 1010, 2005, 68, 1185, 1964, 1338, 1460, 792, 1279, 745, 2103, 574, 397, 519, 899, 1873, 306, 1703, 411, 1348, 962, 1218, 1995, 1989, 1020, 362, 2319, 1807, 963, 2680, 2076, 789, 463, 38, 1493, 1166, 1699, 1992, 2348, 1810, 2102, 1498, 2044, 1988, 366, 476, 1876, 1999, 460, 391, 281, 2001, 1704, 1434, 1994, 429, 1453, 1859, 1569, 465, 2211, 1998, 2668, 756, 335, 1997, 968, 681, 482, 1633, 1971, 2233, 1990, 25, 2356, 1127, 2006, 1912, 481, 1324, 2007, 1991, 1970, 2424, 456, 1966, 1149, 2230, 1065, 469, 2357, 1665, 352, 1682, 1294, 486, 86, 2590, 591, 2186]",1,6.0,18.0,275.0,0.662850812407681,, -"[2380, 2063, 157, 2123, 2283, 2176, 285, 1325, 503, 574, 2543, 1502, 827, 91, 2301, 1820, 2381, 1739, 1999, 2034, 316, 1382, 2025, 482, 1295, 598, 1453, 298, 818, 24, 1014, 1701, 2001, 327, 1171, 1583, 1858, 733, 2335, 1346, 1086, 1026, 1810, 218, 267, 2378, 2338, 2004, 269, 2000, 180, 2163, 1870, 230, 2238, 2247, 1993, 1127, 1, 935, 2008, 1991, 1000, 197, 1894, 151, 879, 2044, 
1446, 1666, 429, 1875, 985, 1119, 1873, 2348, 1434, 2340, 849, 2121, 2325, 391, 968, 160, 2412, 1023, 1864, 2339, 869, 1996, 2379, 1859, 314, 1987, 1558, 570, 383, 2003, 335, 1488, 1358, 2, 1876, 1812, 306, 109, 1603, 2568, 201, 2064, 1095, 2532, 1781, 1986, 445, 2009, 553, 297, 2555, 1055, 972, 191, 2667, 1738, 2002, 215, 1709, 493, 231, 416, 681, 1191, 1670, 2509, 185, 1445, 2365, 899, 68, 1149, 1995, 1990, 2005, 2236, 2303, 2045, 179, 1454, 232, 366, 1697, 1998, 1992, 1589, 2243, 792, 277, 603, 2007, 519, 2430, 460, 332, 1185, 1704, 1912, 45, 1997, 2668, 1988, 1994, 101, 2326, 1166, 745, 566, 1574, 2244, 2077, 695, 1197, 673, 8, 1989, 743, 281, 387, 396, 2253, 1522, 1303, 2613, 71, 476, 2582, 169, 2006, 1066, 206, 1907]",1,6.0,20.0,215.0,0.6499261447562777,, -"[160, 255, 1343, 1507, 1851, 1791, 1158, 1986, 2046, 351, 2112, 1335, 1029, 695, 1505, 1012, 27, 2166, 2000, 1253, 2485, 2048, 1788, 1402, 441, 224, 1699, 1789, 481, 1538, 859, 2026, 476, 1849, 1810, 1483, 267, 948, 1773, 230, 2190, 447, 446, 302, 2030, 56, 2086, 1917, 2081, 1584, 426, 191, 407, 1616, 1045, 1144, 487, 317, 412, 121, 1780, 1245, 1311, 1690, 2088, 1792, 2165, 1009, 482, 1805, 2050, 112, 1777, 1013, 1770, 2022, 384, 118, 2385, 1367, 308, 1998, 382, 2094, 13, 2319, 554, 1856, 1651, 2087, 603, 573, 459, 1677, 1661, 109, 1283, 1705, 2096, 139, 1797, 452, 2010, 1790, 484, 215, 1193, 1781, 1640, 539, 2188, 1089, 409, 2107, 1072, 1681, 2084, 1775, 1551, 1248, 36, 303, 1830, 2189, 399, 454, 1837, 2143, 960, 638, 406, 1623, 1785, 314, 1787, 102, 134, 519, 2505, 2080, 2034, 2093, 1624, 2092, 644, 589, 289, 1798, 1768, 124, 1701, 2047, 1196, 610, 556, 655, 660, 2325, 1282, 1552, 306, 719, 1802, 2320, 2045, 980, 1656, 1258, 1561, 2095, 318, 958, 1774, 973, 581, 2090, 1852, 563, 1735, 542, 1337, 1799, 176, 1769, 236, 1782, 699, 1346, 417, 661, 505, 1772, 1670, 2324, 1779, 1804, 294, 1786, 103, 153]",1,6.0,20.0,245.0,0.681314623338257,, -"[1420, 1042, 2396, 1856, 1421, 2197, 490, 2051, 436, 2048, 842, 
603, 2109, 2480, 2046, 2134, 2181, 2200, 2282, 2052, 1580, 2228, 1330, 41, 2010, 1079, 921, 504, 2054, 1616, 1602, 175, 1396, 118, 1915, 734, 2132, 1485, 736, 1085, 1248, 1359, 126, 2199, 2395, 139, 153, 1973, 1772, 98, 2047, 1905, 88, 2026, 1701, 1628, 1655, 456, 2045, 596, 2056, 2074, 2016, 563, 2278, 2653, 868, 236, 133, 1914, 779, 447, 2281, 412, 666, 724, 1674, 838, 1539, 306, 2276, 2068, 242, 1652, 2405, 1919, 778, 2422, 1898, 1448, 2310, 955, 1337, 32, 1521, 1852, 1174, 318, 1525, 1676, 215, 2078, 699, 1133, 847, 1791, 600, 1416, 1131, 816, 2394, 763, 1917, 2419, 1916, 1830, 1651, 1909, 1309, 2485, 830, 1732, 702, 1979, 56, 907, 1784, 1482, 308, 95, 1980, 109, 1920, 2117, 604, 1972, 1399, 2201, 2183, 1926, 2357, 1121, 1303, 2182, 708, 2194, 1487, 408, 1867, 1849, 1625, 1538, 861, 1787, 1362, 1683, 712, 2397, 1351, 1841, 1776, 1592, 1846, 1203, 341, 768, 1343, 1224, 454, 415, 2013, 1838, 2388, 1535, 519, 1848, 1851, 1844, 65, 1918, 1358, 1850, 973, 2025, 1847, 440, 120, 1842, 693, 2015, 1675, 755, 850, 1572, 1074, 1171, 1566, 725, 945, 2059]",1,6.0,20.0,260.0,0.6133677991137371,, -"[144, 1593, 2192, 145, 213, 495, 1165, 537, 108, 1328, 1327, 2209, 2622, 1647, 2161, 1698, 2160, 1504, 2159, 2157, 1835, 23, 898, 2158, 1836, 92, 2157, 144, 1593, 1165, 145, 2192, 537, 2209, 2160, 1504, 495, 213, 108, 2161, 1836, 144, 1593, 1165, 145, 2192, 23, 537, 2209, 1504, 1327, 495, 2158, 108, 2161, 2159, 1698, 1647, 1328, 2157, 1836, 144, 1593, 1165, 145, 2192, 23, 537, 2209, 1504, 1327, 495, 2158, 108, 2161, 2159, 1698, 1647, 1328, 2157, 1836, 144, 1593, 1165, 145, 2192, 23, 537, 2209, 1504, 1327, 495, 2158, 108, 2161, 2159, 1698, 1647, 1328, 2157, 1836, 144, 1593, 1165, 145, 2192, 23, 537, 2209, 1504, 1327, 495, 2158, 108, 2161, 2159, 1698, 1647, 1328, 2157, 1836, 144, 1593, 1165, 145, 2192, 23, 537, 2209, 1504, 1327, 495, 2158, 108, 2161, 2159, 1698, 1647, 1328, 2157, 1836, 144, 1593, 1165, 145, 2192, 23, 537, 2209, 1504, 1327, 495, 2158, 108, 2161, 2159, 1698, 1647, 
1328, 2157, 1836, 144, 1593, 1165, 145, 2192, 23, 537, 2209, 1504, 1327, 495, 2158, 108, 2161, 2159, 1698, 1647, 1328, 2157, 1836, 144, 1593, 1165, 145, 2192, 23, 537, 2209, 1504, 1327, 495, 2158, 108, 2161, 2159, 1698, 1647, 1328, 2157]",0,6.0,20.0,275.0,0.087149187592319,, -"[2268, 1020, 486, 90, 2077, 383, 1894, 1160, 2291, 2034, 14, 78, 972, 665, 429, 1329, 156, 401, 2075, 1739, 155, 49, 218, 794, 435, 174, 1265, 180, 48, 2076, 598, 377, 2380, 1450, 2477, 53, 111, 228, 652, 2045, 994, 553, 2338, 2122, 678, 1583, 316, 2555, 2247, 277, 1864, 1303, 869, 2035, 2379, 157, 1912, 2123, 306, 503, 1127, 185, 1738, 2667, 73, 2246, 297, 2176, 2326, 1682, 1445, 1149, 2044, 215, 759, 1083, 1185, 91, 1294, 1446, 1875, 154, 2668, 1086, 818, 2365, 1662, 197, 862, 72, 2302, 2003, 2412, 1704, 88, 2006, 1062, 733, 332, 1358, 985, 2615, 387, 1171, 482, 1781, 1219, 2011, 2283, 1295, 2243, 1810, 574, 2025, 1817, 1239, 1570, 2121, 416, 836, 2381, 1382, 603, 2001, 607, 1055, 1709, 1192, 1140, 746, 1818, 1670, 1989, 1820, 285, 335, 1666, 109, 1859, 321, 1589, 1488, 173, 2509, 206, 1995, 1701, 1, 1346, 1999, 418, 827, 1907, 519, 1870, 10, 2541, 1992, 366, 445, 695, 34, 1812, 231, 1000, 2348, 792, 230, 2002, 681, 743, 476, 24, 899, 2004, 1998, 170, 101, 2543, 30, 94, 2304, 232, 968, 745, 2009, 2691, 1014, 298, 2303, 2339, 2244, 160, 1191, 673, 201, 1197, 1502, 59, 2301, 1119, 654, 1997, 1558, 1873, 1858, 2550, 71, 191, 1434, 1987, 2340, 1522, 281, 2378, 1095, 179, 566, 2253, 1697, 606, 460, 2008, 45, 879, 1325, 269, 1876, 391, 1453, 2335, 169, 2, 1603, 1986, 849, 1347, 2613, 935, 68, 2582, 2000, 267, 1023, 2238, 1326, 314, 1994, 1993, 1166]",1,6.0,25.0,275.0,0.6683899556868538,, -"[1127, 745, 366, 899, 1023, 1870, 743, 968, 232, 1166, 160, 1453, 1876, 215, 1558, 68, 460, 1434, 792, 1704, 391, 1986, 1574, 1875, 1697, 519, 1780, 332, 1624, 1873, 179, 2, 1623, 1702, 1224, 201, 665, 1788, 1535, 1703, 1505, 586, 45, 1773, 335, 476, 1149, 71, 1709, 74, 588, 2002, 196, 2120, 2003, 442, 698, 
2153, 1859, 1118, 1789, 2293, 1812, 1095, 681, 673, 566, 2122, 1785, 1131, 280, 286, 1970, 2040, 1307, 33, 487, 1045, 2121, 2383, 2382, 2379, 1051, 429, 2155, 1042, 2238, 1015, 814, 2123, 2001, 1196, 627, 922, 2044, 943, 1481, 2380, 1618, 1367]",1,8.0,10.0,180.0,0.6237075332348597,, -"[18, 530, 2407, 603, 1248, 1197, 2145, 1780, 1616, 1258, 695, 935, 1142, 1791, 880, 215, 1013, 643, 2016, 10, 682, 118, 252, 1788, 1505, 1253, 1778, 1313, 115, 1583, 1776, 454, 1790, 973, 303, 1851, 660, 1786, 505, 2082, 661, 741, 831, 36, 573, 191, 484, 239, 139, 441, 1986, 671, 1681, 1773, 581, 1701, 630, 2326, 2598, 2106, 1998, 1623, 1358, 476, 836, 1661, 384, 1651, 2094, 406, 1624, 1800, 2288, 2287, 1771, 1670, 1015, 699, 1448, 1367, 1775, 2078, 1072, 452, 1781, 887, 409, 308, 2143, 350, 1852, 910, 1784, 1572, 231, 2047, 2080, 2034, 135, 2045]",1,8.0,10.0,215.0,0.6502954209748892,, -"[1131, 1358, 1133, 1224, 1483, 1733, 524, 989, 1070, 1919, 1740, 1849, 1505, 773, 2491, 1899, 415, 2050, 2062, 2436, 1725, 1804, 1360, 1500, 1975, 161, 2185, 1802, 1525, 244, 1226, 895, 725, 711, 1904, 2207, 447, 331, 1800, 1503, 56, 189, 412, 779, 778, 389, 2235, 458, 1805, 1395, 693, 836, 417, 701, 1555, 1799, 1402, 1914, 1801, 440, 807, 539, 1926, 1072, 500, 1478, 1645, 891, 1953, 1262, 1960, 1538, 682, 306, 1013, 1370, 2153, 88, 1583, 958, 1616, 1851, 647, 661, 979, 643, 2241, 2485, 676, 69, 1798, 1902, 388, 1470, 2450, 252, 847, 1507, 236, 842, 342, 1784, 1842, 2112, 2323, 1955, 645, 109, 1956, 1527, 344, 2010, 2189, 1896, 249, 1829, 193, 1885, 801, 854]",1,8.0,12.0,275.0,0.6816838995568686,, -"[2160, 2157, 108, 1647, 1698, 2159, 1835, 898, 2161, 2209, 2158, 537, 1328, 1504, 92, 23, 1836, 1165, 1593, 2622, 145, 2192, 1327, 495, 213, 144, 2209, 1504, 108, 1836, 92, 2159, 2157, 1835, 2161, 898, 2192, 2209, 1504, 108, 1836, 92, 2159, 2157, 1835, 1647, 898, 2160, 2161, 2622, 1698, 1165, 1328, 1327, 2192, 2209, 1504, 108, 1836, 92, 2159, 2157, 1835, 1647, 898, 2160, 2161, 2622, 1698, 1165, 1328, 1327, 
2192, 2209, 1504, 108, 1836, 92, 2159, 2157, 1835, 1647, 898, 2160, 2161, 2622, 1698, 1165, 1328, 1327, 2192, 2209, 1504, 108, 1836, 92, 2159, 2157, 1835, 1647, 898, 2160, 2161, 2622, 1698, 1165, 1328, 1327, 2192, 2209, 1504, 108, 1836, 92, 2159, 2157, 1835, 1647, 898, 2160, 2161, 2622, 1698, 1165, 1328, 1327, 2192, 2209, 1504, 108, 1836, 92, 2159, 2157, 1835, 1647, 898, 2160, 2161, 2622, 1698, 1165, 1328, 1327, 2192, 2209, 1504, 108, 1836, 92, 2159, 2157, 1835, 1647, 898, 2160, 2161, 2622, 1698, 1165, 1328, 1327, 2192, 2209, 1504, 108, 1836, 92, 2159, 2157, 1835, 1647, 898, 2160, 2161, 2622, 1698, 1165, 1328, 1327]",0,8.0,18.0,180.0,0.087149187592319,, -"[2075, 2091, 14, 2004, 2076, 1989, 1966, 1649, 1020, 1558, 1987, 1990, 1434, 1992, 1994, 1988, 1995, 2000, 460, 2357, 2355, 1996, 2009, 745, 2490, 2001, 792, 2263, 160, 2668, 195, 2667, 743, 1875, 1166, 2002, 2003, 2007, 1873, 486, 1127, 519, 1998, 94, 1574, 1697, 1876, 218, 1701, 1023, 1820, 781, 1986, 1453, 366, 482, 391, 1870, 1859, 1812, 899, 68, 215, 230, 2006, 673, 201, 335, 1704, 566, 1999, 232, 1095, 1907, 549, 790, 1997, 1814, 1819, 277, 2005, 1665, 2230, 1809, 1299, 435, 1581, 1806, 1822, 1807, 205, 1811, 13, 586, 1821, 1576, 511, 1149, 934, 2394, 675, 2541, 1671, 716, 1331, 1396, 1993, 2291, 968, 1348, 1929, 1614, 481, 2, 1906, 465, 199, 1079, 383, 71, 420, 151, 2304, 962, 1709, 55, 2008, 1160, 1399, 476, 179, 1991, 1121, 2268, 681, 1382, 2301, 1810, 1501, 2303, 210, 1268, 1869, 162, 401, 21, 1043, 1265, 1654, 835, 45, 1682, 2309, 1418, 1203, 461, 377, 27, 78, 1818, 1021, 1004, 313, 794, 2227, 568, 1062, 2296, 1097, 576, 1192, 351, 733, 1905, 1808, 1219, 678, 164, 994, 1626]",1,8.0,18.0,200.0,0.6163220088626292,, -"[1388, 1919, 2154, 1538, 738, 1587, 146, 257, 1927, 490, 2059, 1500, 1080, 1839, 1133, 697, 1909, 1291, 510, 836, 1370, 1309, 276, 55, 1463, 778, 779, 2162, 52, 1841, 2181, 2266, 736, 109, 2197, 1660, 838, 1975, 1089, 2152, 1966, 438, 1079, 434, 133, 1655, 1158, 2575, 2310, 1110, 861, 1623, 
1013, 1914, 195, 1131, 2499, 1695, 1583, 1525, 242, 1467, 734, 702, 2180, 2025, 1851, 964, 80, 771, 718, 2472, 2202, 1121, 604, 2151, 1923, 61, 1224, 1330, 305, 1222, 1152, 1395, 2355, 1008, 437, 30, 1117, 149, 2490, 1156, 2357, 1848, 1769, 1218, 1924, 1333, 2053, 2133, 667, 94, 1293, 408, 1789, 239, 2131, 2364, 1680, 2232, 415, 1204, 693, 454, 733, 1468, 334, 2295, 399, 1215, 1682, 2021, 2312, 934, 2576, 2418, 2263, 2343, 1139, 1635, 2261, 1351, 2056, 1649, 711, 95, 847, 1732, 1275, 2172, 2183, 1285, 1920, 142, 2189, 2422, 1358, 751, 69, 2182, 675, 1618, 315, 2311, 749, 436, 682, 2074, 456, 274, 993, 763, 2107, 572, 2049, 2156, 525, 1474, 965, 2383, 1515, 1341, 1203, 1425, 586, 2072, 347, 65, 429, 1526]",1,8.0,18.0,275.0,0.621861152141802,, -"[1178, 1529, 1729, 1840, 161, 60, 2024, 908, 2045, 1293, 842, 516, 661, 973, 1625, 1120, 2071, 880, 733, 1358, 506, 1464, 860, 181, 25, 1566, 1849, 1487, 55, 1403, 118, 2156, 1583, 1979, 1908, 277, 1846, 897, 1882, 1013, 1842, 2011, 2450, 211, 1914, 2016, 436, 1683, 836, 252, 160, 2137, 356, 1110, 2010, 682, 748, 334, 1527, 2178, 1986, 745, 510, 2153, 1661, 1344, 1482, 1851, 1131, 1494, 2103, 1850, 1844, 1630, 1474, 1269, 1732, 69, 323, 779, 514, 1919, 2018, 1570, 1338, 778, 2017, 1675, 272, 1912, 530, 1845, 2102, 2015, 1501, 593, 1713, 1309, 498, 743, 525, 1843, 1394, 1741, 1525, 571, 2154, 2182, 1668, 1521, 737, 32, 2593, 2227, 2467, 1982, 490, 995, 815, 1841, 1652, 1848, 331, 415, 1644, 1677, 2485, 2155, 1660, 1370, 61, 604, 1074, 76, 1898, 1696, 1676, 191, 666, 2274, 1291, 2012, 162, 696, 2136, 1587, 1174, 1351, 966, 851, 437, 1973, 1288, 1268, 204, 2014, 1974, 1239, 1658, 634, 818, 2228, 586, 1224, 1667, 1526, 822, 1847, 724, 1637, 809, 88, 130, 471, 1975, 300, 1333, 2110, 1085, 2109, 1399, 504, 2034, 1341, 2152, 1222, 1976, 841, 2395, 2309, 1027, 2394, 2397, 1420, 2396, 1421, 600, 2151, 2013, 1396]",1,8.0,20.0,200.0,0.5985967503692762,, -"[1772, 38, 1805, 2291, 2045, 1682, 1099, 863, 133, 862, 1334, 794, 1574, 2004, 
563, 519, 426, 2210, 1704, 1265, 2303, 767, 236, 318, 1873, 1994, 1054, 2302, 153, 306, 566, 1337, 2093, 1346, 1045, 1055, 1697, 2094, 2006, 1859, 377, 1542, 1987, 2268, 2487, 745, 383, 2301, 429, 1624, 2488, 1876, 1779, 484, 681, 1577, 481, 1998, 2092, 109, 1789, 899, 678, 1995, 1160, 610, 1787, 54, 743, 1329, 2009, 215, 1820, 2005, 124, 2008, 85, 1505, 2007, 459, 45, 160, 1997, 366, 1989, 1785, 1990, 1465, 1127, 1875, 2035, 733, 864, 460, 2000, 1986, 476, 1294, 104, 2002, 1699, 719, 335, 391, 2601, 1166, 1477, 1769, 289, 1996, 1488, 1192, 102, 1812, 1221, 1434, 218, 1692, 1870, 1453, 330, 68, 482, 2034, 18, 2001, 2335, 151, 1512, 2356, 232, 673, 2380, 1095, 1814, 1988, 201, 1999, 1348, 1991, 1062, 698, 588, 91, 792, 790, 1661, 1992, 95, 1149, 1023, 549, 286, 442, 2145, 2383, 1993, 2304, 1808, 176, 2095, 2119, 1121, 893, 1098, 962, 968, 2609, 230, 1810, 1709, 835, 2211, 627, 2348, 511, 71, 2, 179, 861, 1, 795, 1819, 911, 1454, 2238, 2329, 1809, 2121, 843, 13, 2330, 2203, 1581, 351, 1051, 1004, 27, 1822, 2555]",1,8.0,20.0,260.0,0.6299852289512555,, -"[156, 613, 1560, 341, 1733, 603, 337, 484, 505, 1248, 708, 880, 155, 609, 687, 189, 90, 671, 303, 2106, 1791, 228, 980, 215, 2096, 326, 1725, 59, 246, 616, 689, 2153, 831, 232, 346, 630, 252, 154, 179, 524, 18, 197, 626, 2107, 935, 551, 1740, 773, 68, 1262, 1852, 552, 173, 580, 36, 1197, 342, 1258, 945, 1013, 333, 1070, 441, 72, 364, 1776, 1986, 1358, 2019, 73, 2026, 741, 573, 1142, 2145, 1009, 1564, 1505, 277, 973, 191, 2143, 1767, 808, 836, 2326, 1144, 1801, 1701, 684, 231, 1786, 2329, 1367, 1583, 1810, 103, 1651, 1856, 1788, 719, 1775, 589, 454, 530, 661, 244, 1072, 1851, 2093, 542, 1871, 1781, 501, 101, 695, 1782, 1783, 2046, 2082, 1830, 2217, 1868, 1670, 1798, 2094, 174, 2034, 34, 1448, 686, 2045, 1661, 489, 139, 1778, 2080, 1773, 53, 645, 1804, 2288, 1251, 111, 1998, 2095, 1015, 1270, 1681, 487, 2287, 1803, 1779, 1784, 357, 1478, 1572, 1289, 1245, 1800, 1772, 1640, 1799, 910, 399, 138, 660, 1705, 608, 2085, 2086, 
1089, 1787, 958, 682, 114, 170, 115, 2078, 2016, 1770, 1780, 1584, 417, 581, 1483, 350, 135, 476, 826, 126, 466, 655, 2504, 1623, 1789, 329, 2318, 30, 20]",1,8.0,20.0,275.0,0.7031019202363368,, -"[698, 588, 1127, 745, 665, 2000, 160, 2040, 286, 2004, 1787, 2120, 332, 133, 1772, 2009, 1337, 1522, 1705, 924, 2008, 743, 1051, 2383, 476, 673, 1346, 792, 1558, 1166, 1995, 1532, 2007, 681, 1777, 71, 1859, 335, 2003, 1661, 1778, 1805, 1453, 1574, 2045, 1697, 366, 232, 306, 215, 2122, 1095, 426, 442, 566, 2001, 1987, 563, 2005, 2006, 519, 2034, 1992, 124, 1986, 176, 1993, 2, 1810, 1789, 1701, 1991, 201, 277, 45, 1780, 91, 1907, 1812, 2259, 1015, 409, 109, 638, 391, 1624, 236, 733, 39, 1989, 2002, 1875, 460, 899, 1519, 153, 1998, 1876, 1779, 808, 289, 68, 2093, 1870, 1873, 1474, 1481, 2335, 318, 1505, 1788, 1023, 2362, 1045, 1999, 1709, 887, 1385, 384, 1988, 179, 196, 1434, 1196, 2357, 1996, 2092, 1149, 1990, 589, 1569, 968, 1994, 1965, 1704, 1349, 1634, 1908, 593, 1997, 610, 94, 459, 1097, 1964, 1419, 1786, 210, 1529, 1906, 1785, 470, 1966, 2509, 271, 627, 102, 935, 1702, 1010, 1115, 1869, 1178, 2336, 1971, 443, 1365, 2094, 1968, 205, 1671, 2095, 151, 203, 2350, 1336, 854, 22, 2309, 1417, 857, 317, 1452, 1234, 1984, 1703, 463, 759, 1769, 1141, 1240, 1970, 529, 809, 706, 963, 2238, 2237, 2401, 420, 2519, 2399, 2260, 1266, 1793, 199, 375, 2294, 2276, 465, 362, 946, 43, 1884, 2435, 789, 2236, 761, 798, 1381, 1443, 2059, 1369, 1530, 340, 2434, 2645, 29, 805, 1969, 2463, 2548, 1967, 344, 2547, 1094, 584, 258, 152, 2239, 2155, 1653, 2299, 488, 1401, 2240, 89, 2496, 2297, 2400]",1,8.0,25.0,275.0,0.6683899556868538,, -"[1979, 2405, 816, 1152, 1980, 1927, 1729, 885, 1358, 593, 2293, 878, 1846, 985, 1535, 1039, 1351, 835, 2403, 120, 1843, 471, 1840, 280, 483, 11, 897, 1986, 1675, 2305, 2309, 1625, 873, 539, 519, 1810, 708, 2295, 211, 239, 1149, 2135, 1849, 1930, 1143, 69, 415, 1928, 1309, 1529, 1842, 1847, 1839, 2136, 1732, 1572, 749, 334, 74, 738, 530, 2282, 1708, 1909, 1118, 1463, 
2045, 2155, 1115, 1616, 461, 1097, 908, 30, 49, 747, 1999, 1053, 836, 1013, 643, 436, 2281, 1121, 470, 1382, 2117, 1494, 1701, 1661, 1110, 181, 1333, 1464, 809, 866, 1432, 1370, 1644, 2034]",1,10.0,10.0,245.0,0.6702363367799113,, -"[81, 1421, 1570, 1420, 1487, 1399, 527, 748, 1396, 2395, 1566, 745, 571, 1986, 1224, 1085, 2049, 1358, 1841, 667, 160, 161, 2396, 514, 661, 471, 2137, 2227, 724, 1732, 842, 734, 2056, 2228, 118, 2071, 504, 743, 423, 2394, 600, 2001, 356, 151, 25, 277, 1344, 696, 347, 55, 2017, 323, 1013, 973, 2180, 315, 1882, 266, 437, 1999, 95, 408, 2010, 2013, 436, 142, 1844, 456, 272, 1713, 2467, 498, 1869, 718, 1628, 1417, 2016, 1982, 1696, 211, 857, 2350, 854, 1139, 2232, 634, 963, 2054, 1984, 1630, 1912, 2011, 2018, 868, 60, 1269, 1979, 815, 1178, 2133, 1741, 1898, 1652, 1027, 1838, 2229, 2012, 1239, 2181, 733, 1266, 2102, 52, 995, 516, 2015, 203, 2183, 2103, 2397, 2131, 1097, 2172, 1625, 544, 2276, 1494, 1338, 1529, 1394, 162, 809, 1385, 1644, 470, 1843, 130, 2335, 1667, 627, 1540, 2336, 1174, 1519, 525, 76, 2259, 2182, 1481, 1850, 2034, 1658, 2261, 2057, 1309, 88, 897, 1288, 860, 858, 841, 2178, 1847, 2593, 1268, 490, 1527, 737, 2231, 2233, 2072, 2450, 666, 61, 239, 1974, 1908, 2274, 1668, 32]",1,10.0,18.0,200.0,0.6318316100443131,, -"[1567, 722, 2501, 2500, 176, 179, 109, 2582, 2314, 417, 873, 1708, 197, 102, 1013, 2379, 306, 962, 926, 1894, 2117, 1810, 519, 1072, 603, 1106, 1874, 2139, 1986, 316, 2573, 2045, 0, 17, 2034, 1862, 465, 1107, 766, 1097, 551, 2357, 1946, 1871, 199, 598, 277, 1945, 1479, 420, 767, 1815, 2141, 228, 1846, 1172, 261, 1171, 1869, 1299, 304, 1873, 231, 1944, 1337, 1875, 1877, 1331, 877, 1799, 311, 1947, 1581, 1465, 1870, 1413, 1820, 2243, 1636, 1857, 1868, 1856, 215, 899, 1272, 467, 13, 387, 1665, 1453, 563, 1935, 1867, 318, 875, 1950, 1859, 24, 1300, 335, 1023, 1876, 143, 402, 2430, 14, 251, 297, 812, 1865, 1864, 1855, 201, 1852, 1948, 203, 570, 2309, 2568, 1340, 1936, 1177, 1100, 776, 1488, 1301, 1940, 1176, 935, 872, 
232, 869, 416, 1335, 2096, 699, 343, 1942, 547, 1147, 157, 960, 338, 310, 633, 1615, 1858, 1941, 1931, 352, 892, 219, 2217, 158, 605, 1432, 150, 990, 1440, 1241, 1605, 784, 2247, 1943, 205, 1866, 2327, 1863, 1542, 1055, 1334, 1054, 54, 1701, 2579, 2609, 411, 1096, 2493, 469]",1,10.0,18.0,215.0,0.593426883308715,, -"[2455, 26, 99, 122, 123, 2454, 2604, 127, 127, 2604, 99, 123, 26, 122, 2454, 2455, 127, 2604, 99, 123, 26, 122, 2454, 2455, 127, 2604, 99, 123, 26, 122, 2454, 2455, 127, 2604, 99, 123, 26, 122, 2454, 2455, 127, 2604, 99, 123, 26, 122, 2454, 2455, 127, 2604, 99, 123, 26, 122, 2454, 2455, 127, 2604, 99, 123, 26, 122, 2454, 2455, 127, 2604, 99, 123, 26, 122, 2454, 2455, 127, 2604, 99, 123, 26, 122, 2454, 2455]",0,10.0,18.0,245.0,0.087149187592319,, -"[1583, 792, 1558, 1998, 1120, 1989, 2026, 519, 2000, 149, 1667, 1669, 483, 816, 705, 476, 1977, 1981, 1984, 1991, 1988, 1493, 523, 1110, 1127, 196, 1995, 572, 1987, 1453, 1385, 566, 1980, 179, 2326, 1859, 460, 366, 1814, 2025, 1982, 1979, 192, 2178, 2005, 2004, 1889, 2007, 1820, 1475, 160, 498, 1873, 478, 55, 1807, 1666, 1709, 1997, 1481, 1993, 424, 1876, 1574, 68, 899, 827, 745, 1357, 1816, 1978, 2006, 2009, 1999, 1704, 1662, 2, 391, 429, 436, 651, 673, 1813, 232, 218, 1986, 1697, 482, 1131, 743, 38, 1166, 1046, 1494, 33, 2336, 627, 962, 2122, 2156, 863, 1994, 671, 2381, 1082, 1519, 2024, 2335, 1812, 1332, 2259, 1809, 1819, 1017, 1018, 668, 1121, 2121, 1581, 332, 2386, 299, 1811, 1787, 1039, 383, 1808, 1474, 215, 2023, 2041, 1618, 86, 2472, 794, 2120, 1062, 1160, 2378, 1822, 1821, 878, 201, 1990, 1042, 1810, 1875, 1996, 335, 230, 862, 790, 1348, 1095, 377, 2291, 2268, 549, 835, 2043, 151, 1149, 1192, 716, 2301, 2044, 885, 1434, 678, 511, 91, 1023, 2008, 733, 1265, 1817, 435, 1815, 94, 2304]",1,10.0,18.0,275.0,0.689807976366322,, -"[2046, 563, 1616, 447, 126, 1787, 306, 236, 2026, 897, 56, 2048, 139, 894, 153, 1776, 1337, 1784, 1651, 1248, 1487, 318, 133, 1701, 109, 666, 1867, 1791, 412, 1772, 2078, 1572, 1856, 
191, 408, 1849, 603, 308, 734, 2056, 88, 699, 2419, 2276, 1494, 2045, 2117, 415, 1309, 2025, 2034, 736, 973, 1845, 519, 868, 2309, 2357, 1224, 1448, 118, 65, 2178, 1846, 215, 1358, 1110, 1852, 1850, 1841, 1909, 553, 2295, 1652, 95, 1568, 454, 1914, 1882, 498, 341, 382, 604, 1410, 1676, 1127, 1848, 1894, 366, 1995, 1847, 1131, 461, 1844, 2394, 1583, 2013, 2094, 2418, 708, 2185, 712, 1920, 1351, 1359, 2189, 2155, 1661, 693, 1644, 1074, 1851, 850, 1918, 1215, 1843, 1171, 1120, 1926, 1675, 69, 270, 1729, 1133, 668, 262, 1376, 325, 1121, 885, 1500, 1464, 2485, 32, 836, 1039, 1343, 149, 1625, 1293, 506, 878, 436, 2071, 1830, 2317, 2282, 334, 1978, 376, 1482, 1977, 880, 1982, 465, 2047, 1403, 1203, 657, 424, 2135, 1538, 98, 1981, 1505, 1979, 1683, 1352, 433, 510, 1840, 2281, 1377, 530, 1332, 2194, 2136, 772, 1655, 478, 514, 1980, 1521, 2164, 181, 2384, 11, 1013, 908, 816, 483, 1842, 1839, 2403, 643, 1314, 120, 2405, 471, 747]",1,10.0,20.0,200.0,0.6820531757754801,, -"[1867, 1452, 2117, 1264, 1012, 1481, 1701, 248, 1970, 1957, 1969, 1838, 751, 1531, 2319, 695, 1330, 1417, 1013, 2238, 469, 715, 948, 1965, 2233, 963, 22, 1846, 2207, 2262, 1204, 1618, 1634, 1522, 1968, 2541, 2385, 2034, 698, 1789, 1349, 1532, 1602, 39, 2292, 1303, 1425, 1278, 2305, 979, 1050, 1015, 407, 1143, 789, 429, 2306, 1703, 1851, 1583, 2321, 450, 2304, 1682, 1702, 1810, 1848, 1964, 443, 1240, 863, 994, 481, 38, 2152, 2667, 2291, 1065, 1042, 962, 2668, 1219, 1234, 1503, 486, 524, 1818, 1986, 1788, 1569, 1704, 1416, 2154, 78, 309, 1348, 1370, 1906, 88, 482, 1967, 1365, 1096, 2303, 352, 1919, 2295, 104, 1068, 779, 1385, 2076, 2211, 778, 1660, 1291, 2035, 792, 1535, 1587, 1020, 1655, 1732, 1329, 1912, 1519, 280, 1118, 465, 463, 2268, 1110, 1062, 1590, 2320, 1971, 45, 1160, 1975, 2293, 1131, 1152, 2189, 794, 1294, 2623, 2011, 1927, 1817, 2419, 11, 383, 1839, 2008, 401, 2052, 862, 1525, 25, 1324, 1224, 2357, 2155, 435, 682, 946, 1265, 1966, 74, 544, 1692, 2263, 539, 1621, 2153, 759, 2453, 377, 1010, 218, 
1222, 1526, 1192, 2151, 1218, 2302, 2301, 586, 733, 1341, 1517, 2355, 749, 1203, 2116, 1079, 2356, 2091, 993, 2424]",1,10.0,20.0,215.0,0.672821270310192,, -"[1708, 2314, 1945, 417, 1272, 873, 1072, 1331, 17, 199, 1815, 2034, 766, 1159, 2117, 1947, 1465, 1810, 2534, 306, 109, 603, 420, 1946, 981, 180, 2609, 1894, 2217, 914, 1124, 892, 962, 767, 54, 1107, 1488, 519, 1944, 2096, 102, 2139, 1172, 927, 669, 1171, 2045, 1799, 1335, 1821, 311, 14, 387, 467, 1297, 845, 935, 1315, 968, 158, 1856, 2582, 2568, 176, 179, 304, 1873, 1857, 316, 1868, 1479, 1013, 1867, 1869, 1950, 0, 875, 2140, 1316, 310, 165, 1665, 1846, 1605, 2141, 729, 1871, 1581, 1106, 1823, 261, 1986, 2707, 24, 411, 637, 1337, 465, 1820, 1075, 1865, 197, 150, 336, 1855, 203, 13, 2309, 1299, 318, 1862, 1473, 343, 563, 352, 1023, 1636, 231, 1874, 699, 2327, 215, 2357, 1432, 722, 1679, 1147, 277, 1870, 1054, 1097, 1875, 1948, 598, 926, 1221, 1096, 633, 232, 2138, 1864, 1852, 228, 416, 335, 1876, 1615, 869, 960, 1859, 1099, 2003, 1453, 1577, 551, 899, 480, 1949, 1003, 784, 143, 157, 1334, 1241, 205, 201, 295, 1936, 1935, 1176, 521, 1940, 1701, 1942, 48, 812, 990, 1932, 1177, 1866, 297, 1413, 1440, 1300, 1863, 776, 2122, 1607, 1344, 2067, 1100, 2493, 2317, 1858, 532, 1542, 507, 1055, 219, 2066]",1,10.0,20.0,245.0,0.6480797636632201,, -"[2199, 6, 2201, 2383, 2025, 1224, 586, 2001, 1841, 2051, 1926, 763, 80, 1842, 1120, 2200, 1118, 1602, 893, 1104, 1416, 1847, 1051, 1467, 1580, 544, 1583, 2576, 1588, 2049, 693, 148, 1843, 2259, 1851, 1203, 2499, 1125, 456, 813, 1303, 2153, 1850, 2119, 2121, 901, 1845, 880, 100, 2073, 911, 325, 373, 1925, 734, 2336, 2311, 2052, 736, 2333, 408, 151, 1428, 280, 149, 2074, 1202, 334, 1635, 638, 597, 1535, 836, 572, 1117, 1047, 2040, 2197, 86, 1680, 2120, 415, 2056, 74, 69, 95, 539, 429, 749, 2335, 1025, 434, 1131, 1517, 1464, 2181, 2155, 1474, 1013, 1849, 2072, 1042, 33, 1729, 2376, 714, 2293, 787, 2045, 627, 196, 1534, 588, 1848, 1661, 52, 2108, 181, 2295, 783, 510, 2183, 61, 1625, 
347, 2034, 48, 2172, 94, 1410, 1675, 2332, 142, 2054, 1158, 2294, 1336, 2133, 1061, 2055, 442, 109, 1773, 506, 306, 1358, 1403, 1846, 1840, 1644, 139, 1139, 530, 695, 525, 2180, 471, 2162, 423, 2182, 1618, 1198, 1983, 1838, 2198, 1907, 1623, 1789, 210, 624, 1143, 1628, 1293, 1521, 908, 2419, 426, 1171, 2131, 738, 1519, 887, 2053, 1511, 2021, 1844, 1425, 1669, 861, 63, 2231, 1285, 323, 1652, 146, 1079, 2020, 1540, 55, 1152]",1,10.0,20.0,260.0,0.6329394387001477,, -"[695, 389, 1012, 1336, 1743, 2256, 399, 1701, 1735, 2288, 1801, 215, 1791, 1282, 1015, 1878, 189, 2451, 1786, 1448, 317, 1609, 969, 1681, 1564, 1262, 562, 1248, 2382, 36, 699, 114, 905, 704, 1788, 2107, 2287, 980, 589, 2322, 1412, 1289, 963, 1620, 1851, 2113, 2327, 1774, 544, 1089, 1505, 1249, 2481, 342, 1725, 359, 121, 742, 20, 1358, 1070, 1552, 1013, 1077, 231, 1852, 945, 2034, 2318, 814, 1726, 603, 556, 880, 1790, 416, 610, 191, 1144, 1560, 2505, 1785, 314, 1571, 826, 277, 1483, 1546, 134, 661, 671, 2252, 2251, 102, 1699, 1871, 1624, 341, 1740, 1797, 1769, 234, 18, 409, 808, 1776, 505, 1661, 318, 133, 1784, 1337, 1789, 773, 1998, 1705, 754, 1622, 406, 252, 563, 1799, 1311, 1722, 118, 1856, 2096, 1158, 1778, 2326, 1986, 1651, 1804, 2045, 1623, 1072, 1251, 1733, 384, 1490, 1779, 2082, 1768, 1561, 1781, 2143, 1583, 1783, 2094, 454, 244, 1640, 1777, 1767, 2081, 289, 2086, 218, 303, 935, 1772, 836, 1787, 2075, 1459, 459, 1773, 1805, 2083, 1367, 1313, 2091, 1830, 1245, 1780, 1810, 1142, 581, 2080, 2026, 387, 1045, 1798, 1009, 1196, 2153, 484, 2208, 719, 1670, 973, 1782, 859, 176, 1803, 542, 112, 910, 426, 1771]",1,10.0,20.0,275.0,0.7278434268833087,, -"[69, 1179, 661, 2061, 847, 2235, 1682, 1171, 807, 2207, 1303, 1914, 1226, 1926, 676, 109, 282, 1013, 2062, 2189, 701, 1955, 935, 2699, 344, 1355, 1543, 306, 979, 176, 735, 1839, 119, 2241, 1503, 2094, 1549, 2242, 1881, 500, 1956, 1583, 1884, 739, 880, 2016, 2286, 2457, 1537, 393, 1191, 129, 67, 1661, 1902, 2630, 1901, 255, 1917, 645, 1851, 2112, 1072, 1168, 
70, 1395, 842, 671, 554, 1343, 1826, 2010, 747, 2184, 1455, 973, 264, 1507, 2029, 2153, 524, 446, 793, 448, 277, 581, 441, 323, 1828, 1690, 2628, 2060, 1829, 2030, 1267, 252, 1827, 1237, 115, 630]",1,12.0,10.0,180.0,0.6059822747415067,, -"[456, 1580, 1986, 2001, 1142, 2003, 1729, 1625, 1537, 2199, 2093, 181, 1455, 1464, 880, 2326, 1644, 671, 836, 2380, 510, 2347, 908, 935, 2184, 2088, 2079, 565, 1045, 277, 2270, 1911, 831, 70, 2269, 2087, 1337, 519, 563, 1011, 1346, 487, 1851, 695, 1046, 2375, 1661, 2046, 454, 1120, 1293, 1583, 1848, 1610, 1842, 2045, 2091, 176, 1670, 1512, 476, 2078, 1403, 1843, 1841, 69, 2048, 1773, 1013, 318, 973, 1572, 1509, 129, 630, 441, 426, 1772, 1193, 1776, 2123, 741, 544, 126, 1789, 1782, 91, 2080, 1245, 655, 945, 1998, 471, 682, 1009, 1871, 1797, 1282, 20, 139]",1,12.0,10.0,260.0,0.6661742983751846,, -"[426, 109, 2291, 71, 1809, 2308, 1810, 790, 68, 159, 430, 1808, 983, 2302, 1264, 912, 1580, 1806, 1817, 1818, 1265, 1261, 335, 1814, 1682, 215, 1692, 377, 443, 1789, 248, 2076, 1812, 1773, 1062, 783, 1020, 2376, 218, 2263, 482, 224, 151, 1986, 962, 2008, 2268, 2304, 775, 180, 179, 2130, 1125, 2011, 2004, 2301, 94, 45, 1042, 1531, 576, 2303, 574, 1192, 911, 1329, 733, 201, 1892, 232, 1143, 160, 1655, 695, 1012, 2127, 2249, 678, 2335, 920, 897, 893, 2262, 2348, 1519, 791, 1385, 1894, 503, 33, 1002, 2155, 2035, 2295, 1983, 2238, 2037, 1015, 2033, 691, 383, 158, 2042, 2003, 1110, 2230, 2307, 1893, 1890, 2332, 1665, 141, 1481, 2039, 1891, 1068, 1098, 1797, 2, 1294]",1,12.0,12.0,200.0,0.6528803545051699,, -"[483, 1362, 1980, 1351, 816, 1171, 1343, 885, 1494, 1979, 2388, 1637, 2070, 708, 1332, 2078, 777, 149, 1974, 822, 666, 433, 519, 878, 2026, 897, 306, 1359, 1039, 2069, 69, 279, 2028, 175, 2024, 604, 1464, 836, 1482, 826, 596, 86, 2071, 1982, 1572, 702, 436, 2027, 1917, 2101, 955, 88, 32, 2194, 1416, 2282, 651, 1485, 2009, 510, 498, 1535, 118, 2016, 1674, 1463, 41, 1358, 1973, 454, 1909, 1309, 506, 1918, 1072, 1683, 1920, 2025, 572, 1013, 424, 
1333, 1592, 504, 1152, 2423, 921, 2295, 324, 1914, 1675, 1042, 1655, 1118, 1652, 2293, 2109, 553, 1566, 1976, 1916, 2263, 490, 2357, 252, 2490, 1498, 2355, 277, 880, 749, 1402, 1927, 1203, 1661, 2052, 1849, 2280, 830, 1079]",1,12.0,12.0,215.0,0.6573116691285081,, -"[2045, 1134, 306, 1583, 2306, 598, 316, 869, 579, 109, 302, 1864, 231, 606, 2430, 2305, 1624, 509, 206, 1655, 2578, 2691, 2353, 1645, 1116, 1410, 1666, 1358, 2605, 1131, 733, 2148, 823, 493, 2360, 197, 2034, 570, 199, 24, 553, 310, 1807, 205, 2354, 396, 394, 603, 519, 2309, 297, 2379, 1223, 218, 1787, 203, 2303, 1818, 1948, 303, 201, 1287, 420, 574, 232, 1129, 2351, 1950, 1101, 2230, 906, 1995, 27, 2136, 1821, 719, 968, 1870, 71, 1665, 277, 605, 1107, 215, 2291, 1127, 1701, 2143, 1518, 1875, 2001, 2667, 2335, 366, 795, 1192, 1869, 179, 2550, 332, 1497, 849, 1299, 1063, 180, 1654, 2008, 2668, 1242, 2004, 1823, 503, 1623, 1988, 1348, 1424, 1817, 2149, 1121, 476]",1,12.0,12.0,260.0,0.5982274741506647,, -"[1810, 344, 1880, 1644, 716, 2182, 1314, 661, 1850, 570, 2430, 258, 2399, 890, 2178, 603, 525, 706, 1013, 1195, 604, 2140, 1094, 416, 1076, 1526, 43, 737, 899, 17, 988, 935, 2706, 1440, 2110, 2099, 490, 2261, 1341, 2139, 2434, 1884, 920, 1868, 805, 1309, 2240, 1443, 363, 753, 1975, 1157, 1847, 1269, 868, 2243, 1574, 1141, 1227, 1821, 1879, 1315, 836, 851, 1373, 2014, 2260, 1316, 963, 1843, 375, 2109, 1710, 300, 2400, 415, 838, 1, 116, 2045, 1346, 1973, 1107, 2401, 1677, 1299, 199, 1986, 88, 1701, 2338, 420, 529, 232, 854, 1823, 714, 201, 1785, 687, 1527, 1474, 211, 1003, 471, 2004, 1630, 1844, 2141, 519, 1073, 1869, 204, 24, 1603, 109, 1974, 1998, 2707, 1417]",1,12.0,12.0,275.0,0.7208271787296898,, -"[454, 598, 2407, 277, 406, 1574, 1313, 118, 1253, 836, 2034, 2079, 643, 1810, 1986, 1661, 685, 1616, 682, 191, 1448, 18, 1998, 1013, 484, 859, 973, 573, 505, 581, 2096, 1624, 519, 102, 1871, 671, 630, 1560, 554, 1791, 1572, 2143, 215, 1787, 2026, 1802, 1798, 1778, 2078, 2080, 387, 1780, 2082, 303, 1248, 1367, 
1782, 1773, 719, 2046, 2085, 1772, 563, 1800, 1776, 603, 1561, 1781, 1856, 1769, 416, 417, 1251, 239, 544, 308, 1770, 302, 384, 1775, 2048, 1790, 252, 1337, 115, 2086, 1584, 1622, 1785, 1779, 318, 487, 1089, 1792, 2107, 476, 2047, 1072, 1789, 887, 350, 2089, 1797, 1656, 910, 608, 2045, 1490, 103, 399, 655, 1805, 980, 236, 1651, 126, 2478, 1771, 1705, 139, 2094, 1009, 542, 112, 176, 958, 1258, 1245, 1777, 329, 426, 1852, 1804, 124, 1623, 10, 1045, 2494, 1783, 1767, 1784, 660, 556, 644, 1140, 1346, 2598, 2088, 306, 109, 2090, 2084, 1768, 1670, 294, 459, 341, 138, 699, 1483, 1830, 1799, 1144, 153, 656, 2091, 2087, 133, 2093, 1640, 452, 1158, 1193, 1774, 314, 1551, 610, 2095, 2092, 289]",1,12.0,18.0,180.0,0.6935007385524372,, -"[261, 1479, 2033, 229, 962, 835, 1251, 1952, 1810, 2125, 1111, 740, 1666, 1701, 2126, 1567, 1405, 102, 1215, 505, 2032, 2531, 1654, 722, 482, 695, 622, 306, 1012, 1002, 1797, 1894, 2075, 1671, 1986, 2668, 1264, 210, 1336, 790, 916, 1665, 1119, 549, 1273, 1871, 2077, 1149, 1812, 48, 2129, 1448, 1072, 2124, 49, 1203, 1807, 1615, 443, 2001, 1026, 2076, 1020, 224, 1614, 2045, 14, 417, 141, 1049, 1968, 248, 1929, 2004, 775, 86, 2044, 2494, 863, 158, 1531, 2041, 794, 1966, 429, 38, 186, 1669, 770, 180, 2034, 1319, 382, 536, 283, 2295, 1889, 2250, 841, 705, 1270, 920, 1497, 1893, 1452, 2127, 1303, 88, 1260, 1892, 523, 714, 2550, 1493, 1634, 1618, 2039, 1050, 39, 196, 2037, 2043, 1349, 617, 1891, 1622, 191, 791, 2035, 1569, 2042, 2036, 2040, 159, 1983, 1138, 691, 1580, 2038, 2130, 82, 148, 1390, 1461, 856, 1890, 348, 1385, 2681, 320, 2393, 430, 1597, 912, 546, 983, 602, 378, 2518, 450, 1261, 2249, 368, 2248, 381, 1366, 996, 2538, 1194, 492, 1255, 1545, 688, 1257, 1533, 2678, 2677, 837, 1643, 956]",1,12.0,18.0,200.0,0.6395864106351551,, -"[745, 366, 1697, 1522, 2009, 515, 160, 1127, 1777, 1788, 623, 1876, 792, 1574, 1558, 1988, 743, 1995, 1778, 391, 2001, 2008, 444, 1812, 71, 2093, 2002, 2289, 1453, 828, 1505, 1102, 179, 924, 68, 1709, 1780, 1787, 201, 
1997, 1875, 1023, 1474, 1991, 1095, 2465, 2000, 1859, 1965, 1987, 2003, 2004, 133, 1990, 2045, 1992, 1795, 1810, 519, 426, 1986, 681, 968, 1701, 1989, 1634, 2335, 2007, 2464, 2290, 2092, 2095, 638, 1873, 306, 39, 215, 1166, 277, 1805, 2383, 1015, 460, 566, 1207, 1998, 1624, 1349, 733, 45, 1772, 563, 153, 2034, 236, 1196, 1337, 318, 1385, 808, 1908, 593, 673, 1705, 1779, 1907, 1346, 1964, 584, 1532, 1870, 476, 335, 2509, 1704, 2005, 1789, 232, 109, 2357, 887, 1999, 210, 94, 1045, 899, 2, 1434, 1996, 124, 1994, 935, 1097, 459, 1993, 1906, 1529, 1365, 1178, 2006, 1149, 271, 1869, 2309, 205, 1010, 470, 1984, 1966, 1671, 2519, 463, 1786, 384, 589, 409, 1115, 443, 1769, 1266, 1336, 317, 151, 102, 809, 2094, 1569, 1785, 2350, 22, 199, 465, 420, 857, 1968, 854, 176, 289, 946, 610]",1,12.0,18.0,245.0,0.6277695716395865,, -"[962, 1251, 102, 835, 1810, 180, 1871, 740, 505, 2045, 695, 306, 1072, 1797, 1701, 1012, 2004, 1952, 2009, 2001, 2531, 429, 2007, 1807, 2033, 622, 1448, 482, 549, 1998, 215, 705, 1203, 1665, 519, 1892, 2003, 2037, 1671, 210, 229, 158, 2075, 417, 1859, 1870, 1095, 1873, 1666, 1876, 1023, 1875, 1453, 160, 232, 1654, 2077, 1002, 201, 2494, 2002, 745, 335, 230, 2668, 1106, 791, 224, 743, 191, 1092, 1812, 1336, 141, 88, 1987, 476, 2, 1889, 2006, 1704, 186, 1614, 1026, 2000, 2041, 382, 1929, 1531, 1995, 1893, 849, 1968, 1894, 14, 86, 1874, 2351, 1622, 45, 49, 1127, 1129, 1574, 1497, 1989, 2550, 366, 394, 1558, 617, 1319, 2032, 792, 1993, 460, 916, 1303, 2501, 1149, 1986, 1270, 2295, 2353, 1890, 841, 391, 1615, 68, 906, 1518, 1063, 2352, 577, 1891, 1452, 2039, 2005, 2035, 1166, 2034, 2008, 151, 1991, 179, 1434, 2680, 1697, 71, 968, 681, 1321, 1522, 1990, 2354, 2130, 2042, 1996, 2253, 566, 1994, 1709, 912, 430, 1988, 2036, 1997, 1999, 1992, 673, 899, 1567, 691, 2038, 722, 39, 2040, 1983, 870, 2500, 1611, 2249, 1261, 2129, 2128, 2127, 983, 1462, 2124, 1273, 1663, 1119, 2577, 1111, 2126, 2125, 2489, 1235, 546, 1608]",1,12.0,20.0,200.0,0.6100443131462334,, -"[1954, 
1507, 1343, 720, 1957, 1690, 1926, 69, 323, 2112, 1884, 1013, 1128, 252, 1958, 2010, 1395, 2275, 2030, 371, 857, 648, 255, 203, 590, 847, 2165, 306, 241, 2153, 1583, 2189, 554, 711, 446, 1538, 2274, 2273, 1885, 880, 50, 265, 1881, 162, 2272, 1851, 1623, 1700, 1441, 682, 470, 2271, 1097, 118, 1682, 498, 1483, 854, 1883, 739, 1848, 604, 735, 1072, 530, 2022, 2016, 1045, 88, 1882, 1743, 1661, 1840, 973, 1914, 1158, 2166, 704, 1029, 842, 630, 1358, 581, 935, 2086, 980, 1878, 733, 1309, 2450, 109, 1077, 415, 1953, 1555, 661, 277, 359, 2015, 2019, 1619, 1725, 2251, 905, 2025, 540, 1353, 389, 175, 176, 1470, 2252, 596, 451, 41, 1880, 97, 830, 1707, 383, 1060, 1444, 815, 750, 1616, 589, 1174, 2077, 2394, 458, 1070, 2385, 1142, 1592, 1917, 2102, 486, 701, 2062, 121, 1338, 2457, 2103, 115, 125, 1122, 1792, 1566, 1704, 1276, 748, 995, 2388, 1028, 388, 1377, 2071, 435, 644, 344, 1791, 671, 2323, 454, 1879, 193, 1167, 597, 1986, 2264, 945, 1829, 1912, 1609, 1652, 1104, 807, 551, 802, 2553, 755, 725, 1435, 1258, 129, 2094, 728, 1910, 1790, 1368, 1972, 441, 2177, 2190, 1537, 702, 440, 1137, 2179, 70]",1,12.0,20.0,215.0,0.6429098966026587,, -"[1975, 1370, 850, 1973, 2278, 2281, 120, 1840, 1683, 1529, 1984, 838, 2153, 712, 992, 725, 1377, 2295, 2109, 1652, 2405, 1215, 496, 1980, 483, 2394, 2019, 210, 1894, 1121, 661, 197, 179, 2178, 1218, 1435, 1701, 2155, 55, 490, 436, 181, 1926, 242, 2280, 2293, 1482, 651, 2480, 1525, 891, 1266, 1410, 252, 232, 1842, 1072, 945, 95, 1352, 1487, 1501, 645, 1224, 2136, 1464, 279, 2384, 644, 1074, 1882, 2457, 176, 2283, 1079, 498, 1908, 728, 277, 935, 1868, 2608, 816, 1309, 1174, 779, 737, 2017, 2649, 415, 2100, 815, 711, 2485, 830, 1909, 1013, 1676, 1359, 551, 1131, 1905, 2071, 1902, 1919, 2310, 1655, 1583, 1677, 1512, 231, 778, 2015, 76, 2388, 596, 693, 2014, 280, 1972, 1920, 973, 1535, 696, 510, 1899, 604, 1494, 1839, 851, 11, 2012, 1847, 539, 506, 2189, 1674, 2653, 1732, 306, 1402, 130, 1675, 69, 702, 1849, 880, 1566, 1142, 1288, 1527, 682, 921, 
2185, 1500, 1741, 88, 2023, 2422, 1133, 300, 162, 955, 1917, 1851, 41, 1915, 56, 671, 836, 1661, 412, 989, 447, 190, 2050, 387, 1645, 149, 630, 115, 1592, 1616, 2436, 2165, 1538, 1792, 175, 1507, 2011, 262, 2190, 2135, 743, 2016, 255, 441, 755, 2112, 1918]",1,12.0,20.0,245.0,0.6610044313146234,, -"[213, 1593, 144, 1836, 92, 145, 2192, 537, 898, 1698, 1165, 2159, 2158, 1835, 2622, 2157, 2160, 495, 108, 23, 1327, 2161, 1328, 2209, 1647, 1504, 144, 1593, 1165, 145, 2192, 23, 537, 495, 2158, 92, 213, 2159, 1835, 1698, 1647, 144, 1593, 1165, 145, 2192, 23, 537, 1327, 495, 1836, 92, 2158, 2161, 2159, 1835, 1698, 1328, 898, 2157, 1647, 144, 1593, 1165, 145, 2192, 23, 537, 1327, 495, 1836, 92, 2158, 2161, 2159, 1835, 1698, 1328, 898, 2157, 1647, 144, 1593, 1165, 145, 2192, 23, 537, 1327, 495, 1836, 92, 2158, 2161, 2159, 1835, 1698, 1328, 898, 2157, 1647, 144, 1593, 1165, 145, 2192, 23, 537, 1327, 495, 1836, 92, 2158, 2161, 2159, 1835, 1698, 1328, 898, 2157, 1647, 144, 1593, 1165, 145, 2192, 23, 537, 1327, 495, 1836, 92, 2158, 2161, 2159, 1835, 1698, 1328, 898, 2157, 1647, 144, 1593, 1165, 145, 2192, 23, 537, 1327, 495, 1836, 92, 2158, 2161, 2159, 1835, 1698, 1328, 898, 2157, 1647, 144, 1593, 1165, 145, 2192, 23, 537, 1327, 495, 1836, 92, 2158, 2161, 2159, 1835, 1698, 1328, 898, 2157, 1647, 144, 1593, 1165, 145, 2192, 23, 537, 1327, 495, 1836, 92, 2158, 2161, 2159, 1835, 1698, 1328, 898, 2157]",0,12.0,20.0,260.0,0.087149187592319,, -"[1891, 1889, 2130, 2040, 2001, 2122, 332, 2120, 1129, 2076, 1479, 59, 1248, 1692, 691, 1636, 1057, 429, 2361, 792, 1929, 394, 2039, 2233, 1765, 1453, 2091, 1846, 1067, 1875, 1863, 2353, 2027, 2038, 1358, 2348, 503, 2351, 617, 1853, 2042, 574, 938, 1448, 2352, 1518, 1894, 1872, 377, 2379, 1002, 1026, 1020, 1382, 1063, 906, 791, 2037, 1169, 1866, 784, 849, 1855, 1876, 1614, 874, 1867, 1265, 261, 665, 86, 2409, 1986, 1270, 2296, 1968, 563, 180, 1859, 2041, 141, 218, 1864, 2034, 1742, 532, 382, 841, 1870, 1337, 823, 1682, 767, 1983, 24, 633, 1874, 
417, 2221, 869, 2045, 598, 1856, 159, 1871, 2291, 1319, 1336, 1334, 1052, 2559, 577, 1287, 1854, 79, 232, 781, 49, 733, 2268, 1868, 318, 2035, 109, 1701, 1666, 603, 2230, 1655, 306, 606, 157, 397, 2307, 1950, 201, 1440, 1865, 715, 1858, 2119, 231, 1241, 699, 2308, 215, 158, 316, 143, 547, 2594, 1023, 14, 1134, 310, 2360, 1852, 1807, 531, 1665, 1873, 396, 2136, 664, 1100, 2036, 2292, 1948, 1815, 205, 420, 1164, 1812, 1624, 335, 1410, 1131, 1273, 1821, 1799, 1818, 199, 1116, 1223, 481, 576, 729, 1817, 2305, 1075, 1823, 203, 1299, 1869, 1497, 467, 568, 1820, 1217, 1857]",1,12.0,20.0,275.0,0.5926883308714919,, -"[1927, 2132, 1072, 2112, 1368, 1435, 645, 1163, 581, 2395, 60, 2553, 2228, 1671, 2458, 118, 993, 973, 32, 2113, 1614, 2070, 1526, 499, 414, 42, 1911, 490, 252, 807, 1070, 1900, 1879, 2020, 1881, 1983, 331, 116, 1637, 1883, 1566, 815, 1884, 935, 1660, 682, 711, 1907, 2022, 323, 2137, 1792, 1396, 1626, 1285, 1131, 1399, 2155, 2050, 2016, 1045, 441, 1882, 2017, 1885, 539, 702, 164, 1501, 2069, 1906, 945, 2111, 1296, 2015, 2153, 2638, 454, 2116, 1341, 1372, 1732, 55, 2309, 1218, 1850, 210, 2190, 661, 1291, 771, 2154, 2457, 2274, 2151, 344, 1616, 891, 2071, 1377, 728, 1156, 1441, 880, 1028, 651, 1683, 1851, 1974, 280, 2485, 1353, 2068, 1421, 1973, 97, 586, 1512, 865, 2480, 1583, 95, 1482, 2023, 1880, 755, 1224, 2282, 2156, 2188, 1333, 2101, 1402, 452, 505, 447, 1535, 498, 1487, 836, 2110, 1258, 504, 666, 1587, 2653, 510, 777, 1085, 2021, 1878, 787, 1661, 822, 1840, 1222, 1370, 803, 1042, 1975, 1184, 334, 2100, 2293, 1848, 2109, 1972, 644, 2052, 2608, 1919, 2051, 262, 21, 2164, 56, 398, 725, 1120, 2649, 2310, 1110, 412, 1351, 2177, 1074, 1917, 1849, 1926, 1920, 2388, 1976, 2135, 1592, 231, 1652, 2422, 2152, 2217, 779, 847, 496, 2189, 1905, 1525, 181, 1416, 190, 1464, 1079, 778, 1359, 712, 1013, 1676, 96, 1395, 1847, 830, 1403, 1675, 2278, 1915, 551, 1729, 1842, 1644, 850, 471, 1362, 1682, 1843, 1625, 907, 1916, 1293, 1846, 1521, 908, 1845, 506, 1844, 992, 530, 2045, 
1914, 2134, 69, 1918, 1841]",1,12.0,25.0,260.0,0.6569423929098966,, -"[117, 2537, 259, 117, 2537, 259, 117, 2537, 259, 117, 2537, 259, 117, 2537, 259, 117, 2537, 259, 117, 2537, 259, 117, 2537, 259, 117, 2537, 259, 117, 2537, 259]",0,,20.0,200.0,0.0993353028064992,0.5, -"[2209, 108, 1647, 898, 2160, 2159, 2157, 1835, 1836, 1327, 1504, 23, 92, 2161, 495, 1698, 2158, 2192, 1328, 144]",0,2.0,2.0,60.0,0.087149187592319,, -"[2202, 1540, 2172, 2053, 1923, 347, 2055, 423, 2133, 2181, 1388, 1926, 2183, 751, 1925, 525, 1118, 1008, 74, 1042, 734, 1006, 1330, 693, 65, 2182, 334, 1628, 1838, 1635, 95, 964, 2054, 1303, 861, 736, 1535, 1468, 2074, 305]",0,3.0,4.0,80.0,0.1351550960118168,, -"[2209, 108, 1647, 898, 1836, 1504, 1327, 1835, 2157, 92, 2160, 1328, 495, 2161, 2159, 2158, 144, 1698, 23, 1593]",0,4.0,2.0,60.0,0.087149187592319,, -"[318, 563, 1998, 2045, 2086, 484, 1584, 133, 1771, 519, 308, 1772, 2143, 1651, 2046, 542, 1346, 2048, 1778, 610]",0,4.0,2.0,130.0,0.0945347119645494,, -"[1957, 590, 1954, 1958, 400, 1128, 20, 1354, 1828, 1700, 2375, 1619, 540, 2269, 1183, 371, 1441, 2270, 1060, 2271]",0,5.0,2.0,40.0,0.1340472673559822,, -"[2639, 917, 2639, 917, 2639, 917, 2639, 917, 2639, 917, 2639, 917, 2639, 917, 2639, 917, 2639, 917, 2639, 917]",0,5.0,2.0,130.0,0.1351550960118168,, -"[318, 1785, 563, 2046, 1624, 610, 1584, 2094, 133, 487, 459, 2185, 2093, 1787, 2090, 2045, 1367, 887, 2095, 2086, 1346, 719, 1782, 153, 1770, 1572, 1769, 1798, 1337, 109, 2088, 118, 656, 112, 581, 1789, 2080, 1500, 2089, 1623]",0,5.0,4.0,130.0,0.129615952732644,, -"[27, 606, 2578, 1998, 2230, 571, 1583, 519, 1658, 2010, 1574, 2001, 1120, 2360, 696, 2018, 1095, 2016, 1110, 2009, 2004, 618, 1812, 323, 460, 2000, 2, 1982, 572, 76, 2011, 277, 2002, 1709, 2003, 1999, 1908, 1498, 938, 815, 1570, 851, 1704, 1989, 179, 1987, 553, 2006, 1741, 673, 792, 130, 1992, 2012, 1166, 1023, 1876, 366, 476, 149, 232, 1995, 1127, 2025, 1873, 498, 162, 566, 68, 391, 2027, 88, 1732, 1558, 1357, 1677, 1986, 743, 2017, 
2178]",1,5.0,8.0,130.0,0.6148449039881831,, -"[417, 420, 199, 2034, 1072, 519, 766, 603, 1097, 935, 1873, 2045, 1986, 277, 1013, 316, 203, 465, 2314, 109, 228, 1869, 899, 1894, 1871, 179, 1221, 306, 598, 1875, 926, 1868, 2534, 1159, 1172, 1856, 1147, 14, 2096, 311, 1335, 1636, 1865, 2357, 2582, 1862, 1708, 2139, 2141, 1846]",1,2.0,5.0,150.0,0.5775480059084195,, -"[2324, 1282, 1735, 2130, 1889, 2039, 1283, 1336, 118, 2107, 2042, 86, 791, 2096, 695, 691, 1986, 215, 18, 741, 1505, 2000, 1499, 1851, 2026, 831, 141, 638, 36, 337, 1002, 487, 484, 2145, 1791, 303, 1856, 454, 1798, 1616, 191, 1270, 1812, 2080, 554, 158, 655, 121, 1705, 1072]",1,2.0,5.0,300.0,0.6152141802067946,, -"[766, 1297, 845, 24, 201, 463, 1703, 94, 1636, 1846, 519, 316, 143, 1299, 1964, 297, 1701, 962, 2118, 1234, 1235, 1100, 229, 547, 1111, 1971, 1797, 2126, 1462, 1379, 1810, 2355, 165, 794, 102, 1697, 2125, 2295, 2238, 2117, 1107, 157, 1966, 1671, 835, 598, 306, 261, 1215, 1473, 733, 1968, 1451, 1119, 2045, 33, 1986, 897, 1405, 770, 55, 2129, 1655, 482, 2357, 788, 2001, 1251, 2128, 1149, 790, 1611, 1012, 1665, 1273, 1702, 210, 1663, 869, 870, 1807, 2120, 248, 920, 1003, 2564, 2041, 1491, 362, 2646, 1871, 2335, 473, 549, 22, 1203, 2356, 86, 2124, 1303]",1,2.0,10.0,300.0,0.5753323485967504,, -"[1013, 1133, 1500, 2021, 1538, 65, 2418, 510, 1224, 242, 2185, 2359, 2155, 1376, 687, 836, 2280, 1655, 2419, 838, 1394, 2385, 1812, 88, 1848, 995, 619, 1121, 325, 1908, 910, 887, 1174, 1338, 230, 2282, 2034, 1822, 1095, 164, 1759, 549, 239, 1171, 2182, 1295, 1092, 586, 1821, 1811, 681, 211, 1652, 1355, 482, 695, 1855, 716, 835, 465, 1869, 1331, 224, 1576, 511, 1131, 1012, 2231, 1986, 948, 2319, 1814, 1809, 407, 2304, 1808, 962, 1581, 1065, 486, 790, 1823, 1930, 649, 2011, 1020, 1819, 1069, 2076, 2667, 792, 1096, 1609, 469, 2283, 1909, 1682, 1382, 2077, 429, 78, 2321, 104, 1220, 2189, 25, 356, 2320, 2035, 1912, 636, 2233, 1274, 1810, 994, 218, 2008, 2668, 2594, 1692, 1329, 1324, 1219, 1820, 2302, 781, 863, 862, 
38, 1590, 401, 2091, 1815, 568, 1348, 54, 1296, 2541, 490, 27, 2301, 1107, 13, 795, 1703, 351, 1966, 794, 2119, 1816, 1004, 1818, 719, 1299, 1806, 2357, 576, 2303, 1787, 823, 2355, 1813, 2292, 733, 1287, 2552, 2306, 2490, 2263, 453, 678, 45, 2305, 383, 1265, 1704, 934, 481, 1649, 759, 435, 1215, 94, 1621, 352, 715, 1294, 1062, 1160, 2268, 675, 2291, 1817, 436, 309, 1192, 377, 2281, 1278, 195]",1,2.0,20.0,200.0,0.6314623338257016,, -"[1701, 2045, 1072, 563, 1337, 318, 1743, 1919, 1483, 139, 236, 603, 126, 519, 175, 704, 1805, 2450, 308, 153, 1787, 779, 1920, 103, 1359, 1986, 1820, 69, 426, 596, 1624, 1772, 138, 1583, 778, 1675, 41, 1676, 506, 2388, 2025, 1077, 1464, 1296, 1779, 1913, 306, 1918, 1592, 895, 955, 604, 1873, 905, 880, 389, 1346, 359, 1914, 2019, 1527, 630, 1184, 973, 2016, 252, 682, 2106, 2528, 133, 2189, 2153, 112, 459, 36, 88, 1824, 1485, 530, 2101, 1905, 1826, 2100, 2179, 440, 1849, 1539, 1226, 803, 581, 2086, 1362, 1142, 203, 115, 329, 277, 302, 1309, 1674, 857, 921, 1079, 671, 1915, 514, 451, 392, 1785, 1916, 1956, 979, 1917, 2190, 294, 124, 747, 458, 118, 163, 2241, 2094, 2630, 830, 2022, 725, 1619, 720, 470, 1882, 490, 67, 1444, 2375, 176, 1537, 47, 1269, 1526, 1978, 753, 2285, 193, 113, 1553, 540, 578, 516, 119, 2269, 1879, 2527, 1825, 807, 2184, 70, 854, 2323, 1455, 935, 1880, 2256, 1957, 479, 1571, 344, 1883, 454, 2644, 1579, 441, 1183, 1878, 1773, 2061, 2271, 648, 1045, 661, 1525, 241, 2060, 97, 1954, 1885, 448, 1884, 1700, 2273, 1881, 249, 264, 735, 1182, 2274, 50, 1353, 1789, 2062, 1955]",1,2.0,20.0,250.0,0.697562776957164,, -"[154, 364, 609, 970, 819, 155, 90, 1041, 764, 613, 156, 797, 73, 59, 341, 796, 758, 326, 2039, 552, 2042, 687, 111, 876, 524, 2130, 748, 466, 1891, 173, 691, 744, 1417, 1929, 1889, 2350, 346, 1979, 626, 616, 1826, 86, 957, 1732, 873, 1824, 902, 357, 978, 882, 333, 2178, 1358, 580, 708, 2018, 757, 1898, 754, 1980, 951, 1268, 2295, 1156, 14, 158, 244, 1079, 1614, 815, 1267, 985, 1336, 337, 831, 1895, 851, 816, 1677, 
737, 1152, 1713, 415, 246, 68, 72, 1039, 1040, 289, 53, 1908, 76, 228, 689, 211, 1174, 823, 2055, 1270, 489, 34, 2333, 2040, 101, 604, 1741, 2017, 1038, 1848, 878, 483, 1900, 1658, 1273, 1047, 853, 885, 1309, 771, 919, 645, 69, 684, 999, 686, 593, 1394, 1529, 2014, 382, 417, 130, 1025, 1847, 1894, 1981, 2034, 1115, 2041, 891, 49, 300, 897, 2394, 2155, 1448, 1288, 1882, 2010, 1978, 1178, 1982, 1927, 2038, 1665, 1807, 356, 436, 1899, 2037, 2035, 2020, 2198, 210, 617, 2419, 88, 1026, 1410, 1319, 323, 2189, 2011, 224, 668, 1977, 141, 2022, 743, 1901, 2012, 1332, 1920, 146, 162, 1497, 55, 2013, 95, 433, 2116, 2015, 424, 827, 2025, 1654, 1812, 255, 1131, 1259]",1,2.0,20.0,300.0,0.6894387001477105,, -"[698, 661, 2123, 1885, 588, 2003, 332, 665, 91, 286, 745, 2040, 2001, 868, 1127, 743, 2009, 160, 1844, 2120, 973, 2121, 2004, 2383, 1987, 2002, 2122, 2008, 1558, 924, 442, 2000, 1051, 1873, 792, 1045, 673, 113, 1958, 232, 2, 176, 196, 1875, 1884, 1859, 389, 1787, 2034, 306, 277, 1701, 335, 1812, 1989, 1705, 935, 563, 1907, 681, 1337, 124, 566, 638, 1876, 1453, 476, 1992, 733, 887, 1532, 344, 45, 1908, 2007, 896, 2357, 1346, 1095, 215, 808, 1810, 1870, 1993, 249, 2362, 391, 133, 1634, 1805, 1349, 2006, 2066, 460, 465, 1779, 451, 1986, 318, 210, 349, 1015, 2509, 1166, 2276, 68, 1991, 1777, 1780, 409, 899, 2005, 1023, 39, 71, 1772, 553, 1869, 1789, 271, 1965, 1661, 1996, 1999, 2309, 1778, 236, 205, 1624, 109, 384, 1906, 2093, 1709, 1419, 102, 1522, 1196, 1990, 2323, 289, 1988, 1785, 696, 1365, 1970, 1984, 968, 441, 1671, 179, 1336, 1452, 1994, 2092, 1968, 759, 2237, 1998, 1769, 317, 1574, 2259, 1266, 2519, 610, 153, 589, 420, 529, 199, 1417, 94, 1995, 1519, 2260, 1964, 2229, 2059, 1474, 1178, 1702, 789, 805, 1529, 1240, 275, 2239, 627, 151, 1703, 706, 1967, 1385, 2155, 463, 152, 2238, 2496, 1505, 415, 201, 2045, 29, 2435, 2236, 2290, 963, 1971, 2547, 2548, 515, 140, 2400, 444, 582, 828, 375, 623, 1653, 488, 443, 426, 2549, 1795, 366, 1234, 1010, 2645, 1102, 584, 2228, 2401, 
1697, 1207, 2289, 43, 2465, 258, 519, 2464, 340, 1094, 1794, 1381, 1097, 203, 266, 1369, 548]",1,2.0,25.0,300.0,0.6894387001477105,, -"[2155, 1131, 1224, 2094, 1740, 2394, 2080, 2013, 95, 1926, 604, 693, 1782, 2086, 487, 661, 2046, 1873, 1914, 682, 1245, 1851, 1770, 1849, 708, 910, 1733, 451, 471, 854, 1110, 65, 2048, 2085, 1798, 935, 2403, 2326, 2045, 139, 112, 426, 1464, 1705, 1359, 655, 2189, 1343, 1804, 836]",1,4.0,5.0,200.0,0.5926883308714919,, -"[1725, 1740, 59, 68, 1735, 2324, 18, 1070, 1282, 232, 90, 1786, 2145, 2054, 101, 1013, 1583, 1289, 1262, 179, 773, 189, 505, 2155, 1258, 2472, 2217, 155, 1283, 1483, 1733, 2153, 880, 111, 1416, 1986, 154, 1478, 1868, 114, 2201, 34, 30, 945, 2200, 1358, 525, 2034, 699, 1701]",1,4.0,5.0,300.0,0.5797636632200887,, -"[2158, 108, 2157, 2209, 1647, 144, 145, 92, 537, 2159, 1593, 1504, 1698, 213, 2192, 1835, 2622, 898, 2160, 1836, 1165, 23, 2161, 1327, 495, 1328, 2158, 2209, 1504, 108, 1327, 2158, 2209, 1504, 108, 1836, 92, 1165, 1593, 145, 1327, 2158, 2209, 1504, 108, 1836, 92, 1165, 1593, 145, 1327, 2158, 2209, 1504, 108, 1836, 92, 1165, 1593, 145, 1327, 2158, 2209, 1504, 108, 1836, 92, 1165, 1593, 145, 1327, 2158, 2209, 1504, 108, 1836, 92, 1165, 1593, 145, 1327, 2158, 2209, 1504, 108, 1836, 92, 1165, 1593, 145, 1327, 2158, 2209, 1504, 108, 1836, 92, 1165, 1593, 145]",0,4.0,10.0,150.0,0.087149187592319,, -"[2071, 1474, 1178, 745, 160, 1529, 725, 1592, 1986, 593, 748, 356, 211, 666, 32, 2018, 1570, 1358, 842, 1850, 2485, 252, 1535, 907, 571, 470, 1916, 1696, 323, 1314, 277, 262, 1908, 60, 1880, 1115, 897, 843, 2155, 1525, 514, 2136, 2016, 890, 696, 1494, 55, 1351, 753, 1269, 1219, 579, 1097, 1359, 1713, 1370, 1918, 1732, 1227, 436, 1926, 2276, 1566, 822, 164, 1630, 2134, 2282, 2274, 778, 2102, 1341, 1110, 506, 1338, 1222, 682, 1658, 2015, 1972, 661, 2156, 1291, 41, 1203, 1920, 1523, 1843, 955, 1076, 1362, 1583, 205, 1701, 76, 525, 161, 851, 548, 1973]",1,4.0,10.0,250.0,0.5646233382570163,, -"[118, 454, 505, 1499, 337, 
1735, 191, 1289, 1622, 1283, 1616, 2322, 1699, 1483, 1560, 1791, 554, 2478, 1448, 2324, 973, 1661, 1801, 2081, 808, 18, 573, 1248, 1282, 124, 2094, 121, 1776, 1998, 603, 1258, 1072, 417, 2085, 1572, 36, 1705, 1785, 1552, 1830, 236, 1013, 341, 103, 608, 2326, 2082, 1146, 289, 2086, 139, 1158, 1251, 215, 2089, 660, 610, 958, 1584, 2092, 2093, 1774, 1797, 910, 1651, 1784, 1786, 699, 1778, 2318, 1367, 1337, 2090, 484, 963, 1871, 153, 1193, 2045, 1144, 239, 1640, 656, 1769, 406, 109, 1564, 1618, 859, 1681, 2048, 317, 943, 2107, 1780, 1767, 1787, 1768, 655, 409, 1623, 407, 719, 308, 112, 530, 2327, 2080, 1346, 544, 330, 384, 581, 2300, 556, 133, 1045, 638, 1837, 2091, 303, 1505, 176, 960, 1810, 2046, 1798, 1792, 1779, 1196, 2325, 1551, 476, 2078, 2087, 1783, 1852, 1781, 519, 1089, 1061, 1015, 426, 773, 459]",1,4.0,15.0,200.0,0.6669128508124077,, -"[1405, 1508, 229, 2117, 2125, 1479, 2118, 2126, 2124, 261, 1655, 33, 210, 2120, 2003, 2121, 920, 1710, 2127, 770, 2123, 1121, 1149, 1671, 102, 1894, 1893, 1871, 2122, 2359, 1810, 1701, 1890, 91, 482, 2077, 687, 1614, 1986, 1273, 55, 2406, 1251, 1812, 1119, 2002, 1874, 983, 185, 1868, 17, 2249, 2141, 2, 949, 2075, 480, 293, 2129, 1261, 1111, 1929, 2398, 1049, 1927, 962, 224, 2295, 766, 2130, 549, 24, 1681, 316, 1215, 407, 442, 2430, 1160, 1821, 2038, 1864, 691, 2039, 1662, 1400, 570, 869, 2004, 523, 2500, 1297, 2042, 835, 2381, 845, 862, 1100, 886, 38, 1666, 1385, 1143, 2139, 1531, 49, 714, 899, 1542, 968, 606, 1012, 2040, 791, 1889, 1823, 1107, 1590, 158, 1270, 148, 1567, 438, 2550, 1807, 863, 2043, 1426, 1622, 2493, 1654, 1618, 574, 1452, 2036, 2380, 1203, 1324, 695, 2033, 2034, 201, 2206, 1319, 1636, 722, 912, 916, 1425, 430, 191, 1015, 1924, 1336, 1998, 2041, 2379, 48, 1073, 1951, 1268, 536, 733, 1665, 1983, 1303, 1968, 180, 740, 1703, 14, 141, 1875, 1299, 2668, 1417, 897, 1026, 2001, 622, 2531, 833, 505, 1682, 1448, 143, 297, 1891, 2494, 503, 2335, 942, 1870, 306, 948, 2348, 2319, 417, 2035, 
186]",1,4.0,20.0,250.0,0.6329394387001477,, -"[1045, 1656, 426, 608, 1346, 887, 519, 565, 1551, 109, 1367, 1610, 910, 350, 661, 1705, 302, 1770, 415, 1885, 88, 124, 1245, 719, 500, 656, 655, 1584, 103, 129, 1193, 2242, 153, 1555, 2094, 1805, 126, 2062, 1395, 1490, 452, 2016, 306, 1226, 778, 542, 1953, 329, 2235, 676, 1507, 1682, 1798, 176, 344, 487, 842, 701, 573, 138, 1537, 2241, 1651, 1191, 801, 476, 119, 1668, 1772, 2046, 2045, 671, 1851, 244, 1775, 193, 682, 60, 2079, 2450, 1009, 446, 1914, 1781, 564, 1360, 1527, 647, 2628, 139, 554, 1975, 1802, 1900, 1955, 112, 1904, 2184, 2491, 1895, 847, 891, 1543, 950, 880, 1495, 1898, 958, 1623, 854, 1804, 1901, 1661, 1179, 1470, 1013, 441, 1799, 1171, 935, 807, 2189, 1800, 1797, 1779, 1370, 2029, 1884, 1572, 630, 1343, 1640, 1251, 1262, 1926, 1899, 2207, 735, 308, 2274, 645, 282, 1956, 1549, 440, 189, 1029, 1483, 1355, 1954, 1784, 1505, 1919, 1740, 2284, 2286, 1801, 739, 255, 2061, 773, 779, 1957, 1725, 973, 728, 1158, 1771, 47, 1960, 1803, 67, 1224, 1267, 1525, 342, 69, 1579, 2630, 1060, 233, 1072, 1896, 2553, 1237, 2273, 1707, 2078, 238, 2699, 750, 265, 945, 236, 590, 1142, 1958, 2010, 118, 1455, 1168, 559, 331, 1897, 1070, 1733, 70, 844, 294, 113, 2275, 1128, 1167, 860, 1616, 2271, 2485, 1690, 793, 472, 1881, 128, 1509, 1583, 1902, 2166, 1839, 747, 1441, 2165, 115, 2153, 388, 2019, 1183, 1358, 1503, 2060, 393, 252, 1538, 1828, 417, 277, 2112, 1553, 360, 249, 2457, 1182, 979, 392, 371, 2527, 540, 1917, 940, 581, 2481, 648, 241, 2272, 1478, 50, 1824, 2190, 2374, 1700, 2375, 720, 370, 264, 454, 1825, 2030, 1303, 1409, 1792, 709, 1829, 1619, 2592, 1404, 1827, 400, 524, 1903, 448, 1408, 1354, 840, 2022, 20, 1415, 1414, 1826, 2270, 323, 2269, 2188]",1,4.0,30.0,300.0,0.6580502215657311,, -"[415, 525, 301, 1737, 1336, 1040, 2476, 409, 2220, 1838, 1145, 1169, 1714, 552, 524, 863, 88, 38, 188, 689, 1139, 1681, 2492, 1703, 1734, 1986, 2172, 2355, 1998, 1590, 78, 748, 2131, 2357, 1704, 486, 2180, 1797, 1719, 52, 1285, 2668, 407, 
1219, 95, 759, 2133, 72, 1395, 104]",1,6.0,5.0,200.0,0.6100443131462334,, -"[99, 2454, 26, 2455, 122, 2604, 123, 127, 123, 26, 99, 2455, 2604, 122, 2454, 127, 123, 26, 99, 2455, 2604, 122, 2454, 127, 123, 26, 99, 2455, 2604, 122, 2454, 127, 123, 26, 99, 2455, 2604, 122, 2454, 127, 123, 26, 99, 2455, 2604, 122, 2454, 127, 123, 26, 99, 2455, 2604, 122, 2454, 127, 123, 26, 99, 2455, 2604, 122, 2454, 127, 123, 26, 99, 2455, 2604, 122, 2454, 127, 123, 26, 99, 2455, 2604, 122, 2454, 127]",0,6.0,10.0,150.0,0.087149187592319,, -"[1894, 429, 2034, 1739, 383, 1738, 1346, 306, 109, 1358, 1810, 91, 985, 2379, 1149, 2044, 2123, 1864, 869, 2325, 157, 503, 2365, 1670, 1185, 1820, 2532, 1171, 2326, 818, 1907, 1295, 1583, 1014, 1781, 482, 2003, 1912, 1558, 24, 297, 574, 416, 2077, 598, 1875, 1709, 566, 1446, 1704, 1991, 695, 792, 1191, 230, 2001, 733, 879, 285, 2303, 2339, 1701, 206, 2543, 2045, 335, 2412, 1859, 2340, 460, 218, 827, 2338, 1999, 1812, 10, 2613, 2348, 570, 418, 1998, 2555, 445, 2006, 1992, 1800, 519, 1574, 1445, 1488, 493, 2236, 2002, 1066, 553, 1219, 603, 2005, 1870, 1]",1,6.0,10.0,200.0,0.5867799113737076,, -"[350, 302, 2153, 719, 308, 426, 2185, 2016, 1572, 1551, 1133, 656, 88, 973, 1193, 1131, 252, 1224, 1705, 989, 643, 294, 2010, 1535, 496, 1072, 1245, 1848, 1584, 41, 487, 1847, 1500, 1346, 236, 1916, 1045, 2436, 1583, 109, 161, 1645, 1538, 1771, 175, 1640, 1920, 958, 1158, 1251, 139, 103, 573, 910, 1203, 1367, 711, 126, 519, 153, 1676, 542, 644, 2022, 69, 1152, 1770, 279, 1917, 586, 596, 2293, 1482, 87, 306, 1483, 2294, 280, 2045, 887, 414, 1623, 124, 1285, 329, 2111, 836, 2113, 181, 1441, 417, 842, 955, 2316, 476, 1490, 908, 1525, 608, 2155, 1656, 1845, 530, 1029, 1674, 1362, 452, 2017, 112, 747, 2190, 655, 1842, 74, 2388, 880, 138, 779, 778, 510, 1118, 2189, 1507, 1501, 1651, 604, 1464, 1359, 1675, 1120, 993, 1919, 454, 1914, 2165, 2166, 921, 1343, 2485, 1851, 1143, 1592, 2112, 2295, 539, 749, 1661, 693, 1013, 1918]",1,6.0,15.0,200.0,0.5723781388478582,, 
-"[1128, 52, 1954, 2275, 2054, 1540, 2172, 1958, 1139, 347, 1614, 1628, 1399, 1742, 1027, 2309, 590, 580, 1919, 1396, 1957, 720, 2133, 359, 778, 562, 423, 389, 408, 2020, 1671, 2057, 1725, 902, 1735, 2136, 323, 724, 454, 2022, 2272, 2131, 1077, 95, 2183, 750, 1483, 704, 142, 241, 344, 712, 742, 1525, 1156, 2480, 331, 355, 779, 1983, 1668, 2180, 1359, 1843, 356, 1421, 1604, 349, 1975, 55, 371, 2395, 1838, 2072, 456, 998, 1224, 1847, 1269, 265, 1907, 164, 1441, 2394, 1268, 1619, 1979, 1420, 745, 525, 118, 2021, 1358, 905, 2228, 1700, 1103, 858, 757, 1115, 239, 2010, 210, 1570, 1370, 2034, 771, 593, 1644, 1184, 2450, 897, 76, 2182, 1906, 1284, 1427, 498, 2181, 737, 1527, 1515, 1620, 2024, 851, 2428, 2018, 969, 211, 540, 415, 1412, 648, 162, 2274, 787, 1013, 2137, 1743, 2015, 2427, 2396, 1732, 1630, 1344, 815, 2178, 50, 2113, 1085, 2198, 600, 161, 973, 1758, 300, 504, 1309, 651, 748, 1394, 754, 2397, 1658, 1079, 436, 661, 2485, 571, 1652, 1190, 1986, 471, 966, 1529, 490, 204, 437, 61, 1625, 1974, 1174, 634, 1387, 160, 818, 1667, 842, 2102, 995, 1060, 2103, 60, 1285, 2273, 1905, 1474, 2016, 1338, 1566]",1,6.0,20.0,250.0,0.6418020679468243,, -"[1644, 1810, 2017, 716, 2182, 211, 603, 1394, 604, 2178, 76, 1880, 1076, 1677, 2137, 1850, 1314, 920, 899, 258, 2399, 1821, 2706, 737, 743, 1288, 204, 890, 651, 525, 1998, 661, 2045, 116, 1675, 1013, 2261, 109, 706, 1014, 1986, 1094, 2407, 1908, 570, 1914, 841, 415, 416, 643, 344, 17, 1313, 2430, 350, 1309, 1195, 43, 1879, 1713, 2401, 306, 988, 573, 1526, 18, 1623, 1823, 130, 1882, 1868, 529, 102, 2434, 1560, 815, 490, 1732, 838, 1624, 1974, 1984, 2140, 2243, 2014, 1558, 2110, 1440, 420, 1849, 935, 1844, 316, 2240, 1847, 1487, 2004, 1443, 2338, 2099, 1975, 1141, 1421, 1157, 165, 2340, 1574, 792, 363, 1529, 753, 162, 766, 1107, 1785, 2309, 868, 1843, 851, 2260, 1341, 1073, 805, 1741, 1346, 966, 1315, 1269, 605, 2379, 88, 438, 818, 879, 375, 199, 1884, 1501, 2139, 1316, 2109, 519, 1474, 387, 963, 300, 1527, 1973, 2400, 232, 1126, 
2505, 185, 470, 1027, 1074, 134, 1178, 836, 1676, 600, 2397, 2698, 1573, 367, 2707, 1779, 1299, 724, 1227, 2013, 201, 1373, 1701, 1174, 1473, 2011, 406, 1869, 452, 652, 854, 1417, 1266, 845, 644, 696, 1658, 1710, 1630, 1251, 1297, 297, 277, 1399, 1003, 471, 1, 687, 2034]",1,6.0,20.0,300.0,0.7573855243722304,, -"[2123, 91, 1160, 1015, 505, 1559, 1143, 1481, 1479, 1251, 1666, 2121, 962, 1405, 1508, 261, 432, 2667, 1655, 897, 2238, 2295, 1303, 1618, 2356, 2380, 2471, 503, 2003, 2335, 48, 1425, 2122, 1810, 1703, 1702, 794, 1149, 102, 1789, 1448, 2205, 1031, 316, 863, 1701, 1260, 283, 2206, 1966, 473, 2001, 229, 2117, 598, 1894, 22, 1215, 1665, 2406, 916, 2381, 49, 574, 1615, 2045, 1234, 55, 2357, 1597, 1203, 1452, 1662, 442, 2118, 1986, 714, 733, 1807, 2355, 2125, 1026, 293, 1235, 1012, 94, 1385, 788, 2041, 306, 1987, 1846, 770, 194, 33, 2668, 549, 1230, 2348, 482, 1379, 1380, 1342, 835, 1964, 2680, 1590, 2004, 2332, 740, 2563, 2040, 1608, 218, 1081, 2378, 1611, 1010, 14, 695, 2126, 2646, 38, 2120, 1522, 2038, 1462, 2002, 210, 1952, 1965, 378, 1072, 1449, 2075, 148, 1871, 2076, 1020, 180, 1049, 1797, 2531, 790, 1812, 196, 186, 1532, 158, 2043, 833, 1697, 1889, 1111, 2648, 224, 920, 523, 536, 2684, 2379, 1497, 1491, 2564, 1669, 1983, 2044, 171, 1451, 2124, 39, 862, 2077, 1533, 2033, 82, 2675, 2538, 1050, 602, 775, 1639, 1671, 429, 1071, 1461, 443, 1548, 1324, 1273, 2129, 1119, 622, 2250, 141, 1968, 1002, 722, 1663, 1320, 870, 382, 912, 1569, 2681, 1336, 2550, 86, 1893, 1614, 381, 2308, 159, 1319, 2035, 1654, 2036, 450, 949, 209, 2128, 1567, 417, 248, 2683, 1929, 1493, 546, 1890, 1531, 2034, 361, 1892, 2037, 1891, 320, 726, 2039, 1927, 1634, 1109, 1138, 1349, 1268, 1390, 2674, 1688, 1622, 1264, 177, 1270, 791, 2671, 2518, 705, 2679, 191, 492, 617, 438, 2032, 88, 1135, 2127, 1257, 841, 2673, 2130, 2682, 2670, 2494, 1206, 691, 1422, 856, 837, 2672, 1437, 2248, 2042, 2249, 2393, 1643, 428, 688, 1194, 1545, 348, 1580, 198, 368, 983, 2677, 1261, 430, 2678, 1255, 1366, 996, 
956]",1,6.0,30.0,300.0,0.6067208271787297,, -"[69, 1395, 1914, 323, 109, 2275, 1954, 1128, 2189, 2274, 1957, 1958, 2094, 590, 176, 1072, 720, 371, 1013, 1583, 1901, 252, 1849, 1826, 1960, 2022, 1824, 1478, 1441, 448, 1904, 1360, 847, 540, 2271, 2485, 306, 2153, 70, 1224, 1358, 1133, 2184, 264, 2273, 1961, 447, 1661, 1917, 1500, 1903, 801, 1455, 1537, 880, 1267, 844, 2185, 682, 1131, 1060, 750, 648, 241, 265, 671, 1619, 1707, 1700, 50, 2272, 1829, 747, 1529, 441, 56, 1495, 193, 1984, 711, 973, 2457, 344, 1902, 88, 1538, 793, 1842, 2031, 440, 935, 2436, 645, 129, 891, 836, 703, 2016, 989, 2050]",1,8.0,10.0,150.0,0.5568685376661743,, -"[2157, 2158, 2159, 108, 1835, 145, 2209, 1504, 537, 1593, 2622, 1165, 1647, 23, 2160, 213, 144, 2161, 898, 1698, 1836, 2192, 92, 1327, 495, 1328, 2158, 2209, 1504, 108, 495, 2158, 2209, 1504, 108, 1836, 92, 1165, 1593, 145, 495, 2158, 2209, 1504, 108, 1836, 92, 1165, 1593, 145, 495, 2158, 2209, 1504, 108, 1836, 92, 1165, 1593, 145, 495, 2158, 2209, 1504, 108, 1836, 92, 1165, 1593, 145, 495, 2158, 2209, 1504, 108, 1836, 92, 1165, 1593, 145, 495, 2158, 2209, 1504, 108, 1836, 92, 1165, 1593, 145, 495, 2158, 2209, 1504, 108, 1836, 92, 1165, 1593, 145]",0,8.0,10.0,300.0,0.087149187592319,, -"[2667, 2199, 486, 1143, 2155, 2380, 2200, 1789, 1257, 1425, 2332, 2123, 1015, 95, 2002, 456, 2518, 1662, 544, 316, 91, 503, 45, 1010, 546, 1481, 1020, 2076, 1966, 2381, 1618, 2248, 2122, 2668, 788, 2003, 2335, 2671, 1580, 775, 1964, 2379, 48, 2121, 1986, 2075, 2001, 733, 920, 1203, 574, 1669, 1666, 2120, 2, 33, 2077, 2406, 2249, 1160, 2295, 443, 2117, 2127, 1703, 180, 1119, 2118, 1952, 2040, 1508, 1701, 1452, 1303, 2033, 2124, 2041, 306, 261, 1671, 1479, 794, 442, 1590, 863, 2125, 2091, 1797, 1405, 1111, 210, 862, 1251, 2126, 229, 1704, 523, 606, 2043, 1810, 196, 770, 2129, 1149, 1012, 1871, 482, 897, 1072, 1110, 962, 38, 55, 293, 102, 248, 1655, 368, 1894, 1273, 2348, 1614, 1215, 1968, 1665, 695, 2378, 1812, 2035, 1807, 1264, 835, 740, 2045, 949, 549, 
158, 1336, 505, 1983, 2044, 417, 1002, 1615, 1892, 49, 429, 14, 224, 833]",1,8.0,15.0,200.0,0.593426883308715,, -"[327, 2063, 1191, 2339, 231, 794, 482, 1042, 1769, 1682, 598, 2064, 2338, 1739, 88, 2073, 1358, 399, 277, 1295, 297, 1325, 760, 1912, 429, 2200, 230, 332, 2044, 2668, 1346, 985, 2301, 2303, 1160, 2253, 1818, 2011, 1666, 1738, 536, 109, 2550, 1894, 2203, 695, 2054, 2340, 869, 157, 396, 306, 2555, 1055, 1864, 2568, 1858, 1996, 2325, 1810, 1171, 1513, 197, 24, 1219, 680, 387, 2034, 2123, 1701, 1014, 1558, 2667, 218, 2077, 416, 1812, 1820, 2291, 1817, 435, 401, 2052, 665, 493, 849, 570, 94, 74, 1709, 899, 1704, 2268, 935, 973, 1149, 2, 95, 201, 836, 2365, 179, 1875, 968, 45, 1580, 2238, 232, 110, 2381, 1870, 2003, 1023, 206, 1522, 1697, 1670, 1454, 8, 1988, 1062, 298, 731, 1603, 567, 525, 1197, 1991, 1446, 733, 383, 2412, 519, 1095, 2045, 2008, 1347, 1873, 2430, 901, 2691, 2201, 377, 1265, 1445, 1262, 68, 553, 290, 91]",1,8.0,15.0,300.0,0.6935007385524372,, -"[327, 2063, 91, 2044, 2339, 2303, 429, 2064, 553, 733, 1358, 722, 2123, 2340, 2380, 1013, 1325, 2412, 2501, 2582, 2667, 2121, 1295, 879, 1106, 191, 1197, 1738, 1739, 827, 1221, 314, 1124, 766, 2509, 1119, 493, 2500, 465, 1097, 1522, 206, 218, 1583, 396, 387, 1907, 1996, 2534, 2381, 1191, 383, 1950, 197, 1567, 1, 2335, 1171, 960, 2691, 228, 985, 54, 1577, 1140, 45, 1055, 2325, 2001, 310, 230, 2668, 277, 1072, 836, 1346, 669, 2532, 1666, 10, 1874, 1558, 1894, 873, 1709, 306, 574, 2573, 482, 1149, 1159, 1697, 2045, 603, 1995, 2365, 2348, 199, 1708, 2008, 2217, 1987, 0, 1219, 298, 68, 417, 1603, 366, 1781, 1998, 2338, 1166, 109, 8, 1127, 2009, 1066, 2379, 1147, 1999, 476, 71, 566, 1988, 1997, 673, 160, 2555, 2117, 391, 2003, 729, 1862, 2000, 285, 1075, 1445, 1542, 1670, 784, 2, 1095, 745, 519, 316, 1810, 2077, 1846, 420, 2034, 1454, 297, 1446, 681, 1991, 849, 1334, 1241, 151, 88, 1868, 1347, 1800, 792, 1488, 1869, 598, 14, 1239, 695, 818, 1877, 962, 968, 1812, 267, 899, 1615, 743, 332, 102, 231, 179, 1479, 
503, 2002, 445, 767, 1855, 2378, 304, 2238, 1941, 1856, 2253, 1574, 926, 1014, 460]",1,8.0,20.0,300.0,0.6410635155096012,, -"[306, 180, 65, 1702, 1807, 887, 2021, 910, 1787, 2163, 2280, 2356, 1819, 1502, 1394, 14, 1295, 1121, 1581, 2418, 1234, 2419, 649, 1500, 1821, 1964, 463, 2176, 443, 1971, 1995, 1355, 1488, 39, 687, 1224, 1814, 1999, 1569, 224, 362, 2001, 568, 576, 1709, 1997, 835, 1822, 2185, 619, 719, 13, 1993, 838, 1015, 2004, 1655, 2605, 2034, 1538, 1133, 1994, 1816, 242, 1107, 1010, 476, 1013, 790, 2359, 1576, 356, 864, 1811, 74, 1331, 934, 230, 2155, 1966, 2210, 1092, 2091, 675, 1987, 716, 1069, 2002, 1703, 1171, 836, 2357, 1846, 519, 1859, 2075, 1873, 33, 2003, 1218, 211, 1382, 1348, 2680, 239, 429, 85, 2000, 2355, 510, 586, 1820, 549, 1697, 1287, 1131, 88, 2076, 1848, 1815, 1810, 27, 2231, 1998, 1020, 2182, 745, 792, 482, 2283, 1908, 946, 1215, 104, 2668, 1095, 2282, 1376, 962, 2005, 2263, 1065, 2119, 1809, 160, 2009, 2555, 2077, 2594, 1812, 281, 164, 1855, 1652, 1558, 1869, 1986, 511, 743, 1823, 1990, 1166, 2424, 2402, 1574, 2233, 823, 636, 695, 2308, 2292, 481, 1989, 2667, 767, 1453, 195, 1991, 460, 1870, 1992, 1808, 436, 1759, 366, 1127, 781, 681, 490, 2385, 1149, 1875, 45, 1338, 54, 68, 1434, 1909, 1174, 94, 218, 2008, 1806, 2319, 401, 1096, 2230, 391, 2035, 1274, 2103, 38, 2006, 794, 1220, 465, 2102, 2490, 486, 1329, 995, 1930, 1012, 2007, 1299, 2, 407, 2291, 151, 1988, 1649, 215, 948, 325, 1813, 1265, 232, 863, 78, 1817, 733, 1219, 179, 994, 1192, 2301, 1621, 862, 566, 1590]",1,8.0,25.0,300.0,0.6905465288035451,, -"[1681, 160, 948, 68, 1283, 1743, 366, 1583, 661, 2324, 742, 1282, 1499, 935, 905, 1358, 699, 2000, 335, 743, 2252, 1012, 704, 391, 2318, 1852, 1740, 1013, 389, 277, 1620, 1682, 1335, 337, 407, 1725, 2251, 416, 598, 1701, 1735, 151, 1878, 2385, 2382, 1077, 1837, 1196, 1262, 1574, 969, 232, 1733, 2319, 2113, 1571, 201, 1412, 239, 1830, 179, 409, 685, 2143, 1791, 1677, 1253, 2325, 1070, 121, 1635, 2327, 387, 1726, 695, 562, 244, 880, 643, 
505, 359, 341, 1546, 1803, 1986, 1336, 754, 814, 1768, 1448, 1871, 589, 189, 191, 1776, 215, 382, 1483, 1142, 460, 454, 289, 1616, 2256, 118, 1045, 2083, 2047, 1661, 1856, 2598, 2087, 1158, 2034, 2407, 1249, 303, 406, 2016, 1564, 960, 314, 603, 1311, 836, 2, 554, 1313, 2096, 459, 2045, 638, 2090, 102, 2026, 519, 681, 1624, 530, 1289, 973, 573, 1197, 2082, 1804, 673, 2081, 1248, 945, 1779, 544, 1367, 1258, 1777, 1797, 859, 71, 1670, 2451, 318, 1337, 980, 563, 1800, 2075, 45, 887, 133, 1072, 441, 1778, 566, 308, 294, 1705, 1560, 2326, 581, 329, 399, 1787, 484, 1251, 1622, 2080, 1772, 252, 671, 218, 1799, 1773, 302, 487, 2208, 1784, 2091, 2095, 1459, 1769, 1782, 682, 910, 18, 1009, 115, 2322, 2084, 1801, 1789, 1465, 630, 1798, 426, 1490, 476, 1346, 1998, 1478, 2079, 2046, 109, 1584, 1651, 350, 2478, 1089, 2078, 417, 1788, 124, 1770, 112, 384, 342, 2019, 2086, 1561, 1699, 1193, 1245, 1780, 2153, 2093, 176, 153, 2085, 1785, 542, 655, 1805, 1802, 1775, 138, 656, 1552, 1505, 1790, 719, 1771, 1572, 1792, 2048, 1810, 2094, 1140, 10, 773, 1656, 2088, 1623, 306, 236, 958, 1781, 20, 452, 608, 126, 103, 2107, 2106, 330, 36, 610, 139, 2089, 2092, 1144, 1551, 1640, 556, 1146, 1783, 1767, 660, 2591, 1506, 2254, 2494, 1774]",1,8.0,30.0,300.0,0.7511078286558346,, -"[1784, 1891, 41, 459, 791, 1894, 1889, 1783, 14, 1336, 596, 158, 133, 86, 711, 2177, 2038, 1002, 1654, 1776, 1497, 141, 2096, 1777, 1614, 1767, 1144, 2080, 660, 1780, 644, 1026, 1072, 1665, 1798, 49, 1773, 1797, 1311, 1812, 2036, 224, 487, 1871, 1367, 2034, 1772, 1245, 1805, 426]",1,10.0,5.0,200.0,0.5542836041358936,, -"[2155, 1131, 1358, 490, 280, 306, 2186, 74, 2293, 1729, 30, 539, 1269, 2052, 1979, 1839, 901, 61, 1926, 139, 2068, 373, 1630, 572, 2163, 1517, 2113, 1394, 252, 1842, 414, 2013, 1158, 399, 493, 642, 305, 2108, 1171, 1397, 1118, 498, 552, 2383, 979, 2450, 1713, 2085, 1974, 591]",1,10.0,5.0,300.0,0.5915805022156573,, -"[2255, 225, 2255, 225, 2255, 225, 2255, 225, 2255, 225, 2255, 225, 2255, 225, 2255, 225, 
2255, 225, 2255, 225]",0,10.0,10.0,100.0,0.0904726735598227,, -"[482, 1810, 2253, 1738, 1739, 218, 2339, 1912, 306, 2550, 109, 2077, 1864, 1812, 1346, 869, 383, 230, 2340, 1191, 2034, 1358, 1701, 231, 598, 733, 2045, 2365, 24, 2338, 1325, 1894, 553, 298, 1704, 1709, 1095, 1858, 232, 1583, 2555, 1445, 297, 1446, 2002, 332, 985, 1697, 416, 2003, 2001, 1870, 1875, 387, 1670, 1522, 1558, 2668, 197, 2303, 2009, 201, 2613, 2008, 1995, 519, 2691, 1127, 2667, 206, 2582, 215, 1119, 1149, 277, 1998, 1453, 570, 1876, 2335, 1859, 2430, 1, 1873, 366, 1066, 335, 681, 792, 745, 1907, 1239, 1014, 1023, 879, 968, 899, 1166, 179, 1055, 2, 476, 743, 1171, 45, 1574, 1996, 88, 1570, 673, 160, 1488, 71, 1997, 2532, 1603, 68, 2568, 1140, 391, 566, 10, 151, 1991, 2000, 2004, 1666, 1986, 460, 1800, 2238, 2236, 1992, 1454, 1987, 836, 493, 1989, 2005, 1999, 1219, 2007, 1990, 445, 1434, 909, 1993, 1994, 2006, 1988]",1,10.0,15.0,150.0,0.6211225997045791,, -"[366, 745, 1558, 792, 1952, 49, 1127, 1866, 1951, 1697, 2034, 2381, 743, 1095, 2398, 160, 1812, 1990, 429, 1869, 68, 788, 460, 476, 391, 633, 1989, 2206, 1665, 1988, 71, 2205, 2041, 1662, 1031, 1873, 158, 1859, 1872, 1810, 1666, 1986, 205, 1820, 148, 1865, 1337, 563, 2493, 1856, 1863, 1987, 179, 1867, 855, 231, 306, 2045, 681, 318, 1846, 603, 2, 1385, 532, 1876, 109, 215, 1858, 232, 699, 1023, 1479, 714, 673, 1861, 1862, 566, 45, 1799, 1334, 1701, 261, 151, 1453, 1852, 13, 1149, 1709, 716, 606, 335, 1434, 1166, 1868, 1704, 216, 2140, 1346, 2004, 1440, 927, 1871, 1998, 1542, 1821, 1299, 387, 729, 1316, 784, 1994, 467, 899, 235, 1075, 1212, 1877, 1874, 1870, 1241, 1860, 664, 1854, 519, 1323, 226, 869, 1052, 1853, 2142, 767, 874, 1855, 1857, 343, 1823, 687, 17, 1875, 157, 1864, 2372, 2139, 1426, 1301, 2359, 1574, 1670, 2141]",1,10.0,15.0,200.0,0.5564992614475628,, -"[1224, 604, 1914, 1848, 1849, 1583, 373, 921, 61, 262, 1131, 2153, 1468, 1921, 1377, 1025, 2134, 2295, 80, 1675, 475, 1592, 1923, 1008, 2162, 1047, 2207, 1464, 434, 1850, 1156, 
1925, 69, 2333, 1362, 55, 1143, 1644, 771, 2418, 2531, 1588, 1922, 2282, 257, 2485, 1120, 539, 485, 149, 1152, 1840, 1104, 2499, 1117, 506, 2051, 30, 1416, 1519, 707, 2311, 1073, 1395, 543, 586, 1351, 305, 572, 916, 2006, 2219, 993, 2155, 1293, 1521, 2576, 6, 836, 510, 1916, 2419, 1042, 2068, 2049, 1680, 65, 1026, 1926, 415, 1503, 2021, 1502, 1467, 2383, 1839, 979, 1851, 239, 1203, 2034, 2293, 839, 2163, 2045, 1845, 1125, 1013, 597, 619, 281, 1924, 1661, 524, 746, 2130, 1118, 2055, 1957, 2108, 1891, 908, 109, 1198, 1842, 763, 2133, 133, 907, 315, 1158, 1635, 1847, 718, 2056, 2198, 1729, 1625, 2180, 1330, 749, 2294, 1972, 181, 2364, 52, 624, 1204, 2072, 1279]",1,10.0,15.0,250.0,0.654357459379616,, -"[661, 1736, 854, 1360, 344, 2112, 1763, 1744, 1761, 2184, 1881, 1408, 1884, 1825, 1743, 1829, 1416, 718, 1738, 1752, 979, 1712, 1766, 541, 118, 2597, 1902, 1718, 70, 1226, 1739, 1546, 1827, 1355, 1961, 1755, 1566, 1714, 807, 440, 739, 1709, 2274, 2031, 1757, 193, 446, 1713, 1731, 2094, 2151, 1728, 1754, 1720, 2235, 735, 2491, 1183, 1765, 1237, 1762, 1750, 1312, 498, 1179, 1764, 1470, 1409, 1719, 935, 1751, 1562, 1962, 2510, 1737, 1734, 454, 1749, 1873, 1276, 1343, 973, 1725, 1711, 360, 1732, 1205, 479, 1721, 1568, 1724, 676, 2025, 1756, 1070, 1060, 1483, 371, 1619, 1963, 550, 750, 1716, 388, 1723, 1742, 441, 1727, 720, 155, 1022, 448, 1717, 1516, 2436, 647, 1169, 1957, 957, 1985, 904, 1729, 1955, 264, 1895, 616, 1072, 437, 1824, 687, 1389, 53, 1168, 1740, 1826, 1735, 524, 1730, 1748, 1958, 466, 30, 1753, 1492, 1103, 853, 93, 795, 1154, 1758]",1,10.0,15.0,300.0,0.5712703101920237,, -"[1538, 65, 1203, 1914, 242, 890, 1215, 510, 255, 175, 2418, 2164, 1080, 239, 1842, 1133, 1500, 838, 2034, 231, 2022, 1907, 2405, 897, 1905, 1628, 1268, 2282, 1344, 661, 356, 61, 1906, 1614, 342, 1417, 1269, 1131, 2309, 437, 424, 1121, 2021, 771, 2217, 1919, 1884, 2155, 779, 2350, 778, 1525, 1156, 1671, 718, 1115, 1079, 2136, 1985, 1314, 841, 37, 1266, 1908, 816, 1983, 2182, 96, 1338, 1980, 
1218, 593, 1652, 498, 1529, 120, 525, 1370, 1677, 1630, 1174, 210, 1512, 2018, 586, 483, 490, 1848, 1494, 2102, 851, 2103, 1979, 2145, 815, 1178, 325, 2154, 945, 162, 323, 1882, 2394, 2017, 1309, 2016, 164, 2010, 60, 1692, 836, 130, 737, 995, 1660, 516, 1291, 1626, 438, 2153, 2014, 1658, 2187, 1527, 1982, 2015, 1472, 415, 1587, 172, 1975, 1741, 1625, 88, 300, 512, 1288, 1526, 1847, 1981, 240, 2186, 1977, 249, 1974, 436, 1224, 756, 76, 787, 1732, 1469, 1843, 2023, 1984, 1644, 966, 1394, 433, 204, 2012, 696, 818, 668, 514, 1850, 1713, 55, 299, 1978, 2020, 256, 1332, 621, 743, 2194, 2011, 2178, 324, 2013, 1667, 2386, 878, 827, 1082, 682, 1341, 2152, 1851, 1583, 1039, 1110, 478, 1222, 2156, 2151, 885, 2024, 651, 2326]",1,10.0,20.0,200.0,0.6115214180206795,, -"[1735, 2252, 2251, 1412, 1726, 1389, 905, 121, 1620, 389, 1358, 1740, 969, 562, 1743, 2407, 2083, 2081, 935, 704, 2107, 716, 1873, 2113, 2451, 1313, 742, 880, 1801, 682, 2019, 1725, 754, 2096, 945, 1282, 2326, 359, 1564, 795, 1012, 613, 1077, 1583, 399, 1616, 1701, 2143, 1070, 1505, 1821, 980, 661, 1483, 1311, 36, 1803, 1733, 1546, 79, 189, 1810, 589, 1289, 603, 341, 643, 1336, 1262, 505, 244, 191, 1804, 118, 1142, 1089, 1158, 1013, 2153, 859, 973, 1776, 573, 303, 289, 1998, 1986, 2208, 1856, 416, 454, 1777, 1705, 1784, 2026, 1852, 699, 1781, 1009, 1574, 277, 215, 1249, 2327, 133, 773, 484, 1797, 1072, 1624, 1871, 1623, 218, 1778, 1459, 1787, 556, 1622, 1335, 1337, 20, 2082, 581, 2075, 1769, 1773, 1780, 2086, 1782, 836, 1799, 1772, 1779, 1670, 2091, 1448, 2034, 598, 318, 630, 814, 1791, 1367, 1196, 2382, 2094, 563, 409, 1144, 2080, 1661, 2322, 1258, 406, 102, 2318, 1798, 69, 608, 1251, 1771, 671, 887, 1045, 2016, 554, 2090, 112, 685, 719, 2087, 252, 1640, 115, 1770, 18, 1805, 519, 1572, 487, 1775, 660, 1490, 910, 2046, 176, 1465, 1478, 2088, 2078, 542, 342, 1789, 1571, 1584, 139, 459, 153, 2045, 1800]",1,10.0,20.0,250.0,0.689807976366322,, -"[327, 2063, 91, 2044, 429, 2064, 733, 2339, 553, 2123, 2303, 2340, 2412, 
2380, 1013, 722, 1325, 2582, 1358, 2501, 1106, 2667, 1295, 1221, 827, 766, 1197, 314, 1097, 879, 1119, 465, 1522, 2509, 1124, 218, 197, 206, 2121, 1738, 2500, 2381, 2534, 1907, 1583, 396, 1996, 1739, 1, 383, 387, 1171, 2335, 191, 1191, 493, 2691, 1567, 1577, 2325, 1950, 1140, 1055, 230, 310, 45, 2001, 2668, 2532, 277, 836, 1666, 1346, 228, 1894, 10, 574, 54, 1072, 669, 1874, 985, 1558, 199, 306, 482, 1219, 603, 8, 2045, 1159, 2008, 873, 1697, 2348, 0, 1995, 2365, 1709, 1066, 960, 1166, 1781, 1987, 366, 1708, 1997, 298, 1603, 2573, 417, 1127, 2379, 1998, 2338, 1147, 109, 2217, 1999, 2009, 1149, 476, 285, 673, 1988, 2003, 1862, 2117, 849, 566, 1445, 2000, 1670, 71, 2, 1347, 729, 420, 1075, 68, 1095, 160, 316, 2555, 1542, 745, 519, 2077, 1810, 2034, 1991, 391, 1454, 1446, 681, 1869, 1846, 1241, 1488, 1868, 1615, 695, 88, 297, 1800, 792, 14, 598, 962, 1239, 151, 1812, 818, 332, 267, 1334, 1479, 784, 231, 179, 968, 503, 2002, 743, 1855, 899, 2378, 102, 1877, 767, 304, 1574, 1856, 460, 445, 1014, 699, 1941, 926, 318]",1,10.0,20.0,300.0,0.6418020679468243,, -"[869, 231, 1013, 2396, 2335, 109, 1529, 2691, 2430, 24, 1178, 1420, 1701, 1864, 1487, 1399, 1396, 2045, 1346, 2238, 218, 1085, 230, 2397, 1446, 471, 836, 306, 387, 197, 1421, 598, 2303, 232, 161, 514, 297, 206, 331, 2365, 2025, 1445, 570, 1583, 1858, 151, 2137, 504, 2360, 2668, 1219, 2395, 733, 516, 201, 842, 1474, 1, 1625, 416, 1488, 60, 1644, 2228, 1850, 118, 2236, 1843, 1668, 1666, 493, 2667, 179, 2532, 1454, 973, 1224, 724, 1907, 1869, 71, 1875, 1876, 335, 519, 2394, 45, 1873, 1670, 1870, 1859, 2001, 2, 215, 1979, 1453, 2024, 681, 1023, 1095, 1709, 391, 897, 1847, 1998, 2027, 792, 2450, 2102, 332, 25, 55, 1566, 2010, 1358, 2071, 1908, 1344, 1882, 1527, 899, 211, 1912, 1171, 2013, 1140, 748, 476, 436, 2568, 566, 2017, 1055, 2008, 356, 968, 460, 272, 2103, 2003, 2555, 2011, 673, 1149, 2016, 995, 1732, 298, 68, 1800, 10, 593, 2009, 938, 1704, 2178, 1027, 2467, 267, 1394, 1995, 323, 366, 191, 1338, 2018, 1574, 1630, 
1982, 1269, 553, 1999, 818, 1127, 1494, 2182, 525, 314, 2002, 1558, 2012, 2015, 2274, 277, 1741, 2000, 1603, 1713, 2034, 1989, 860, 1812, 600, 498, 1986, 815, 661, 1434, 415, 2593, 737, 1992, 1997, 1697, 76, 1677, 1993, 1987, 204, 1658, 745, 2004, 851, 1309, 300, 1898, 1696, 1166, 130, 1652, 634, 1174, 2136, 160, 841, 1988, 2005, 1974, 1991, 696, 1239, 1994, 1990, 2006, 437, 162, 966, 88, 571, 1996, 743, 2007, 1667, 1268, 1570, 1288, 2611, 2014, 61, 490]",1,10.0,25.0,250.0,0.7507385524372231,, -"[1807, 1358, 460, 1149, 160, 1989, 1127, 1567, 1894, 745, 1815, 482, 1171, 1581, 1814, 722, 2501, 1709, 230, 2037, 179, 1811, 1558, 1812, 197, 1106, 366, 1941, 1072, 962, 176, 199, 68, 2357, 1348, 1995, 743, 790, 151, 228, 605, 1434, 673, 935, 681, 1822, 2500, 54, 420, 476, 1819, 203, 277, 1988, 1095, 2309, 310, 2034, 795, 835, 2076, 792, 1615, 1697, 2162, 1020, 549, 2582, 1708, 873, 1809, 391, 1950, 1874, 985, 417, 1813, 1172, 1121, 180, 1166, 687, 1062, 1097, 2573, 1004, 2117, 2217, 1889, 79, 848, 1808, 1331, 669, 1855, 1576, 1787, 1869, 2534, 311, 1810, 465, 2607, 1990, 1986, 1665, 1710, 71, 304, 791, 2379, 511, 224, 2163, 45, 1991, 306, 2359, 2, 159, 231, 1334, 261, 1315, 1124, 1479, 0, 767, 1577, 1542, 2568, 2314, 2045, 109, 14, 603, 1107, 1846, 1873, 1373, 1013, 1248, 1877, 27, 719, 467, 1221, 351, 1862, 1799, 568, 1871, 343, 2041, 1502, 1994, 1159, 576, 158, 1865, 566, 1993, 1666, 760, 775, 729, 1303, 1856, 1003, 1857, 17, 1346, 102, 1817, 1023, 944, 1893, 1241, 363, 1998, 2140, 1816, 1704, 1301, 481, 1992, 716, 1820, 1818, 563, 960, 318, 2327, 2096, 899, 1641, 1147, 699, 2139, 926, 2372, 1316, 519, 1073, 1337, 1867, 25, 215, 1687, 1335, 1852, 1662, 2409, 836, 2243, 1465, 297, 968, 1473, 551, 1987, 232, 2141, 1574, 1670, 2138, 2706, 48, 165, 2471, 1075, 633, 387, 2493, 1453, 2206, 1426, 1823, 637, 2004, 784, 13, 1821, 2707, 548, 1297, 1876, 157, 216, 570]",1,10.0,25.0,300.0,0.6262924667651403,, -"[1337, 153, 95, 109, 1787, 693, 1127, 65, 1920, 1856, 1362, 1248, 133, 
1676, 408, 586, 1118, 139, 553, 280, 539, 74, 973, 1448, 2217, 270, 1772, 2418, 1074, 563, 236, 2485, 1914, 306, 1343, 126, 1487, 1885, 734, 1791, 2026, 318, 2419, 1701, 2016, 699, 2153, 1353, 1926, 708, 2094, 2047, 1592, 2078, 2056, 2014, 1535, 1784, 1651, 2034, 1879, 2317, 2046, 175, 1143, 850, 1878, 255, 334, 736, 1616, 2117, 603, 2025, 2045, 2071, 519, 1045, 465, 772, 1482, 1830, 1852, 1572, 2048, 56, 454, 1358, 447, 191, 657, 1152, 2018, 1867, 935, 1675, 1776, 341, 1995, 1293, 366, 868, 712, 1884, 325, 2155, 1403, 1849, 2017, 1883, 1918, 1359, 412, 2189, 993, 97, 836, 69, 1972, 604, 388, 308, 2357, 1841, 1851, 1110, 995, 118, 830, 2013, 1583, 838, 1464, 2394, 2403, 1526, 1846, 1661, 2010, 1741, 2293, 215, 1894, 2164, 880, 61, 908, 1568, 1224, 2276, 815, 2280, 382, 661, 1215, 506, 1652, 415, 841, 851, 356, 1677, 1729, 1848, 1351, 2015, 1732, 1174, 490, 471, 1658, 98, 1120, 1844, 149, 737, 76, 897, 1840, 621, 2294, 1171, 181, 1713, 1683, 2178, 666, 1309, 1908, 262, 2295, 2103, 1850, 1521, 1909, 1377, 530, 130, 1376, 376, 87, 88, 749, 2194, 894, 1028, 1338, 1013, 2134, 1981, 1644, 190, 1625, 2135, 1880, 1203, 1630, 1845, 510, 1847, 1538, 1500, 2309, 249, 1527, 1843, 461, 2136, 162, 498, 300, 2185, 242, 1352, 1410, 1269, 1121, 1842, 32, 2011, 1332, 885, 2156, 2102, 1979, 1039, 2024, 1131, 1133, 807, 344, 424, 1977, 747, 1314, 2227, 907, 753, 1505, 437, 651, 643, 827, 323, 2012, 890, 120, 478, 1881, 1655, 1288, 2282, 2386, 1494, 1227, 1882, 514, 1839, 878, 1982, 1930, 743, 756, 436, 696, 1978, 2104, 11, 2405, 1980, 1394, 483, 2283, 816, 516, 668, 2384, 2281, 2326, 433]",1,10.0,30.0,300.0,0.7123338257016248,, -"[2045, 1851, 306, 1090, 1251, 1093, 1598, 1623, 1271, 1483, 1779, 133, 1448, 1147, 15, 1743, 573, 1913, 765, 1358, 1583, 1729, 118, 880, 895, 1661, 2450, 973, 704, 2370, 1848, 440, 1304, 1846, 1180, 35, 2083, 2369, 1560, 1077, 2155, 452, 2367, 1625, 1224, 1701, 1616, 2368, 255, 1131, 1042, 836, 1845, 510, 395, 894, 1167, 748, 2113, 1690, 1470, 2030, 2485, 
1402, 1843, 1013, 908, 505, 176, 1972, 1844, 1868, 1725, 1841, 232, 1070, 2112, 471, 1652, 1850, 1644, 1296, 1842, 1403, 334, 56, 1370, 1926, 2371, 447, 539, 412, 1829, 1975, 2553, 1521, 1368, 2293, 530, 181]",1,2.0,10.0,215.0,0.6251846381093058,, -"[1462, 2126, 1697, 2125, 1111, 1012, 1235, 2117, 1797, 2118, 306, 210, 962, 1663, 2077, 1251, 1701, 695, 1119, 55, 2295, 1986, 261, 1508, 1671, 102, 482, 2075, 229, 1611, 740, 1072, 1264, 1871, 2129, 2076, 1608, 1273, 897, 2124, 2122, 870, 1810, 790, 196, 1405, 1020, 1952, 835, 863, 1703, 794, 1479, 2045, 733, 1968, 2001, 1203, 2033, 505, 1807, 1567, 549, 1614, 722, 2004, 248, 1654, 1336, 1448, 417, 38, 443, 622, 1002, 1665, 1983, 2128, 2494, 1531, 1522, 14, 86, 775, 33, 2040, 1655, 1149, 705, 770, 22, 158, 2044, 1580, 2035, 141, 2668, 2564, 382, 1215]",1,2.0,10.0,245.0,0.594903988183161,, -"[1787, 109, 1337, 1665, 1785, 24, 1779, 1781, 1623, 1740, 563, 1636, 1358, 1708, 1778, 1804, 887, 306, 2080, 1780, 350, 1725, 1656, 1798, 124, 1299, 1846, 467, 608, 869, 216, 139, 1245, 236, 2095, 384, 2034, 317, 1089, 2451, 1479, 201, 2203, 13, 215, 2331, 1035, 1070, 158, 1640, 409, 2330, 1542, 1805, 980, 2144, 1584, 2045, 1624, 962, 1013, 1196, 633, 664, 261, 1334, 318, 1212, 231, 1701, 417, 1797, 1705, 958, 1100, 532, 695, 2048, 2143, 1241, 399, 589, 407, 1367, 1012, 1810, 699, 137, 1799, 547, 244, 576, 542, 1775, 2046, 1075, 335, 1453, 1552, 2329]",1,2.0,10.0,275.0,0.6089364844903988,, -"[2080, 2045, 1798, 487, 2046, 1772, 1782, 2048, 2085, 1787, 350, 308, 426, 1224, 1770, 519, 2089, 366, 236, 1505, 1013, 153, 910, 655, 1656, 608, 1367, 1584, 1856, 1245, 2090, 1127, 112, 530, 302, 1346, 1651, 139, 553, 329, 160, 887, 109, 1640, 2326, 126, 2078, 103, 1805, 1705, 476, 958, 1775, 656, 1490, 189, 836, 1799, 1801, 1262, 2084, 773, 719, 1797, 1784, 1986, 1572, 1070, 1800, 2086, 244, 2026, 1358, 1779, 1869, 1483, 1810, 306, 1958, 1733, 1804, 2153, 1701, 2088, 542, 1128, 2485, 1193, 2275, 1441, 138, 1583, 1956, 1158, 417, 1851, 1955, 
682, 1551, 1072, 603, 1824, 2009, 590, 1296, 895, 1097, 277, 880, 231, 1045, 470, 1267, 807, 857, 203, 1725, 1802, 1009, 452, 1826, 1884, 1820, 176, 903, 1881, 1142, 1899, 1803, 2094, 2091, 1954, 1781, 573, 696, 1251, 252, 20, 1973, 1771, 711, 725, 2179, 440, 1885, 2087, 294, 376, 1110, 1740, 1900, 458, 854, 945, 581, 342, 1478, 97, 1873, 193, 1353, 2031, 950, 115, 2030, 795, 1880, 2316, 2016, 891, 973, 2112, 2028, 36, 1901, 2323, 661, 1879, 645, 1549]",1,2.0,18.0,245.0,0.6506646971935007,, -"[1558, 885, 1929, 438, 2054, 68, 792, 1149, 391, 1121, 1671, 1985, 718, 651, 1332, 366, 1655, 553, 210, 816, 1139, 1978, 1127, 1039, 456, 2024, 1848, 1973, 604, 45, 95, 1980, 2009, 1495, 878, 1704, 1995, 1131, 725, 621, 950, 1453, 504, 2109, 483, 437, 460, 1540, 1358, 2181, 1434, 1628, 1667, 1224, 142, 151, 2136, 899, 1085, 1986, 1115, 436, 666, 32, 1166, 566, 232, 1652, 1979, 2034, 1420, 1421, 2396, 1013, 55, 179, 968, 2183, 2071, 673, 71, 201, 1023, 1095, 52, 519, 1268, 668, 2020, 215, 1529, 681, 1974, 1021, 1043, 842, 1838, 1697, 471, 2309, 2, 1912, 1879, 433, 2000, 1999, 809, 724, 748, 1566, 118, 161, 423, 323, 1178, 2022, 548, 994, 461, 1105, 424, 1981, 1259, 578, 1972, 1977, 514, 525, 2172, 60, 1074, 2103, 2057, 1843, 1869, 601, 1344, 593, 2102, 2229, 1625, 1474, 1644, 2467, 476, 1570, 1338, 2394, 973, 1850, 347, 1027, 1574, 733, 2180, 1676, 2397, 995, 600, 2133, 2019, 272, 818, 1732, 498, 2450, 266, 1898, 2016, 571, 61, 2274, 1696, 860, 191, 661, 2178, 2131, 1174, 2593]",1,2.0,18.0,260.0,0.6905465288035451,, -"[2324, 1735, 1282, 1283, 2130, 1891, 1889, 1499, 337, 484, 1812, 118, 691, 1336, 86, 18, 14, 1505, 791, 1483, 1791, 2327, 454, 1786, 2143, 141, 1002, 1681, 695, 303, 2083, 2042, 1358, 638, 1270, 1986, 808, 1776, 158, 191, 2096, 848, 409, 1197, 1798, 1013, 1797, 1788, 530, 1552, 1616, 1072, 554, 836, 2288, 2000, 322, 1009, 2039, 1367, 1490, 2090, 228, 2082, 1196, 2089, 382, 2085, 841, 441, 1248, 1856, 2046, 103, 1792, 958, 1656, 973, 581, 1670, 1560, 2318, 1778, 
215, 1830, 2287, 1810, 1584, 1622, 1705, 2035, 2034, 2326, 1273, 542, 2036, 603, 1802, 699, 1998, 2325, 1654, 2047, 487, 1775, 1651, 589, 1574, 308, 617, 476, 1572, 1026, 2088, 2145, 2329, 656, 1787, 49, 1777, 1624, 344, 1837, 329, 1851, 505, 239, 2048, 2038, 1804, 1158, 1929, 350, 406, 2504, 417, 2107, 1782, 2078, 960, 843, 1768, 399, 544, 1722, 1807, 135, 1773, 317, 1180, 2086, 139, 1497, 1968, 1319, 2045, 1193, 387, 831, 1894, 655, 1571, 294, 1015, 1640, 887, 573, 910, 2040, 1661, 1799, 1353, 138, 1781, 1258, 1045, 2095, 935, 97, 1245]",1,2.0,18.0,275.0,0.7141802067946824,, -"[1998, 519, 109, 2045, 745, 160, 2001, 306, 1950, 743, 1171, 1948, 310, 598, 574, 201, 1701, 215, 1807, 1131, 1655, 503, 1548, 2348, 420, 277, 171, 1875, 1624, 1424, 429, 205, 695, 199, 1410, 1995, 603, 1325, 1787, 1692, 211, 2136, 2360, 681, 302, 316, 1665, 298, 391, 1134, 366, 1574, 88, 476, 1299, 606, 1859, 180, 1869, 68, 1621, 2000, 1815, 1870, 1223, 352, 203, 1127, 1989, 579, 2309, 2034, 38, 1023, 1821, 460, 2143, 1116, 1121, 1097, 2379, 239, 2605, 1909, 1823, 1876, 2578, 863, 1623, 1012, 790, 2307, 1848, 303, 2319, 407, 719, 948, 994, 1453, 1813, 1126, 1581, 27, 2163, 1497, 1819, 1820, 968, 1489, 1287, 576, 829, 568, 1331, 577, 1348, 232, 795, 2035, 13, 1806, 335, 2613, 716, 823, 1095, 2541, 1004, 2550, 481, 549, 1682, 1816, 2292, 1590, 1329, 2076, 1812, 2233, 962, 2302, 715, 1324, 2321, 1645, 2230, 2189, 2409, 1192, 1704, 482, 1065, 2303, 2291, 835, 1294, 2308, 2320, 794, 862, 665, 230, 2011, 351, 759, 1062, 25, 1873, 465, 1817, 781, 511, 1654, 1576, 1382, 2301, 2668, 2119, 1810, 2552, 377, 678, 1265, 1160, 2268, 453, 151, 1278, 104, 383, 78, 1912, 733, 1096, 71, 2306, 1558, 1994, 1703]",1,2.0,20.0,260.0,0.6403249630723782,, -"[236, 109, 1954, 1914, 1128, 153, 126, 124, 2395, 2228, 138, 2275, 1957, 1958, 139, 1362, 1351, 1396, 590, 2094, 720, 1358, 70, 1592, 103, 1487, 724, 1072, 371, 2184, 1537, 1420, 176, 2396, 2102, 344, 645, 1455, 1623, 995, 1912, 1529, 1655, 306, 1898, 661, 504, 
540, 440, 733, 1849, 1399, 112, 2272, 1421, 1178, 329, 129, 703, 1570, 471, 750, 2282, 2031, 50, 2135, 441, 2273, 1085, 490, 436, 175, 2136, 1474, 55, 1338, 69, 302, 571, 308, 816, 725, 1441, 1131, 2394, 241, 1908, 265, 1344, 60, 2271, 2274, 748, 593, 193, 1682, 231, 2467, 1013, 2103, 1979, 1843, 935, 1741, 1583, 2485, 1700, 2497, 755, 1974, 1578, 747, 2019, 1644, 1850, 2397, 252, 1625, 1882, 815, 2137, 1224, 682, 1917, 514, 56, 1661, 880, 836, 1926, 2153, 696, 2018, 1309, 2178, 711, 516, 897, 2457, 1840, 2011, 2013, 671, 412, 1269, 1239, 323, 737, 76, 447, 1372, 1713, 1494, 966, 1658, 1677, 818, 211, 277, 860, 2017, 743, 1174, 1266, 331, 415, 2050, 204, 1851, 973, 634, 1527, 2436, 191, 300, 1792, 1142, 25, 1288, 2450, 1847, 2185, 2071, 61, 1986, 2593, 115, 539, 2015, 162, 1500, 1984, 630, 1667, 1652, 1268, 648, 446, 1616, 1690]",1,2.0,20.0,275.0,0.6133677991137371,, -"[1042, 1846, 2052, 1120, 2051, 1508, 2117, 74, 1844, 1625, 261, 1850, 1479, 2154, 868, 1405, 1729, 1291, 847, 1661, 1118, 1416, 1583, 1851, 334, 1333, 1701, 1660, 1637, 822, 770, 2034, 885, 32, 1526, 252, 1039, 1981, 1926, 2099, 1110, 1309, 666, 908, 229, 1847, 1521, 1644, 95, 1911, 880, 86, 1013, 2553, 1977, 1848, 1152, 452, 530, 1894, 1396, 1293, 1975, 1403, 1296, 2424, 1285, 1974, 1616, 2156, 2113, 1341, 1587, 651, 2105, 702, 2305, 1395, 737, 1973, 1841, 1085, 539, 1982, 2112, 1222, 2109, 454, 2422, 2367, 471, 1845, 270, 2070, 2189, 1909, 979, 1163, 2111, 2453, 878, 1370, 1149, 575, 2069, 711, 232, 1839, 1843, 1840, 1332, 536, 1224, 239, 56, 505, 519, 1868, 211, 836, 708, 414, 1204, 412, 398, 387, 643, 2485, 1070, 1215, 1171, 1572, 1372, 1979, 436, 1927, 1049, 447, 2118, 1402, 197, 826, 490, 749, 55, 1535, 179, 2403, 2078, 668, 1525, 1377, 1203, 1976, 510, 118, 1676, 1849, 712, 280, 850, 176, 747, 1072, 1074, 1675, 945, 1501, 1842, 891, 2295, 1732, 2151, 2608, 69, 2068, 1512, 231, 728, 42, 778, 1917, 2457, 2177, 11, 1683, 1258, 725, 1487, 2371, 1920, 1538, 1919, 1343, 1494, 838, 1916, 1674, 2649, 
2310]",1,2.0,20.0,300.0,0.6606351550960118,, -"[744, 1169, 1154, 609, 1123, 1040, 1103, 364, 819, 853, 524, 1041, 552, 796, 59, 876, 154, 580, 90, 156, 764, 873, 73, 823, 1145, 999, 72, 957, 1677, 613, 155, 88, 553, 985, 797, 970, 415, 346, 2021, 815, 341, 1628, 326, 687, 173, 1047, 2333, 851, 745, 901, 2055, 366, 1073, 34, 1469, 239, 1741, 757, 1125, 748, 2590, 979, 2015, 758, 919, 708, 645, 304, 1358, 951, 337, 323, 624, 2405, 1929, 626, 1479, 261, 2020, 1625, 2349, 816, 1288, 502, 1303, 1995, 174, 831, 616, 2117, 1909, 882, 2423, 2198, 244, 1980, 902, 1229, 120, 1070, 1198, 501, 333, 1013, 754, 1348, 289, 836, 978, 1149, 666, 1481, 2054, 2280, 1538, 769, 929, 485, 86, 1127, 228, 242, 787, 1042, 1209, 838, 1508, 489, 160, 1500, 1038, 2118, 2034, 770, 436, 2073, 357, 1979, 2281, 1498, 2185, 170, 684, 2165, 651, 1133, 1205, 246, 279, 229, 1405, 1351, 95, 737, 510, 55, 1072, 973, 111, 995, 686, 1842, 210, 689, 1502, 32, 1583, 1894, 1920, 2282, 60, 2052, 2102, 69, 1338, 1204, 2051, 2170, 2135, 1839, 2453, 2096, 2424, 1671, 1118, 2164, 1152, 2163, 1927, 441, 2283, 671, 53, 1189, 1218, 2009, 1333, 100, 1131, 490, 1357, 270, 1309, 451, 619, 1049, 2072, 544, 1923, 536, 1120, 2384, 1926, 376, 1602, 2295, 1215, 1925, 315, 1025, 2155, 1275, 1293, 2194, 11, 942, 1810, 1435, 2028, 697, 74, 2620, 2343, 2178, 1924, 405, 586, 827, 305, 1468, 1921, 466, 6, 2025, 1655, 1922, 149, 30, 1156, 1110, 1416, 2344, 771, 146, 738, 2045, 438, 1986, 1928, 1882, 1079, 2026, 835, 2419, 1080, 572, 2312, 1983, 2418, 1008, 2360, 101, 65, 1195, 897, 1410, 1224, 68, 373, 1203]",1,2.0,28.0,300.0,0.619645494830133,, -"[2181, 1042, 456, 2199, 1580, 779, 2074, 1602, 2200, 2054, 1919, 2197, 2182, 1628, 921, 2052, 778, 2048, 596, 1416, 1592, 41, 2046, 2051, 563, 2201, 1651, 1772, 1856, 1915, 306, 1830, 816, 1337, 318, 1838, 1248, 1916, 519, 1535, 2108, 2388, 973, 2282, 1917, 763, 2134, 603, 95, 1525, 341, 120, 109, 830, 2405, 1791, 868, 2276, 1980, 2026, 175, 133, 415, 1914, 2045, 1926, 1572, 955, 2025, 
1485, 2422, 2653, 32, 1303, 1309, 1448, 708, 1655, 2357, 2047, 1358, 1625, 734, 880, 2485, 1995, 861, 1844, 1776, 1920, 657, 1683, 1973, 1918, 1644, 699, 1120, 1110, 1852, 454, 376, 2071, 1377, 1729, 1850, 2194, 506, 465, 1787, 693, 1676, 1847, 2132, 2480, 69, 2278, 1675, 1842, 908, 1583]",1,4.0,12.0,215.0,0.5960118168389956,, -"[1121, 1410, 1358, 603, 1072, 985, 1203, 306, 2500, 1567, 417, 722, 1708, 1810, 197, 179, 2045, 420, 1013, 962, 2034, 935, 228, 2314, 1944, 304, 102, 926, 1819, 1874, 567, 1097, 465, 1873, 2117, 981, 270, 1159, 551, 1856, 1857, 873, 1149, 1869, 203, 1344, 1815, 1871, 336, 1945, 927, 1581, 2141, 1147, 1820, 968, 1799, 1335, 1867, 165, 199, 2534, 1215, 570, 1846, 1131, 2011, 1665, 1862, 1947, 1171, 231, 2430, 729, 2138, 766, 0, 1337, 109, 2501, 669, 1096, 1868, 1821, 48, 1864, 563, 2118, 1870, 1299, 2139, 352, 1636, 1488, 232, 54, 536, 2317, 318, 767, 2707, 2217, 25, 1053, 157, 2493, 13, 2096, 869, 185, 480, 149, 637, 2003, 24, 2582, 1172, 205, 1297, 598]",1,4.0,12.0,275.0,0.6037666174298375,, -"[1325, 298, 1346, 2063, 1670, 327, 157, 396, 1810, 836, 869, 109, 416, 2034, 482, 1738, 231, 2064, 598, 553, 88, 297, 603, 1701, 169, 1185, 985, 445, 1820, 1095, 2339, 1570, 1855, 2582, 2509, 1812, 1848, 849, 519, 1358, 232, 218, 2613, 1131, 1013, 2667, 1864, 2668, 230, 1988, 2009, 1912, 206, 1873, 277, 2691, 1957, 1457, 1355, 649, 2430, 1998, 211, 173, 570, 2550, 239, 1995, 2045, 1859, 1907, 1909, 745, 1522, 1127, 1992, 743, 1023, 1725, 1546, 1994, 1106, 2501, 2002, 306, 1453, 387, 1434, 1586, 215, 383, 1721, 1166, 2001, 1993, 673, 179, 879, 1704, 1858, 2303, 1191, 1583, 2340, 733, 2325, 899, 197, 565, 1610, 24, 2365, 1894, 2008, 1870, 1996, 1171, 1987, 2372, 2253, 2077, 1739, 2568, 1119, 681, 1445, 1239, 8, 201, 1697, 935, 1875, 391, 1834, 1800, 1092, 1219, 1, 1433, 2338, 2238, 510, 1055, 290, 1709, 1140, 2236, 1149, 1999, 1997, 2006, 480, 493, 45, 1295, 160, 2305, 1558, 2007, 71, 366, 797, 1876, 2335, 1189, 687, 1446, 68, 335, 1991, 2555, 1710, 
968, 566, 2003, 2359, 2004, 1986, 1033, 476]",1,4.0,18.0,200.0,0.6846381093057607,, -"[175, 2388, 596, 955, 2016, 2045, 69, 306, 1625, 2490, 2052, 1917, 604, 544, 1920, 1359, 1916, 1846, 1729, 1464, 454, 1097, 675, 277, 1849, 671, 88, 1309, 470, 666, 195, 1675, 2019, 1674, 1652, 1966, 1914, 252, 1847, 830, 1649, 1918, 733, 1539, 1905, 41, 1850, 1845, 94, 1072, 630, 1416, 74, 553, 2263, 530, 908, 1535, 973, 1293, 1079, 1403, 1127, 1995, 1644, 1521, 1362, 1841, 115, 1840, 1517, 539, 921, 749, 1013, 1676, 2419, 1358, 93, 2357, 1566, 1592, 325, 2426, 778, 682, 586, 1927, 2385, 1042, 124, 181, 1587, 2025, 376, 779, 1844, 1973, 1583, 993, 2152, 270, 1368, 925, 2274, 1732, 2154, 777, 1919, 1222, 1485, 506, 1341, 2151, 1538, 1195, 1848, 2425, 1291, 1500, 510, 279, 2276, 2155, 1133, 836, 1224, 2280, 2293, 1152, 838, 1218, 1370, 32, 868, 1526, 1529, 2355, 1118, 934, 2116, 1622, 822, 280, 1842, 702, 1115, 593, 1110, 1977, 176, 880, 809, 935, 1421, 334, 826, 1142, 1843, 1981, 1915, 1661, 2485, 366, 324, 1270, 578, 693, 1289, 661, 471, 748, 502, 118, 2109, 1121, 1120, 1525, 2344, 2294]",1,4.0,18.0,215.0,0.6395864106351551,, -"[1481, 2332, 2122, 1149, 1810, 91, 1015, 1160, 2380, 2002, 733, 1986, 2419, 962, 1789, 482, 242, 2118, 816, 120, 862, 1508, 973, 1669, 2335, 229, 1979, 2281, 835, 2040, 711, 1980, 1425, 2121, 1405, 1416, 838, 1671, 461, 673, 442, 1812, 566, 682, 779, 1218, 2052, 2424, 2280, 2117, 94, 1351, 1919, 1141, 2379, 38, 2406, 544, 949, 2123, 778, 2035, 224, 1616, 196, 1479, 1531, 1204, 261, 1968, 2405, 1927, 1848, 2283, 1929, 1930, 95, 2039, 523, 863, 1807, 1660, 2262, 1291, 2136, 1839, 1583, 2164, 2453, 2282, 979, 2185, 2043, 510, 1842, 2003, 1618, 2042, 2135, 539, 149, 1042, 424, 1614, 2416, 1133, 1468, 1654, 74, 1270, 1121, 1002, 1049, 1894, 1341, 417, 617, 1333, 674, 2033, 702, 1203, 2152, 141, 1273, 1889, 1222, 737, 1526, 1975, 55, 586, 1080, 1118, 705, 1851, 2294, 1525, 274, 1448, 1131, 921, 2032, 1152, 11, 429, 1224, 1892, 2130, 1497, 1370, 1493, 436, 158, 
1590, 783, 1535, 1410, 2044, 836, 1890, 1655, 2381, 2153, 2001, 2376, 490, 1156, 2034, 2348, 14, 1319, 1026, 2038, 794, 180, 2036, 86, 788, 1013]",1,4.0,18.0,245.0,0.7326440177252584,, -"[69, 2155, 839, 280, 2189, 965, 1279, 1416, 2018, 1669, 731, 1524, 1015, 2186, 1376, 2293, 1892, 1118, 572, 2335, 61, 680, 636, 2620, 1630, 1668, 1481, 1921, 1974, 1848, 2502, 57, 1013, 1926, 1692, 2332, 1909, 815, 1920, 2499, 2330, 2136, 1397, 76, 591, 74, 1925, 2024, 55, 1351, 2576, 1232, 1916, 2103, 2311, 1535, 666, 1529, 2292, 1695, 1204, 1882, 860, 751, 2274, 434, 1680, 763, 1017, 586, 964, 1999, 2232, 552, 1117, 1851, 334, 737, 1588, 1047, 2199, 2178, 2333, 995, 711, 2277, 1309, 1759, 897, 1979, 2016, 1580, 1158, 2201, 1463, 861, 693, 2383, 1635, 2200, 1850, 858, 2056, 1269, 2419, 2231, 490, 2291, 1156, 2021, 2418, 2233, 2010, 851, 2015, 642, 211, 1517, 1644, 746, 1288, 624, 1527, 1841, 1125, 901, 2198, 597, 818, 100, 1625, 984, 1394, 2197, 1026, 80, 276, 1285, 60, 2264, 2202, 1388, 399, 2068, 1220, 2347, 1178, 1274, 485, 2219, 315, 1115, 2108, 2450, 1912, 2102, 130, 1741, 62, 2073, 1338, 1658, 1713, 1080, 162, 736, 2017, 497, 1732, 1006, 2013, 1395, 696, 1104, 1474, 498, 257, 841, 743, 32]",1,4.0,18.0,260.0,0.6008124076809453,, -"[745, 743, 160, 2130, 366, 1316, 775, 549, 2041, 2042, 791, 603, 1891, 835, 511, 2004, 2668, 935, 716, 2142, 2076, 1654, 180, 1497, 927, 2039, 920, 1020, 691, 766, 845, 2077, 1576, 1331, 391, 45, 1297, 277, 1301, 1889, 790, 1807, 476, 2582, 304, 795, 2075, 310, 1448, 17, 316, 306, 1147, 1106, 2573, 1708, 855, 1121, 159, 1873, 962, 230, 1822, 1542, 926, 2096, 199, 420, 1335, 2117, 1857, 1062, 1968, 2037, 48, 0, 1814, 519, 2493, 960, 1273, 1862, 297, 899, 68, 2141, 2038, 2140, 1865, 482, 1856, 2398, 1868, 1172, 1815, 1874, 1823, 1821, 2314, 792, 2534, 1315, 841, 1159, 311, 2707, 1577, 1567, 1808, 1894, 387, 1819, 2471, 873, 1241, 416, 1877, 102, 1581, 1026, 1863, 2035, 2500, 1983, 1336, 1820, 382, 460, 532, 216, 157, 673, 2034, 1473, 722, 2501, 1319, 
1870, 1003, 1867, 1871, 14, 185, 49, 568, 2138, 1013, 224, 158, 784, 1799, 1787, 1816, 203, 1073, 480, 1811, 24, 86, 228, 2309, 2, 417, 576, 1986, 2357, 1095, 165, 1864, 1810, 1299, 151, 1818, 1124, 1875, 1615, 1950, 27, 1812, 2040]",1,4.0,18.0,275.0,0.6307237813884786,, -"[103, 139, 153, 236, 112, 487, 1775, 2380, 308, 1325, 1810, 1820, 298, 2301, 327, 138, 665, 230, 1782, 2477, 91, 180, 2063, 332, 1781, 2379, 2045, 1020, 656, 109, 2089, 1701, 869, 733, 1666, 695, 2381, 306, 426, 1864, 124, 2303, 1623, 1095, 316, 746, 2532, 2090, 2077, 24, 383, 2006, 2335, 1382, 416, 603, 1502, 2008, 2085, 1557, 126, 957, 2123, 1912, 88, 1295, 1670, 1346, 2378, 1876, 302, 397, 2004, 1348, 1066, 1998, 482, 1023, 598, 314, 1704, 335, 2001, 1640, 285, 1197, 519, 1873, 2002, 2168, 8, 1200, 1996, 1685, 2121, 18, 1096, 1875, 752, 2348, 650, 2003, 1483, 827, 1239, 2365, 2064, 2084, 2091, 2025, 460, 1453, 2326, 503, 673, 387, 429, 191, 1574, 1347, 2238, 1987, 2, 102, 1, 1999, 2568, 1219, 1446, 1894, 2083, 2009, 979, 781, 1185, 396, 294, 1624, 367, 818, 1550, 879, 2034, 2613, 277, 2243, 445, 1989, 417, 1938, 1937, 1014, 2594, 151, 2044, 2329, 350, 1709, 743, 1171, 218, 45, 1434, 1503, 1779, 1522, 1995, 1997, 2207, 160, 1907, 169, 1445, 1992, 157, 1358, 1988, 2229, 2076, 266]",1,4.0,18.0,300.0,0.604135893648449,, -"[2120, 2040, 2291, 790, 1810, 2045, 665, 1787, 382, 2076, 1809, 2, 306, 332, 1265, 1264, 417, 248, 159, 1051, 519, 153, 236, 2008, 1203, 1062, 133, 377, 215, 1779, 1772, 1797, 563, 2301, 1346, 68, 391, 1152, 1337, 1782, 318, 2362, 1812, 1623, 1780, 1817, 2268, 1012, 1818, 841, 124, 1894, 1777, 897, 459, 1535, 1692, 1806, 2293, 791, 1998, 1769, 280, 180, 1785, 1624, 1983, 33, 442, 1125, 2036, 1614, 383, 2302, 2294, 1428, 539, 2032, 1805, 109, 1026, 749, 1534, 1045, 1929, 893, 691, 102, 2640, 610, 289, 1192, 2122, 2092, 1661, 2108, 2004, 176, 2094, 1131, 286, 1511, 1519, 1273, 1202, 2093, 232, 1682, 588, 45, 218, 2155, 443, 2153, 224, 2011, 2335, 695, 576, 1336, 810, 1385, 1160, 
2034, 1020, 401, 1448, 160, 1261, 638, 627, 2263, 2308, 201, 1655, 1110, 487, 2037, 2303, 2041, 1042, 1270, 2042, 1808, 2379, 430, 983, 863, 1986, 2490, 1068, 1590, 2123, 71, 2044, 920, 2332, 911, 2002, 1814, 775, 330, 158, 1891, 151, 1224, 1497, 1968, 2066, 141, 2249, 366, 1319, 962, 2038, 1890, 678, 2130, 1893, 733, 1531, 1002, 435, 503, 698, 426, 1015, 993, 179, 2378, 574, 482, 1046, 862, 2295, 2348, 196, 1654, 1773, 2304]",1,4.0,20.0,260.0,0.5971196454948301,, -"[2054, 1628, 604, 456, 95, 2172, 2136, 1351, 142, 1394, 1838, 2189, 2016, 1171, 2018, 1540, 519, 772, 52, 211, 1341, 1269, 2282, 1121, 1658, 1505, 1912, 1926, 2185, 69, 1630, 1918, 1500, 1139, 1133, 1690, 2403, 1655, 1572, 2274, 815, 598, 908, 2131, 2276, 2112, 1909, 2181, 1919, 1732, 1538, 2094, 778, 2010, 530, 454, 2133, 1463, 988, 2450, 2155, 1914, 1110, 2013, 2012, 61, 525, 510, 868, 506, 1844, 2109, 860, 738, 696, 836, 1494, 2103, 30, 2102, 1529, 1131, 149, 1224, 737, 2162, 1338, 1592, 1930, 1879, 1979, 2283, 693, 347, 711, 1849, 1464, 1352, 2030, 1403, 841, 2099, 851, 181, 1521, 2057, 2233, 1359, 2017, 408, 162, 2183, 826, 1370, 973, 995, 88, 1285, 1841, 1675, 842, 2312, 2178, 467, 490, 76, 1920, 471, 2105, 2132, 581, 1851, 2166, 2180, 749, 1527, 1981, 1847, 1115, 2281, 1526, 1402, 1013, 2198, 578, 1683, 2024, 228, 743, 2110, 1677, 2045, 1172, 1843, 436, 498, 285, 2078, 838, 1003, 191, 2316, 2182, 423, 2014, 242, 1178, 334, 1625, 1708, 1842, 1644, 634, 130, 708, 1616, 1850, 858, 2072, 60, 1661, 1668, 1848, 363, 1701, 1882, 1373, 1857, 331, 1195, 1908, 1029, 239, 1474, 880, 2396, 697, 1309, 2011, 1713]",1,4.0,20.0,275.0,0.656573116691285,, -"[563, 318, 201, 994, 109, 2309, 2045, 232, 215, 205, 869, 2230, 1873, 310, 24, 157, 1986, 1842, 1950, 203, 1251, 2117, 416, 102, 1321, 231, 1192, 1623, 606, 733, 343, 143, 1092, 465, 216, 1869, 199, 633, 2291, 306, 1624, 770, 664, 1508, 210, 2155, 1119, 1671, 2243, 2143, 1787, 335, 737, 2305, 1203, 302, 1948, 277, 2308, 547, 2605, 2124, 699, 603, 1797, 2126, 
2125, 1097, 1701, 1424, 2118, 505, 1489, 2127, 1126, 467, 1111, 1448, 598, 1815, 482, 2136, 1548, 579, 88, 2122, 429, 2295, 1012, 1818, 1871, 1538, 2296, 1405, 695, 2282, 1062, 2379, 2613, 2075, 2307, 2360, 790, 2384, 1820, 2409, 1500, 235, 1224, 420, 2501, 1095, 2668, 1324, 218, 1645, 2281, 1133, 1807, 1581, 2077, 2129, 298, 186, 1821, 2578, 2531, 27, 916, 313, 1072, 1107, 2306, 1452, 1823, 716, 605, 1303, 316, 1336, 1348, 1531, 719, 829, 1655, 191, 2040, 1909, 886, 838, 833, 1026, 303, 2035, 1217, 2163, 1215, 180, 1325, 171, 1817, 2550, 1812, 1134, 1331, 1614, 1666, 1622, 382, 1819, 795, 1004, 1813, 823, 705, 55, 510, 1968, 2033, 86, 2205, 1149, 33, 14, 962, 2185, 1814, 1806, 784, 229, 983, 149, 2280, 1319, 767, 788, 836, 1808, 1116, 1049, 1816, 841, 230, 2041, 2036, 1268, 226, 622, 2120, 665, 532, 2348, 729, 1924, 2032, 1270, 2249, 1106, 1703, 293, 1131, 242, 1576, 2283, 1894, 1952, 1889, 490, 2500, 1930, 2038, 577, 481, 1273, 1927, 942, 1223, 2521, 261, 417, 141, 511, 1479, 1839, 48, 2494, 158, 2042, 11, 1261, 691, 351, 1567, 835, 1810, 1299, 2130, 1497, 1893, 1892, 897, 549, 740, 1410, 1811, 1665, 1809, 1002, 1929, 576, 1874, 2037, 568, 1031, 2204, 13, 2039, 722, 1890, 2034]",1,4.0,28.0,300.0,0.6739290989660266,, -"[210, 790, 1012, 1203, 2118, 2076, 2033, 482, 2117, 775, 835, 306, 2075, 1111, 1020, 1797, 695, 2044, 2668, 1215, 1452, 740, 1810, 196, 2077, 2126, 55, 962, 102, 1846, 1264, 1251, 229, 1508, 1671, 1964, 2680, 2295, 443, 1966, 1697, 248, 1336, 1072, 2032, 536, 2125, 1986, 722, 2001, 549, 505, 770, 622, 1614, 916, 1149, 1701, 2045, 1479, 1405, 1871, 1615, 705, 261, 1268, 1119, 186, 1655, 1666, 1894, 1807, 2129, 1002, 1567, 1702, 1665, 1273, 1703, 1812, 2531, 1654, 523, 180, 1448, 1522, 382, 1952, 14, 417, 1618, 794, 1531, 1234, 429, 1929, 2124, 1026, 1622, 863, 86, 94, 1319, 1968, 2238, 48, 141, 1493, 49, 38, 1497, 2004, 1270, 617, 158, 1983, 1965, 1491, 22, 2357]",1,12.0,12.0,245.0,0.5989660265878878,, -"[2245, 2243, 745, 2001, 298, 519, 2002, 1558, 
160, 1127, 1993, 681, 792, 2009, 1873, 366, 743, 1574, 1095, 1876, 476, 1998, 2003, 45, 1992, 1995, 1358, 1875, 2246, 2006, 566, 1171, 1589, 2000, 1704, 68, 899, 2008, 2007, 1709, 2, 1988, 71, 180, 1701, 2244, 2335, 1999, 1986, 1595, 1870, 1859, 2005, 101, 1149, 391, 2238, 1997, 151, 1325, 1989, 1023, 1234, 460, 22, 1966, 1968, 1969, 1812, 1697, 2034, 1453, 1906, 335, 232, 215, 2004, 746, 1479, 201, 261, 968, 456, 2182, 95, 316, 229, 525, 463, 1971, 1703, 1405, 1702, 2199, 1894, 2172, 1417, 789, 1166, 1602, 1124, 69, 1347, 1990, 963, 1434, 142, 544, 2118, 179, 1416, 1991, 2057, 327, 1867, 2063, 669, 2054, 1970, 1920, 1139, 1240, 1508, 1655, 1540, 536, 1994, 1049, 1295, 1846, 2198, 1013, 1086, 673, 1926, 2189, 1215, 1967, 2133, 2419, 1987, 759, 65, 2055, 2186, 1628, 731, 901, 770, 74, 1000, 1996, 751, 972, 964, 2073, 2332, 2247, 2117, 2021, 485, 1517, 2052, 2219, 2579, 1481, 6, 62, 1279, 1588, 2580, 1669, 1118, 839, 1042, 1365, 2064, 2051, 1125, 597]",1,12.0,18.0,215.0,0.6266617429837519,, -"[2039, 1889, 2130, 2042, 1891, 154, 155, 2333, 1270, 1826, 1025, 691, 1047, 1273, 326, 90, 346, 357, 156, 1417, 1979, 2037, 60, 333, 2040, 1929, 2350, 364, 1410, 244, 1895, 1898, 382, 1665, 2055, 1900, 1628, 624, 1824, 791, 1894, 1268, 617, 771, 1614, 885, 2024, 14, 1807, 2295, 337, 1079, 1125, 86, 158, 1156, 49, 2034, 604, 2198, 1655, 417, 1980, 2178, 341, 111, 1198, 2038, 1358, 1267, 2021, 1527, 1978, 1981, 228, 68, 170, 2035, 59, 645, 1713, 1336, 1332, 1703, 1002, 141, 1042, 901, 210, 878, 373, 1481, 1529, 1039, 2156, 1131, 2023, 173, 891, 1968, 34, 73, 415, 55, 897, 1309, 1448, 1152, 2018, 224, 651, 851, 1115, 1178, 1677, 1908, 174, 2054, 2041, 816, 246, 436, 53, 101, 1652, 69, 815, 466, 489, 1927, 1848, 72, 1741, 1174, 2155, 289, 725, 95, 737, 76, 211, 1982, 2015, 2335, 2071, 1080, 1812, 593, 1497, 2036, 2073, 483, 1732, 162, 485, 1026, 2016, 2017, 1654, 1319, 130, 1901, 841, 1494, 1907, 2014, 738, 1920, 2418, 1882, 1288, 1658, 300, 1972, 1926, 1977, 1847, 315, 2151, 
827]",1,12.0,18.0,260.0,0.697562776957164,, -"[1412, 1282, 121, 704, 1735, 2324, 544, 1786, 1743, 389, 1283, 337, 1015, 1701, 1726, 589, 880, 1248, 562, 2327, 1852, 1583, 1725, 905, 436, 671, 1077, 399, 1618, 699, 1505, 1030, 189, 2075, 742, 215, 2025, 2325, 505, 1552, 1013, 1358, 252, 773, 969, 1499, 277, 2113, 1196, 115, 118, 1199, 2143, 2251, 2107, 1681, 1733, 2318, 2252, 1620, 36, 1478, 603, 1574, 859, 2096, 1262, 762, 1791, 416, 1070, 1089, 1986, 2081, 303, 980, 2322, 1740, 1448, 1313, 1564, 359, 1012, 935, 2382, 1546, 878, 1483, 409, 1776, 2083, 2256, 2082, 1768, 1878, 1837, 18, 341, 1336, 1624, 2091, 1249, 1769, 1801, 1311, 1871, 1335, 1789, 1616, 1856, 661, 244, 973, 454, 318, 1337, 1158, 1258, 563, 1778, 406, 2000, 814, 2407, 1651, 2208, 1490, 218, 1830, 102, 133, 2026, 1799, 836, 1787, 191, 2045, 1459, 1705, 2086, 1661, 1289, 1009, 573, 754, 1560, 1784, 2034, 1779, 1072, 2153, 1251, 554, 1781, 1998, 342, 1804, 2451, 1797, 2087, 2016, 1777, 1670, 1367, 289, 484, 638, 2094, 2090, 134, 581, 1640, 1810, 519, 1773, 643, 2080, 2505, 960, 459]",1,12.0,18.0,275.0,0.7289512555391433,, -"[68, 1283, 1681, 1743, 948, 661, 160, 1583, 935, 742, 1282, 1499, 1358, 366, 2324, 699, 2000, 905, 704, 391, 2252, 1013, 1852, 743, 1012, 2318, 335, 337, 416, 407, 1682, 1620, 389, 1740, 1725, 277, 1735, 1571, 1077, 1878, 1677, 1701, 2113, 969, 2382, 1335, 151, 2251, 1733, 1830, 1412, 2319, 598, 201, 179, 387, 232, 1837, 2385, 2143, 1635, 409, 1262, 244, 1726, 685, 2325, 562, 239, 1196, 1070, 121, 1803, 2327, 505, 1546, 1574, 695, 880, 341, 754, 359, 814, 1791, 1986, 1253, 382, 191, 589, 1448, 643, 1336, 1871, 1776, 1483, 460, 289, 215, 1768, 1142, 2083, 1045, 1661, 1249, 454, 2256, 118, 1158, 189, 406, 1616, 2, 2407, 303, 2034, 2047, 2087, 960, 314, 638, 1856, 836, 2598, 2096, 1313, 603, 2451, 459, 681, 2026, 1258, 945, 2016, 71, 530, 519, 673, 2081, 544, 1624, 859, 1289, 573, 2045, 2082, 554, 1248, 1804, 1777, 1311, 973, 318, 563, 2075, 1337, 1564, 102, 1197, 2090, 1800, 1797, 
1072, 566, 2095, 441, 1779, 1778, 1705, 1670, 1560, 887, 2326, 1367, 399, 45, 218, 1787, 2208, 980, 1773]",1,12.0,18.0,300.0,0.7108567208271788,, -"[231, 505, 406, 1448, 2287, 2288, 1878, 1776, 2318, 387, 826, 2089, 1851, 114, 836, 2256, 661, 1871, 1701, 1770, 1782, 1651, 1681, 2088, 695, 1245, 1572, 2078, 2090, 980, 556, 1251, 1560, 2082, 2085, 598, 1584, 2080, 573, 963, 2326, 1767, 660, 1564, 1013, 487, 102, 1986, 1144, 610, 1783, 1773, 138, 1367, 176, 1779, 1490, 416, 2084, 1775, 1798, 191, 608, 1670, 139, 484, 910, 1856, 2046, 103, 2045, 1574, 542, 350, 417, 519, 409, 112, 344, 1797, 656, 1561, 1661, 126, 1772, 589, 655, 476, 2079, 308, 1656, 1786, 1780, 1799, 2048, 1551, 1623, 318, 544, 1045, 302, 1998, 1358, 133, 1740, 306, 808, 1785, 1705, 1337, 1805, 426, 2327, 563, 1158, 1722, 1804, 887, 1725, 236, 719, 1784, 973, 1196, 1733, 1346, 1802, 1070, 1193, 581, 1777, 1229, 329, 2451, 1483, 1789, 452, 459, 399, 958, 2481, 234, 454, 244, 1771, 1035, 124, 1800, 1778, 1072, 1769, 1768, 314, 1787, 2087, 1197, 289, 671, 1009, 1571, 1699, 1622, 1801, 153, 2123, 193, 2478, 317, 20, 109, 1774, 1624, 2203, 91, 189, 1478, 330, 294, 2591, 1262, 1781, 554, 1015, 2092, 530, 384, 945, 773, 2095, 1455, 1506, 70, 1803, 2091, 2107, 1537, 342, 1505, 1089, 441]",1,12.0,20.0,215.0,0.6901772525849336,, -"[474, 1181, 474, 1181, 474, 1181, 474, 1181, 474, 1181, 474, 1181, 474, 1181, 474, 1181, 474, 1181, 474, 1181]",0,12.0,20.0,260.0,0.0993353028064992,, -"[36, 2107, 745, 743, 1505, 1127, 366, 2130, 1002, 1775, 968, 1448, 160, 350, 1870, 102, 484, 417, 1894, 1558, 2472, 598, 1807, 681, 391, 141, 617, 1889, 317, 1968, 1677, 384, 2042, 1772, 836, 1789, 1891, 2039, 1709, 335, 1149, 1791, 1623, 1777, 887, 1875, 1989, 49, 1987, 426, 1023, 740, 2045, 1983, 1805, 1670, 1859, 133, 289, 1876, 1574, 691, 88, 232, 791, 456, 460, 68, 563, 1319, 1778, 1785, 2094, 14, 2037, 236, 153, 2188, 1788, 1072, 139, 2038, 1337, 318, 1781, 476, 1346, 201, 1988, 1986, 2034, 1780, 608, 1015, 306, 1705, 1665, 581, 
215, 673, 1013, 962, 103, 2422, 1787, 1624, 2041, 429, 158, 1095, 1416, 899, 1993, 667, 1812, 191, 792, 2033, 519, 1779, 179, 2022, 1434, 482, 1929, 1614, 109, 1602, 1270, 1654, 1920, 2295, 2084, 841, 1197, 1703, 788, 2231, 1769, 2182, 1992, 566, 1697, 1991, 176, 1810, 151, 1042, 1926, 2190, 973, 124, 2095, 1661, 224, 1914, 1609, 1873, 86, 454, 1990, 2092, 459, 2072, 69, 2052, 1045, 2263, 1497, 1196, 2668, 382, 610, 847, 863, 1166, 1453, 71, 441, 2036, 2, 118, 38, 1917, 1452, 1417, 2093, 2305, 2051, 604, 1786, 1998, 1704, 309, 1441, 1026, 1065, 2011, 1966, 1096, 2292, 1278, 2233, 2385, 1336, 1797, 2040, 1273, 2490, 1351, 589, 2088, 2306, 2189, 387, 862, 401, 383, 1817, 104, 2667, 94, 1912, 794, 2541, 675, 1818, 409, 2091, 486, 25, 416, 2318, 2303, 1012, 636, 1062, 2364, 1160, 1348, 1590, 2268, 481, 586, 934, 695, 2035, 2326, 78, 2304, 678, 2355, 2302, 2357, 1219, 195, 45, 1649, 1692, 469, 759, 465, 2076, 54, 2032, 1020, 435, 994, 2267, 1287, 2008, 377, 733, 2291, 1790, 1265, 1681, 2321, 1329, 823]",1,12.0,28.0,300.0,0.7640324963072378,, -"[133, 109, 1118, 1798, 2080, 2024, 74, 1417, 1367, 1782, 586, 1452, 1042, 1224, 2065, 139, 426, 1772, 756, 1995, 651, 655, 306, 1039, 749, 102, 2238, 789, 1990, 963, 2236, 1131, 2120, 126, 436, 2295, 2008, 1998, 878, 763, 2026, 1969, 1234, 112, 2615, 1068, 22, 698, 1436, 1989, 1558, 1240, 1203, 1158, 1966, 885, 2300, 2153, 442, 1997, 1993, 487, 397, 1697, 519, 1623, 1152, 2155, 1511, 2004, 2337, 943, 1628, 1971, 147, 1419, 2293, 1045, 1535, 1018, 2263, 1785, 1991, 286, 2640, 2259, 1789, 1453, 1635, 1703, 1968, 1987, 1705, 1196, 151, 1072, 2066, 2009, 280, 745]",1,14.0,10.0,275.0,0.5971196454948301,, -"[2324, 1283, 2080, 530, 1735, 181, 1282, 2476, 68, 1583, 2327, 1584, 1464, 1625, 1804, 598, 1721, 2088, 53, 1729, 1490, 1848, 880, 337, 2090, 1367, 1164, 1821, 1798, 1656, 505, 2096, 484, 1770, 960, 1847, 1851, 2086, 2025, 73, 2094, 1810, 1120, 1499, 2089, 744, 481, 1846, 1483, 1765, 973, 883, 2084, 1850, 1335, 1845, 449, 716, 608, 
2359, 1609, 1245, 2085, 1251, 1546, 2444, 191, 487, 655, 112, 836, 1776, 1797, 1841, 350, 1782, 1873, 1986, 1840, 30, 69, 1775, 79, 34, 1675, 1013, 1072, 1758, 1009, 105, 2143, 290, 795, 2445, 334, 1574, 1799, 1389, 1358, 1287, 573, 656, 580, 2026, 1705, 719, 1193, 613, 1843, 1742, 797, 542, 958, 1701, 699, 1779, 1998, 103, 1661, 1623]",1,14.0,12.0,260.0,0.6048744460856721,, -"[1174, 815, 2153, 737, 1224, 1142, 2326, 1741, 327, 2178, 1847, 2064, 415, 2339, 218, 1309, 1191, 76, 482, 1346, 1810, 1882, 383, 199, 818, 1670, 2394, 1288, 2017, 1713, 130, 1120, 851, 869, 91, 1013, 2253, 1732, 1820, 2011, 2340, 2014, 157, 2016, 162, 1677, 849, 2380, 2010, 300, 429, 841, 8, 2063, 1394, 420, 2338, 1908, 682, 2555, 1912, 2301, 1445, 2045, 733, 1072, 2123, 671, 88, 2012, 1325, 1996, 2348, 1907, 176, 2034, 230, 1358, 1295, 2044, 598, 1894, 2015, 2412, 1864, 2550, 1527, 2509, 109, 1869, 503, 297, 1701, 2013, 1446, 1812, 571, 2381, 1738, 935, 973, 316, 1347, 1782, 1197, 719, 1993, 2325, 2309, 2477, 1784, 2025, 880, 1558, 169, 1097, 205, 1583, 498, 603]",1,14.0,12.0,300.0,0.6488183161004432,, -"[213, 1593, 144, 23, 145, 2159, 2192, 2161, 537, 1165, 898, 2622, 1836, 1835, 2157, 2160, 92, 108, 1698, 1327, 2158, 1647, 495, 2209, 1328, 1504, 144, 1593, 1165, 145, 2192, 537, 1836, 2161, 23, 213, 2209, 144, 1593, 1165, 145, 2192, 537, 1327, 495, 1836, 92, 23, 2161, 1698, 1328, 898, 2159, 2158, 2209, 144, 1593, 1165, 145, 2192, 537, 1327, 495, 1836, 92, 23, 2161, 1698, 1328, 898, 2159, 2158, 2209, 144, 1593, 1165, 145, 2192, 537, 1327, 495, 1836, 92, 23, 2161, 1698, 1328, 898, 2159, 2158, 2209, 144, 1593, 1165, 145, 2192, 537, 1327, 495, 1836, 92, 23, 2161, 1698, 1328, 898, 2159, 2158, 2209, 144, 1593, 1165, 145, 2192, 537, 1327, 495, 1836, 92, 23, 2161, 1698, 1328, 898, 2159, 2158, 2209, 144, 1593, 1165, 145, 2192, 537, 1327, 495, 1836, 92, 23, 2161, 1698, 1328, 898, 2159, 2158, 2209, 144, 1593, 1165, 145, 2192, 537, 1327, 495, 1836, 92, 23, 2161, 1698, 1328, 898, 2159, 2158, 2209, 144, 
1593, 1165, 145, 2192, 537, 1327, 495, 1836, 92, 23, 2161, 1698, 1328, 898, 2159, 2158]",0,14.0,18.0,200.0,0.087149187592319,, -"[596, 41, 2026, 137, 1336, 1193, 1312, 1311, 86, 1346, 809, 1248, 468, 1797, 2096, 14, 728, 1651, 2504, 135, 2329, 1191, 1787, 2047, 342, 699, 294, 1705, 945, 1790, 1937, 1558, 1490, 158, 2087, 1002, 1072, 1785, 2138, 1337, 617, 49, 1856, 1505, 1497, 417, 141, 644, 603, 2088, 548, 1614, 563, 519, 2340, 1798, 1998, 973, 2327, 2144, 1009, 1313, 215, 1013, 1804, 2331, 2095, 224, 2034, 1799, 1810, 317, 2339, 2046, 487, 36, 958, 382, 1656, 1788, 1624, 1552, 2143, 1665, 1196, 1512, 318, 2045, 2107, 426, 1523, 109, 2086, 1167, 2326, 1802, 841, 1367, 1584, 1670, 1158, 2080, 329, 1805, 306, 2091, 1282, 191, 1784, 1538, 409, 2177, 2324, 236, 2048, 2085, 1735, 1273, 1483, 103, 1871, 589, 124, 1283, 711, 484, 691, 937, 454, 118, 1270, 1782, 655, 1781, 138, 1772, 643, 1572, 2078, 1789, 1791, 1245, 1776, 554, 2084, 1045, 1777, 1640, 791, 1319, 121, 302, 1778, 1654, 1026, 308, 2079, 399, 1681, 476, 2023, 1773, 1616, 2089, 957, 656, 112, 1557, 581, 341, 608, 1780, 2407, 1770, 303, 350, 1852, 2090, 133, 1499]",1,14.0,18.0,245.0,0.6731905465288035,, -"[1581, 1819, 1331, 1998, 1576, 1821, 1062, 1787, 2001, 1846, 1864, 790, 563, 1337, 2044, 1820, 1822, 716, 45, 729, 1852, 232, 2381, 2043, 1799, 1860, 226, 606, 1862, 13, 869, 467, 201, 1873, 2385, 1866, 1856, 532, 1867, 1791, 1811, 733, 230, 2155, 1818, 1121, 2045, 24, 27, 547, 1863, 1854, 1334, 1859, 608, 231, 1095, 191, 2667, 1299, 1869, 767, 1870, 1872, 1704, 1986, 1865, 1814, 318, 511, 1453, 157, 568, 1212, 1810, 2004, 1020, 633, 481, 1817, 695, 1677, 962, 306, 1100, 1855, 235, 1876, 1875, 1701, 1636, 1323, 482, 1662, 1052, 1868, 1808, 1348, 205, 1809, 2117, 1874, 1440, 2076, 549, 719, 794, 1815, 1666, 429, 215, 863, 1493, 1072, 180, 1023, 486, 1655, 2077, 2668, 835, 699, 38, 335, 2091, 88, 1004, 2075, 2, 1813, 48, 1203, 1618, 1669, 416, 1119, 343, 2295, 2124, 1853, 874, 775, 1671, 2118, 1111, 196, 2126, 
2127, 1861, 1807, 1567, 351, 2125, 1251, 1797, 2129, 722, 1508, 576, 1075, 1890, 1857, 1241, 1012, 1892, 2120, 1823, 1858, 1665, 1812, 1479, 788, 143, 2041, 1889, 33, 2122, 1149, 210, 216]",1,14.0,18.0,275.0,0.6074593796159528,, -"[869, 1864, 553, 2668, 1583, 205, 1358, 1889, 332, 570, 1701, 2667, 420, 1891, 546, 1213, 24, 206, 2605, 2236, 1131, 277, 2243, 2163, 1950, 1, 503, 2039, 2691, 823, 316, 691, 1346, 1664, 199, 14, 2040, 1948, 2009, 2130, 2430, 1454, 465, 1223, 2238, 2509, 1654, 158, 86, 2409, 49, 836, 232, 10, 141, 2042, 2246, 598, 180, 2, 2489, 231, 306, 416, 2348, 2532, 1995, 1127, 2309, 574, 1666, 2305, 2004, 2306, 607, 366, 2379, 791, 201, 1002, 2045, 2001, 1665, 1287, 1709, 1655, 2036, 302, 1336, 151, 2360, 1326, 665, 1869, 1140, 2365, 45, 1410, 109, 2003, 1800, 1348, 303, 2335, 1424, 310, 2035, 476, 1149, 2303, 2543, 1875, 579, 2577, 297, 1116, 928, 1870, 1219, 1023, 387, 1453, 417, 1608, 1299, 2253, 733, 1859, 335, 215, 71, 2127, 519, 2041, 13, 68, 160, 1987, 1876, 2037, 745, 606, 1894, 1107, 1873, 197, 576, 1489, 1614, 2247, 972, 2034, 603, 2143, 722, 1815, 1807, 1818, 1134, 1820, 1448, 2578, 2002, 1821, 1000, 171, 719, 1121, 568, 218, 460, 1026, 1813, 281, 1787, 1806, 1929, 2063, 2136, 716]",1,14.0,18.0,300.0,0.5997045790251108,, -"[1809, 2308, 743, 476, 159, 1806, 160, 745, 1264, 1810, 443, 792, 460, 2, 2307, 576, 1773, 426, 1046, 1692, 201, 248, 109, 1814, 366, 232, 391, 68, 790, 1261, 519, 179, 783, 2376, 218, 482, 2348, 1986, 1062, 920, 215, 151, 1808, 665, 332, 2291, 2262, 1818, 2302, 1817, 1580, 962, 503, 1531, 1812, 574, 983, 1682, 1125, 1042, 1789, 377, 1265, 224, 430, 2008, 2076, 1020, 673, 45, 2263, 1143, 2004, 94, 2120, 698, 91, 2130, 1385, 678, 2301, 1893, 1192, 912, 2335, 2303, 2127, 71, 330, 627, 2033, 681, 2249, 180, 588, 2040, 691, 1655, 335, 897, 566, 788, 1519, 695, 1015, 286, 1002, 733, 2003, 1012, 2230, 2268, 1983, 2155, 1894, 791, 1892, 158, 383, 2123, 2122, 2380, 2295, 1788, 442, 14, 1481, 2002, 1110, 2035, 2037, 2042, 1051, 
141, 2332, 1665, 1890, 1797, 2001, 775, 1452, 1336, 2011, 1891, 1068, 2039, 1807, 401, 2121, 2378, 1329, 2304, 2041, 1294, 759, 2034, 1889, 435, 2379, 862, 794, 1618, 1160, 196, 2381, 1614, 1425, 1273, 863, 1448, 2406, 2362, 417, 841, 1929, 2032, 2043, 48, 2066, 1319, 1590, 1968, 1654, 382, 86, 2044, 1669, 38, 1026, 1270, 1497, 2036, 49, 2038, 429, 1493, 523, 617, 949, 705]",1,14.0,20.0,200.0,0.6214918759231906,, -"[2358, 1185, 1460, 426, 1782, 1992, 1998, 318, 109, 2004, 1772, 1987, 792, 1996, 1434, 655, 2007, 2001, 112, 2005, 1844, 153, 1994, 563, 2080, 236, 1798, 335, 476, 968, 1558, 519, 2003, 126, 1661, 603, 1453, 672, 2002, 306, 308, 1859, 1999, 899, 2349, 1870, 391, 460, 1873, 45, 133, 68, 487, 756, 1843, 2, 1988, 1692, 1846, 1709, 1875, 1989, 1023, 341, 1993, 2045, 1876, 176, 1986, 2000, 1072, 1990, 2346, 2094, 1574, 139, 179, 743, 2590, 1849, 1705, 1729, 1469, 151, 836, 2155, 1851, 1919, 376, 1625, 779, 2154, 1370, 454, 778, 1697, 2008, 681, 859, 324, 673, 1149, 1291, 1997, 935, 1660, 1840, 1013, 1239, 2006, 1464, 181, 2153, 586, 215, 1131, 239, 530, 74, 2156, 1841, 69, 1118, 1704, 1991, 160, 510, 1847, 1587, 566, 1537, 2194, 71, 1521, 220, 1894, 2195, 2193, 1975, 1166, 214, 201, 65, 232, 1644, 1525, 1293, 1848, 277, 1455, 1850, 1095, 1773, 1121, 1845, 2184, 880, 2021, 1842, 506, 70, 2295, 1812, 2282, 745, 471, 1675, 973, 334, 1410, 441, 897, 1839, 366, 1403, 1923, 908, 1655, 1224, 738, 1926, 1602, 697, 6, 553, 938, 682, 2344, 252, 1120, 1080, 2136, 630, 1008, 11, 30, 2096, 129, 193, 344]",1,14.0,20.0,245.0,0.6790989660265879,, -"[1787, 1785, 2153, 153, 1779, 1507, 1224, 69, 1772, 1926, 1769, 1624, 1343, 1690, 1623, 2112, 1954, 2045, 1957, 1120, 2010, 2009, 236, 648, 1013, 682, 1357, 1376, 203, 1146, 252, 426, 1884, 306, 1781, 1583, 2030, 1110, 436, 356, 379, 720, 1901, 323, 857, 459, 1909, 563, 1395, 610, 1505, 1958, 133, 289, 241, 847, 2028, 2273, 255, 1128, 437, 519, 1351, 1805, 2165, 1848, 2026, 1851, 318, 2275, 1959, 102, 1337, 2189, 1640, 2027, 711, 
590, 498, 554, 1998, 446, 2107, 2285, 2025, 265, 371, 2274, 1538, 149, 124, 1346, 1885, 109, 1628, 1549, 50, 2272, 1552, 1986, 880, 1789, 1700, 1883, 2271, 530, 1441, 1881, 1072, 604, 470, 1097, 854, 118, 2016, 1878, 36, 1791, 2109, 119, 162, 2086, 973, 1089, 1682, 1914, 1483, 739, 2093, 630, 1358, 1840, 1661, 88, 1158, 980, 597, 2092, 581, 735, 451, 325, 1882, 2022, 1045, 842, 1592, 589, 1498, 2019, 1743, 277, 733, 1972, 935, 572, 1029, 2166, 41, 2015, 596, 661, 1309, 175, 646, 1953, 704, 1555, 1470, 176, 359, 1077, 1142, 415, 1880, 2450, 1917, 1725, 1619, 1353, 1707, 1060, 121, 1444, 830, 1070, 383, 2251, 1377, 1338, 905, 458, 1174, 748, 2252, 802, 815, 2385, 2388, 115]",1,14.0,20.0,275.0,0.593426883308715,, -"[306, 118, 581, 656, 476, 306, 118, 581, 656, 476, 306, 118, 581, 656, 476, 306, 118, 581, 656, 476, 306, 118, 581, 656, 476, 306, 118, 581, 656, 476, 306, 118, 581, 656, 476, 306, 118, 581, 656, 476, 306, 118, 581, 656, 476, 306, 118, 581, 656, 476]",0,,5.0,200.0,0.0978581979320531,0.01, -"[109, 1789, 656, 1337, 1878, 1346, 317, 887, 124, 1772, 109, 1789, 656, 1337, 1878, 1346, 317, 887, 124, 1772, 109, 1789, 656, 1337, 1878, 1346, 317, 887, 124, 1772, 109, 1789, 656, 1337, 1878, 1346, 317, 887, 124, 1772, 109, 1789, 656, 1337, 1878, 1346, 317, 887, 124, 1772, 109, 1789, 656, 1337, 1878, 1346, 317, 887, 124, 1772, 109, 1789, 656, 1337, 1878, 1346, 317, 887, 124, 1772, 109, 1789, 656, 1337, 1878, 1346, 317, 887, 124, 1772, 109, 1789, 656, 1337, 1878, 1346, 317, 887, 124, 1772, 109, 1789, 656, 1337, 1878, 1346, 317, 887, 124, 1772]",0,,10.0,150.0,0.096381093057607,0.01, -"[1032, 587, 1032, 587, 1032, 587, 1032, 587, 1032, 587, 1032, 587, 1032, 587, 1032, 587, 1032, 587, 1032, 587]",0,,10.0,250.0,0.0993353028064992,5.0, -"[656, 306, 1623, 1624, 1913, 656, 306, 1623, 1624, 1913, 656, 306, 1623, 1624, 1913, 656, 306, 1623, 1624, 1913, 656, 306, 1623, 1624, 1913, 656, 306, 1623, 1624, 1913, 656, 306, 1623, 1624, 1913, 656, 306, 1623, 1624, 1913, 656, 306, 1623, 
1624, 1913, 656, 306, 1623, 1624, 1913]",0,,5.0,250.0,0.0945347119645494,10.0, -"[525, 142, 1628, 2198, 2201, 2199, 2182, 544, 2181, 347, 2183, 456, 1580, 100, 2196, 2200, 2054, 1680, 2074, 861]",0,2.0,2.0,50.0,0.1351550960118168,, -"[1593, 213, 2159, 2157, 23, 537, 1835, 2158, 898, 1165, 145, 1504, 2160, 1836, 108, 2209, 144, 1647, 2192, 1698, 92, 2622, 2161, 1328, 1327, 495, 1328, 1593, 145, 23, 1328, 1593, 145, 23, 2158, 2209, 1328, 1593, 145, 23, 2158, 2209, 1328, 1593, 145, 23, 2158, 2209, 1328, 1593, 145, 23, 2158, 2209, 1328, 1593, 145, 23, 2158, 2209]",0,2.0,6.0,80.0,0.087149187592319,, -"[1770, 1245, 1772, 487, 1705, 426, 719, 1640, 1551, 1656, 1775, 103, 126, 608, 1367, 302, 910, 138, 1584, 1779]",0,3.0,2.0,60.0,0.0904726735598227,, -"[2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236]",0,3.0,4.0,40.0,0.087149187592319,, -"[123, 2604, 99, 2455, 26, 127, 122, 2454, 2455, 26, 2455, 26, 2455, 26, 2455, 26, 2455, 26, 2455, 26]",0,4.0,2.0,60.0,0.087149187592319,, -"[144, 145, 1593, 2622, 213, 1165, 2192, 495, 537, 108, 1328, 1327, 23, 2161, 2209, 2160, 1647, 1698, 2159, 1835]",0,4.0,2.0,90.0,0.087149187592319,, -"[991, 307, 991, 307, 991, 307, 991, 307, 991, 307, 991, 307, 991, 307, 991, 307, 991, 307, 991, 307]",0,4.0,4.0,40.0,0.087149187592319,, -"[302, 236, 350, 109, 153, 138, 308, 126, 426, 124, 139, 306, 103, 417, 452, 294, 329, 2087, 1335, 112]",0,5.0,2.0,20.0,0.0982274741506647,, -"[2597, 1742, 1734, 1736, 1763, 2444, 1710, 1761, 2359, 1709, 1725, 1738, 1731, 1721, 1762, 1483, 1072, 1746, 1154, 687, 1753, 1389, 1749, 1743, 1745, 1727, 1754, 1189, 1339, 2365, 155, 1723, 2445, 1735, 1070, 1758, 758, 1546, 689, 2685, 1229, 154, 1740, 797, 2221, 328, 73, 1717, 1714, 466, 754, 1281, 957, 2450, 1041, 1292, 1205, 970, 111, 1715, 1620, 341, 1733, 326, 156, 1752, 174, 1317, 1737, 1646, 951, 853, 1729, 1430, 609, 1708, 1243, 1384, 1756, 1471, 244, 59, 1568, 1209, 1149, 1724, 744, 732, 985, 1751, 
1755, 228, 1712, 873, 1565, 999, 1757, 1499, 357, 1711]",0,2.0,10.0,200.0,0.163589364844904,, -"[426, 519, 109, 2045, 487, 153, 126, 308, 139, 350, 1980, 112, 302, 2405, 1979, 816, 124, 2046, 2269, 1203, 2270, 1805, 2079, 564, 2117, 565, 1610, 103, 1572, 86, 236, 2078, 1509, 2034, 1508, 329, 436, 1784, 306, 2155, 836, 1110, 1026, 1224, 2295, 95, 1416, 138, 2476, 1798, 1262, 734, 244, 189, 2054, 417, 1502, 1802, 6, 1169, 2182, 452, 2074, 1133, 552, 2281, 1441, 2198, 835, 2163, 2185, 1810, 1954, 1842, 1500, 897, 542, 1838, 2118, 1200, 1358, 1483, 2164, 294, 1733, 1725, 554, 1804, 1740, 1410, 1799, 1351, 1303, 20, 2202, 1797, 2135, 737, 1405, 1070, 524, 1671, 342, 2282, 2136, 770, 1801, 1602, 746, 1837, 2068, 2073, 1505, 1330, 2630, 315, 100, 1072, 2219, 1478, 2243, 1355, 979, 945, 1204, 773, 751, 1803, 1957, 1121, 2384, 476, 2375, 1215, 77, 838, 659, 1839, 11, 2280, 1503, 354, 1655, 2453, 2207, 120, 149, 1013, 1538, 736]",1,2.0,15.0,175.0,0.6598966026587888,, -"[454, 1776, 2080, 1773, 2184, 70, 2001, 2045, 1986, 1661, 1455, 2085, 1772, 1537, 487, 318, 1782, 1789, 1245, 2046, 426, 133, 1144, 563, 102, 608, 2122, 519, 671, 1770, 103, 2089, 484, 910, 1367, 139, 581, 2048, 1561, 657, 112, 655, 153, 289, 126, 1490, 441, 542, 1775, 1337, 867, 1780, 1572, 2123, 459, 871, 2442, 1045, 476, 138, 2440, 1777, 610, 1769, 1254, 973, 2326, 1584, 1013, 1251, 1627, 91, 1346, 1623, 191, 719, 544, 302, 1656, 1560, 1798, 1768, 1308, 129, 980, 234, 2439, 109, 452, 1767, 176, 1158, 1035, 2078, 1651, 236, 1740, 1774, 2084, 1787, 1856, 124, 440, 308, 193, 344, 2327, 1783, 1229, 1551, 1705, 1805, 2451, 1771, 660, 1196, 887, 656, 1779, 409, 1804, 1046, 1015, 1785, 2591, 589, 1784, 2441, 1155, 330, 2095, 1699, 314, 556, 1749, 1778, 1193, 2087, 1384, 329, 2088, 350, 2203, 1800, 317, 1065, 2090, 1801, 306, 2093]",1,2.0,15.0,200.0,0.6155834564254062,, -"[1692, 1998, 1472, 172, 2187, 512, 519, 240, 2395, 324, 1421, 504, 1396, 2228, 1501, 191, 2156, 1351, 382, 724, 1986, 1972, 1973, 1879, 661, 
1399, 1085, 2396, 1848, 1420, 1984, 2186, 1999, 2071, 725, 1142, 604, 118, 1410, 279, 1469, 55, 708, 2105, 1013, 1343, 973, 842, 2099, 1358, 1178, 1529, 1344, 1921, 2020, 827, 490, 1849, 2394, 651, 471, 756, 60, 826, 1652, 25, 1463, 1566, 745, 2326, 1121, 1909, 211, 1655, 718, 1171, 2116, 1487, 1974, 160, 1474, 836, 2136, 818, 2023, 161, 277, 1850, 2024, 2397, 1224, 510, 2034, 600, 1912, 1782, 1842, 1644, 299, 2103, 1338, 2016, 593, 995, 2102, 1203, 2015, 1526, 897, 748, 2137, 950, 1269, 1082, 1908, 1027, 242, 2274, 1630, 2164, 331, 860, 1668, 323, 838, 2450, 356, 1527, 2018, 815, 2282, 437, 1625, 2386, 1570, 1882, 743, 621, 1495, 2010, 1713, 61, 2405, 1309, 120, 1843, 1677, 1538, 2280, 516]",1,2.0,15.0,225.0,0.6731905465288035,, -"[109, 1787, 153, 563, 1095, 1337, 230, 519, 1873, 2001, 318, 1410, 231, 1062, 1868, 1998, 1581, 1819, 2044, 1331, 2381, 1863, 1576, 1822, 215, 126, 1869, 716, 1842, 608, 2045, 1808, 1821, 1799, 1876, 1811, 302, 719, 27, 1846, 308, 733, 139, 1852, 232, 532, 511, 236, 1655, 869, 1859, 138, 606, 1818, 1856, 205, 1023, 416, 1440, 1820, 1417, 1121, 948, 1677, 1682, 1817, 1791, 424, 897, 2076, 45, 549, 795, 1334, 306, 1299, 1618, 351, 1669, 766, 962, 24, 102, 13, 1862, 1986, 968, 1636, 191, 2385, 2117, 874, 1416, 521, 1704, 1815, 1020, 201, 2319, 1877, 1015, 1810, 1003, 547, 2077, 1872, 2088, 1866, 1853, 1816, 1865, 2118, 1072, 2075, 1861, 1701, 729, 784, 1508, 1479, 2668, 481, 1823, 103, 196, 1075, 2043, 1004, 429, 1892, 1493, 1662, 1857, 576, 216, 261, 1134, 775, 1107, 229, 770, 1348, 1405, 2120, 2667, 664, 1212, 33, 1323, 486, 210, 2122, 2126, 143, 157, 2138, 38, 1119, 2091, 2127, 2125, 1614, 1149, 235, 1860, 1854, 767, 124, 835, 1215, 1203, 1790, 430, 1251, 2035, 2040, 2124, 2500, 1241, 180, 1890, 695, 294, 598, 1894, 637, 55, 1473, 1531, 1261, 1448, 2295, 1052, 14, 158, 1301, 1807, 1855, 1665, 1111]",1,2.0,20.0,325.0,0.6288774002954209,, -"[1824, 2476, 1267, 1826, 1317, 1282, 2365, 2390, 2447, 650, 1740, 1721, 2448, 391, 105, 1986, 2221, 
480, 2450, 1306, 2093, 2391, 1735, 109, 261, 1479, 1734, 2212, 2372, 1738, 2216, 2651, 657, 1606, 2045, 1627, 2313, 1743, 1720, 2162, 1037, 1725, 1709, 2446, 372, 738, 534, 883, 2503, 817, 16, 1719, 1557, 2642, 1483, 263, 2597, 741, 1214, 1898, 1833, 2558, 1011, 645, 684, 1745, 689, 1776, 1632, 1284, 1900, 1901, 1589, 2492, 1761, 1292, 1620, 1041, 609, 970, 1895, 1035, 290, 1229, 1565, 1638, 1169, 732, 1763, 2530, 796, 1716, 848, 1708, 1710, 2359, 489, 1729, 1712, 1040, 1899, 274, 1072, 1736, 834, 873, 708, 524, 281, 1499, 629, 322, 891, 228, 1149, 68, 1262, 1103, 1713, 1374, 2654, 1495, 1750, 1748, 754, 616, 819, 764, 2083, 615, 1520, 2201, 831, 364, 1070, 748, 567, 1610, 59, 687, 1205, 1737, 1612, 2553, 979, 1604, 757, 341, 1718, 101, 337, 950, 1617, 1189, 2421, 1145, 1136, 1389, 758, 1550, 825, 1694, 136, 985, 1749, 2366, 1741, 1711, 823, 1546, 552, 513, 415, 1123, 902, 1568, 558, 289, 111, 626, 1726, 1480, 1766, 836, 1685, 1384, 156, 333, 1209, 2443, 1730, 1216, 2274, 1646, 1871, 2445, 1765, 936, 1471, 403, 2343, 1687, 876, 1752, 1305, 853, 60, 466, 1762, 1756, 1727, 244, 2643, 72, 90, 1038, 53, 1492, 1747, 613, 1243, 1722, 1562, 1764, 1281, 1586, 1753, 1739, 1528, 30, 1358, 501, 88, 1527, 978, 1723, 1516, 1287, 34, 686, 882, 1383, 797, 1689, 1339, 752, 929, 2334, 1728, 951]",1,2.0,25.0,325.0,0.5919497784342689,, -"[1824, 2476, 2390, 2446, 519, 1826, 836, 1267, 2365, 1738, 1610, 1740, 732, 2391, 109, 306, 629, 263, 1772, 563, 318, 1756, 1787, 2558, 391, 223, 236, 126, 2444, 534, 657, 883, 1709, 1742, 1721, 2366, 1725, 1282, 1749, 1986, 2597, 2445, 2450, 2334, 915, 1426, 1761, 1739, 1712, 2642, 1729, 2168, 1199, 1013, 1586, 1716, 1632, 308, 2651, 1898, 480, 1033, 1627, 565, 2596, 1734, 489, 1229, 1072, 105, 1776, 1731, 1710, 1620, 274, 2585, 1284, 1041, 2448, 817, 174, 133, 848, 1337, 1306, 2515, 1706, 1565, 1726, 1154, 1687, 687, 1483, 1714, 616, 1856, 1568, 2372, 1651, 1711, 1752, 1612, 1617, 1754, 139, 1755, 1727, 1713, 744, 970, 364, 2447, 1495, 979, 760, 
244, 645, 1900, 754, 1719, 68, 1175, 1599, 708, 1589, 1374, 1169, 1070, 1149, 985, 290, 53, 1216, 1801, 609, 2503, 891, 1746, 1103, 748, 1389, 1433, 689, 1766, 1753, 1205, 153, 1765, 684, 1317, 1040, 1724, 2212, 524, 2421, 1689, 1694, 1604, 1784, 2643, 1899, 1339, 580, 154, 1895, 686, 466, 1744, 823, 1145, 1718, 757, 1715, 1717, 950, 1499, 1733, 957, 1038, 1871, 1550, 1546, 1480, 1747, 1123, 1562, 101, 1730, 902, 1757, 796, 1384, 1171, 1035, 1646, 72, 1732, 1448, 2525, 2307, 90, 1471, 1243, 819, 228, 1492, 1444, 333, 155, 1764, 2556, 1708, 156, 768, 1292, 341, 2045, 326, 281, 764, 2216, 501, 626, 613, 111, 2546, 1281, 2492, 2359, 873, 346, 1751, 1735, 1305, 1238, 831, 853, 999, 1358, 1852, 2047, 951, 1516, 738, 1741, 978, 882, 758, 919, 34, 929, 2377, 1750, 1791, 1214, 2046, 558, 372, 1606, 797, 1486, 1743, 1209, 1758, 1722, 1763, 1833, 603, 1720, 1748, 1762, 1189, 2048, 215, 1287, 1248, 1736, 697, 2162, 357, 173, 1759, 889, 567, 2652, 1737, 936, 170, 1831, 2201, 1830, 2553, 2343, 699, 289, 246, 800, 834, 2554, 337]",1,2.0,30.0,325.0,0.587149187592319,, -"[1731, 1528, 1711, 2448, 1755, 1751, 1735, 1398, 2596, 1752, 1753, 1754, 427, 1730, 1719, 1041, 1733, 1766, 1734, 628, 1339, 2476, 1744, 2447, 1483, 1281, 1729, 1761, 1361, 1568, 1763, 1723, 1620, 1736, 290, 1746, 1762, 1169, 1072, 1724, 2279, 1726, 1243, 1748, 154, 1471, 1710, 1646, 1189, 1499]",0,4.0,5.0,225.0,0.1598966026587887,, -"[1210, 1648, 1210, 1648, 1210, 1648, 1210, 1648, 1210, 1648, 1210, 1648, 1210, 1648, 1210, 1648, 1210, 1648, 1210, 1648]",0,4.0,10.0,200.0,0.163589364844904,, -"[1772, 1798, 1782, 484, 973, 1705, 176, 487, 2046, 1725, 1661, 350, 1775, 454, 2089, 2085, 102, 426, 1785, 1584, 1740, 958, 2090, 302, 2267, 2048, 236, 1856, 656, 1640, 329, 1801, 773, 1337, 1490, 308, 138, 318, 1656, 1926, 794, 1251, 69, 542, 289, 1733, 112, 1160, 476, 1572, 1624, 1045, 1505, 2045, 581, 1651, 2078, 2266, 2042, 1158, 1779, 1358, 2086, 2268, 1771, 342, 1912, 719, 139, 563, 218, 1770, 1193, 2039, 2318, 691, 
1262, 1329, 452, 133, 1784, 2072, 1817, 1818, 2302, 1800, 1812, 109, 759, 1803, 1891, 2189, 2130, 417, 2092, 1070, 306, 2080, 1072, 573, 945, 2091, 841, 1062, 1367, 2087, 1395, 1478, 1497, 158, 2326, 1791, 1265, 847, 554, 1968, 1614, 519, 1654, 2037, 1986, 1551, 1789, 1245, 14, 429, 610, 435, 910, 1787, 655, 1623, 1804, 1805, 1273, 2320, 733, 224, 49, 617, 1799, 1681, 1448, 1609, 189, 86, 2319, 126, 1889, 887]",1,4.0,15.0,225.0,0.6850073855243722,, -"[1583, 2407, 1729, 1120, 2251, 1313, 2025, 1873, 1625, 671, 643, 704, 2287, 935, 880, 1335, 118, 826, 2288, 2019, 1740, 716, 831, 1849, 573, 1513, 121, 79, 454, 1067, 191, 908, 1635, 562, 1197, 115, 510, 603, 973, 1986, 2252, 630, 277, 1850, 252, 1847, 1616, 2034, 1842, 1840, 2153, 334, 234, 506, 1521, 1675, 610, 505, 1403, 2199, 1140, 613, 1830, 1142, 1580, 1844, 1841, 476, 389, 114, 1483, 1158, 1998, 1013, 1546, 341, 1791, 2478, 1804, 231, 1800, 2322, 1644, 1856, 795, 2080, 2093, 1798, 387, 2090, 1358, 644, 1248, 1499, 1144, 608, 1701, 2326, 695, 1448, 1258, 236, 456, 1783, 1389, 2046, 2347, 1767, 2087, 554, 2382, 441, 889, 2481, 1337, 661, 2145, 10, 1289, 2026, 1821, 289, 836, 181, 2078, 416, 359, 1733, 1699, 1245, 1774, 417, 699, 1464, 1846, 814, 1572, 1851, 2092, 1552, 103, 2048, 1656, 239, 329, 1799, 980, 18, 153, 958]",1,4.0,15.0,325.0,0.7149187592319055,, -"[1133, 1131, 1224, 1500, 2436, 2185, 989, 1826, 1824, 1525, 1583, 1919, 1825, 1851, 1538, 2485, 415, 88, 645, 903, 472, 2242, 1975, 1914, 2062, 252, 1792, 69, 1507, 500, 1191, 1543, 1013, 701, 2010, 643, 446, 1072, 2030, 2241, 56, 1842, 1682, 1395, 119, 161, 282, 1829, 847, 2112, 1828, 2316, 2061, 1616, 1183, 1408, 778, 323, 836, 1645, 1827, 779, 306, 2207, 1142, 1690, 2628, 115, 67, 1503, 747, 1549, 1537, 2189, 1343, 735, 1623, 581, 1899, 2527, 118, 360, 129, 1881, 2153, 388, 682, 176, 1060, 2272, 1661, 540, 1370, 750, 241, 2022, 370, 1455, 331, 50, 20, 973, 2699, 1182, 2275, 344, 1885, 661, 2497, 1415, 392, 2166, 1128, 109, 87, 2270, 1926, 1884, 2016, 676, 
2190, 1839, 255, 2274, 880, 2094, 128, 1171, 1409, 1956, 842, 1266, 854, 2060, 2284, 1579, 2165, 1355, 2184, 554, 1527, 1441, 193, 113, 2457, 1901, 2374, 277, 440, 249, 630, 2019, 2286, 1029, 807, 1404, 2269, 935, 739, 685, 979, 441, 1414, 2235, 1226, 2630, 1917, 2375, 2273, 648, 1700, 1707, 265, 2271, 1619, 2450, 671, 1553, 454, 860, 70, 1354, 1958, 1957, 1955, 1237, 720, 60, 1668, 1954, 524, 590, 1303, 371, 1168, 1179, 393, 1903, 400, 2188]",1,4.0,20.0,200.0,0.6650664697193501,, -"[563, 576, 103, 109, 230, 232, 126, 1811, 1822, 1998, 719, 1873, 1809, 231, 318, 124, 1808, 302, 1864, 716, 1870, 1821, 1853, 1861, 608, 1867, 1337, 1860, 503, 215, 699, 1331, 1876, 1787, 1874, 1818, 511, 1453, 1869, 1872, 226, 1576, 568, 2121, 2667, 1440, 235, 486, 456, 2045, 2117, 1023, 1410, 1348, 1100, 236, 574, 1986, 549, 532, 2118, 1323, 1857, 139, 261, 1823, 1107, 351, 1508, 1479, 1854, 1075, 24, 1241, 1704, 1855, 153, 1567, 95, 306, 1405, 869, 461, 1149, 2348, 1810, 229, 722, 2126, 770, 1111, 1119, 2125, 2091, 733, 1862, 1334, 1004, 835, 2122, 962, 1215, 1121, 2124, 27, 1701, 1859, 1095, 1894, 2127, 729, 546, 1655, 1927, 1062, 664, 1820, 2129, 201, 1817, 1268, 1072, 536, 143, 2077, 862, 1791, 1049, 481, 467, 2075, 442, 1203, 1273, 1812, 2155, 1858, 2550, 196, 2033, 1580, 2002, 2, 1846, 2003, 482, 216, 13, 33, 335, 2248, 788, 983, 2200, 1015, 1856, 1863, 2335, 1452, 1799, 1636, 1868, 1842, 205, 2199, 1251, 1797, 740, 897, 1813, 157, 1481, 1677, 1929, 622, 695, 2671, 1852, 48, 1299, 1654, 45, 1875, 505, 1002, 294, 316, 1257, 2385, 794, 1212, 1814, 191, 138, 544, 417, 1020, 1052, 1877, 2076, 1816, 14, 2379, 1866, 2004, 1865, 691, 1662, 1669, 2378, 874, 416, 424, 1703, 1815, 343, 547, 1890, 1952, 790, 633, 784, 767, 2043, 1789, 795, 775, 2668, 2130, 2120, 2518, 2123, 55, 920, 1819, 1581, 112, 1143, 159, 833, 2332, 2001, 1615, 438, 1614, 2406, 1324, 2036, 606, 2037]",1,4.0,25.0,325.0,0.6462333825701625,, -"[2238, 1530, 627, 89, 22, 152, 2107, 1401, 529, 18, 1087, 454, 2120, 121, 
2350, 2260, 1966, 1801, 530, 1240, 196, 1369, 1653, 1967, 1365, 1072, 1702, 973, 2119, 399, 407, 1452, 1529, 1971, 2288, 665, 1574, 761, 1417, 2066, 706, 275, 836, 1234, 2251, 598, 416, 1572, 1640, 1703, 2463, 759, 1505, 1699, 2082, 308, 302, 699, 286, 1146, 258, 350, 2322, 1968, 2252, 2040, 544, 2081, 773, 1799, 1245, 1670, 442, 2327, 1681, 2122, 2106, 43, 2240, 1798, 139, 36, 1289, 463, 698, 1560, 1661, 1082, 2046, 1782, 2080, 1015, 1202, 1986, 2287, 789, 191, 2435, 2094, 1158, 1448, 1781, 1651, 33, 1784, 519, 813, 980, 2326, 1656, 1584, 910, 1248, 441, 1969, 1251, 2336, 2089, 2645, 1788, 2092, 236, 2048, 608, 1705, 332, 1013, 1783, 581, 1144, 1776, 387, 1153, 1851, 1767, 239, 1791, 660, 695, 805, 719, 1358, 2145, 2386, 2084, 1197, 1772, 114, 102, 1852, 231, 2262, 1701, 542, 2121, 857, 2401, 289, 1561, 2093, 1483, 1367, 505, 887, 341, 2434, 1564, 2520, 1337, 126, 1810, 124, 2376, 655, 2086, 1484, 112, 573, 1804, 1906, 2400, 958, 2088, 1622, 2078, 2085, 1789, 1787, 109, 317, 831, 1780, 2382, 1797, 1193, 2362, 215, 1830, 476, 1009, 2329, 2498, 2155, 330, 1869, 1115, 1510, 1722, 589, 1785, 885, 135, 1552, 1350, 1998, 741, 176, 409, 1094, 452, 854, 1157, 2478, 1443, 1786, 1428, 783, 556, 2236, 1768, 661, 1571, 826, 314, 138, 417, 133, 2095, 2045, 2294, 2617, 1774, 2026, 1162, 1769, 1802, 1623, 2079, 2318, 2143, 1770, 1346, 306, 603, 103, 859, 375, 610, 1805, 1143, 1490, 588, 656, 763, 329, 487, 91, 2504, 1778, 1970, 2399, 1878, 1871, 1775, 2090, 203, 426, 1061, 1618, 1045, 1624, 963, 1051, 1551, 563, 299, 459, 1534, 1856, 1046, 2256, 1635, 2087, 29, 911, 318, 1196, 166, 814, 1779]",1,4.0,30.0,325.0,0.7234121122599705,, -"[1763, 1736, 1734, 565, 1761, 768, 1720, 1753, 1738, 1735, 1725, 1762, 1742, 1719, 372, 2597, 1743, 820, 1731, 1755, 1765, 1748, 1741, 2596, 1709, 1722, 1724, 1721, 1750, 1038, 1205, 1749, 1483, 1739, 1710, 1758, 1754, 155, 1339, 1756, 1711, 1766, 244, 687, 1169, 1723, 1589, 1154, 1717, 1070]",0,8.0,5.0,200.0,0.1661742983751846,, -"[1041, 1243, 1284, 
609, 1389, 1154, 552, 1238, 466, 1040, 1384, 1229, 831, 754, 951, 1169, 1149, 929, 757, 1103, 170, 501, 1145, 341, 1209, 853, 1287, 156, 970, 902, 1471, 489, 873, 357, 1305, 1123, 154, 744, 876, 796, 999, 957, 524, 764, 985, 90, 364, 59, 819, 1492]",0,8.0,5.0,225.0,0.1698670605612998,, -"[869, 1864, 1869, 563, 232, 1440, 1358, 318, 2117, 1810, 1866, 633, 873, 1299, 767, 1405, 1919, 778, 24, 1732, 2118, 1370, 229, 1535, 1337, 2154, 1636, 1592, 1867, 1171, 1039, 13, 1820, 1508, 2153, 280, 211, 1872, 424, 1410, 1849, 1982, 539, 74, 1453, 235, 205, 1873, 2195, 1241]",1,8.0,5.0,325.0,0.6129985228951256,, -"[1358, 244, 1782, 1772, 2080, 1798, 1740, 1262, 1367, 1775, 1725, 189, 1245, 1656, 1800, 2045, 1070, 1733, 1770, 487, 1251, 1584, 20, 2085, 2086, 1483, 426, 350, 1490, 1771, 1802, 1804, 1505, 2046, 1856, 608, 329, 655, 958, 112, 2326, 1448, 2048, 2089, 476, 1784, 1787, 2090, 236, 2084, 124, 139, 1072, 103, 126, 1705, 153, 109, 308, 1805, 910, 1651, 1799, 342, 773, 519, 542, 656, 302, 1789, 1193, 1346, 1779, 1158, 1986, 973, 306, 1045, 1703, 1788, 1143, 409, 733, 459, 529, 963, 2260, 887, 2078, 1529, 2350, 1572, 1640, 191, 1551, 1618, 2001, 2318, 2262, 2155]",1,8.0,10.0,200.0,0.568685376661743,, -"[935, 2024, 88, 748, 830, 2110, 280, 1152, 366, 1535, 1118, 1270, 1289, 2109, 693, 1914, 2293, 1045, 1105, 1676, 2136, 651, 94, 1421, 1592, 1974, 2274, 97, 2485, 1878, 973, 749, 1927, 1353, 1351, 539, 553, 93, 1358, 149, 1622, 498, 1402, 74, 124, 1494, 1732, 2025, 661, 1332, 1847, 1810, 1652, 885, 578, 1309, 2405, 1675, 747, 1975, 2156, 1218, 334, 2419, 376, 835, 266, 506, 69, 2155, 1127, 1583, 1851, 1995, 1980, 1919, 1376, 1849, 816, 2135, 586, 1080, 708, 1979, 1973, 779, 1121, 1525, 807, 1842, 1370, 778, 1028, 239, 11, 1410, 1464, 702, 2151, 324, 344, 1839, 1203, 483, 1655, 2282, 880, 1848, 809, 2305, 2153, 1120, 118, 993, 1572, 682, 325, 2295, 925, 1930, 1729, 1110, 1039, 2281, 575, 1538, 1171, 1909, 878, 2403, 1382, 1566, 1343, 1222, 666, 2283, 2294, 32, 836, 424, 518, 
1850, 1143, 2423, 2164, 1224, 519, 2078, 1976, 1879]",1,8.0,15.0,225.0,0.6418020679468243,, -"[549, 1870, 230, 157, 1864, 1809, 887, 1581, 201, 962, 661, 306, 1885, 1998, 441, 1779, 1856, 1868, 869, 1787, 1986, 511, 1095, 231, 553, 1479, 1859, 1819, 1636, 160, 2094, 1789, 1788, 426, 1813, 232, 2335, 1440, 1844, 1816, 24, 1857, 1212, 1453, 1334, 568, 519, 2092, 1872, 1705, 896, 1858, 1812, 1866, 1865, 249, 266, 351, 27, 1808, 1045, 461, 696, 1873, 261, 1811, 835, 716, 1817, 343, 1062, 482, 548, 1665, 1474, 1772, 1822, 1505, 1799, 719, 481, 1876, 124, 2229, 1863, 1821, 216, 2045, 1121, 1023, 868, 215, 1805, 1403, 143, 1810, 973, 790, 1346, 1884, 1867, 633, 277, 532, 226, 335, 153, 1015, 344, 236, 784, 1814, 935, 1875, 176, 1846, 1701, 1820, 1852, 1107, 465, 2304, 767, 1777, 416, 289, 109, 2383, 809, 1823, 576, 924, 151, 442, 1576, 1818, 2276, 627, 1331, 1624, 224, 563, 733, 1861, 699, 1871, 318, 2034, 2093, 415]",1,8.0,15.0,325.0,0.6174298375184638,, -"[1128, 719, 519, 656, 1914, 487, 2275, 1954, 103, 302, 323, 426, 350, 476, 294, 1193, 847, 1682, 109, 236, 69, 1045, 2457, 112, 139, 1926, 138, 1700, 153, 1957, 655, 910, 126, 608, 590, 308, 1917, 1958, 542, 1060, 306, 1072, 1158, 1623, 2189, 720, 887, 329, 958, 124, 371, 540, 1901, 417, 836, 1849, 2274, 1441, 2094, 1070, 2022, 452, 176, 1583, 1824, 648, 1358, 1478, 1826, 1013, 252, 1829, 1360, 1960, 1962, 747, 750, 448, 573, 1455, 1707, 1224, 379, 1904, 1537, 241, 1619, 2184, 1276, 1903, 1661, 264, 1963, 70, 541, 1267, 132, 1470, 2273, 2272, 2271, 2153, 1131, 1500, 1902, 1022, 1953, 1395, 441, 682, 50, 265, 880, 129, 2485, 1984, 1529, 711, 1555, 671, 844, 2185, 1009, 56, 1898, 694, 793, 447, 2436, 973, 193, 1842, 2016, 440, 1133, 989, 643, 88, 1538, 1266, 344, 412, 645, 2050, 2553, 2491, 2010, 1168, 279, 2031, 842, 161, 1851, 1900, 454, 2112, 554, 1343, 2165, 1645, 581, 1895, 539, 255, 87, 950, 1495, 935, 2190, 1961, 1507, 118, 1616, 277, 2516, 891, 1690, 801, 115, 2166, 1029, 1792, 1142, 2019, 630, 2030, 685, 2316, 
2517, 703, 1899, 903, 2188, 1578, 2497, 2510, 388, 446, 693, 1402]",1,8.0,20.0,200.0,0.6067208271787297,, -"[102, 2126, 2129, 1871, 55, 1479, 210, 1111, 1567, 722, 1215, 770, 1508, 1671, 505, 1251, 2117, 1119, 1149, 1927, 261, 1797, 2531, 897, 1405, 2125, 2045, 2124, 1742, 1615, 1067, 1203, 1655, 1815, 306, 2118, 2296, 622, 1894, 1273, 2559, 1012, 1448, 1929, 229, 1817, 1268, 1952, 1787, 1072, 1983, 606, 740, 2033, 579, 1819, 1614, 2295, 1049, 2307, 1666, 1701, 1654, 1581, 1622, 695, 788, 1002, 1121, 1331, 1026, 417, 2668, 1968, 1813, 531, 2091, 2230, 191, 1452, 1986, 1576, 733, 1336, 453, 27, 1821, 1015, 1811, 48, 438, 1303, 916, 1665, 1822, 2123, 1192, 549, 1095, 230, 835, 1818, 2075, 1808, 91, 2550, 719, 2122, 576, 1481, 2308, 2381, 141, 536, 2003, 1270, 716, 2667, 382, 1820, 1107, 1299, 2380, 88, 2130, 49, 2379, 1869, 316, 511, 2002, 1810, 1789, 1160, 1812, 1062, 938, 792, 1703, 2494, 1891, 862, 218, 503, 1425, 13, 1806, 2332, 1889, 1890, 2076, 14, 1987, 1348, 1143, 574, 2348, 2335, 775, 2077, 2041, 1893, 171, 1531, 1020, 1669, 2035, 2032, 863, 1548, 2121, 1497, 781, 841, 2039, 691, 2594, 38, 1892, 196, 2001, 1618, 962, 2004, 2119, 1382, 1816, 186, 617, 794, 791, 224, 705, 482, 2040, 2406, 158, 2034, 1319, 1823]",1,8.0,20.0,225.0,0.6214918759231906,, -"[2168, 629, 1887, 2390, 1895, 883, 2045, 2391, 1826, 1741, 1776, 88, 1888, 1824, 534, 95, 1267, 657, 1303, 1214, 1735, 1566, 2447, 2444, 1739, 1751, 2199, 834, 480, 1736, 2556, 1033, 1886, 2093, 1282, 1712, 1586, 136, 2596, 290, 1761, 2616, 1638, 2446, 891, 1733, 1689, 741, 950, 2421, 109, 1687, 1740, 1067, 391, 2372, 322, 1759, 2359, 1763, 800, 2503, 5, 1610, 2448, 952, 1743, 2413, 1742, 936, 1627, 1716, 489, 2445, 831, 1368, 2083, 645, 1713, 2652, 1427, 2365, 817, 1898, 2515, 1745, 1709, 223, 1503, 2450, 1589, 1499, 728, 609, 1833, 1710, 616, 1715, 650, 1711, 2642, 2366, 1606, 1729, 337, 357, 1754, 1986, 1011, 1470, 732, 263, 1726, 2201, 876, 2216, 155, 1766, 738, 1756, 565, 749, 2212, 836, 1899, 2162, 1035, 
1750, 1479, 1136, 797, 1732, 998, 2558, 1483, 261, 1760, 174, 1229, 1911, 64, 1749, 2313, 1041, 2546, 687, 1205, 246, 1738, 53, 757, 580, 156, 1758, 105, 1620, 173, 1747, 708, 1721, 1154, 626, 1632, 1171, 1072, 979, 1730, 170, 1737, 73, 1752, 364, 767, 2553, 1103, 1708, 68, 558, 1717, 1596, 228, 1189, 2651, 819, 873, 244, 686, 1070, 2221, 1520, 1284, 341, 1725, 1038, 1148, 372, 1901, 2207, 684, 1724]",1,8.0,20.0,325.0,0.5668389955686853,, -"[2662, 12, 1318, 1001, 2661, 1001, 12, 2662, 1318, 2661, 1001, 12, 2662, 1318, 2661, 1001, 12, 2662, 1318, 2661, 1001, 12, 2662, 1318, 2661, 1001, 12, 2662, 1318, 2661, 1001, 12, 2662, 1318, 2661, 1001, 12, 2662, 1318, 2661, 1001, 12, 2662, 1318, 2661, 1001, 12, 2662, 1318, 2661]",0,8.0,25.0,325.0,0.1816838995568685,, -"[154, 1721, 819, 1718, 1169, 341, 1389, 1041, 1238, 1103, 1713, 609, 1444, 1339, 1189, 748, 904, 73, 132, 364, 902, 1070, 873, 1040, 1715, 1914, 228, 1568, 156, 1145, 1546, 1154, 109, 1499, 1714, 326, 72, 1708, 689, 1483, 876, 2153, 1471, 88, 357, 176, 1725, 155, 1284, 466, 1229, 754, 1792, 1723, 59, 744, 333, 60, 687, 1720, 853, 970, 1604, 1243, 289, 69, 1646, 323, 1719, 1224, 1722, 1281, 613, 174, 2019, 1727, 111, 552, 30, 1903, 331, 415, 1492, 170, 919, 779, 823, 1728, 1525, 1123, 797, 1724, 778, 90, 337, 1384, 53, 1726, 1480, 796, 1919, 580, 34, 647, 1620, 1305, 1674, 1072, 2094, 847, 2450, 1205, 2485, 985, 306, 1550, 1358, 524, 1599, 1149, 1926, 1292, 957, 1975, 1583, 1959, 1712, 2016, 1917, 882, 244, 1898, 437, 757, 1527, 758, 173, 1287, 1710, 1022, 282, 379, 2010, 764, 1516, 682, 2608, 1851, 1623, 68, 346, 999, 1370, 2189, 1013, 1716, 277, 973, 1661, 2022, 252, 581, 671, 1495, 950, 93, 1171, 101, 1209, 128, 1142, 67, 1395, 978, 891, 1825, 935, 684, 2269, 842, 1565, 2184, 626, 20, 2270, 2457, 1900, 880, 1824, 554, 1562, 1414, 1709, 747, 2112, 344, 370, 1682, 392, 1717, 1415, 1343, 70, 1455, 1826, 854, 2375, 441, 1507, 118, 630, 115, 255, 2207, 1881, 661, 1191, 2628, 1267, 860, 701, 1038, 1183, 246, 1668, 
440, 951, 929, 708, 979, 2241, 1537, 1690, 739, 454, 2165, 410, 1839, 129, 1503, 1827, 2030, 1409, 371, 119, 1237, 1538, 446, 113, 1549, 501, 2166, 1029, 489, 1543, 720, 1958, 2242, 540, 1408, 1901, 2190, 1711, 1957, 2284, 645, 2062, 2527, 193, 676, 686, 500, 831, 388, 1303, 616, 1884, 1616, 735, 1404, 807, 1355, 1179, 1885, 1553, 2699, 2274, 2630, 1700, 1354, 1954, 646, 2273, 1060, 2272, 2235, 2286, 2528, 393, 2621]",1,8.0,30.0,325.0,0.6067208271787297,, -"[2126, 1215, 1203, 1111, 1893, 1072, 1671, 102, 1119, 962, 2125, 191, 548, 1892, 482, 2124, 210, 429, 695, 1251, 2129, 1797, 2494, 88, 1615, 1998, 1807, 1049, 1273, 306, 1012, 382, 109, 1810, 920, 1871, 1303, 705, 1894, 180, 716, 1889, 1665, 1149, 2045, 1968, 603, 417, 1929, 549, 1614, 1448, 916, 536, 2140, 261, 505, 1452, 1812, 2075, 1315, 2138, 722, 1026, 1346, 1986, 2004, 841, 770, 2531, 2041, 1890, 17, 2668, 2077, 232, 687, 86, 229, 1701, 1336, 2359, 1126, 1821, 2372, 2130, 158, 1823, 791, 186, 2037, 2295, 2139, 1299, 14, 519, 1710, 224, 1301, 1870]",1,10.0,10.0,200.0,0.611890694239291,, -"[1013, 766, 2582, 1097, 2534, 1221, 387, 1124, 465, 1577, 199, 1950, 1894, 310, 722, 603, 417, 669, 54, 306, 1159, 960, 2117, 2045, 1171, 420, 0, 1147, 1542, 1072, 109, 316, 2034, 962, 1862, 297, 304, 1166, 519, 1708, 197, 729, 1869, 873, 1615, 1846, 102, 598, 1941, 1810, 1874, 1334, 570, 1172, 926, 1075, 1241, 681, 1868, 1856, 2004, 277, 2357, 176, 228, 2314, 1998, 1149, 1871, 767, 784, 2217, 1812, 1865, 1855, 1665, 2327, 2009, 1479, 160, 745, 2430, 180, 1857, 2096, 699, 2001, 2139, 2141, 476, 1335, 311, 1337, 1873, 231, 179, 261, 203, 14, 1799, 743, 935, 318, 1867, 1299, 1995, 1636, 1852, 2309, 563, 2002, 2003, 899, 1095, 2000, 1820, 467, 1870, 216, 1987, 968, 1864, 201, 143, 1875, 2568, 158, 366, 1127, 1558, 1574, 547, 633, 1465, 792, 24, 869, 215, 1100, 343, 391, 1989, 13, 68, 460, 157, 551, 1858, 1023, 2]",1,10.0,15.0,200.0,0.6613737075332349,, -"[109, 1346, 1998, 602, 687, 17, 1808, 1106, 416, 2125, 1871, 2122, 1701, 
180, 306, 378, 733, 210, 1479, 2139, 55, 1868, 2140, 790, 2501, 232, 191, 1641, 146, 2359, 1787, 261, 1814, 549, 1671, 1670, 962, 1440, 836, 2372, 2335, 899, 332, 185, 2296, 2230, 2550, 1508, 719, 1062, 1813, 1531, 1315, 2249, 833, 2045, 1809, 2117, 770, 2518, 2004, 1820, 438, 2361, 2360, 716, 1316, 1073, 102, 2127, 1405, 912, 1268, 1818, 230, 1710, 1542, 897, 603, 1348, 519, 738, 1131, 2136, 2607, 482, 1869, 1811, 2124, 1817, 2126, 1119, 13, 148, 1215, 24, 579, 1986, 2141, 1126, 1, 2120, 1111, 511, 944, 381, 1636, 297, 2706, 25, 1410, 158, 1983, 2430, 196, 1655, 869, 1251, 27, 2035, 1468, 2037, 1874, 606, 1687, 1864, 1152, 1095, 2040, 1614, 1324, 480, 547, 2379, 1100, 88, 1049, 1822, 1807, 1261, 1385, 1454, 2001, 1301, 2118, 1015, 794, 229, 417, 1875]",1,10.0,15.0,325.0,0.5841949778434269,, -"[1735, 1358, 1726, 1655, 1131, 1538, 52, 1620, 562, 197, 754, 359, 1284, 2131, 742, 389, 1139, 554, 1637, 1412, 2208, 446, 179, 2172, 1143, 388, 387, 2133, 2010, 1282, 905, 2295, 2180, 969, 842, 1507, 1851, 1333, 1483, 95, 490, 816, 525, 239, 1152, 2294, 989, 903, 2450, 1976, 749, 415, 1527, 1743, 2282, 2016, 231, 118, 306, 1343, 1070, 1072, 704, 1838, 2155, 2182, 581, 702, 1224, 1077, 586, 232, 74, 2031, 1535, 1118, 1249, 454, 2153, 2083, 2165, 2166, 993, 1973, 2436, 1029, 1013, 32, 1792, 858, 1616, 1583, 2030, 1975, 822, 491, 973, 255, 176, 1683, 2233, 280, 2232, 447, 1849, 2132, 181, 2293, 2485, 2198, 1218, 56, 666, 1351, 504, 1370, 1464, 880, 2050, 682, 1917, 1266, 412, 2109, 2231, 1359, 778, 2367, 2371, 2113, 935, 2278, 671, 850, 1402, 1515, 252, 2177, 1482, 190, 398, 630, 604, 2112, 2553, 115, 2072, 1142, 1919, 539, 1974, 69, 2134, 712, 2189, 505, 1676, 277, 2019, 1675, 2071, 779, 1926, 262, 1074, 830, 1525, 945, 1487, 1501, 1674, 1916, 728, 1203, 1079, 1914, 2480, 907, 891, 2217, 661, 2023, 1690, 1539, 1918, 2457, 452, 2608, 210, 955, 2164, 55, 506, 1296, 2100, 1920, 1285, 41, 551, 644]",1,10.0,20.0,225.0,0.6547267355982275,, -"[1127, 698, 588, 1885, 745, 661, 665, 
2123, 231, 286, 232, 160, 1337, 133, 2000, 467, 1787, 2040, 2229, 1522, 2004, 332, 924, 176, 553, 869, 2008, 476, 1051, 2003, 1346, 2007, 1532, 2383, 2120, 2009, 792, 1777, 673, 1558, 1844, 113, 1995, 868, 633, 743, 1705, 2228, 1772, 681, 874, 91, 1166, 1958, 415, 266, 1859, 1805, 1884, 2001, 1574, 366, 1697, 2045, 2122, 1403, 201, 24, 563, 71, 215, 261, 1778, 389, 442, 306, 664, 179, 973, 1453, 335, 566, 1045, 1661, 809, 426, 143, 1987, 2006, 1095, 124, 2066, 1992, 2005, 548, 1993, 519, 45, 409, 1986, 344, 1701, 1991, 896, 1789, 2121, 157, 1780, 2034, 935, 343, 729, 2, 1870, 1907, 2276, 391, 277, 1810, 638, 109, 1015, 899, 39, 733, 465, 699, 2259, 1519, 1876, 1624, 289, 2093, 1998, 593, 1812, 1875, 158, 2304, 1908, 808, 441, 318, 2002, 784, 2362, 416, 68, 1873, 236, 153, 1989, 349, 2335, 384, 1474, 451, 1481, 1779, 1788, 1505, 460, 1999, 2094, 13, 2092, 1023, 196, 532, 887, 767, 249, 1385, 1709, 547, 216, 1196, 235, 2357, 1569, 1434, 226, 1996, 1990, 1988, 610, 1149, 94, 968, 1869, 1994, 1704, 1419, 1965, 1349, 854, 1786, 1906, 210, 1997, 1115, 1966, 1097, 1634, 459, 589, 1964, 1529, 470, 2323, 102, 151, 1968, 2509, 627, 1336, 463, 2336, 271, 1971, 857, 205, 2095, 203, 1010, 1365, 1703, 696, 1178, 1671, 1240, 443, 1702, 1785, 1452, 2309, 1417, 1234, 22, 529, 1769, 2350, 789, 1970, 317, 2260, 759, 946, 362, 652]",1,10.0,25.0,325.0,0.6827917282127031,, -"[306, 1072, 2360, 417, 606, 1806, 17, 1655, 519, 2578, 1316, 1645, 1821, 2582, 1682, 2501, 1812, 795, 1410, 1807, 766, 2045, 1325, 2230, 2140, 2534, 171, 1223, 465, 603, 962, 180, 1098, 1116, 1131, 2141, 598, 790, 2291, 316, 1823, 722, 1329, 1548, 109, 823, 25, 1147, 302, 27, 2610, 605, 228, 935, 1567, 298, 1287, 0, 1097, 678, 1787, 1809, 2034, 1159, 2568, 2066, 2348, 295, 1654, 2067, 102, 2065, 794, 1813, 1315, 2613, 1106, 481, 1172, 14, 2139, 420, 1124, 1986, 1799, 1121, 2500, 926, 1871, 2309, 1062, 1095, 1814, 507, 387, 482, 2304, 568, 1856, 1862, 1497, 218, 716, 2303, 1933, 1363, 1013, 1894, 377, 1808, 1301, 
311, 1217, 1817, 1874, 719, 45, 1865, 665, 199, 1623, 253, 304, 2302, 873, 1160, 1615, 24, 1708, 918, 2243, 2268, 2011, 1107, 1873, 669, 1134, 1869, 1665, 1636, 576, 1875, 1023, 759, 185, 549, 179, 1265, 215, 1852, 1870, 203, 318, 1868, 1337, 835, 2493, 416, 19, 143, 899, 158, 699, 230, 2357, 729, 1477, 563, 1822, 503, 467, 1100, 862, 157, 401, 547, 2117, 2314, 94, 1335, 981, 574, 176, 733, 197, 2008, 2035, 1075, 1864, 297, 201, 1348, 1049, 2409, 1055, 1176, 54, 1192, 1810, 869, 1299, 767, 2379, 343, 1479, 1857, 2508, 303, 277, 261, 1453, 1171, 1811, 1859, 2308, 511, 1934, 577, 1846, 232, 224, 1939, 2307, 335, 251, 1876, 383, 633, 1489, 570, 2430, 231, 1940, 219, 1867, 1931, 1004, 2301, 960, 1300, 1177, 2096, 1936, 1855, 1413, 2573, 812, 1465, 402, 1932, 2327, 1941, 1820, 2217, 784, 1877, 1858, 435, 1818, 551, 205, 1440, 1866, 338, 1701, 1935, 1221, 1577, 866, 532, 1099, 1340, 877, 13, 351, 1863, 1542, 235, 776, 872, 1323, 411, 1331, 1943, 1819, 1607, 1816, 1576, 216, 1581, 1054, 874, 1432, 1294, 1241, 226, 1815, 1854, 1052, 1860]",1,10.0,30.0,325.0,0.6672821270310192,, -"[92, 1698, 2209, 1165, 144, 1836, 2159, 1328, 23, 1327, 108, 495, 2192, 1593, 213, 2158, 2161, 1504, 537, 145, 1835, 2157, 2160, 1647, 2622, 898]",0,,5.0,200.0,0.087149187592319,,0.2 -"[635, 2058, 1378, 2150, 1544]",0,,15.0,300.0,0.0993353028064992,,0.2 -"[1029, 129, 193, 344, 671, 682, 109, 1914, 1395, 1682, 2235, 1839, 1171, 500, 2242, 2062, 661, 778, 1975, 2078, 1343, 1455, 1537, 2094, 1623, 252, 2019, 282, 119, 807, 1168, 2699, 393, 779, 1525, 1919, 60, 2450, 1509, 1572, 1507, 1616, 1792, 115, 176, 630, 935, 1583, 1926, 2284, 524, 1303, 1503, 2060, 2061, 331, 415, 126, 138, 2079, 842, 1538, 1851, 2030, 2184, 1142, 2016, 2153, 2457, 69, 847, 2628, 1179, 701, 249, 854, 860, 1668, 564, 153, 47, 388, 446, 554, 1690, 2010, 2165, 2166, 70, 1897, 1549, 2286, 1226, 2207, 1191, 1885, 2485, 88, 2045, 2046, 559, 392, 2112, 648, 1958, 277, 880, 264, 940, 1013, 323, 1960, 973, 1661, 676, 2188, 113, 735, 
1224, 1800, 1898, 2270, 1167, 1553, 20, 1060, 1707, 2273, 238, 840, 2592, 1182, 441, 581, 2527, 1901, 979, 739, 1884, 306, 128, 233, 1828, 1957, 2274, 1825, 645, 118, 1917, 67, 747, 1956, 2630, 1237, 244, 342, 945, 1478, 1725, 1801, 1470, 1408, 2029, 1829, 1903, 1409, 1072, 1902, 1579, 1370, 1527, 417, 773, 1262, 1505, 1733, 1803, 1804, 891, 1895, 1900, 728, 2269, 400, 50, 1700, 440, 2271, 448, 1824, 709, 2190, 1955, 1543, 1881, 1784, 1797, 1805, 844, 1267]",1,,20.0,250.0,0.6539881831610044,,0.2 -"[2042, 391, 460, 745, 792, 935, 1166, 1434, 1574, 1990, 511, 1991, 1995, 716, 1576, 1822, 1823, 2142, 920, 2004, 2668, 775, 1968, 2038, 2041, 151, 566, 743, 1127, 722, 568, 1993, 1994, 576, 795, 2001, 2002, 1809, 1818, 48, 165, 1073, 2075, 159, 791, 1807, 382, 691, 1270, 1497, 185, 160, 1106, 790, 1999, 835, 1004, 1095, 1348, 1815, 2140, 521, 1297, 1301, 2471, 2077, 49, 86, 1002, 1273, 1319, 1448, 1614, 1983, 2036, 2, 45, 476, 681, 1149, 1567, 2501, 1697, 1704, 1989, 1992, 1997, 1107, 1121, 1331, 1787, 1811, 1813, 1316, 2138, 855, 141, 617, 1929, 2035, 366, 673, 603, 2500, 27, 230, 1996, 2000, 1062, 1812, 1814, 1816, 1819, 927, 1315, 480, 968, 1003, 2398, 2037, 2076, 1889, 1891, 2039, 2040, 2130, 68, 71, 1097, 102, 228, 417, 2573, 1709, 224, 1987, 481, 482, 784, 549, 1863, 637, 845, 17, 1473, 1573, 2707, 1020, 1026, 1654, 143, 232, 2309, 1558, 869, 1052, 719, 1299, 1998, 1581, 1852, 1858, 1859, 1817, 1865, 1821, 1873, 1874, 1877, 2141, 2045, 841, 1336, 1941, 1465, 157, 1147, 420, 231, 261, 563, 1075, 1100, 1241, 1453, 1799, 1861, 1862, 1868, 1869, 1871, 1876, 1810, 2034, 297, 2430, 197, 2217, 1708, 962, 1335, 158, 318, 343, 729, 874, 1440, 1479, 1665, 1856, 1857, 1866, 2493, 14, 1542, 176, 387, 1894, 109, 1159, 1221, 2314, 310, 1615, 1013, 926, 201, 199, 203, 226, 416, 532, 351, 1323, 1337, 1860, 1864, 1870, 598, 316, 180, 179, 669, 2534, 1124, 1950, 306, 1072, 2357, 1171]",1,,25.0,300.0,0.630354505169867,,0.2 -"[1610, 426, 608, 655, 1029, 887, 910, 1490, 129, 1551, 1656, 
671, 682, 1395, 1171, 500, 2242, 661, 138, 565, 139, 329, 476, 487, 1343, 1245, 1251, 344, 1705, 1770, 2094, 1775, 1779, 1583, 119, 1549, 2061, 2062, 249, 1975, 1507, 1009, 1346, 1640, 1651, 1455, 1537, 1792, 109, 1926, 1168, 1179, 393, 1525, 1919, 60, 2450, 801, 564, 2045, 112, 294, 452, 719, 1538, 1623, 115, 176, 277, 69, 807, 2235, 2207, 1191, 779, 415, 1555, 2553, 1509, 153, 124, 302, 542, 656, 1616, 1045, 1851, 1193, 630, 880, 935, 2016, 1914, 2286, 701, 2060, 2485, 88, 1572, 2046, 350, 519, 842, 1690, 2030, 1771, 1781, 2153, 1013, 2457, 847, 282, 2628, 2284, 1543, 778, 647, 1953, 126, 2078, 47, 388, 1158, 2010, 2112, 2166, 193, 1584, 1772, 323, 1226, 524, 1303, 1839, 854, 1885, 1224, 331, 1668, 2079, 308, 1619, 255, 573, 554, 590, 2165, 1367, 70, 440, 2184, 1072, 1142, 2019, 645, 1682, 581, 973, 67, 306, 2481, 1414, 1441, 1553, 1825, 750, 1700, 2271, 793, 2592, 264, 1182, 1579, 2189, 2190, 2527, 979, 1527, 860, 1802, 2269, 20, 233, 1957, 1958, 2273, 252, 1404, 1070, 118, 2022, 2188, 747, 1956, 1355, 1884, 1370, 844, 189, 1478]",1,,20.0,300.0,0.6244460856720827,,0.8 -"[306, 2045, 109, 621, 387, 231, 687, 610, 249, 1508, 1850, 2131, 1677, 822, 2015, 415, 1333, 661, 2359, 2025, 666, 519, 1092, 502, 1840, 41, 356, 461, 1120, 2118, 2034, 651, 2133, 1671, 1976, 2071, 1416, 1295, 1695, 1844, 1675, 88, 61, 304, 1644, 1729, 1841, 1479, 86, 276, 1974, 1482, 2117, 2287, 504, 1572, 1403, 252, 979, 737, 2326, 32, 2078, 1882, 2425, 1847, 1846, 1741, 2178, 2132, 596, 851, 1683, 382, 2024, 2023, 1661, 1405, 1843, 1973, 578, 880, 1855, 1332, 1171, 478, 1978, 681, 1314, 2423, 530, 749, 1521, 1845, 437, 1288, 211, 181, 1293, 1625, 1463, 471, 1583, 229, 2424, 498, 1464, 1110, 1013, 261, 1914, 149, 279, 1309, 955, 1402, 1080, 2388, 491, 2485]",1,14.0,12.0,275.0,0.6362629246676514,, -"[2335, 1224, 1481, 133, 2123, 1519, 1848, 839, 1583, 1908, 2268, 91, 1120, 2380, 1729, 109, 1677, 1293, 2379, 429, 1039, 69, 1847, 2044, 1840, 2045, 1680, 1661, 45, 2178, 1675, 1403, 1844, 373, 1395, 
2576, 1682, 1377, 2267, 586, 1625, 2025, 485, 2002, 2291, 1464, 1669, 1330, 794, 1160, 965, 2589, 1025, 1527, 1845, 1810, 552, 94, 2304, 383, 2386, 2303, 81, 1986, 1818, 1644, 88, 1309, 1925, 1302, 2003, 878, 436, 1992, 181, 1623, 435, 2024, 1695, 377, 1265, 624, 1013, 1062, 1889, 2008, 1329, 2001, 2311, 2068, 1926, 527, 2014, 1817, 2321, 1463, 885, 836, 2108, 1851, 733, 2419, 278, 1294, 530, 631, 737, 678, 1842, 257, 1351, 2121, 1192, 401, 2381, 2155, 2266, 276, 2131, 2301]",1,14.0,12.0,295.0,0.6377400295420975,, -"[1870, 1701, 160, 232, 1876, 2155, 745, 1873, 2000, 1127, 691, 391, 1574, 1983, 1023, 1995, 1218, 460, 2001, 2002, 1614, 1867, 74, 1266, 1149, 617, 335, 1704, 743, 1998, 158, 1984, 519, 1859, 1908, 1665, 2294, 141, 1558, 1992, 792, 1081, 2037, 366, 2038, 1994, 1095, 1869, 68, 2040, 215, 194, 1875, 1989, 49, 1453, 2130, 1891, 1996, 2003, 109, 1224, 1230, 405, 1894, 2042, 2563, 1812, 1002, 2039, 86, 897, 2435, 1999, 2564, 1889, 586, 201, 1319, 2117, 1079, 164, 55, 1491, 1178, 1448, 1342, 1846, 1986, 1709, 14, 2041, 2034, 1336, 2309, 1905, 1270, 1138, 2248, 218, 1807, 1380, 1435, 383, 1050, 2036, 1654, 2594, 1434, 1626, 673, 1119, 1529, 210, 2004, 2006, 2122, 1013, 146, 566]",1,14.0,12.0,300.0,0.6093057607090103,, -"[175, 955, 596, 1787, 2062, 1337, 563, 2450, 2388, 41, 1346, 1743, 359, 524, 1920, 880, 2184, 1142, 857, 1483, 1592, 115, 1701, 1624, 905, 506, 236, 393, 1072, 1503, 1918, 2016, 803, 701, 124, 519, 69, 604, 1184, 2019, 1674, 70, 704, 470, 1676, 318, 1905, 426, 176, 1077, 779, 1358, 1525, 153, 2285, 1772, 1079, 2153, 1919, 1785, 203, 682, 1097, 252, 973, 306, 778, 1820, 1675, 389, 1915, 1914, 1824, 1779, 1789, 289, 109, 1370, 459, 2100, 1986, 2207, 891, 1527, 129, 193, 1849, 979, 1464, 1267, 1826, 440, 441, 671, 88, 451, 1583, 1359, 2241, 1873, 854, 2189, 1769, 735, 739, 2101, 2284, 630, 1415, 1444, 2286, 2269, 392, 1895, 645, 1899, 2094, 1537, 1309, 277, 1455, 647, 1900, 2242, 904, 1191, 1414, 500, 1917, 490, 1485, 1495, 2060, 133, 1839, 458, 
581, 747, 2274, 1959, 119, 1543, 113, 921, 1619, 454, 2022, 1360, 950, 1773, 1303, 610, 1539, 935, 1362, 2190, 1916, 1882, 540, 1661, 603, 102, 2323, 379, 67, 118, 2061, 1237, 720, 1898, 2527, 830, 371, 2699, 1879, 1881, 1269, 753, 703, 801]",1,14.0,18.0,250.0,0.6957163958641064,, -"[160, 2000, 2001, 722, 745, 2501, 1106, 2034, 2379, 2500, 277, 1812, 873, 2009, 2534, 743, 1894, 2002, 417, 71, 1577, 1567, 199, 391, 420, 176, 2117, 68, 1995, 460, 1127, 962, 2003, 1996, 366, 179, 1874, 1992, 1149, 1989, 476, 14, 1999, 605, 1665, 1097, 1709, 1159, 1708, 935, 1147, 465, 1221, 1941, 1704, 311, 1615, 2140, 551, 102, 2357, 1315, 0, 1997, 1558, 2582, 1479, 1072, 681, 792, 2381, 1810, 926, 2314, 2041, 1869, 306, 1950, 261, 1542, 1986, 1171, 1334, 1865, 960, 197, 2008, 2327, 310, 109, 2573, 767, 1166, 2007, 1172, 1987, 2243, 1988, 1095, 17, 1846, 1862, 1871, 304, 729, 2, 2045, 1991, 639, 158, 1013, 1873, 1107, 2359, 2006, 687, 1867, 603, 2096, 2004, 231, 944, 699, 1856, 669, 716, 203, 151, 1434, 1241, 1124, 185, 1301, 2139, 467, 180, 343, 1346, 784, 1373, 1820, 900, 45, 1877, 2372, 2309, 1998, 1335, 673, 1337, 563, 1799, 1857, 1316, 1993, 566, 54, 1697, 1666, 1990, 2005, 318, 1994, 228, 899, 2607, 215, 633, 13, 1855, 232, 1821, 2706, 1710, 2138, 1299, 1023, 836, 216, 2568]",1,14.0,18.0,265.0,0.6421713441654358,, -"[1651, 816, 1772, 109, 2034, 1248, 153, 2405, 1980, 2048, 1448, 2056, 236, 65, 1337, 454, 2094, 1156, 935, 306, 133, 2418, 415, 2078, 712, 1983, 2046, 1351, 1568, 736, 318, 1683, 2136, 630, 120, 95, 1505, 341, 938, 563, 850, 734, 2096, 1979, 262, 220, 1074, 1701, 1856, 451, 2013, 176, 408, 441, 2394, 252, 672, 1784, 604, 2357, 447, 1787, 519, 56, 126, 1080, 745, 487, 1336, 1572, 2189, 2071, 139, 429, 1852, 1309, 1894, 1616, 1867, 88, 1535, 2276, 146, 603, 1986, 2024, 1239, 376, 1661, 2045, 2282, 1293, 530, 1842, 1776, 1410, 699, 215, 1907, 1358, 1203, 1377, 412, 973, 86, 2317, 2117, 436, 1841, 115, 118, 1843, 1121, 160, 1592, 1846, 1464, 1926, 772, 2419, 2027, 1644, 
1840, 308, 1416, 748, 471, 2047, 277, 1973, 1995, 651, 1850, 366, 572, 897, 1676, 1729, 1127, 69, 1847, 2281, 1830, 1421, 553, 1675, 1849, 98, 1359, 2009, 181, 2293, 149, 94, 868, 1566, 1652, 666, 1118, 1920, 1494, 2026, 1914, 1655, 2109, 836, 1487, 693, 74, 2485, 539, 490, 1343, 32, 708, 1791, 1403, 1482, 657, 1538]",1,14.0,18.0,275.0,0.6868537666174298,, -"[2238, 277, 210, 22, 2117, 1671, 1508, 1811, 553, 1869, 2667, 1703, 2118, 1823, 1787, 1822, 598, 2668, 48, 297, 2033, 2009, 1666, 197, 570, 55, 306, 2122, 733, 332, 740, 770, 1454, 24, 2357, 2430, 1324, 191, 1797, 790, 1405, 1813, 429, 1864, 1614, 1583, 2594, 869, 1299, 2075, 1331, 1818, 1452, 719, 1062, 1072, 2008, 231, 1219, 1927, 2120, 1821, 2, 2303, 1107, 293, 705, 45, 229, 1655, 2032, 201, 1808, 1817, 1968, 2045, 261, 102, 1819, 788, 1, 1809, 2613, 2691, 2003, 1892, 1929, 1581, 160, 2035, 1634, 1251, 33, 1012, 39, 1121, 1893, 1995, 1576, 962, 1358, 1349, 1894, 1127, 482, 1336, 1049, 2531, 206, 968, 179, 1203, 366, 1870, 2040, 151, 1701, 1965, 1532, 2041, 1479, 2001, 519, 1998, 1875, 2680, 795, 1820, 2550, 745, 49, 1871, 2335, 1810, 232, 695, 382, 899, 180, 493, 1215, 1002, 476, 298, 912, 536, 1807, 2295, 2038, 716, 1325, 1952, 71, 417, 14, 1814, 1303, 835, 351, 1448, 27, 1873, 1889, 1026, 2077, 13, 1095, 1321, 1261, 1709, 505, 1812, 141, 897, 86, 2494, 396, 916, 511, 1149]",1,14.0,18.0,300.0,0.655096011816839,, -"[2122, 2123, 2003, 2524, 86, 1267, 1826, 885, 1039, 1824, 1979, 617, 1156, 429, 91, 1980, 725, 442, 483, 1534, 2121, 878, 2024, 1669, 14, 1982, 158, 1428, 1848, 1131, 2016, 2381, 816, 2380, 2013, 1436, 1986, 2020, 1898, 771, 49, 1981, 2295, 1202, 714, 1899, 1909, 2044, 1652, 668, 627, 2010, 436, 1419, 1900, 2018, 1332, 2071, 2034, 2002, 2332, 2156, 891, 2337, 424, 2012, 2178, 1972, 2259, 645, 417, 827, 2001, 1152, 1511, 130, 604, 2379, 325, 55, 1046, 2021, 1713, 2155, 1977, 2419, 1655, 2394, 382, 2336, 815, 224, 651, 897, 1079, 147, 76, 1519, 743, 1732, 1481, 1410, 1385, 1174, 323, 2017, 60, 1658, 
1895, 1015, 851, 2108, 2238, 2023, 691, 210, 191, 69, 1741, 141, 1417, 151, 1494, 1882, 2015, 88, 356, 2350, 1080, 1309, 2418, 1358, 841, 164, 1042, 1927, 1703, 2182, 162, 2151, 737, 1527, 2116, 1978, 2378, 2348, 2054, 2236, 1907, 1901, 2052, 2073, 503, 1529, 2072, 1097, 146, 470, 1677, 1517, 525, 2640, 550, 498, 1920, 95, 211, 1115, 574, 433, 1259, 1435, 1268, 2011, 2189, 2022, 809, 738, 93, 2014, 1288, 415, 1394, 2162, 1847, 1926, 1924, 697, 1906, 950, 1983, 2051, 74, 2276, 203, 942, 1118, 787, 1667, 2074]",1,14.0,20.0,250.0,0.6410635155096012,, -"[2355, 2357, 149, 2263, 1015, 572, 1416, 1145, 1309, 35, 1103, 1913, 1074, 1441, 880, 861, 1972, 2394, 1042, 934, 454, 1850, 1410, 578, 2608, 181, 935, 1070, 1527, 630, 1649, 1953, 88, 895, 1842, 2194, 1912, 2490, 440, 826, 1142, 2116, 2052, 725, 711, 1652, 1072, 1330, 2015, 2396, 95, 897, 279, 94, 2132, 496, 2135, 1169, 1618, 1926, 2480, 1358, 2105, 2419, 147, 1966, 1555, 528, 544, 2262, 1421, 1501, 277, 324, 2649, 2016, 1357, 2068, 1655, 891, 262, 1463, 1296, 1498, 733, 502, 195, 1517, 973, 1013, 306, 671, 661, 693, 1616, 2189, 1470, 2025, 2278, 748, 2195, 74, 551, 2028, 1118, 1792, 1927, 666, 1879, 1487, 32, 118, 712, 2009, 490, 55, 1661, 2376, 1359, 412, 644, 2274, 1568, 1163, 2653, 1683, 1203, 2457, 1143, 56, 801, 1120, 1218, 504, 2310, 1617, 461, 850, 737, 675, 2423, 2027, 2134, 2422, 783, 1974, 210, 651, 1121, 803, 1917, 728, 1402, 1674, 1675, 539, 274, 447, 1976, 1464, 1538, 2281, 1550, 992, 553, 1920, 1152, 2282, 1435, 1372, 1910, 1676, 1849, 945, 838, 242, 2280, 1512, 2026, 1351, 1824, 2071, 1915, 252, 2019, 518, 2109, 2295, 1732, 2388, 115, 1482, 424, 382, 1079, 2110, 1930, 1848, 1535, 176]",1,14.0,20.0,300.0,0.662850812407681,, -"[722, 2034, 1567, 927, 68, 1894, 2500, 2000, 391, 1995, 1013, 1221, 743, 417, 49, 2117, 160, 1952, 1999, 745, 2501, 1950, 1951, 2582, 1812, 420, 148, 1577, 962, 606, 151, 1316, 277, 366, 2217, 1810, 1991, 2009, 0, 1385, 1941, 197, 2314, 2142, 2001, 935, 460, 1708, 180, 1095, 873, 
788, 1997, 310, 1874, 465, 199, 1127, 14, 1166, 228, 176, 1709, 2002, 1147, 714, 1149, 2243, 203, 1697, 25, 304, 429, 1106, 2041, 1989, 2140, 1869, 45, 2003, 1072, 1996, 1334, 2309, 1846, 2206, 1987, 17, 1665, 836, 158, 2205, 1871, 2379, 1031, 2045, 1479, 261, 2007, 1986, 179, 306, 1862, 1171, 2534, 1666, 681, 2004, 1558, 1704, 2006, 1172, 716, 1865, 1542, 1241, 729, 792, 1857, 767, 1820, 1107, 1867, 311, 109, 1873, 1990, 1994, 13, 231, 2, 2141, 1159, 2005, 1124, 603, 1301, 605, 673, 2381, 476, 1434, 1856, 102, 687, 548, 669, 1299, 633, 2359, 54, 1823, 1335, 71, 1988, 1821, 1097, 1799, 467, 1337, 2096, 1346, 363, 566, 926, 2139, 1992, 899, 2008, 1315, 1998, 699, 2372, 2138, 1993, 1465, 185, 944, 2357, 1852, 563, 343, 1453, 1075, 551, 1855, 1710, 318, 215, 1615, 1687, 1859, 960, 335, 2573, 232, 205, 2607, 1003, 48, 1866, 2327, 1858, 1662, 968, 1868, 480, 1023, 1875, 1574, 1870, 2706, 784, 24, 297, 519, 1876, 1373, 2568, 1603, 1877, 1864, 1636, 1573, 2471, 766, 2707, 2430, 387, 598, 869, 165, 2698, 1670, 216, 1126, 900, 639, 1701, 1440, 201, 1863, 570, 157, 845, 521, 1473, 1100, 811, 316]",1,14.0,25.0,275.0,0.6639586410635155,, -"[2555, 2338, 2340, 1346, 1191, 2339, 836, 1858, 1131, 1701, 1358, 205, 1654, 24, 869, 1894, 420, 1948, 1950, 297, 1864, 1670, 2365, 416, 2163, 503, 546, 2582, 1055, 306, 985, 316, 199, 465, 1446, 332, 823, 231, 445, 1223, 2409, 1800, 1665, 277, 1445, 598, 553, 1558, 2034, 1666, 2348, 387, 303, 2489, 2305, 1655, 109, 2306, 1488, 2045, 2360, 570, 2243, 2379, 1709, 879, 899, 302, 201, 1454, 574, 1149, 310, 2309, 2001, 519, 1410, 232, 2605, 1869, 1870, 792, 1287, 383, 606, 1583, 1875, 180, 2, 579, 733, 2577, 215, 1014, 2667, 1023, 2430, 1608, 197, 1116, 1166, 1299, 1912, 1876, 1859, 2668, 2130, 1348, 2303, 2253, 2247, 335, 1873, 1434, 476, 206, 88, 665, 1453, 1807, 603, 1424, 68, 2003, 1107, 681, 576, 2143, 13, 1818, 2004, 2691, 391, 2009, 2568, 2578, 1489, 2127, 1995, 722, 218, 327, 1140, 2002, 2335, 745, 160, 1820, 719, 2238, 1134, 1127, 45, 
366, 2236, 1815, 1821, 493, 1704, 2063, 171, 1121, 1787, 179, 743, 1192, 151, 1, 716, 1570, 1996, 568, 835, 1806, 27, 1819, 2136, 1998, 1273, 968, 2077, 2291, 71, 1624, 1817, 1548, 224, 1497, 2307, 1907, 2008, 1663, 1097, 1813, 1171, 1235, 1823, 1697, 1239, 1808, 10, 1325, 2124, 1004, 1812, 790, 605, 1219, 962, 2230, 2532, 203, 1574, 1111, 1062, 1581, 566, 2125, 673, 1611, 1522, 460, 481, 1000, 2308, 482, 1331, 1066, 1986, 1603, 2129, 1809, 1462, 795, 1623, 1987, 1119, 2000, 2126, 2550, 1989, 829, 1988, 2007, 1126, 1811, 1816, 1814, 298, 2128]",1,14.0,25.0,285.0,0.6436484490398818,, -"[1159, 109, 981, 2534, 2291, 766, 2582, 199, 519, 482, 790, 481, 1265, 14, 968, 311, 180, 218, 1869, 2707, 1814, 873, 1297, 0, 2117, 1868, 927, 2268, 1121, 1160, 716, 316, 306, 1787, 17, 1147, 2011, 401, 2430, 1986, 377, 1335, 54, 1682, 845, 387, 1172, 1865, 1812, 179, 254, 2034, 960, 1708, 570, 176, 669, 2045, 2304, 1211, 1171, 1862, 2141, 568, 866, 1615, 511, 637, 45, 1192, 2568, 1873, 1808, 1817, 2096, 2314, 2008, 304, 729, 197, 1821, 962, 1856, 1124, 719, 1894, 1473, 158, 794, 165, 1465, 1348, 835, 1316, 318, 767, 926, 1679, 343, 1337, 1871, 2140, 1665, 1023, 224, 2303, 1867, 1062, 2357, 205, 1096, 2139, 563, 1818, 1846, 48, 784, 1003, 467, 733, 1479, 1855, 1095, 416, 549, 1072, 383, 2609, 1488, 2003, 1799, 2138, 261, 1013, 215, 2035, 201, 94, 40, 1816, 521, 2493, 1870, 231, 24, 1875, 232, 598, 2301, 1636, 1876, 678, 1857, 1453, 1859, 633, 143, 335, 1049, 1810, 230, 25, 1004, 1315, 1809, 699, 1864, 1863, 435, 411, 1820, 1852, 2217, 1075, 157, 869, 1221, 2327, 1866, 2317, 1811, 899, 795, 1822, 1701, 759, 551, 532, 1344, 862, 1299, 1813, 1823, 507, 1107, 1329, 2066, 2302, 2122, 1440, 2610, 480, 1573, 1872, 2065, 2508, 1100, 1301, 13, 351, 1860, 1052, 547, 1858, 576, 1300, 1941, 1323, 297, 1874, 1098, 2067, 226, 1853, 402, 874, 1819, 1176, 1212, 1940, 664, 235, 253, 1854, 1933, 1363, 1877, 1861, 1935, 1413, 872, 185, 1477, 1581, 1432, 27, 1932, 1936, 1576, 
1334]",1,14.0,25.0,295.0,0.5952732644017725,, -"[1729, 814, 1625, 323, 498, 1658, 2394, 2382, 2018, 2252, 572, 162, 1416, 815, 1269, 1912, 2010, 586, 1474, 121, 1882, 471, 2251, 60, 69, 893, 62, 1840, 2102, 2016, 1494, 897, 1115, 696, 1338, 1692, 1713, 2012, 1630, 1351, 1174, 2045, 2189, 306, 731, 743, 61, 2017, 2186, 1661, 588, 2132, 995, 1669, 1403, 2136, 1026, 593, 76, 2020, 1848, 1158, 555, 506, 1274, 1079, 1484, 808, 1120, 2383, 1583, 1999, 2335, 1288, 1741, 2013, 88, 1732, 1851, 1921, 2155, 1983, 510, 591, 1279, 737, 1309, 1920, 1017, 2332, 1675, 642, 299, 1925, 787, 55, 589, 1013, 1926, 1051, 2620, 130, 210, 1908, 1974, 984, 887, 302, 1527, 1849, 1892, 924, 910, 851, 2068, 680, 1330, 280, 707, 836, 1909, 1759, 74, 1118, 1464, 2364, 2178, 818, 181, 1839, 901, 1220, 2014, 2293, 497, 1846, 2015, 880, 399, 6, 2472, 2199, 485, 1156, 1481, 2419, 1376, 1395, 2011, 2347, 1394, 1769, 544, 979, 1303, 1847, 908, 1677, 1463, 1535, 543, 1602, 1204, 1845, 530, 1844, 1842, 1397, 1521, 634, 636, 1080, 2291, 763, 300, 192, 2418, 651, 239, 667, 2163, 2502, 693, 861, 2051, 490, 841, 334, 415, 315, 2233, 2052, 1125, 725, 1580, 2264, 1644, 1843, 1850, 1293, 2292, 771, 1104, 2108, 1015, 2072, 746, 1635, 858, 2219, 2330, 1069, 1838, 456, 2021, 1695, 1916, 2201, 1042, 2333, 2073, 65, 95, 2200, 57, 2232, 2054, 1517, 52, 1588, 966, 204, 423, 276, 2133, 408, 2074, 1047, 597, 1841, 100, 2131, 1139, 2182, 751, 552, 2576, 619, 373, 1467]",1,14.0,25.0,300.0,0.6853766617429837,, -"[885, 878, 1203, 1013, 1978, 1851, 1980, 1847, 1085, 270, 2485, 424, 1420, 2288, 514, 1421, 279, 1644, 1039, 1810, 816, 1370, 1843, 181, 1979, 118, 1908, 835, 2024, 2017, 1474, 1732, 1402, 483, 2481, 114, 2312, 2426, 1587, 1869, 1215, 2396, 1894, 471, 1572, 1174, 1464, 600, 461, 2403, 1848, 162, 518, 2110, 1676, 1332, 1195, 1410, 2109, 1919, 1291, 815, 130, 234, 1178, 841, 116, 1958, 2350, 1222, 502, 436, 505, 249, 1343, 2182, 1394, 778, 519, 836, 2154, 1417, 1487, 113, 702, 1494, 610, 652, 1982, 1592, 666, 724, 
1625, 2153, 2394, 1850, 1583, 1975, 32, 204, 2280, 1266, 1074, 1268, 857, 735, 2014, 2010, 1984, 739, 306, 1027, 2195, 1529, 2155, 2078, 838, 1914, 510, 2287, 61, 1043, 1844, 1660, 1333, 525, 818, 963, 1021, 708, 682, 779, 966, 598, 1121, 203, 415, 2156, 252, 973, 2309, 1224, 109, 668, 1396, 2013, 1314, 1677, 2644, 1501, 470, 2256, 1683, 2449, 1637, 1097, 210, 854, 242, 1543, 994, 88, 1655, 2016, 2178, 1482, 2131, 843, 1938, 2282, 2133, 1909, 1884, 955, 102, 1525, 1671, 749, 1881, 1538, 2152, 76, 433, 822, 1171, 504, 2339, 593, 498, 2397, 2425, 2395, 1882, 1523, 711, 147, 1226, 944, 743, 1937, 2542, 1956, 1955, 2138, 2012, 935, 1527, 586, 2136, 1977, 1341, 1885, 1976, 1695, 276, 1500, 479, 1110, 1878, 696, 2528, 344, 807, 2185, 1981, 1399, 737, 753, 2151, 1133, 1880, 2228, 1045, 868, 851, 1131, 1463, 2132, 826, 548, 1115, 451, 441, 1526, 1873, 1713, 890, 2323, 1269, 389, 830, 2015, 2011, 1658, 1741, 300, 1571, 1309, 1227, 1973, 988, 1630, 1622, 490, 2102, 2276, 2227, 1288, 1879, 516, 1974, 2099, 1353, 2105, 2103, 995, 555, 661, 1773, 517]",1,14.0,28.0,295.0,0.6000738552437223,, -"[2133, 1464, 1655, 156, 2131, 1695, 154, 1791, 1848, 2280, 1851, 347, 1421, 326, 1842, 1975, 1139, 242, 155, 1909, 1133, 838, 2181, 239, 90, 1120, 2155, 2282, 1849, 2395, 1609, 2054, 1121, 2045, 1500, 2228, 53, 276, 423, 1568, 1396, 181, 408, 170, 244, 1683, 608, 2281, 578, 1521, 173, 1583, 2185, 252, 973, 1482, 1538, 1628, 59, 95, 72, 1998, 1661, 1224, 246, 341, 779, 1930, 1704, 471, 868, 456, 1841, 1526, 1846, 142, 2136, 506, 610, 1463, 52, 2528, 289, 334, 382, 191, 2132, 1131, 1652, 118, 1999, 1358, 68, 1420, 2425, 2019, 1085, 2397, 1986, 1487, 1540, 880, 1013, 836, 228, 2304, 1529, 34, 346, 2183, 822, 1115, 1293, 2172, 2287, 101, 1844, 724, 1403, 733]",1,16.0,12.0,300.0,0.6078286558345642,, -"[2380, 91, 1810, 2064, 2123, 1779, 429, 2025, 1251, 1180, 1807, 790, 759, 1448, 2477, 573, 230, 377, 678, 2613, 2381, 1814, 1171, 2378, 1304, 794, 1781, 327, 985, 2348, 2044, 1820, 180, 2325, 285, 
2121, 818, 306, 849, 185, 2034, 2550, 2307, 109, 1347, 2001, 1295, 157, 505, 435, 401, 2063, 1325, 267, 482, 2077, 1446, 746, 1185, 2582, 2045, 2006, 1346, 1912, 1812, 1522, 1937, 1523, 2138, 1095, 2308, 1348, 603, 2301, 314, 574, 2230, 1119, 1583, 733, 1999, 1907, 2379, 1666, 1996, 387, 598, 935, 2003, 1358, 1701, 298, 316, 169, 1738, 548, 231, 2076, 2009, 1445, 297, 1993, 406, 1991, 1864, 869, 1020, 553, 681, 1992, 1697, 2000, 1670, 1997, 24, 809, 1873, 2083, 1870, 695, 1066, 2004, 1998, 2008, 1149, 1704, 1239, 2253, 2412, 383, 1894, 1166, 519, 1560, 1739, 1096, 197, 827, 416, 2365, 673, 2509, 1858, 1258, 566, 1709, 94, 1987, 2002, 1994, 1876, 215, 1875, 2667, 232, 396, 1197, 2335, 335, 332, 1728, 1453, 503, 836, 1127, 1995, 277, 1574, 2326, 201, 191, 968, 1859, 476, 2091, 366, 179, 2303, 2599, 1988]",1,16.0,18.0,250.0,0.6451255539143279,, -"[745, 1584, 743, 2002, 2001, 303, 2004, 1798, 1473, 2089, 68, 2707, 2048, 1989, 391, 1656, 958, 460, 1572, 160, 2003, 719, 887, 2, 1997, 2009, 673, 2005, 1072, 1987, 1127, 2006, 792, 2080, 165, 1297, 1775, 2090, 1640, 366, 1995, 1367, 1797, 1770, 2085, 1483, 350, 1166, 1782, 608, 2084, 1558, 910, 1990, 487, 2046, 1066, 191, 2091, 655, 1991, 899, 71, 708, 656, 1245, 1573, 2000, 236, 316, 48, 103, 2088, 302, 2412, 521, 151, 1802, 1705, 297, 637, 1624, 1804, 2326, 2138, 417, 2078, 2403, 1823, 1805, 1013, 329, 1771, 1490, 112, 1772, 845, 426, 766, 836, 1131, 1986, 1197, 1993, 1821, 1799, 139, 126, 306, 1781, 1697, 1704, 695, 1996, 1858, 1670, 308, 1158, 1787, 1661, 598, 1551, 681, 153, 973, 1193, 968, 1779, 2086, 1998, 1871, 1846, 45, 2045, 573, 102, 124, 2094, 566, 542, 1994, 1434, 1574, 138, 133, 1789, 1873, 1812, 1003, 1785, 1251, 1856, 1992, 2008, 1149, 1875, 1870, 452, 1337, 175, 201, 1095, 459, 1867, 1343, 2087, 1651, 1299, 1999, 109, 341, 2007, 1988, 1820, 143, 1709, 2388, 476, 24, 1479]",1,16.0,18.0,265.0,0.6248153618906942,, -"[1538, 2185, 1500, 1133, 838, 1909, 510, 2054, 6, 1789, 2282, 146, 456, 2347, 836, 1906, 2305, 
1013, 2280, 1602, 1120, 1930, 22, 2025, 2199, 1655, 490, 925, 2117, 1452, 1925, 901, 469, 1531, 1867, 242, 2238, 2055, 1628, 149, 1198, 356, 2281, 1385, 1468, 1204, 2051, 2262, 1914, 1969, 210, 1435, 1923, 868, 1131, 373, 1425, 1973, 485, 1121, 1618, 1769, 1580, 1050, 1926, 572, 1929, 2073, 1592, 1922, 1692, 465, 61, 11, 2453, 1583, 1924, 979, 248, 1481, 1634, 2385, 1927, 2304, 1848, 738, 1222, 1417, 1839, 1264, 1928, 2198, 1042, 624, 1015, 405, 544, 1701, 1008, 1278, 1846, 1416, 55, 2485, 2283, 1968, 2034, 399, 2306, 1125, 2116, 30, 948, 2189, 1079, 1143, 1851, 2333, 1110, 65, 25, 309, 1703, 450, 1215, 1964, 1525, 695, 1349, 973, 1047, 2321, 429, 1970, 1569, 769, 1234, 779, 2274, 1025, 1324, 2295, 1818, 39, 2319, 2292, 497, 2152, 718, 1240, 1919, 778, 863, 1702, 1810, 305, 1382, 443, 38, 1967, 789, 1068, 698, 1341, 1975, 963, 1526, 2052, 1370, 2425, 1065, 1986, 486, 1704, 1532, 1965, 2011, 2667, 436, 1012]",1,16.0,18.0,285.0,0.6052437223042836,, -"[588, 2040, 2383, 2009, 2001, 1051, 2120, 698, 743, 661, 2229, 91, 160, 1885, 2123, 665, 2121, 1777, 236, 2008, 153, 1532, 745, 1789, 133, 792, 1149, 2228, 1346, 1558, 1779, 2298, 1958, 2005, 2003, 266, 1434, 306, 332, 389, 681, 366, 1998, 1574, 2045, 215, 335, 1870, 2000, 286, 1127, 113, 566, 1634, 1045, 2066, 415, 2002, 1780, 1805, 2122, 1884, 1697, 426, 1994, 519, 1993, 809, 2004, 1987, 179, 1661, 442, 68, 899, 460, 344, 2094, 441, 973, 232, 673, 1769, 1986, 2034, 589, 2006, 935, 968, 1015, 548, 2, 1778, 1787, 868, 1989, 2259, 1812, 1705, 924, 1349, 1992, 1786, 2007, 277, 1873, 1772, 476, 1995, 2335, 1876, 1907, 1522, 109, 2336, 1701, 459, 391, 39, 733, 451, 887, 1624, 1519, 124, 1996, 1337, 1908, 1453, 1474, 593, 808, 563, 2297, 318, 1671, 249, 409, 610, 1505, 1788, 1385, 2276, 1810, 1859, 2093, 1990, 1997, 201, 176, 1875, 384, 45, 1481, 1166, 1095, 1023, 210, 2509, 196, 1991, 638, 102, 2323, 1709, 854, 1097, 1965, 271, 1968, 1419, 1964, 71, 1704, 2357, 94, 1988, 1999, 470, 
1869]",1,16.0,18.0,295.0,0.6746676514032496,, -"[364, 524, 1542, 552, 2045, 90, 687, 1577, 289, 109, 2034, 14, 603, 819, 609, 297, 417, 326, 341, 154, 1124, 173, 1941, 68, 1072, 59, 708, 156, 199, 2166, 669, 310, 306, 155, 899, 101, 519, 764, 580, 689, 754, 758, 1526, 598, 333, 1616, 960, 1159, 466, 420, 73, 2501, 962, 1851, 2534, 796, 1465, 757, 2357, 246, 244, 2099, 2030, 1171, 722, 797, 0, 744, 102, 1567, 1928, 613, 1106, 1950, 684, 111, 1358, 1221, 1334, 1029, 2112, 489, 2500, 738, 1690, 311, 766, 1856, 2096, 316, 2573, 1810, 1337, 2582, 1986, 1852, 72, 2162, 158, 581, 30, 1799, 215, 176, 1645, 1013, 1335, 318, 1874, 216, 748, 836, 255, 1665, 501, 823, 699, 357, 697, 563, 1341, 1894, 626, 170, 1299, 935, 2316, 729, 1075, 61, 179, 784, 1224, 1875, 2117, 180, 1241, 1873, 465, 1507, 551, 1999, 1869, 1870, 346, 197, 686, 1846, 1871, 2475, 1862, 1100, 1865, 2568, 1121, 143, 547, 1636, 387, 554, 1877, 869, 228, 2141, 201, 118, 343, 1655, 1864, 203, 1203, 1858, 510, 2139, 2327, 2309, 490, 1343, 1538, 2155]",1,16.0,18.0,300.0,0.6868537666174298,, -"[794, 863, 766, 1889, 1191, 316, 1807, 1301, 2339, 2338, 86, 38, 157, 1297, 845, 48, 1046, 2043, 33, 1347, 1820, 180, 2034, 1299, 2041, 1385, 2336, 2412, 523, 143, 547, 1185, 637, 1810, 1325, 818, 827, 2555, 383, 482, 1662, 1107, 1474, 169, 327, 429, 230, 2063, 746, 1018, 2301, 1738, 1603, 598, 1858, 1100, 1997, 2340, 1692, 733, 2326, 1481, 1066, 1669, 1493, 165, 314, 1701, 1739, 1542, 1473, 8, 1558, 2040, 2253, 1003, 1051, 2335, 2120, 306, 849, 2064, 1999, 603, 109, 1781, 197, 2259, 298, 1666, 1870, 2004, 1358, 1992, 1873, 2045, 218, 1996, 1864, 1522, 480, 869, 1346, 574, 2121, 2365, 553, 1894, 2378, 1709, 2025, 1505, 935, 1994, 2006, 1197, 1570, 1519, 1912, 879, 1583, 1704, 231, 705, 1812, 1998, 576, 1095, 91, 24, 1295, 1995, 968, 2008, 445, 503, 2348, 1697, 2381, 2009, 416, 521, 2000, 2379, 985, 1574, 2477, 285, 2325, 1993, 519, 1171, 232, 330, 1875, 267, 1488, 396, 745, 1446, 1618, 191, 387, 792, 2077, 627, 1876, 1014, 
695, 1453, 1670, 366, 1149, 1119, 743, 1445, 201, 277, 2303, 397, 215, 899, 160, 335, 1987, 1859, 2044, 1239, 476, 1127, 2509, 1434, 1991, 1023, 570, 206, 1055, 1140, 2691, 196]",1,16.0,20.0,250.0,0.6454948301329394,, -"[189, 1620, 1139, 454, 359, 52, 1535, 1583, 446, 1262, 581, 1131, 1412, 236, 1515, 2233, 822, 525, 969, 2131, 2133, 905, 2010, 2172, 1735, 1507, 1683, 903, 1284, 1637, 842, 554, 2153, 389, 415, 280, 2016, 95, 2457, 850, 1282, 2072, 993, 1527, 858, 742, 1333, 1143, 2180, 20, 176, 1343, 1538, 586, 2310, 490, 702, 2182, 2295, 773, 1224, 2294, 682, 1505, 1203, 1917, 891, 2155, 1974, 2278, 2031, 2450, 2198, 754, 958, 749, 2050, 1074, 74, 115, 2231, 2649, 1838, 989, 181, 2293, 306, 1483, 2189, 239, 1792, 2282, 562, 1851, 118, 2165, 1013, 973, 1501, 1072, 704, 712, 1743, 1351, 1118, 1916, 1487, 1973, 2608, 1358, 2653, 1975, 1464, 1370, 2436, 2019, 2166, 2071, 1029, 231, 671, 1266, 417, 1623, 1616, 1920, 2208, 262, 1218, 277, 1090, 255, 2232, 880, 1077, 1674, 56, 1152, 666, 504, 244, 2485, 1726, 907, 1849, 1482, 1919, 210, 2030, 1147, 1598, 1402, 388, 1676, 1093, 935, 2164, 447, 412, 1675, 1359, 1251, 1368, 1779, 69, 1271, 1926, 1525, 342, 2480, 778, 2422, 15, 32, 55, 779, 1911, 1362, 1913, 1725, 945, 1592, 2109, 1372, 630, 2370, 1142, 1478, 1079, 35, 1976, 2134, 765, 1905, 573, 1249, 2367, 895, 2217, 452]",1,16.0,20.0,265.0,0.656573116691285,, -"[2017, 452, 1013, 514, 1268, 1882, 1309, 815, 1843, 2015, 211, 1394, 2178, 1623, 1644, 2110, 1463, 779, 1131, 1624, 134, 2010, 994, 2407, 2105, 2137, 1975, 109, 2318, 1288, 2016, 2045, 2034, 1344, 838, 1224, 102, 1313, 830, 1850, 1043, 350, 644, 130, 2434, 306, 277, 2012, 1525, 643, 367, 1785, 2011, 1494, 2425, 1914, 1779, 199, 162, 604, 1883, 1333, 1021, 1258, 1732, 2309, 1341, 1849, 490, 76, 1741, 1675, 841, 1045, 807, 2099, 1878, 851, 2399, 573, 1028, 1847, 1527, 696, 743, 18, 1560, 1658, 1487, 666, 406, 498, 32, 1251, 88, 465, 2014, 461, 519, 1448, 420, 97, 2401, 1314, 255, 471, 1844, 1677, 2083, 822, 2109, 
1353, 1973, 2240, 661, 375, 1701, 1869, 1421, 1174, 1976, 1526, 1676, 1981, 1885, 651, 415, 1977, 2335, 1443, 1299, 2505, 252, 205, 737, 818, 43, 2182, 937, 2013, 210, 1085, 2400, 24, 868, 1810, 724, 1637, 826, 1157, 963, 1884, 201, 1821, 479, 1912, 1875, 1100, 547, 438, 1482, 1636, 1880, 1908, 1266, 143, 1304, 165, 1864, 1870, 525, 316, 1984, 48, 529, 869, 1881, 1703, 1094, 157, 297, 505, 2707, 2260, 1474, 1180, 890, 1823, 1683, 1107, 1074, 753, 598, 1473, 1195, 854, 766, 1269, 1297, 1879]",1,16.0,20.0,275.0,0.7355982274741507,, -"[745, 1127, 743, 230, 1095, 1473, 2471, 1301, 68, 2668, 549, 45, 2707, 482, 366, 48, 165, 160, 1316, 2130, 2077, 476, 2035, 1816, 2036, 1315, 1614, 1808, 417, 855, 1812, 391, 1891, 2075, 1497, 1811, 1929, 566, 2037, 1889, 1020, 71, 224, 1002, 2076, 1331, 2041, 2138, 790, 691, 151, 1026, 2004, 141, 2042, 1807, 2039, 1817, 681, 1297, 2140, 1809, 791, 1821, 1003, 845, 86, 1822, 511, 1823, 2493, 1968, 1814, 673, 1336, 716, 792, 1386, 14, 576, 927, 795, 351, 2034, 1270, 1986, 1787, 1654, 521, 1576, 159, 306, 935, 1558, 1983, 962, 603, 1121, 1810, 420, 1106, 568, 835, 2142, 180, 1581, 1072, 460, 480, 1319, 727, 1448, 228, 17, 775, 310, 766, 1871, 722, 1434, 1567, 277, 598, 1171, 1273, 2501, 199, 1166, 1815, 316, 1542, 1107, 102, 1149, 1819, 2572, 2570, 2500, 109, 1097, 617, 1873, 1874, 2309, 2582, 719, 1950, 1062, 2045, 2398, 968, 519, 1894, 1869, 2, 49, 2040, 297, 841, 2038, 1151, 1573, 2117, 382, 2141, 899, 0, 27, 1577, 873, 1708, 1348, 2139, 1856, 185, 465, 1870, 1941, 1073, 1334, 203, 1818, 481, 1799, 1862, 1868, 1813, 2314, 1004, 1665, 1124, 669, 1820, 1337, 304, 1479, 179, 1875, 1013, 920]",1,16.0,20.0,285.0,0.6270310192023634,, -"[745, 1995, 366, 2041, 2042, 1127, 743, 2668, 2039, 391, 790, 511, 792, 1822, 460, 1166, 2004, 1020, 775, 160, 2077, 48, 549, 1807, 2075, 1823, 716, 1814, 1576, 476, 68, 1574, 2138, 482, 1448, 1558, 1991, 1497, 1989, 1889, 2002, 45, 1998, 603, 1992, 835, 2038, 2142, 1654, 2001, 2707, 1990, 1434, 1473, 2130, 
1297, 1987, 1996, 1811, 920, 1336, 382, 795, 1968, 1815, 1816, 1891, 2076, 180, 2037, 845, 2000, 1988, 935, 1808, 568, 86, 1704, 791, 1821, 480, 2500, 1149, 165, 1106, 1331, 855, 1787, 102, 1997, 14, 1316, 1002, 2471, 230, 1567, 1301, 1812, 521, 1986, 2582, 1095, 968, 2398, 681, 2040, 1121, 2140, 2034, 141, 1581, 962, 691, 722, 1994, 71, 766, 927, 1810, 316, 1817, 1273, 673, 1003, 277, 1107, 310, 519, 306, 2501, 1709, 1929, 417, 2493, 1874, 841, 598, 1819, 2036, 351, 1873, 1097, 109, 17, 1315, 2045, 1950, 2141, 1062, 617, 1894, 1809, 1004, 1871, 1171, 1270, 719, 1542, 1072, 228, 1813, 481, 49, 224, 1993, 1073, 899, 151, 1697, 566, 1999, 1983, 2117, 1614, 159, 1026, 2035, 1319, 297, 2, 1708, 199, 1869, 420, 2139, 1577, 1875, 1665, 1856, 873, 1818, 1862, 304, 465, 1857, 0, 185, 2314, 27, 1013]",1,16.0,20.0,300.0,0.6148449039881831,, -"[736, 665, 734, 482, 2380, 2197, 2063, 1602, 2543, 2246, 1535, 1628, 503, 285, 2122, 1502, 2034, 827, 2054, 1503, 1580, 1739, 1086, 2200, 2123, 2243, 2199, 157, 2283, 316, 1348, 1838, 91, 2074, 2181, 2381, 2176, 916, 456, 1014, 1820, 334, 2183, 1999, 1810, 2201, 818, 95, 2025, 574, 327, 2247, 2301, 1073, 1295, 269, 218, 2182, 1894, 1026, 2531, 2338, 693, 1382, 2207, 191, 2340, 2378, 1325, 267, 180, 879, 1812, 332, 2064, 2163, 654, 2121, 2339, 1000, 2509, 2001, 1589, 1781, 1558, 1996, 2412, 1912, 598, 979, 972, 524, 1358, 1666, 1171, 1839, 2004, 306, 2555, 1738, 935, 429, 1870, 2044, 733, 1185, 985, 1701, 1709, 2008, 1873, 1987, 109, 1191, 1583, 2045, 185, 2009, 2613, 2325, 1864, 2326, 2162, 1998, 861, 1346, 1, 2077, 2244, 2003, 101, 298, 603, 553, 869, 297, 1957, 652, 2379, 197, 383, 1704, 231, 230, 2000, 1095, 24, 1197, 695, 2365, 2348, 519, 387, 1670, 416, 232, 1992, 1997, 1995, 8, 1858, 792, 215, 849, 314, 2582, 1149, 201, 1875, 396, 277, 1127, 1445, 1991, 366, 968, 1446, 2303, 745, 2002, 1023, 281, 1303, 160, 1993, 169, 681, 445, 88, 2253, 1522, 1119, 743, 68, 1907, 1859, 1876, 335, 1988, 45, 1488, 1697, 2335, 1219, 1453, 836, 476, 
1570, 1994, 1239, 2550, 1166, 2691, 1140, 179, 1603, 1066, 1574, 2667, 673, 2668, 206, 1800, 2568, 899, 570, 2430, 2532, 460, 1347, 1434, 1986, 10, 2, 566, 1989, 391, 71, 2236, 1990, 2006, 2477, 1055, 151, 746, 493, 2238, 1454, 2005, 2007]",1,16.0,25.0,250.0,0.7234121122599705,, -"[232, 231, 444, 261, 24, 1479, 828, 1636, 515, 1868, 1864, 201, 664, 924, 1861, 441, 1986, 2289, 869, 1788, 1862, 1859, 335, 249, 1870, 157, 1858, 638, 1885, 1665, 532, 1854, 1820, 1867, 2290, 1795, 1873, 623, 1958, 1075, 2229, 767, 1860, 226, 215, 1846, 2045, 1866, 1863, 661, 1023, 2335, 160, 896, 1856, 13, 729, 2094, 1102, 143, 784, 1403, 1440, 699, 633, 415, 1884, 1207, 1701, 176, 343, 2228, 216, 2464, 547, 1453, 266, 1505, 2465, 2383, 519, 1299, 1852, 696, 1871, 384, 652, 973, 1474, 1998, 589, 1810, 306, 1799, 344, 1787, 1241, 808, 465, 1853, 1337, 563, 1772, 1857, 809, 553, 416, 151, 1015, 318, 277, 1045, 236, 271, 153, 426, 2304, 2034, 133, 593, 935, 1778, 235, 2323, 113, 1844, 158, 584, 1908, 868, 1100, 1334, 1346, 887, 1624, 1052, 467, 102, 1777, 1805, 1789, 874, 1779, 1855, 1212, 627, 1865, 1705, 1323, 470, 1780, 349, 109, 2276, 1097, 124, 205, 548, 2093, 733, 1869, 1115, 1786, 409, 389, 210, 1178, 1906, 94, 1964, 459, 2095, 1966, 1970, 451, 854, 1702, 857, 1907, 463, 1365, 442, 1196, 1529, 2309, 362, 289, 2238, 317, 1785, 2059, 2122, 2350, 1671, 1984, 420, 1336, 1971, 610, 946, 2092, 1234, 1010, 1569, 203, 22, 443, 1968, 1661, 1769, 2509, 2519, 199, 2463, 340, 1246, 1417, 1266, 529, 2260, 1419, 2155, 2237, 1530, 89, 1087, 1703, 2294, 2239, 2236, 759, 1452, 2434, 1969, 706, 39, 1141, 1653, 1967, 1240, 258, 375, 2435, 2401, 963, 152, 2645, 2400, 2399, 43]",1,16.0,25.0,265.0,0.6248153618906942,, -"[2075, 2077, 775, 1072, 417, 2707, 927, 2501, 1020, 306, 2668, 1134, 302, 2037, 165, 48, 1473, 733, 2076, 1192, 2291, 2471, 2140, 791, 1159, 722, 579, 521, 637, 1073, 1806, 203, 2142, 1301, 2534, 1316, 159, 2117, 920, 2348, 2308, 1807, 603, 1655, 665, 14, 180, 218, 298, 519, 2379, 935, 
303, 2136, 503, 1623, 1106, 171, 719, 2305, 605, 2550, 1171, 1297, 2230, 2573, 2163, 1116, 2582, 1799, 1131, 2306, 576, 102, 17, 465, 1894, 1315, 2096, 855, 2004, 1287, 1567, 1325, 1217, 316, 2034, 1062, 1410, 2314, 1818, 2500, 2143, 1577, 574, 606, 1874, 1856, 1654, 845, 669, 86, 2307, 1335, 1223, 1645, 766, 480, 1424, 598, 420, 1871, 2493, 1465, 109, 1542, 1708, 2045, 835, 2578, 0, 1948, 1787, 2360, 1817, 795, 482, 1821, 1873, 926, 962, 2398, 1121, 729, 2141, 1862, 1809, 481, 1107, 1624, 1126, 310, 199, 1950, 716, 1812, 1986, 1095, 968, 304, 1548, 27, 2139, 1867, 1869, 1877, 1855, 899, 1941, 568, 1221, 297, 277, 1124, 2138, 1823, 1348, 1808, 790, 1334, 230, 1489, 1846, 1813, 228, 1857, 1497, 2409, 416, 1815, 873, 387, 49, 141, 2243, 1870, 2605, 1013, 1241, 1003, 2613, 1172, 1573, 179, 1875, 1868, 577, 1097, 1636, 2309, 1337, 1819, 823, 1479, 1581, 1331, 143, 1004, 549, 1859, 767, 1665, 224, 2357, 261, 1864, 158, 215, 563, 335, 176, 869, 1876, 467, 829, 699, 201, 1147, 633, 1453, 311, 1852, 1858, 1865, 1023, 318, 232, 157, 1615, 1100, 1814, 547, 24, 2327, 1299, 1820, 1576, 511, 1810, 197]",1,16.0,25.0,285.0,0.629615952732644,, -"[1015, 359, 1878, 831, 1012, 1788, 1618, 389, 36, 1786, 2106, 2252, 231, 441, 773, 1784, 699, 661, 1699, 1505, 2256, 337, 1620, 1735, 905, 741, 2287, 121, 2082, 1564, 1412, 1248, 399, 2425, 1077, 2107, 317, 826, 384, 1768, 544, 695, 1070, 1197, 530, 1701, 1546, 244, 2251, 1681, 1358, 1336, 1726, 2451, 2327, 1282, 1743, 2288, 1448, 1465, 1767, 314, 1144, 1013, 1801, 704, 2081, 1199, 1774, 1030, 671, 1733, 341, 2326, 18, 2094, 1270, 603, 1560, 1852, 252, 1851, 2025, 2113, 969, 556, 2093, 742, 2382, 1791, 960, 630, 1571, 1783, 963, 2208, 589, 1089, 1622, 2096, 1725, 114, 562, 1789, 1624, 980, 387, 215, 102, 1785, 133, 115, 2322, 762, 859, 2034, 1249, 2083, 808, 1483, 1776, 20, 610, 454, 2329, 1583, 1998, 1561, 935, 505, 1781, 409, 1769, 814, 1623, 1797, 2145, 1799, 1158, 468, 176, 638, 945, 1740, 1722, 234, 1640, 880, 1289, 1142, 1313, 484, 
1651, 2318, 1661, 1804, 118, 1045, 682, 303, 1778, 416, 2045, 2143, 1871, 2095, 1609, 191, 2481, 1856, 2026, 754, 1705, 1312, 1262, 1251, 1777, 1830, 1072, 2019, 1986, 1337, 135, 2218, 836, 2086, 1335, 1773, 1311, 2504, 563, 1803, 318, 2407, 1478, 218, 1792, 1670, 973, 459, 1780, 1772, 1790, 1782, 189, 289, 1635, 1490, 1779, 1805, 1196, 2075, 1810, 581, 573, 2153, 1616, 1787, 2091, 1459, 719, 1245, 1798, 1775, 887, 406, 2046, 1367, 1258, 1574, 2087, 643, 1770, 1009, 2090, 1771, 2078, 608, 660, 139, 350, 2085, 426, 239, 2089, 2048, 519, 153, 910, 452]",1,16.0,25.0,295.0,0.7514771048744461,, -"[2280, 242, 838, 1085, 2185, 2200, 687, 1501, 2054, 436, 2395, 1174, 2228, 2109, 2359, 1421, 1295, 2199, 456, 215, 2155, 153, 891, 1500, 1580, 1396, 681, 1784, 236, 2164, 551, 1333, 1637, 1133, 210, 1602, 945, 724, 1370, 847, 2074, 1487, 2217, 702, 2649, 2457, 308, 563, 1527, 1121, 2293, 2047, 1399, 341, 2419, 1042, 600, 1628, 2048, 644, 2182, 2046, 1975, 2608, 1163, 496, 1838, 126, 1337, 1131, 280, 55, 1372, 1171, 252, 1420, 412, 1538, 306, 2394, 2181, 1909, 2422, 2283, 728, 133, 1701, 1358, 603, 88, 649, 2197, 1215, 109, 661, 318, 95, 415, 1830, 992, 2485, 2201, 2051, 1980, 2396, 1203, 2653, 816, 1979, 1787, 1930, 868, 2388, 1855, 1074, 2045, 1616, 1079, 1224, 1027, 1218, 1416, 822, 2052, 1674, 1856, 65, 56, 1572, 1905, 1655, 490, 1092, 699, 1535, 1248, 1525, 2078, 1448, 973, 734, 118, 779, 1676, 596, 504, 2282, 1848, 861, 2189, 1772, 1651, 2108, 331, 2056, 1926, 454, 1973, 1309, 1917, 736, 1791, 447, 1919, 139, 1915, 2405, 2026, 1355, 2357, 2132, 1463, 36, 1852, 778, 1849, 41, 1402, 2397, 1920, 2183, 519, 830, 1362, 1395, 1867, 604, 1776, 693, 1652, 1976, 175, 921, 2094, 325, 408, 2025, 1376, 1568, 1846, 120, 553, 2281, 1851, 2135, 1661, 1682, 1842, 1847, 1377, 1110, 510, 366, 1343, 708, 763, 2310, 1841, 1592, 1303, 2117, 2013, 1505, 1583, 1995, 1850, 1729, 2100, 334, 1127, 836, 69, 666, 32, 1974, 2418, 2276, 1843, 1972, 1625, 1675, 1644, 1482, 2134, 471, 2071, 955, 1293, 
1683, 657, 1120, 880, 1840, 1916, 1914, 376, 1351, 1844, 1403, 465, 2403, 2317, 772, 1918, 1845, 850, 1013, 908, 894, 1464, 1521, 98, 2194, 530, 907, 2278, 1539, 712, 506, 262]",1,16.0,28.0,285.0,0.6528803545051699,, -"[1282, 2324, 905, 2081, 1499, 484, 1283, 1788, 1089, 337, 1552, 441, 1015, 1735, 671, 562, 2025, 1012, 1546, 695, 969, 389, 1743, 1681, 1336, 2451, 1142, 2096, 454, 704, 118, 1790, 1851, 1459, 2325, 2327, 121, 1249, 2106, 2016, 1289, 1077, 115, 1412, 2252, 643, 409, 814, 1620, 1725, 2407, 36, 1358, 2034, 2113, 387, 1335, 1801, 1505, 1583, 530, 2075, 239, 1196, 2251, 741, 1776, 2107, 935, 277, 1618, 1740, 1258, 231, 945, 1070, 1483, 2083, 742, 2382, 317, 1571, 303, 2322, 1837, 685, 1311, 2318, 359, 1635, 1804, 1800, 1768, 1792, 630, 2153, 1313, 341, 10, 598, 399, 2026, 831, 973, 754, 20, 2288, 1616, 505, 1733, 2094, 1013, 1856, 1778, 1199, 1009, 699, 1465, 554, 1574, 859, 2000, 808, 638, 1705, 2086, 1158, 1622, 191, 103, 589, 1701, 573, 1661, 661, 1986, 1367, 581, 18, 2326, 135, 2143, 2087, 1784, 1775, 2091, 244, 1726, 1797, 2046, 773, 1722, 1448, 2287, 542, 880, 980, 476, 2080, 1072, 544, 1490, 114, 2090, 1478, 1789, 218, 2145, 2019, 1624, 1781, 1262, 1799, 252, 1670, 189, 1560, 215, 406, 836, 762, 1998, 960, 2208, 1030, 342, 1791, 2082, 2084, 1045, 1773, 1640, 1785, 416, 289, 459, 1871, 608, 1852, 519, 887, 1777, 417, 910, 719, 826, 176, 452, 314, 1197, 682, 1780, 1798, 1572, 1878, 1779, 1803, 1656, 2093, 1786, 1251, 2078, 610, 958, 2481, 2598, 139, 133, 2079, 2095, 350, 1140, 1771, 1769, 138, 308, 2085, 329, 1770, 487, 563, 1787, 1245, 1337, 1248, 655, 1623, 1802, 1810, 2045, 318, 1584, 1193, 236, 302, 1253, 1782, 2089, 2256, 603, 656, 112, 126, 124, 426, 1767, 1651, 1783, 2494, 2504, 1774, 306, 1346, 2048, 660, 102, 384, 2047, 2329, 1144]",1,16.0,28.0,295.0,0.741506646971935,, -"[2266, 2267, 2395, 666, 1616, 581, 695, 55, 1370, 56, 323, 2022, 1012, 1973, 2319, 456, 733, 2608, 2155, 447, 440, 436, 1008, 2228, 1852, 407, 2364, 373, 412, 2054, 728, 
1628, 280, 210, 891, 1580, 2405, 2649, 2291, 2321, 563, 308, 2485, 1517, 644, 544, 118, 1468, 1772, 1104, 948, 1830, 6, 74, 1402, 1137, 1118, 1701, 2333, 1602, 1025, 1501, 2055, 305, 441, 2188, 1420, 1441, 572, 2072, 1979, 2190, 1922, 2048, 2281, 1448, 718, 2457, 1395, 318, 1682, 1980, 1337, 1203, 1651, 661, 306, 153, 2046, 1525, 437, 32, 2073, 149, 2132, 2198, 725, 1482, 551, 816, 236, 945, 1047, 1079, 454, 901, 2164, 1674, 2293, 109, 1042, 2394, 1399, 1416, 1776, 1975, 498, 2276, 1481, 779, 496, 1131, 126, 1396, 1919, 1224, 1683, 95, 1248, 1655, 679, 1856, 1838, 1849, 2135, 1915, 2388, 2056, 1218, 139, 778, 702, 2078, 120, 2025, 88, 2419, 1924, 341, 699, 2217, 1535, 992, 65, 2045, 1572, 215, 2052, 1905, 1787, 30, 1867, 1538, 1850, 2418, 596, 868, 1358, 2282, 1125, 1842, 830, 1909, 1925, 1309, 485, 2094, 1343, 1421, 1376, 519, 41, 415, 734, 1923, 1851, 61, 2051, 1487, 133, 2320, 2480, 973, 1303, 2278, 624, 1917, 1074, 603, 1972, 1359, 1844, 1198, 861, 1847, 175, 1676, 2100, 1848, 408, 1568, 1592, 1110, 2071, 736, 553, 1846, 2026, 1085, 356, 1784, 1921, 712, 850, 2110, 1661, 921, 366, 1377, 1583, 21, 1995, 708, 1729, 2117, 2013, 1171, 865, 955, 1127, 1916, 1841, 2310, 1625, 2189, 693, 1120, 1652, 1840, 836, 2109, 1464, 2653, 1914, 1843, 510, 657, 2047, 325, 2317, 1372, 1644, 1675, 1293, 880, 334, 1362, 1926, 1403, 465, 2194, 2357, 376, 1791, 181, 907, 2134, 1918, 894, 471]",1,16.0,28.0,300.0,0.6894387001477105,, -"[1494, 897, 342, 1072, 512, 195, 231, 737, 1980, 415, 945, 2052, 2187, 733, 2263, 506, 668, 816, 1908, 544, 1692, 1628, 164, 718, 96, 1905, 1907, 2490, 1614, 1472, 2350, 172, 498, 935, 1042, 1174, 2145, 2164, 1416, 1979, 175, 240, 176, 2217, 239, 306, 1082, 2116, 880, 454, 1649, 2309, 2034, 483, 65, 252, 88, 596, 1626, 2016, 41, 1512, 1417, 1152, 94, 299, 1966, 2022, 1333, 2418, 69, 277, 256, 210, 2019, 1527, 2357, 621, 2355, 1978, 1671, 973, 74, 2018, 1115, 2021, 1156, 2386, 1118, 2282, 1517, 1268, 1985, 756, 1983, 1309, 2186, 1358, 1927, 436, 1906, 
1178, 818, 1013, 2178, 1529, 1469, 1914, 1973, 1652, 118, 76, 1142, 934, 1661, 630, 2295, 1218, 1376, 424]",1,18.0,12.0,250.0,0.5985967503692762,, -"[1702, 2238, 1234, 794, 863, 1871, 1235, 210, 1703, 1671, 770, 2118, 102, 835, 1810, 962, 2295, 229, 2126, 1405, 1797, 2117, 482, 1701, 733, 94, 2128, 1251, 22, 1846, 2355, 261, 1215, 1149, 1655, 549, 55, 1966, 306, 1111, 897, 248, 1462, 695, 1012, 2471, 1986, 622, 33, 788, 1072, 1697, 1508, 870, 2125, 790, 1614, 1479, 1611, 2129, 1669, 1493, 1894, 2001, 1968, 1952, 2122, 2044, 2335, 38, 2206, 1119, 1964, 775, 196, 88, 2120, 1665, 2357, 1203, 1303, 293, 1264, 1812, 2033, 417, 2531, 2356, 2205, 186, 1608, 1807, 443, 2075, 2124, 1615, 429, 1273, 833, 1654, 2040, 2494, 1448, 1336, 2032, 1031, 505, 740, 1452, 86, 1002, 1929, 224, 2045, 1531, 2077, 2041, 48, 2043, 1385]",1,18.0,12.0,265.0,0.5960118168389956,, -"[1257, 1224, 1071, 74, 836, 1135, 2268, 2002, 2155, 383, 428, 377, 2249, 530, 2007, 2001, 1583, 1015, 2008, 1843, 2000, 1995, 1062, 2250, 1661, 1265, 348, 471, 2003, 2301, 996, 2291, 1160, 401, 1644, 1992, 1625, 1844, 2474, 1464, 1558, 1675, 1574, 1545, 519, 1453, 743, 673, 792, 1709, 1255, 1729, 366, 1451, 1194, 1192, 678, 181, 217, 1873, 1120, 745, 586, 794, 160, 2248, 1366, 506, 1379, 1013, 1704, 368, 510, 1452, 566, 450, 1987, 1870, 2035, 2011, 1842, 1840, 733, 1127, 1682, 218, 2424, 1998, 1403, 1859, 2004, 473, 2304, 2263, 1531, 2009, 1907, 856, 334, 1294, 1264, 1986, 1988, 1149, 1841, 1875, 460, 1293, 2647, 2335, 243, 1994, 1138, 1989, 2303, 1701, 194, 2564, 1273, 1817]",1,18.0,12.0,275.0,0.6129985228951256,, -"[391, 68, 519, 878, 1838, 1655, 2001, 1131, 1880, 1993, 2180, 2003, 1981, 2002, 1994, 1121, 2, 2183, 1704, 2133, 1995, 1848, 1990, 1998, 753, 885, 2611, 366, 1515, 1876, 1574, 1709, 1671, 950, 2172, 2000, 1989, 553, 718, 968, 2054, 1992, 2027, 95, 460, 1628, 1495, 1697, 1558, 792, 456, 1039, 2072, 476, 1149, 1139, 1227, 1224, 71, 566, 490, 215, 1972, 578, 1991, 2009, 1999, 1127, 1239, 2004, 151, 1358, 
1980, 1987, 232, 1973, 2136, 1986, 437, 661, 1314, 433, 1269, 816, 2233, 890, 1978, 2131, 436, 52, 438, 45, 2232, 745, 142, 2182, 1929, 1859, 1812, 1332, 1540, 1652, 32, 2024, 651, 1166, 525, 483, 1875, 424, 621, 504, 2181, 1396, 160, 1974, 1013, 2057, 514, 725, 1985, 2227, 2228, 858, 2231, 666, 1399, 2395, 1421, 1869, 191, 733, 2005, 1979, 2137, 335, 1420, 1870, 1023, 2309, 277, 1259, 2071, 1453, 818, 1912, 347, 179, 1997, 55, 423, 899, 201, 2396, 724, 2034, 2394, 681, 1115, 1625, 471, 748, 1850, 673, 1977, 1095, 1630, 2020, 1085, 1873, 1996, 1529, 1434, 1988, 1644, 356, 60, 408, 973, 2019]",1,18.0,18.0,265.0,0.6846381093057607,, -"[456, 1729, 1580, 2001, 1142, 1625, 1537, 2093, 2003, 1455, 181, 1464, 671, 1986, 2380, 935, 2199, 880, 1644, 2347, 836, 2326, 90, 68, 567, 34, 1911, 277, 2184, 565, 831, 908, 30, 2270, 728, 1011, 110, 70, 2269, 101, 1045, 1293, 563, 487, 53, 1337, 1851, 1848, 2375, 1120, 695, 2388, 2046, 510, 1842, 1583, 1610, 73, 519, 454, 1346, 1661, 176, 2079, 2087, 1512, 1403, 1841, 2088, 1509, 2045, 2078, 1843, 2048, 973, 129, 630, 69, 1013, 2091, 945, 1773, 1670, 1046, 1470, 2123, 682, 441, 1572, 318, 1358, 1193, 741, 426, 1772, 471, 1776, 91, 655, 1167, 544, 1782, 1789, 1871, 1245, 2080, 476, 59, 126, 1262, 1009, 1998, 1282, 20, 289, 244, 252, 236, 1070, 139, 1705, 103, 112, 1797, 2494, 72, 581, 1035, 1770, 153, 1840, 608, 109, 1846, 484, 342, 910, 308, 1733, 102, 699, 542, 564, 1856, 1651, 2122, 1798, 1675, 1849, 189, 2095, 440, 74, 2052, 1775, 1802, 1490, 1158, 306, 1787, 1779, 138, 1144, 2085, 506, 1847, 1829, 1780, 1777, 660, 1725, 1483, 417, 1845, 191, 573, 887, 1623, 1251, 2019]",1,18.0,18.0,275.0,0.6779911373707533,, -"[1567, 2500, 1807, 733, 722, 603, 1072, 1818, 2230, 2139, 823, 962, 1062, 218, 401, 1316, 2140, 1287, 417, 1217, 2291, 519, 1065, 606, 420, 2578, 306, 316, 1508, 481, 2501, 1098, 766, 2360, 230, 109, 2117, 1986, 465, 577, 1821, 176, 1808, 1787, 770, 1335, 1708, 1874, 17, 792, 1171, 1405, 277, 935, 1107, 2045, 482, 1465, 
2067, 199, 1172, 2034, 1106, 180, 1873, 795, 1812, 1013, 1934, 1097, 102, 2568, 2534, 27, 2307, 2065, 2141, 1799, 1633, 598, 14, 1823, 1817, 2493, 1348, 2573, 387, 2066, 1095, 179, 716, 873, 1655, 54, 1809, 1876, 40, 397, 1096, 2582, 2601, 304, 1294, 2118, 790, 1023, 2314, 1004, 1856, 1869, 224, 1932, 0, 1159, 467, 549, 1149, 1875, 1636, 229, 536, 918, 1868, 1855, 1871, 1846, 1870, 1488, 2357, 507, 1215, 1665, 143, 1147, 24, 215, 1894, 1301, 157, 1857, 511, 1121, 1477, 1867, 1337, 1877, 1859, 1453, 318, 1862, 335, 201, 1933, 1822, 563, 416, 158, 197, 2430, 719, 203, 254, 228, 729, 669, 767, 1865, 1211, 551, 231, 232, 1864, 699, 2508, 1124, 899, 1315, 2327, 570, 2309]",1,18.0,18.0,295.0,0.6037666174298375,, -"[2109, 1085, 2395, 2054, 1973, 1421, 2181, 2074, 724, 1986, 490, 663, 504, 2201, 1535, 2155, 1042, 2396, 1602, 2153, 175, 2349, 1396, 1469, 1133, 2590, 594, 2199, 838, 2480, 2185, 2046, 1580, 2096, 1916, 2228, 1420, 2200, 2197, 1975, 318, 456, 600, 1526, 1592, 2182, 586, 1628, 1500, 1333, 2154, 2388, 763, 563, 1395, 1791, 955, 436, 779, 596, 1291, 935, 215, 1399, 41, 1660, 2048, 1362, 242, 1852, 2394, 1121, 1732, 666, 32, 1838, 2051, 2108, 160, 252, 1915, 2397, 1498, 2052, 220, 1784, 907, 1525, 778, 95, 236, 2016, 308, 2170, 441, 1072, 176, 745, 1856, 630, 2282, 306, 868, 682, 1587, 938, 1482, 1370, 1830, 1919, 2156, 1701, 1538, 2278, 2195, 1351, 1131, 139, 451, 1682, 847, 1239, 1416, 1080, 2071, 1142, 699, 2485, 2009, 921, 1174, 366, 1995, 1127, 487, 2019, 519, 1905, 1203, 88, 712, 693, 2134, 1337, 1683, 973, 1359, 1224, 2422, 1776, 109, 277, 1309, 2189, 816, 1772, 1914, 1655, 1980, 1979, 2045, 2653, 153, 604, 2026, 1926, 126, 1787, 1357, 415, 1572, 2078, 2027, 1027, 1358, 736, 734, 1377, 2094, 1930]",1,18.0,18.0,300.0,0.629615952732644,, -"[213, 1593, 144, 2192, 145, 1165, 1647, 495, 537, 92, 2622, 1327, 898, 1836, 108, 2209, 1504, 1835, 2161, 1328, 2157, 1698, 2160, 2158, 23, 2159, 144, 1593, 1165, 145, 2192, 537, 1504, 1327, 495, 2209, 92, 213, 2161, 1647, 
23, 144, 1593, 1165, 145, 2192, 537, 1504, 1327, 495, 1836, 92, 2209, 2161, 1698, 1647, 1328, 898, 2158, 1835, 23, 144, 1593, 1165, 145, 2192, 537, 1504, 1327, 495, 1836, 92, 2209, 2161, 1698, 1647, 1328, 898, 2158, 1835, 23, 144, 1593, 1165, 145, 2192, 537, 1504, 1327, 495, 1836, 92, 2209, 2161, 1698, 1647, 1328, 898, 2158, 1835, 23, 144, 1593, 1165, 145, 2192, 537, 1504, 1327, 495, 1836, 92, 2209, 2161, 1698, 1647, 1328, 898, 2158, 1835, 23, 144, 1593, 1165, 145, 2192, 537, 1504, 1327, 495, 1836, 92, 2209, 2161, 1698, 1647, 1328, 898, 2158, 1835, 23, 144, 1593, 1165, 145, 2192, 537, 1504, 1327, 495, 1836, 92, 2209, 2161, 1698, 1647, 1328, 898, 2158, 1835, 23, 144, 1593, 1165, 145, 2192, 537, 1504, 1327, 495, 1836, 92, 2209, 2161, 1698, 1647, 1328, 898, 2158, 1835, 23, 144, 1593, 1165, 145, 2192, 537, 1504, 1327, 495, 1836, 92, 2209, 2161, 1698, 1647, 1328, 898, 2158, 1835]",0,18.0,20.0,250.0,0.087149187592319,, -"[698, 2123, 665, 1127, 588, 745, 286, 661, 2229, 2000, 1885, 160, 1522, 2040, 2003, 332, 2004, 2120, 2008, 1558, 924, 91, 2383, 2228, 1337, 1051, 1705, 792, 743, 1772, 2007, 133, 266, 1995, 2009, 673, 1166, 2001, 389, 1958, 1346, 1777, 476, 113, 1859, 681, 415, 1697, 1778, 2122, 1805, 1532, 563, 215, 1884, 2045, 442, 809, 176, 1453, 2121, 1574, 1992, 335, 1787, 366, 45, 232, 426, 1987, 306, 201, 973, 1045, 1095, 344, 71, 2034, 1661, 566, 2259, 409, 1789, 2066, 519, 935, 1701, 124, 391, 868, 548, 2005, 1986, 899, 1519, 2, 1015, 733, 1780, 441, 277, 638, 1993, 1907, 1991, 39, 1876, 1812, 2006, 1875, 451, 1998, 808, 2002, 1810, 1908, 1989, 1481, 109, 593, 289, 1873, 1474, 1624, 2362, 68, 318, 236, 2093, 1999, 2335, 460, 249, 384, 153, 2094, 2357, 1569, 1870, 1505, 196, 2276, 1788, 1779, 2092, 1434, 887, 1709, 1023, 1385, 1996, 1196, 94, 1149, 1634, 1349, 1419, 1988, 271, 610, 2323, 1964, 1965, 1097, 1240, 179, 210, 1906, 854, 1966, 470, 2509, 1704, 1178, 589, 1115, 968, 1994, 102, 463, 1529, 857, 1968, 1702, 1997, 459, 1990, 627, 1869, 1703, 205, 1417, 
1336, 443, 1970, 2336, 1786, 1010, 1234, 789]",1,18.0,20.0,295.0,0.6827917282127031,, -"[306, 1535, 171, 109, 2117, 261, 2045, 1351, 770, 1548, 1950, 1223, 218, 1416, 2293, 298, 1508, 1116, 603, 1382, 1807, 1665, 2360, 302, 733, 1701, 519, 2291, 665, 1497, 503, 1118, 303, 1325, 180, 1479, 598, 2605, 310, 1217, 2078, 606, 1572, 146, 2143, 1927, 747, 2118, 2348, 1645, 1787, 1204, 74, 280, 1192, 1624, 1907, 1405, 577, 2578, 1986, 1815, 1287, 719, 576, 1348, 120, 229, 575, 2163, 795, 568, 2294, 1299, 790, 1983, 2025, 2230, 536, 2034, 1095, 1049, 2307, 1821, 1894, 2405, 239, 1343, 2154, 2613, 643, 1152, 979, 2283, 1424, 1919, 1149, 1370, 1013, 230, 482, 27, 118, 149, 779, 13, 1583, 1818, 574, 1526, 1062, 481, 1823, 778, 962, 1817, 2152, 716, 1107, 549, 2164, 2379, 483, 1839, 816, 868, 1869, 1660, 1820, 1291, 1851, 1110, 1980, 1623, 1930, 1525, 2155, 1587, 2243, 382, 1975, 2453, 1973, 2282, 2099, 1812, 897, 1979, 1538, 1806, 2156, 2424, 586, 2305, 2296, 1848, 86, 1331, 1813, 1842, 1143, 1581, 270, 1879, 1224, 2308, 279, 823, 1981, 1341, 605, 2281, 829, 1909, 1489, 1977, 2409, 2550, 838, 1576, 836, 1203, 2280, 1156, 11, 2135, 1822, 885, 461, 2153, 1809, 1814, 2151, 2295, 511, 1816, 1654, 878, 1004, 224, 510, 211, 1039, 1215, 1080, 1655, 490, 993, 749, 539, 1171, 1811, 682, 1222, 1494, 1732, 1133, 1134, 579, 1808, 1952, 1810, 1567, 708, 242, 424, 436, 1121, 1410, 2105, 1309, 2403, 1819, 1463, 1131, 2384, 826, 2185, 835, 1500, 2136, 351, 433, 668, 498, 737, 1332, 1978, 1982, 1671]",1,18.0,25.0,250.0,0.7514771048744461,, -"[669, 1124, 306, 1171, 1346, 2359, 109, 2002, 1095, 1810, 2001, 1894, 199, 2000, 2372, 1072, 2007, 1149, 2568, 605, 2003, 687, 1335, 836, 363, 2534, 2314, 1687, 1708, 1666, 203, 1670, 2379, 54, 873, 716, 1992, 2117, 1995, 2096, 176, 1013, 2582, 1127, 962, 2034, 745, 1856, 2045, 1812, 179, 2493, 1998, 603, 1869, 681, 160, 1172, 743, 197, 1434, 14, 185, 476, 1662, 1159, 1126, 304, 151, 2008, 2206, 0, 2, 2706, 180, 2471, 729, 767, 1855, 2243, 1107, 1799, 
2205, 1823, 1075, 2004, 13, 519, 926, 1987, 1147, 1950, 1704, 2357, 310, 1465, 2009, 391, 1862, 1073, 2217, 1003, 1820, 1479, 960, 1603, 1337, 2041, 1221, 920, 261, 1166, 318, 71, 480, 1697, 2327, 1710, 1821, 563, 1023, 1846, 68, 366, 467, 1574, 1857, 158, 48, 387, 311, 1615, 699, 1413, 1865, 877, 1665, 1300, 1988, 231, 1299, 1426, 1986, 1935, 1994, 1577, 1031, 812, 1558, 1241, 792, 1867, 1941, 1873, 402, 2607, 251, 1334, 1709, 1989, 1993, 784, 990, 1340, 1852, 1936, 1940, 460, 1607, 45, 776, 1176, 1177, 2707, 219, 1373, 1473, 1999, 872, 548, 165, 316, 899, 215, 343, 1641, 633, 968, 1931, 157, 338, 1868, 1943, 551, 1997, 335, 297, 714, 1996, 944, 1542, 205, 2698, 2138, 232, 637, 1858, 2005, 1990, 2006, 673, 566, 1870, 1991, 1864, 1212, 1866, 1573, 2317, 1344, 2011, 1876, 1453, 845, 2141, 226, 766, 25, 1701, 1863, 1859, 2139, 869, 2142, 24, 547, 235, 532, 1875, 1636, 918, 1297, 416, 1477, 598, 1939, 874, 1942, 216, 201]",1,18.0,25.0,275.0,0.595642540620384,, -"[1798, 519, 1193, 1705, 1656, 2045, 1998, 1856, 1782, 2048, 1986, 1651, 318, 1772, 1779, 1158, 778, 1787, 1770, 133, 958, 1797, 1804, 426, 1337, 655, 905, 2046, 487, 112, 603, 779, 452, 2080, 630, 910, 236, 1805, 563, 1072, 1701, 1799, 1346, 2285, 139, 459, 1624, 1483, 1184, 1905, 1362, 704, 329, 126, 1367, 1775, 1572, 476, 1918, 1781, 1584, 153, 1245, 887, 350, 610, 2085, 1820, 1919, 1079, 608, 2450, 124, 1592, 682, 103, 302, 506, 308, 1802, 2084, 417, 2016, 1743, 306, 1370, 1551, 1464, 1789, 542, 1485, 2101, 2153, 2078, 921, 109, 1873, 115, 1077, 2286, 1490, 138, 2284, 359, 676, 389, 1640, 596, 2100, 1914, 2235, 1359, 1674, 175, 2388, 1785, 1525, 656, 1009, 41, 973, 1916, 1920, 451, 554, 69, 1527, 719, 604, 1142, 294, 803, 1915, 88, 1773, 1549, 955, 1675, 1251, 1676, 857, 119, 1179, 2095, 1955, 176, 1978, 470, 1097, 1583, 118, 2269, 1415, 621, 880, 1901, 392, 645, 2189, 2019, 203, 581, 1917, 252, 277, 1539, 1355, 854, 1956, 2274, 1771, 1619, 102, 1537, 2630, 671, 1824, 289, 2241, 1769, 979, 1957, 514, 1414, 
1849, 1661, 249, 1226, 1826, 371, 1309, 2190, 2093, 454, 2022, 747, 2094, 573, 807, 1553, 1882, 2375, 479, 1168, 2092, 2184, 720, 67, 1354, 113, 1045, 540, 70, 441, 1444, 1880, 1455, 2273, 1182, 1825, 935, 1183, 344, 830, 516, 2527, 1269, 1884, 2528, 753, 2256, 1878, 2323, 448, 1579, 490, 47, 2188, 193, 1954, 472, 163, 2029, 1902, 129, 264, 2275, 440, 1885, 559]",1,18.0,25.0,285.0,0.6477104874446086,, -"[1702, 463, 180, 775, 2063, 2123, 2025, 306, 1325, 2045, 191, 1964, 197, 2348, 1234, 1864, 2339, 2380, 1346, 2385, 91, 298, 158, 10, 2356, 327, 2340, 2077, 869, 1224, 1583, 1569, 109, 316, 1781, 285, 2238, 2326, 1014, 1670, 297, 445, 2412, 443, 2306, 1358, 1692, 185, 1446, 1703, 1603, 503, 1191, 1445, 2121, 1966, 2338, 598, 2155, 946, 416, 231, 1701, 2424, 2305, 157, 2034, 2568, 603, 1010, 1858, 2378, 1738, 1, 1171, 2381, 24, 746, 1820, 1666, 2004, 1055, 1418, 579, 1020, 396, 2076, 1558, 1739, 74, 2001, 1998, 88, 2064, 14, 675, 215, 934, 1197, 2555, 1239, 574, 985, 1278, 1295, 476, 232, 968, 2477, 681, 553, 2003, 1996, 519, 1095, 2509, 899, 2357, 2355, 2582, 1434, 2075, 179, 586, 2613, 1012, 1894, 2091, 1023, 2325, 2044, 469, 1818, 201, 2006, 2009, 1999, 1488, 230, 1709, 195, 206, 695, 1870, 1873, 362, 2365, 1995, 429, 387, 366, 2, 1810, 2379, 486, 1454, 314, 2319, 1127, 849, 1522, 1875, 2430, 2691, 1149, 1704, 1812, 1988, 2304, 68, 879, 2236, 743, 1997, 2550, 863, 1987, 570, 2035, 827, 309, 2302, 2011, 818, 151, 1218, 277, 994, 1453, 335, 1876, 38, 169, 78, 745, 935, 1166, 2253, 407, 1859, 332, 1991, 948, 1697, 2335, 1119, 792, 481, 1907, 2000, 71, 2320, 267, 1682, 160, 465, 2002, 482, 962, 1993, 8, 1065, 1140, 1347, 1800, 1348, 2321, 1994, 673, 794, 1590, 1817, 1062, 391, 2309, 1066, 104, 1096, 1574, 1185, 2667, 759, 2668, 1324, 1160, 493, 1570, 1329, 2268, 2291]",1,18.0,25.0,295.0,0.6850073855243722,, -"[486, 2667, 1966, 2668, 2490, 1703, 2263, 1020, 1251, 1767, 482, 391, 2076, 2355, 1166, 104, 1995, 792, 2000, 2357, 1783, 1065, 1782, 126, 2107, 968, 2005, 2004, 
719, 314, 505, 1096, 1987, 68, 863, 2007, 2001, 160, 1989, 1787, 1560, 1768, 745, 2119, 54, 1870, 1996, 1670, 1999, 2009, 195, 1805, 1505, 1968, 1574, 1434, 1558, 598, 94, 1417, 1993, 836, 1778, 309, 743, 460, 350, 1219, 1142, 759, 2003, 1873, 1776, 1777, 556, 2002, 1561, 151, 542, 2045, 215, 566, 1196, 1788, 112, 426, 218, 1785, 1772, 1448, 45, 102, 401, 1807, 1709, 899, 1988, 1337, 201, 1773, 36, 1876, 1812, 1649, 878, 133, 1784, 660, 675, 435, 1023, 910, 2326, 14, 1771, 1015, 1994, 887, 1986, 1780, 1623, 71, 1991, 476, 236, 139, 317, 306, 1127, 1875, 366, 589, 934, 429, 1346, 581, 781, 563, 1072, 1704, 980, 1789, 519, 452, 2092, 2594, 484, 1624, 2091, 387, 1775, 1045, 794, 1160, 1697, 586, 383, 318, 2130, 1889, 1769, 1149, 1997, 1770, 1144, 1348, 1912, 1791, 1781, 1705, 1661, 2268, 384, 2039, 109, 1891, 1774, 1382, 2006, 1894, 2094, 454, 2041, 608, 416, 681, 2042, 1992, 2, 673, 791, 691, 103, 179, 1779, 2035, 417, 124, 232, 2084, 1453, 1002, 1859, 153, 1095, 141, 335, 1990, 788, 740, 2034, 158, 2385, 1926, 1609, 1197, 1786, 1983, 2304, 1797, 2088, 1452, 69, 847, 2040, 2033, 1294, 191, 2266, 1929, 1273, 382, 733, 2072, 2036, 1818, 2037, 1665, 2038, 2008, 2267, 224, 1654, 49, 1026, 1329, 2189, 86, 1319, 2093, 1614, 862, 1270, 1497, 841, 617, 1336, 176, 289, 2095, 610, 2303, 459, 2301, 2321, 1817, 1265, 2302, 1790, 377, 1395, 2233, 1062, 2291, 2011, 1998, 678, 2032, 2295, 1192]",1,18.0,28.0,295.0,0.7167651403249631,, -"[604, 921, 1677, 88, 1131, 737, 1908, 910, 1527, 851, 181, 2332, 1914, 69, 1143, 300, 1309, 887, 1889, 1669, 80, 907, 2014, 1843, 2282, 1152, 591, 2189, 2178, 62, 2134, 1847, 1909, 1120, 1848, 2335, 1174, 1348, 1583, 1069, 1464, 818, 1274, 1675, 1625, 1224, 1015, 1846, 993, 1695, 1844, 771, 942, 1502, 1302, 2364, 2419, 1840, 1158, 2034, 1927, 1519, 916, 836, 1842, 2266, 1463, 471, 510, 415, 642, 738, 2153, 262, 1644, 2203, 1592, 2329, 2049, 1920, 1503, 1481, 1916, 524, 1403, 2295, 475, 2312, 1017, 1351, 1892, 1362, 1655, 1203, 1849, 2267, 506, 880, 
239, 2163, 1521, 276, 1080, 2293, 711, 1729, 281, 1013, 2025, 2045, 2155, 2162, 1377, 1125, 1924, 2383, 146, 2330, 539, 1117, 2620, 1156, 2499, 257, 619, 2207, 61, 2234, 55, 1759, 1279, 1073, 1358, 2485, 1635, 1220, 485, 434, 1376, 1026, 65, 1926, 118, 979, 2472, 1395, 593, 707, 2294, 2133, 731, 1923, 2219, 1006, 1293, 552, 2418, 2575, 586, 1851, 667, 2051, 1850, 305, 530, 746, 1692, 749, 543, 1845, 1285, 2059, 1769, 679, 2131, 399, 1515, 2186, 30, 966, 1661, 74, 697, 2021, 204, 1957, 2108, 763, 2502, 2531, 544, 192, 149, 2006, 1104, 2180, 572, 1925, 1972, 2343, 1388, 1588, 1540, 52, 839, 81, 1118, 1008, 2068, 1232, 901, 525, 11, 1535, 2052, 2053, 2202, 964, 2172, 858, 908, 1467, 597, 2347, 1468, 2198, 1303, 142, 2181, 280, 718, 751, 2055, 948, 423, 1839, 861, 1628, 736, 1047, 2182, 334, 965, 2183, 624, 2333, 527, 347, 2054, 1838, 437, 734, 2261, 438, 2199, 1416, 456, 1139, 2197, 1042, 693, 1275, 95, 2424, 2057, 1580, 2277, 1517, 2196, 1397, 1198, 2201, 1841, 1330, 1922, 2233, 373, 2232, 2074, 1631]",1,18.0,28.0,300.0,0.6875923190546529,, -"[2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236]",0,20.0,12.0,295.0,0.087149187592319,, -"[1676, 1675, 1583, 1120, 950, 1661, 895, 2069, 1851, 1913, 645, 1847, 2070, 1525, 1234, 1625, 1702, 1724, 1365, 1972, 1850, 1966, 1296, 181, 1495, 1971, 1723, 2134, 2050, 415, 1842, 908, 2039, 164, 1655, 2130, 955, 1846, 1370, 1985, 737, 702, 41, 1448, 1792, 441, 748, 463, 921, 1919, 1848, 1920, 1729, 175, 1969, 1973, 1918, 2238, 2485, 22, 506, 2280, 596, 1644, 1359, 1133, 1626, 1464, 2045, 440, 1844, 2185, 1121, 69, 248, 1889, 510, 1293, 1013, 787, 778, 1654, 2179, 1358, 76, 437, 2041, 1224, 490, 1909, 1538, 859, 2042, 2136, 597, 1362, 789, 880, 691, 1377, 1967, 130, 2040, 436, 768, 1895, 1914, 1970, 498, 1336, 1843, 1652, 300, 791, 252, 973, 1891, 2282, 2281, 1002]",1,20.0,12.0,300.0,0.6809453471196455,, -"[1735, 895, 1131, 440, 1358, 1913, 1623, 1726, 197, 1620, 
1538, 1701, 359, 1868, 1655, 1412, 1284, 2131, 554, 562, 1840, 389, 133, 754, 35, 842, 446, 742, 179, 2208, 1637, 2180, 2172, 989, 232, 52, 95, 2133, 1527, 2294, 388, 1333, 525, 748, 387, 415, 2070, 2010, 905, 1282, 1139, 87, 1725, 1507, 749, 490, 969, 1911, 1483, 1296, 1851, 239, 903, 231, 2016, 1910, 2069, 2450, 581, 1368, 1976, 1143, 1152, 816, 1838, 1072, 2295, 2282, 306, 1743, 118, 1249, 702, 1343, 452, 1377, 704, 2071, 2182, 1077, 2553, 32, 858, 2083, 1070, 1535, 1224, 2155, 1167, 586, 993, 2165, 1013, 2436, 74, 2031, 1973, 454, 1512, 945, 822, 1266, 666, 2233, 2232, 651, 2166, 1029, 1118, 2023, 2153, 255, 504, 1470, 1616, 1829, 1792, 1583, 2030, 1975, 280, 2231, 973, 2164, 2068, 1370, 2198, 1849, 1351, 56, 1683, 176, 447, 682, 2485, 2132, 505, 181, 55, 1515, 412, 1912, 880, 1917, 491, 1464, 1218, 778, 2050, 728, 1482, 2113, 406, 671, 277, 2293, 1919, 2072, 630, 1974, 2217, 1525, 1359, 779, 935, 1079, 1402, 252, 115, 2019]",1,20.0,18.0,250.0,0.6277695716395865,, -"[2064, 327, 157, 2045, 2063, 1858, 1701, 1325, 277, 109, 1894, 416, 869, 603, 2555, 1820, 231, 2349, 2346, 2302, 1347, 1738, 2365, 2345, 306, 8, 598, 172, 218, 1996, 1864, 879, 2338, 197, 1171, 1445, 48, 553, 1358, 1781, 1346, 1488, 24, 2325, 2339, 215, 1295, 1066, 2303, 788, 1469, 1870, 1014, 2430, 298, 1603, 1873, 1185, 2340, 383, 733, 756, 240, 1875, 792, 1191, 232, 2187, 1446, 512, 985, 1023, 482, 1709, 2412, 1739, 1789, 445, 201, 297, 570, 206, 1570, 1907, 968, 2326, 2532, 1810, 1149, 1876, 2130, 1558, 387, 1453, 335, 2034, 1890, 836, 1859, 519, 1670, 2568, 1472, 1997, 681, 2613, 1998, 1219, 1583, 88, 179, 2550, 2041, 2253, 695, 2667, 1095, 1991, 2691, 1454, 1107, 1891, 1197, 899, 1015, 1692, 2039, 1995, 160, 1812, 169, 230, 818, 45, 2186, 1336, 1666, 2668, 1662, 396, 1574, 1140, 366, 673, 291, 743, 2009, 745, 1127, 1119, 476, 391, 1434, 1514, 1987, 2040, 1697, 1704, 71, 1160, 68, 2077, 1999, 566, 285, 1481, 1889, 2582, 2238, 2, 1994, 1143, 1425, 962, 442, 1800, 1519, 1522, 191, 
617]",1,20.0,18.0,265.0,0.6144756277695717,, -"[1889, 1891, 165, 1654, 1821, 716, 2075, 1819, 141, 691, 1567, 2037, 841, 2077, 102, 2668, 1581, 1787, 2500, 568, 2036, 27, 1473, 1812, 927, 1823, 2707, 775, 1336, 2038, 1807, 1062, 521, 845, 2076, 1020, 968, 86, 1002, 48, 1315, 417, 1448, 482, 1809, 722, 835, 1817, 1811, 1815, 1301, 2138, 549, 603, 2140, 230, 719, 935, 1316, 2096, 1497, 899, 2034, 1822, 382, 351, 1273, 2501, 962, 1813, 1004, 14, 1348, 617, 2471, 576, 1106, 1810, 1003, 306, 228, 2035, 1871, 1968, 1614, 511, 1818, 180, 310, 766, 1319, 465, 791, 1121, 17, 316, 1874, 109, 2582, 598, 1095, 224, 1072, 790, 1331, 1297, 1335, 599, 2004, 1983, 1814, 2398, 1097, 2045, 920, 1808, 1073, 1465, 1576, 1950, 1986, 1856, 2117, 519, 1869, 855, 2420, 1873, 297, 795, 2493, 2309, 1929, 1542, 637, 420, 199, 1107, 0, 1858, 1894, 1799, 1026, 1013, 2139, 277, 203, 1270, 49, 480, 1816, 481, 2357, 1708, 1271, 1862, 873, 1093, 1845, 1577, 159, 1573, 1337, 1846, 1636, 1852, 1868, 2141, 1864, 15, 1479, 304, 640, 1299, 1665, 1875, 158, 1870, 179, 1172]",1,20.0,18.0,275.0,0.6100443131462334,, -"[302, 910, 1623, 1572, 308, 181, 1995, 1464, 655, 350, 656, 836, 2046, 1481, 126, 608, 581, 719, 112, 139, 153, 880, 1120, 452, 893, 487, 542, 2045, 1158, 792, 745, 103, 2383, 911, 2009, 2120, 1998, 519, 329, 2270, 554, 1875, 1583, 1666, 2006, 1519, 1558, 1675, 2374, 1848, 1987, 1812, 2238, 1851, 573, 2008, 426, 1704, 2065, 1098, 294, 334, 1403, 441, 1870, 484, 1669, 1521, 138, 887, 2375, 1709, 2269, 1661, 236, 2004, 471, 33, 1729, 2078, 1780, 160, 2079, 1849, 1662, 1777, 1784, 1477, 1127, 1015, 1847, 1610, 743, 384, 442, 1625, 1778, 1013, 2034, 366, 306, 1574, 1051, 565, 1509, 20, 2007, 1781, 968, 2615, 1842, 1798, 973, 151, 1876, 109, 2066, 2, 1779, 299, 2040, 69, 1293, 1095, 1644, 215, 1991, 1845, 2000, 638, 1705, 589, 908, 1841, 1045, 1640, 1807, 1844, 1262, 1787, 2335, 1193, 2381, 124, 1986, 1997, 2041, 627, 510, 1990, 86, 564, 1889, 665, 1107, 1385, 1840, 68, 1989, 506, 1850, 1358, 
1846, 1805, 244, 1988, 391, 189, 1843, 1996, 417, 681, 863, 476, 460, 2005, 673, 576, 794, 2094]",1,20.0,18.0,285.0,0.758493353028065,, -"[796, 346, 466, 999, 687, 156, 2501, 420, 797, 1106, 199, 764, 364, 970, 155, 1171, 1941, 0, 326, 109, 985, 2139, 819, 754, 758, 935, 2141, 154, 2500, 90, 73, 951, 722, 489, 684, 14, 102, 289, 613, 341, 277, 899, 645, 297, 580, 686, 316, 902, 962, 876, 2034, 757, 1097, 823, 34, 173, 111, 744, 957, 609, 2217, 1567, 59, 306, 1072, 321, 2314, 54, 417, 603, 519, 1013, 1708, 2582, 1038, 1894, 176, 1874, 1358, 1083, 2309, 570, 244, 873, 929, 357, 2045, 304, 1869, 170, 68, 766, 926, 978, 203, 179, 748, 2004, 2096, 2117, 1542, 1295, 708, 1862, 598, 2588, 2430, 2573, 2357, 552, 524, 767, 310, 174, 1172, 1873, 1636, 72, 1950, 1147, 1055, 2327, 1334, 1986, 919, 1810, 1856, 1665, 24, 201, 1871, 2551, 1846, 1855, 1702, 465, 1857, 626, 143, 318, 1107, 53, 228, 1337, 386, 1799, 1450, 633, 158, 1868, 197, 1858, 689, 831, 30, 1241, 2115, 563, 1479, 1054, 333, 853, 882, 1877, 699, 923, 616, 246, 337, 1186, 387, 501, 1870, 261, 1865, 467, 231, 469, 416, 1996]",1,20.0,18.0,295.0,0.6056129985228951,, -"[2399, 661, 1880, 344, 2140, 716, 17, 1314, 706, 43, 1094, 2434, 935, 363, 490, 1884, 890, 899, 1443, 2240, 1810, 603, 2706, 737, 258, 416, 1309, 1157, 1440, 570, 2430, 2139, 805, 851, 204, 1574, 1644, 1315, 753, 1868, 1373, 375, 300, 1346, 1879, 2401, 199, 1526, 1316, 1677, 1986, 1269, 2400, 420, 963, 1527, 1710, 232, 1821, 525, 1227, 88, 1785, 2243, 415, 1141, 1, 1973, 1003, 687, 1701, 2004, 1073, 2338, 1998, 854, 2206, 109, 714, 1603, 2359, 1670, 2372, 201, 2309, 2045, 1299, 1974, 1869, 1426, 2141, 2471, 24, 480, 306, 519, 2241, 1107, 205, 1301, 1636, 1630, 1823, 869, 1557, 968, 297, 966, 652, 2707, 1473, 1624, 1687, 2607, 792, 634, 350, 165, 48, 1870, 920, 1703, 1875, 1623, 1908, 547, 868, 957, 316, 2493, 836, 1014, 1864, 102, 1641, 29, 1558, 1542, 277, 1100, 650, 1666, 879, 1662, 452, 143, 1685, 2168, 1174, 504, 1191, 203, 1399, 766, 1779, 1031, 
818, 1421, 637, 1417, 157, 2205, 2041, 1483, 857, 465, 1420, 1560, 2394, 2407, 752, 1396, 2083, 598, 2379, 1251, 2396, 1178, 25, 1085, 605, 845, 593, 1246, 578, 406, 185, 2109, 1097, 573, 1126, 1313, 2276, 470, 643, 1474, 134, 2397, 2350, 1115]",1,20.0,20.0,250.0,0.71602658788774,, -"[1567, 2500, 2668, 2667, 2314, 1708, 722, 2691, 1710, 2045, 1358, 2335, 14, 206, 605, 603, 1950, 1894, 2002, 1662, 2034, 310, 109, 2001, 332, 1172, 1810, 2493, 1666, 2303, 2236, 733, 1577, 1, 2164, 417, 277, 2023, 1072, 2534, 2041, 1013, 973, 669, 687, 2582, 2388, 2309, 553, 873, 1583, 716, 199, 306, 1159, 935, 175, 2501, 1874, 2327, 45, 1171, 304, 2359, 1465, 1149, 2568, 1346, 1147, 1603, 1542, 71, 0, 2379, 570, 1106, 1316, 467, 2004, 420, 1812, 962, 151, 699, 2206, 1124, 228, 2, 2372, 2117, 2003, 2471, 1337, 1852, 311, 102, 1221, 1241, 2430, 2217, 1334, 2706, 1709, 318, 1998, 767, 1869, 363, 944, 54, 493, 1995, 1857, 548, 1095, 1865, 13, 2141, 1641, 176, 1856, 1097, 563, 1126, 2412, 729, 1823, 1799, 180, 2008, 68, 2698, 17, 1941, 1107, 158, 2191, 1127, 2238, 366, 2009, 1821, 1479, 1873, 1820, 1219, 160, 215, 1862, 1670, 1987, 1166, 2140, 2365, 2006, 261, 745, 2139, 2138, 551, 1665, 836, 1301, 920, 48, 480, 681, 519, 1855, 391, 1996, 1299, 343, 1023, 1871, 1315, 1997, 1454, 1846, 216, 25, 1615, 96, 633, 2096, 185, 968, 203, 2357, 157, 197, 476, 465, 637, 387, 2000, 1573, 1075, 1867, 743]",1,20.0,20.0,285.0,0.7023633677991138,, -"[306, 417, 1072, 2360, 17, 519, 1316, 2578, 465, 1807, 2268, 2045, 2008, 2501, 606, 2582, 2291, 1812, 603, 180, 2140, 823, 2304, 766, 2534, 722, 2141, 962, 383, 2301, 1567, 228, 2011, 2066, 0, 795, 1159, 109, 401, 14, 2034, 1682, 19, 2065, 1160, 1363, 1787, 926, 316, 1097, 1821, 27, 1986, 387, 481, 1062, 2500, 1106, 2568, 1315, 420, 1147, 1934, 2139, 1871, 482, 790, 935, 719, 866, 1287, 218, 1817, 1862, 102, 377, 1172, 794, 2309, 1856, 1192, 1124, 678, 1799, 1874, 1265, 1013, 199, 1823, 1808, 1098, 1329, 25, 1813, 304, 873, 45, 716, 179, 1814, 54, 1708, 1873, 
311, 1869, 1217, 215, 2067, 669, 1894, 981, 1865, 1933, 598, 1809, 1023, 203, 1852, 2493, 733, 318, 158, 2117, 197, 2357, 24, 862, 1335, 549, 1868, 416, 1337, 1822, 699, 1665, 2035, 507, 1636, 1939, 729, 759, 563, 253, 176, 1095, 230, 1615, 1049, 2314, 277, 1301, 899, 1870, 1875, 1348, 2303, 467, 94, 1857, 918, 1477, 1171, 1846, 1811, 1859, 1479, 1075, 1876, 201, 1453, 767, 1932, 261, 157, 2430, 335, 143, 343, 570, 2508, 633, 511, 1867, 1864, 1810, 577, 2573, 231, 232, 224, 960, 338, 1413, 1855, 1936, 869, 2327, 1055, 1877, 1299]",1,20.0,20.0,295.0,0.6148449039881831,, -"[815, 2017, 2062, 2178, 2046, 1974, 2227, 2045, 656, 1193, 2048, 139, 887, 1174, 737, 910, 1732, 103, 1527, 1798, 1847, 1245, 1630, 1651, 1705, 2015, 2090, 415, 1775, 1713, 1782, 2061, 701, 2086, 1677, 478, 890, 2085, 696, 743, 1770, 1494, 1871, 130, 1804, 610, 2088, 1802, 162, 1973, 487, 1908, 563, 126, 300, 350, 329, 1623, 542, 266, 868, 308, 1640, 1448, 2080, 76, 719, 1771, 608, 1856, 236, 2099, 1799, 1314, 88, 1780, 2013, 841, 1224, 1772, 1346, 426, 2089, 1658, 1572, 851, 61, 2394, 826, 476, 2091, 2014, 519, 2104, 2078, 1624, 1337, 1656, 1490, 1797, 318, 1072, 995, 1986, 2093, 133, 1584, 603, 138, 1269, 112, 655, 2425, 1701, 153, 277, 1309, 1789, 958, 2092, 1787, 417, 1741, 120, 2103, 735, 554, 1251, 1525, 1105, 578, 1805, 1288, 302, 306, 2060, 459, 1394, 1367, 638, 2011, 514, 1661, 1131, 1561, 581, 1998, 2094, 1781, 129, 2102, 1526, 252, 779, 1978, 1773, 1779, 2010, 102, 1551, 2450, 1919, 1977, 2153, 1583, 516, 682, 289, 452, 490, 109, 1769, 2084, 1463, 778, 2016, 498, 1625, 2528, 739, 294, 1981, 1785, 1237, 454, 124, 2105, 1158, 1338, 175, 1820, 41, 2012, 1227, 1483, 2388, 1873, 596, 1743, 1958]",1,20.0,20.0,300.0,0.6746676514032496,, -"[470, 1820, 511, 1337, 549, 869, 232, 231, 563, 1846, 1864, 1847, 854, 1299, 2045, 1440, 230, 1095, 1867, 857, 699, 576, 1873, 1665, 1636, 1870, 24, 1848, 1115, 318, 716, 1868, 1625, 835, 1334, 1863, 1121, 1241, 1521, 1852, 27, 215, 335, 532, 201, 1097, 160, 
1854, 1860, 216, 157, 1871, 664, 1661, 1464, 1851, 1856, 1850, 553, 874, 88, 1583, 197, 1224, 1866, 343, 1853, 1701, 1855, 143, 1928, 226, 2034, 1810, 1729, 1479, 633, 1872, 1100, 973, 1212, 767, 224, 13, 790, 784, 1120, 1309, 1875, 261, 1986, 568, 203, 1052, 962, 181, 2357, 158, 1791, 1677, 729, 1004, 836, 1874, 1859, 1877, 1843, 1013, 1876, 880, 482, 277, 1453, 1998, 351, 2305, 1023, 963, 2317, 1858, 1278, 695, 696, 2306, 1323, 863, 908, 1844, 1912, 1799, 54, 382, 1861, 1704, 38, 995, 69, 1338, 309, 1703, 2102, 1075, 547, 2152, 1293, 486, 2189, 1857, 429, 948, 2103, 1845, 1644, 1818, 506, 994, 25, 586, 1862, 235, 416, 467, 1012, 530, 2534, 481, 2011, 2667, 191, 1865, 407, 2319, 205, 1107, 1675, 795, 719, 2309, 435, 2668, 1065, 1849, 1348, 1842, 1841, 1840, 1020, 334, 510, 471, 1590, 2076, 1869, 1219, 792, 1160, 2268, 1817, 78, 37, 1692, 2035, 1329, 1966, 2263, 383, 1096, 2428, 1997, 1403, 862, 104, 1062, 1682, 2490, 759, 2427, 2291, 2355, 45, 794, 218, 2385, 2302, 2292, 1265, 781, 199, 2008, 675, 715, 401, 377, 1324, 608, 2119, 420, 94, 352, 934, 2091, 2233, 1382, 2594, 2412, 1192, 2304, 1294, 2303, 2320]",1,20.0,25.0,265.0,0.6237075332348597,, -"[779, 1583, 1370, 1013, 490, 1919, 738, 778, 2162, 1309, 711, 1395, 1525, 1851, 1914, 438, 1285, 334, 1526, 2261, 693, 146, 2154, 1975, 1841, 2266, 2312, 1080, 2357, 702, 667, 1341, 736, 1291, 436, 2180, 2131, 1333, 1079, 2355, 2156, 1655, 2509, 1463, 2343, 1616, 1660, 1695, 1388, 1927, 1682, 718, 1089, 1293, 2059, 763, 61, 408, 257, 539, 1839, 2232, 2025, 1131, 2499, 2310, 1275, 733, 80, 437, 30, 133, 1358, 55, 2519, 1587, 1224, 1623, 269, 415, 1966, 1474, 734, 386, 2472, 2197, 2383, 1225, 2202, 1104, 1468, 1732, 827, 276, 1110, 242, 109, 697, 604, 771, 1848, 2311, 2231, 2588, 2072, 2114, 195, 2364, 2189, 149, 1156, 69, 1351, 1920, 619, 94, 2181, 2295, 2418, 1218, 1158, 2183, 1117, 934, 847, 2422, 2490, 1303, 399, 315, 52, 2133, 2576, 964, 2056, 321, 239, 1635, 2182, 749, 1450, 347, 1152, 2049, 751, 95, 2152, 597, 1769, 
1789, 2263, 1618, 65, 2653, 2575, 2021, 305, 1923, 2107, 454, 1921, 1924, 1467, 1649, 456, 2116, 1680, 675, 423, 525, 429, 1535, 274, 2053, 572, 1008, 993, 2172, 1143, 586, 1204, 2151, 682, 1838, 1222, 1580, 1425, 2074, 1083, 2001, 2233, 861, 1139, 858, 2186, 2277, 2155, 756, 142, 2200, 434, 2153, 965, 1692, 1015, 1602, 2262, 1926, 543, 1330, 2336, 1385, 1628, 1519, 2199, 1515, 2293, 544, 2335, 1669, 280, 942, 1481, 100, 698, 2332, 1540, 2347, 1892, 2201, 2052, 1416, 2294, 1203, 2198, 2054, 2055, 74, 901, 1788, 627, 151, 2259, 1452, 2419, 1068, 1279, 1922, 731, 2057]",1,20.0,25.0,275.0,0.6266617429837519,, -"[415, 1174, 2227, 1677, 1908, 851, 910, 300, 1782, 1527, 1623, 162, 608, 2178, 701, 2017, 1775, 426, 2394, 815, 1630, 266, 2045, 139, 103, 2078, 1797, 1787, 2425, 1772, 1494, 737, 1798, 1251, 1770, 958, 2084, 2099, 1805, 2088, 1799, 2062, 452, 1651, 2085, 1781, 2011, 2014, 350, 581, 2048, 1847, 519, 2012, 1974, 1656, 1314, 1789, 2080, 573, 318, 1584, 130, 126, 1561, 487, 1490, 1780, 112, 887, 655, 1288, 890, 329, 1227, 133, 1551, 2046, 1448, 153, 236, 1309, 129, 1072, 76, 1802, 88, 1973, 1367, 542, 2013, 868, 1640, 498, 1871, 1771, 2105, 2087, 2015, 1779, 1978, 417, 2086, 1804, 563, 743, 1269, 478, 1741, 1245, 1769, 1224, 308, 459, 719, 1998, 1856, 1701, 1624, 120, 1337, 138, 1986, 2089, 124, 603, 1732, 306, 2010, 1705, 841, 1394, 61, 514, 1525, 1526, 2029, 2090, 1773, 132, 1131, 739, 610, 102, 656, 393, 1237, 302, 779, 696, 2095, 1193, 1919, 109, 578, 1346, 1977, 2104, 252, 682, 1658, 1572, 490, 735, 2153, 1583, 2016, 778, 2450, 1022, 2093, 2094, 249, 454, 476, 479, 1785, 277, 1543, 1981, 1713, 1158, 1483, 175, 516, 1820, 2388, 2091, 554, 1958, 41, 1917, 596, 1592, 704, 826, 1463, 470, 69, 1920, 1914, 1873, 1441, 1918, 1676, 955, 1675, 1661, 1359, 857, 1097, 1464, 441, 359, 506, 1743, 1881, 2061, 973, 289, 604, 2060, 1916, 1915, 176, 1954, 590, 753, 671, 2275, 2184, 1362, 70, 1105, 638, 1674, 1849, 1905, 1880, 440, 1485, 1539, 1079, 921, 1537, 1077, 2644, 
1957, 905, 1128]",1,20.0,25.0,300.0,0.7138109305760709,, -"[52, 519, 1628, 109, 2172, 1139, 735, 1869, 391, 885, 2057, 2181, 2054, 142, 306, 2099, 553, 2309, 2162, 1955, 95, 1958, 456, 201, 366, 1039, 1956, 878, 2009, 438, 2227, 460, 2183, 45, 739, 2, 816, 1131, 215, 1543, 2611, 1399, 68, 408, 1227, 1980, 2405, 2027, 1495, 1540, 1848, 1080, 347, 476, 826, 2397, 1995, 1285, 239, 2133, 718, 2198, 451, 1226, 1655, 102, 2105, 232, 666, 1914, 1127, 1985, 525, 1526, 651, 1842, 2312, 2228, 1121, 2180, 2131, 1239, 1986, 277, 1515, 1977, 2396, 2022, 1045, 1115, 1224, 61, 1571, 745, 335, 1881, 179, 71, 1972, 2182, 490, 604, 1416, 854, 1973, 681, 725, 483, 1396, 160, 733, 2000, 1463, 2024, 1671, 578, 807, 437, 950, 2256, 973, 1979, 1652, 1838, 436, 2395, 1421, 423, 1420, 634, 1332, 1667, 504, 1269, 151, 1929, 2019, 1259, 935, 1013, 1630, 1981, 120, 2385, 1268, 32, 2136, 1884, 621, 1729, 1974, 1879, 1529, 2394, 858, 1773, 1358, 323, 2071, 1850, 566, 2020, 1873, 1999, 830, 55, 433, 673, 2231, 593, 2233, 792, 249, 356, 724, 2137, 1085, 2323, 424, 1878, 753, 344, 842, 191, 2018, 471, 2016, 2072, 2232, 1978, 748, 2103, 2102, 211, 743, 1338, 995, 1880, 2034, 890, 2116, 818, 668, 1566, 1982, 389, 1027, 1625, 113, 1732, 1178, 1912, 1908, 1843, 498, 1487, 1474, 2644, 1644, 118, 161, 1847, 2100, 2178, 1314, 737, 1344, 479, 1885, 1394, 1570, 25, 1174, 478, 76, 441, 2450, 1658, 60, 696, 600, 1677, 815, 2017, 2011, 130, 1713, 415, 514, 571, 2467, 2010, 162, 1741, 1494, 661, 2015, 97, 1668, 841, 2013, 1527, 1898, 1309, 331, 2274, 1883, 2104, 860, 88, 1353, 1696, 851, 2593, 300, 204, 516, 966, 2014, 272]",1,20.0,28.0,285.0,0.732274741506647,, -"[2280, 2252, 2189, 2155, 242, 121, 2251, 2293, 1882, 252, 69, 1464, 1655, 1395, 822, 1769, 838, 1402, 893, 2185, 2015, 591, 2425, 2102, 1133, 2281, 302, 661, 1538, 1224, 1333, 1526, 399, 299, 955, 808, 1920, 746, 306, 1527, 1637, 924, 323, 751, 1851, 2202, 666, 504, 2364, 1500, 181, 711, 1026, 779, 586, 2332, 1692, 1017, 1215, 1482, 814, 868, 1677, 
2282, 815, 1131, 2103, 1121, 280, 642, 1279, 1848, 2264, 1158, 60, 1174, 1330, 1973, 1525, 192, 81, 2020, 725, 32, 589, 636, 572, 1051, 1013, 1484, 88, 1288, 2186, 436, 836, 1974, 1847, 1309, 1351, 1015, 2383, 1850, 1303, 2382, 61, 1930, 1338, 1163, 2099, 731, 1669, 610, 1079, 2068, 1975, 1976, 2472, 2163, 1926, 1683, 1080, 2528, 578, 1879, 979, 1481, 1104, 1981, 1921, 2287, 1232, 552, 1118, 1535, 1741, 2335, 1204, 210, 1006, 667, 544, 851, 826, 1416, 55, 1625, 2051, 527, 1908, 510, 995, 737, 887, 74, 910, 1125, 593, 2347, 2178, 1909, 588, 901, 62, 2233, 415, 2181, 2199, 734, 624, 693, 736, 1843, 2072, 651, 1983, 517, 1631, 2419, 1376, 984, 2201, 347, 1892, 423, 707, 2196, 1644, 2198, 315, 490, 2330, 2283, 300, 2291, 334, 1841, 818, 497, 787, 2105, 763, 2014, 861, 771, 2052, 485, 456, 2418, 2108, 2180, 897, 1580, 966, 680, 1635, 1293, 1925, 1397, 408, 2219, 2292, 543, 1285, 6, 1156, 2234, 1042, 2197, 1838, 1388, 142, 525, 1602, 2132, 2620, 1916, 1047, 95, 2333, 239, 2200, 1517, 2073, 204, 2074, 2054, 2021, 1588, 839, 2232, 1515, 57, 2056, 2576, 1540, 2055, 65, 1069, 2277, 964, 373, 2183, 1680, 1198, 965, 1274, 1759, 1220, 2182, 858, 2231, 2049, 2311, 2502, 1025, 2172, 2131, 597, 1463, 619]",1,20.0,28.0,300.0,0.6237075332348597,, -"[1787, 306, 656, 1337, 887, 124, 1772, 1158, 1640, 1572, 1787, 306, 656, 1337, 887, 124, 1772, 1158, 1640, 1572, 1787, 306, 656, 1337, 887, 124, 1772, 1158, 1640, 1572, 1787, 306, 656, 1337, 887, 124, 1772, 1158, 1640, 1572, 1787, 306, 656, 1337, 887, 124, 1772, 1158, 1640, 1572, 1787, 306, 656, 1337, 887, 124, 1772, 1158, 1640, 1572, 1787, 306, 656, 1337, 887, 124, 1772, 1158, 1640, 1572, 1787, 306, 656, 1337, 887, 124, 1772, 1158, 1640, 1572, 1787, 306, 656, 1337, 887, 124, 1772, 1158, 1640, 1572, 1787, 306, 656, 1337, 887, 124, 1772, 1158, 1640, 1572]",0,,10.0,100.0,0.0904726735598227,0.2, -"[2586, 1375, 2586, 1375, 2586, 1375, 2586, 1375, 2586, 1375, 2586, 1375, 2586, 1375, 2586, 1375, 2586, 1375, 2586, 
1375]",0,,15.0,300.0,0.0993353028064992,0.2, -"[306, 656, 476, 1337, 1346, 306, 656, 476, 1337, 1346, 306, 656, 476, 1337, 1346, 306, 656, 476, 1337, 1346, 306, 656, 476, 1337, 1346, 306, 656, 476, 1337, 1346, 306, 656, 476, 1337, 1346, 306, 656, 476, 1337, 1346, 306, 656, 476, 1337, 1346, 306, 656, 476, 1337, 1346]",0,,5.0,200.0,0.0904726735598227,0.6, -"[2690, 611, 2690, 611, 2690, 611, 2690, 611, 2690, 611, 2690, 611, 2690, 611, 2690, 611, 2690, 611, 2690, 611]",0,,15.0,200.0,0.087149187592319,1.5, -"[1367, 1251, 887, 1623, 1778, 1443, 1196, 1448, 2400, 2399]",0,2.0,1.0,80.0,0.1447562776957164,, -"[1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479]",0,2.0,2.0,20.0,0.087149187592319,, -"[2269, 2188, 2190, 643, 2022, 118, 392, 973, 1842, 1553]",0,3.0,1.0,60.0,0.1129985228951255,, -"[236, 138, 1780, 126, 103, 350, 1770, 409, 2080, 1561, 153, 484, 112, 1253, 1773, 814, 1776, 1774, 2382, 1775]",0,4.0,2.0,90.0,0.1078286558345642,, -"[1984, 74, 305, 6, 30, 1416, 149, 61, 1667, 2059]",0,5.0,1.0,20.0,0.1403249630723781,, -"[1555, 753, 1470, 1953, 1269, 1226, 2235, 1956, 1955, 645]",0,5.0,1.0,70.0,0.0985967503692762,, -"[1656, 2084, 2090, 350, 973, 308, 958, 1798, 2080, 1805]",0,5.0,1.0,90.0,0.0912112259970457,, -"[426, 1789, 2048, 1805, 1337, 2046, 563, 1651, 1772, 1779, 1998, 1624, 1346, 306, 318, 341, 1785, 519, 2026, 1248]",0,5.0,2.0,50.0,0.0904726735598227,, -"[2099, 779, 868, 2154, 1526, 1343, 1879, 1927, 1156, 118, 1303, 1660, 885, 1973, 2305, 1291, 1152, 1382, 1535, 575, 1907, 1977, 1851, 1818, 1983, 1957, 305, 146, 878, 1808, 1370, 1039, 524, 1171, 1819, 1923, 1062, 1572, 382, 2293, 962, 1581, 1822, 270, 897, 1811, 771, 1924, 1351, 1309, 751, 1341, 1848, 279, 708, 1587, 224, 668, 424, 239, 1812, 1909, 1494, 2078, 979, 2283, 826, 490, 519, 1008, 2105, 211, 1204, 1922, 1468, 2034, 498, 1299, 1203, 719, 86, 461, 1813, 1821, 795, 1816, 737, 1817, 1869, 1787, 1463, 1820, 351, 568, 1107, 1815, 836, 
1814, 13, 61]",1,3.0,10.0,225.0,0.5952732644017725,, -"[2667, 2076, 218, 1020, 2172, 1662, 45, 408, 1169, 2091, 1382, 525, 2381, 1954, 486, 2131, 738, 2057, 52, 901, 2133, 552, 1841, 985, 2155, 1080, 2127, 2293, 2198, 1441, 1013, 1149, 1139, 2476, 2075, 1927, 1916, 280, 2073, 2056, 30, 1986, 430, 1358, 2077, 2668, 1703, 142, 482, 2244, 1535, 2053, 1119, 2186, 2424, 2477, 415, 964, 2072, 8, 2182, 524, 1042, 912, 2124, 2118, 731, 100, 1540, 1851, 2243, 2006, 1671, 2180, 2231, 229, 1204, 2181, 101, 61, 1111, 1968, 1000, 281, 1810, 1351, 2126, 2055, 1285, 1704, 983, 1628, 2196, 1355, 1929, 1957, 2129, 48, 1502, 1631]",1,3.0,10.0,275.0,0.5760709010339734,, -"[306, 156, 1505, 1804, 1797, 1802, 1799, 1805, 417, 236, 1798, 1801, 1483, 773, 973, 554, 1538, 945, 1954, 255, 1296, 1956, 1911, 1902, 2491, 1478, 1072, 90, 1899, 2112, 342, 807, 1803, 20, 155, 842, 1851, 1901, 2010, 644, 643, 645, 2510, 388, 2553, 728, 1368, 1960, 2621, 1360, 2165, 958, 1955, 755, 1725, 118, 1183, 1825, 59, 2166, 1168, 694, 1733, 244, 793, 448, 1959, 1898, 1578, 1616, 1824, 107, 379, 1267, 904, 1470, 2235, 132, 1740, 1549, 541, 1070, 1022, 170, 971, 1113, 1953, 748, 2388, 2497, 581, 1904, 1226, 174, 1343, 2030, 1690, 1555, 111, 1029, 1963, 68, 1650, 454, 53, 30, 34, 676, 72, 1358, 101, 1276, 1167, 647, 2516, 1800, 1784, 1179, 193, 1961, 119, 709, 468, 1896, 1507, 646, 1897, 1826, 1537, 446, 1262, 189, 360, 1312, 941, 2517, 1829, 1962, 458, 1713, 801, 2363, 154, 703, 479, 73, 173, 125, 1122, 1880]",1,3.0,15.0,150.0,0.5675775480059084,, -"[1224, 1919, 779, 778, 1914, 2062, 807, 1226, 1525, 661, 701, 69, 1503, 415, 682, 344, 2207, 500, 1070, 129, 1975, 1851, 88, 1956, 1013, 176, 2094, 524, 1191, 979, 109, 1926, 2241, 1955, 1829, 2235, 1885, 70, 2184, 1303, 645, 1826, 671, 2628, 1839, 1470, 1168, 323, 67, 1179, 2699, 1661, 193, 1881, 793, 735, 238, 440, 1828, 1072, 2061, 277, 441, 2630, 2592, 1897, 1267, 115, 1409, 840, 559, 252, 360, 1960, 1827, 1902, 119, 2457, 1408, 47, 940, 1903, 1623, 264, 854, 
2029, 20, 1901, 2275, 1579, 1455, 1583, 1237, 1957, 331, 282, 1954, 1182, 2374, 1824, 1128, 1700, 1917, 1370, 1553, 2375, 750, 2272, 1958, 1404, 50, 1619, 2022, 935, 1060, 2269, 2273, 2153, 241, 472, 265, 648, 676, 1707, 400, 1825, 973, 720, 1441, 2270, 1616, 1395, 306, 1543, 392, 2450, 1354, 371, 847, 1527, 1537, 540, 2274, 581, 860, 1884, 2190, 880, 2242, 739]",1,3.0,15.0,200.0,0.5583456425406204,, -"[2287, 2288, 826, 1773, 1505, 2324, 1735, 484, 1283, 409, 114, 1778, 454, 1358, 1878, 1367, 1681, 1788, 1722, 544, 2256, 231, 1015, 1871, 973, 661, 1821, 2080, 191, 1564, 1483, 1583, 1561, 2086, 1499, 2143, 608, 613, 1389, 716, 1786, 542, 1789, 1770, 1197, 1196, 598, 387, 2327, 289, 1245, 417, 487, 1072, 581, 1701, 610, 1729, 2084, 655, 1120, 1797, 176, 1282, 416, 18, 719, 1781, 2034, 384, 1045, 102, 1777, 1998, 1873, 399, 1846, 426, 103, 1705, 1810, 1464, 1791, 1769, 1623, 1780, 1656, 2026, 1193, 2088, 880, 795, 2046, 181, 603, 910, 1609, 1661, 1584, 2082, 2025, 699, 341, 1337, 1804, 1805, 1551, 318, 1772, 1802, 1852, 887, 79, 459, 563, 2092, 2094, 2048, 1851, 337, 1776, 1571, 1651, 836, 2090, 554, 215, 519, 2096, 329, 139, 2318, 1248, 1574, 1787, 1779, 2093, 2089, 126, 109, 1448, 306, 302, 476, 133, 530, 317, 234, 2045, 1546]",1,3.0,15.0,225.0,0.7038404726735599,, -"[2205, 1797, 1608, 463, 2126, 2118, 482, 1012, 1111, 788, 1508, 870, 1662, 306, 1405, 210, 790, 2120, 1072, 1986, 1671, 1031, 2076, 1149, 1452, 2125, 1810, 229, 2033, 835, 2124, 48, 1119, 962, 2001, 1666, 2044, 1273, 1703, 1894, 2075, 1203, 55, 775, 1336, 2462, 33, 2117, 1215, 2122, 2295, 196, 2041, 2077, 1479, 2040, 1812, 1655, 1235, 1020, 1807, 1665, 261, 2471, 1614, 1442, 1968, 2129, 86, 722, 1010, 897, 2032, 1702, 1701, 1697, 443, 1324, 546, 549, 1654, 2004, 705, 1567, 2206, 770, 1268, 536, 2045, 1615, 2531, 417, 2238, 1964, 429, 1049, 382, 2128, 180, 695, 2127, 1026, 1234, 1448, 916, 863, 794, 1015, 2335, 598, 22, 1929, 49, 2035, 523, 1264, 1952, 432, 14, 158, 2034, 2668, 1002, 224, 1531, 1663, 
102, 186, 1319, 2038, 1983, 617, 1669, 2494, 293, 740, 1251, 141, 1303, 2036, 2039, 2043, 2037, 2357, 1611, 505, 191, 833, 2042, 1497]",1,3.0,15.0,275.0,0.6015509601181684,, -"[2619, 2618, 2619, 2618, 2619, 2618, 2619, 2618, 2619, 2618, 2619, 2618, 2619, 2618, 2619, 2618, 2619, 2618, 2619, 2618]",0,3.0,20.0,200.0,0.163589364844904,, -"[745, 1558, 792, 160, 366, 1704, 743, 1875, 484, 1127, 36, 1912, 973, 1640, 133, 176, 2094, 1149, 1670, 487, 1705, 1584, 391, 598, 102, 1775, 1772, 68, 201, 426, 1798, 1367, 350, 1782, 1725, 2046, 2080, 2085, 836, 454, 435, 1574, 1785, 236, 1784, 2107, 910, 1740, 2090, 2089, 308, 218, 673, 383, 1876, 958, 1856, 1251, 1453, 1661, 1656, 112, 151, 1624, 1572, 656, 887, 302, 2078, 2266, 1805, 138, 1894, 2385, 1986, 1801, 2086, 1873, 69, 1926, 329, 1789, 1859, 2267, 773, 1346, 139, 2048, 126, 1337, 2, 1787, 542, 1062, 1434, 794, 2301, 1262, 452, 1358, 476, 1192, 519, 2045, 1448, 1045, 2304, 460, 1505, 1697, 2008, 1804, 2268, 342, 1490, 2326, 318, 681, 1709, 1160, 1023, 581, 416, 189, 1651, 1870, 1779, 1095, 2042, 124, 289, 2318, 1733, 899, 244, 1329, 109, 1817, 1818, 2302, 2321, 103, 1771, 1551, 1166, 566, 2093, 1770, 1483, 232, 1158, 733, 2039, 2130, 1072, 862, 1193, 94, 306, 2011, 1812, 759, 968, 153, 1781, 691, 1677, 1791, 2035, 1294, 1803, 719, 1800, 191, 1802, 1891, 417, 573, 563, 791, 841, 2189, 2092, 1807, 2072, 655, 429, 387, 678, 1395, 945, 554, 679, 2091, 1623, 1769, 1929, 847, 1245, 71, 1497, 158, 1070, 1968, 2303, 2088, 294, 2040, 1265, 1270, 1614, 2087, 2095, 1478, 1654, 2037, 20, 740, 409, 1681, 14, 1319, 1998, 215, 2036, 1983, 141, 610, 2034, 1609, 608, 407, 2320, 459, 1273, 788, 86, 1799, 224, 49, 2319, 617, 1797, 1682, 335, 45, 1889, 401, 948, 2033]",1,3.0,25.0,275.0,0.7211964549483013,, -"[2209, 2159, 108, 23, 2157, 2161, 2160, 1647, 1328, 1504, 1327, 898, 1835, 495, 2158, 2192, 92, 1836, 1165, 1698, 1593, 537, 144, 213, 145, 2622, 144, 2209, 1504, 108, 144, 2209, 1504, 108, 1836, 144, 2209, 1504, 108, 1836, 144, 2209, 
1504, 108, 1836, 144, 2209, 1504, 108, 1836]",0,5.0,5.0,150.0,0.087149187592319,, -"[1362, 1351, 483, 2388, 2070, 816, 604, 1637, 777, 921, 2101, 702, 1485, 1359, 955, 1368, 1416, 2293, 1535, 822, 424, 454, 1118, 1042, 69, 2490, 2355, 2263, 2357, 2069, 1464, 498, 1332, 433, 1917, 885, 1079, 252, 149, 1517, 1013, 86, 1914, 993, 1203, 897, 118, 175, 1683, 510, 1974, 836, 1152, 1674, 2485, 306, 1120, 2026, 2295, 2024, 671, 506, 1916, 749, 779, 1919, 878, 1849, 1500, 2194, 1039, 2185, 596, 1482, 1592, 1652, 2028, 1675, 2110, 666, 1358, 1127, 830, 1072, 630, 279, 2282, 935, 1357, 1661, 2099, 572, 1539, 1918, 1966, 1920, 436, 651, 115, 2156]",1,5.0,10.0,200.0,0.5701624815361891,, -"[1864, 1346, 1670, 109, 869, 197, 231, 1624, 1665, 24, 836, 1583, 1701, 1239, 416, 332, 598, 1445, 2352, 1358, 232, 303, 2143, 1948, 1570, 1870, 519, 2003, 2667, 306, 1131, 1875, 1666, 2303, 179, 1192, 88, 1645, 2360, 570, 2034, 2246, 1787, 1998, 2668, 849, 2001, 1, 1446, 2409, 2305, 396, 2, 1127, 1023, 2578, 2354, 2002, 1326, 1859, 2353, 743, 1709, 2430, 2045, 968, 45, 2379, 206, 1062, 1497, 2004, 681, 1548, 553, 1907, 1134, 2543, 1129, 68, 577, 829, 579, 1993, 387, 316, 1820, 792, 1988, 1987, 795, 928, 566, 201, 733, 1410, 1558, 1950, 310, 199, 2243, 1217, 503, 297, 716, 1655, 2247, 1454, 302, 962, 2308, 2306, 205, 606, 218, 603, 1822, 2351, 1299, 823, 2691, 1811, 1171, 1097, 1219, 1996, 1816, 1807, 2509, 394, 420, 2335, 1287, 906, 2163, 203, 1581, 493, 665, 1989, 2000, 1873, 171, 1654, 1995, 2348, 1149, 2006, 277, 1518]",1,5.0,15.0,275.0,0.6248153618906942,, -"[1111, 1998, 2125, 1907, 1987, 2126, 1273, 566, 1996, 792, 519, 1875, 210, 1988, 179, 1235, 1453, 2509, 899, 476, 1990, 673, 1462, 2400, 1434, 1709, 2399, 1812, 1663, 805, 1995, 2000, 1443, 375, 1095, 2401, 43, 2519, 968, 1876, 1574, 1094, 2124, 1558, 963, 232, 1785, 1141, 203, 1991, 1986, 1906, 29, 275, 1701, 1870, 2127, 2211, 283, 45, 198, 2434, 215, 2034, 2673, 1157, 2671, 1704, 2645, 1149, 706, 201, 1992, 1873, 160, 2129, 2538, 
745, 2518, 1365, 1989, 1023, 335, 1859, 460, 2356, 743, 1119, 2355, 2, 1127, 1867, 1071, 366, 391, 1999, 2335, 1255, 1964, 1997, 2678, 1994, 1993, 68, 1166, 733, 681, 870, 71, 2563, 2250, 492, 2357, 2679, 688, 1969, 1965, 2236, 1380, 2059, 1452, 1671, 1530, 258, 1366, 443, 2677, 362, 320, 2240, 1390, 837, 195, 2130, 1971, 1050, 2239, 1575, 1260, 1970, 1422, 2670, 151, 1135, 248, 2253, 1257, 2594, 1417, 1081, 271, 529, 2393, 1206, 2260, 2128, 1967, 1697, 1529, 1846, 2350, 934, 348, 1010, 2249, 152, 450, 675, 1240, 856, 2117, 759, 1194, 1968, 1703, 1966, 627, 94, 996, 473, 2680, 2564, 428, 463, 2238, 1379, 368, 1230, 1342, 2248, 2647, 1702, 1138, 2237, 243, 1569, 194, 2122, 442, 2263]",1,5.0,20.0,225.0,0.5882570162481536,, -"[1798, 1072, 1705, 958, 1787, 1624, 1805, 1245, 487, 1337, 1193, 236, 1656, 1483, 1770, 2045, 519, 1799, 329, 302, 945, 551, 129, 1743, 542, 1797, 1785, 610, 440, 563, 417, 887, 1782, 1986, 656, 1444, 138, 56, 719, 1651, 1583, 1616, 426, 1998, 153, 1455, 1661, 2155, 289, 2457, 2450, 2278, 412, 447, 2323, 133, 109, 1224, 1501, 1584, 2093, 441, 1372, 1772, 704, 1158, 671, 350, 1218, 70, 578, 2388, 1701, 55, 2092, 1781, 1487, 126, 2649, 1820, 1013, 2293, 1879, 1975, 318, 476, 1537, 850, 452, 2480, 2608, 891, 252, 139, 1077, 344, 308, 1074, 1917, 655, 1572, 1142, 112, 1079, 2184, 1402, 1769, 1367, 1359, 603, 1779, 1346, 1919, 176, 115, 2153, 910, 490, 1362, 905, 1881, 306, 454, 1251, 581, 779, 1543, 60, 1482, 2071, 459, 2094, 2653, 1683, 1526, 2019, 1956, 193, 907, 97, 2134, 359, 1973, 294, 644, 210, 728, 1775, 661, 1368, 1911, 1490, 1771, 1377, 1849, 1269, 1535, 830, 124, 2095, 1789, 2135, 2217, 2069, 389, 1926, 103, 1637, 973, 181, 2256, 1905, 1571, 2016, 1652, 1370, 102, 1009, 1352, 682, 1873, 1954, 203, 712, 1028, 113, 1640, 1676, 249, 1168, 880, 1883, 857, 1773, 2553, 1464, 1551, 1353, 676, 277]",1,5.0,20.0,275.0,0.7141802067946824,, -"[1731, 1711, 1733, 1717, 1725, 1730, 1714, 1072, 1169, 1712, 1727, 1568, 1729, 1070, 831, 1721, 1284, 1723, 
1604, 1483, 1724, 687, 1154, 902, 1546, 1103, 748, 1726, 1145, 689, 1722, 1620, 1041, 744, 1646, 1715, 796, 1471, 1243, 552, 1718, 1708, 1040, 970, 341, 819, 1720, 524, 1719, 853]",0,7.0,5.0,150.0,0.164327917282127,, -"[525, 160, 69, 65, 142, 2332, 1920, 2000, 2172, 1858, 1574, 456, 751, 1299, 95, 899, 1871, 1854, 964, 597, 2009, 743, 519, 1994, 179, 745, 1998, 2057, 1697, 1992, 2021, 2620, 1709, 1540, 2419, 1669, 2189, 2199, 1558, 1602, 1812, 620, 1860, 673, 792, 1434, 1856, 2335, 2, 1416, 2186, 1279, 1855, 2198, 2001, 2007, 968, 681, 2054, 544, 731, 1125, 1334, 1995, 2219, 839, 2003, 1149, 1873, 1925, 1991, 366, 62, 1628, 1820, 1481, 1862, 1926, 2155, 1358, 1127, 1440, 1986, 1999, 586, 460, 231, 151, 1989, 1810, 1588, 485, 2535, 869, 2002, 1866, 1303, 416, 68, 45]",1,7.0,10.0,275.0,0.6266617429837519,, -"[2694, 431, 2695, 2694, 431, 2695, 2694, 431, 2695, 2694, 431, 2695, 2694, 431, 2695, 2694, 431, 2695, 2694, 431, 2695, 2694, 431, 2695, 2694, 431, 2695, 2694, 431, 2695]",0,7.0,15.0,150.0,0.1816838995568685,, -"[1975, 581, 181, 822, 56, 454, 412, 277, 1487, 2153, 907, 2016, 1333, 1851, 1792, 1535, 2109, 1224, 2480, 682, 1683, 1501, 1616, 1370, 1351, 1849, 702, 1013, 1583, 447, 262, 118, 2071, 712, 2019, 1916, 32, 306, 1359, 973, 2189, 1973, 2155, 1623, 1464, 2293, 1676, 1917, 1218, 2422, 1637, 1074, 850, 2608, 2485, 55, 880, 210, 2457, 1483, 1402, 935, 1251, 1072, 231, 2282, 176, 1482, 2310, 1926, 779, 2649, 2050, 1920, 1079, 69, 671, 1976, 891, 1919, 1358, 778, 2278, 1779, 661, 2134, 1142, 1592, 630, 1525, 1974, 1675, 2164, 895, 1560, 1743, 1913, 704, 490, 1701, 604, 2083, 666, 1674, 504, 1368, 1304, 2100, 2653, 1180, 551, 1905, 35, 506, 1448, 252, 945, 1911, 133, 452, 1362, 1725, 440, 573, 830, 2030, 2217, 539, 115, 2113, 1915, 2450, 505, 1845, 2135, 1077, 921, 748, 1914, 1296, 255, 1690, 955, 2553, 2023, 2112, 640, 190, 1918, 2420]",1,7.0,15.0,200.0,0.6750369276218612,, -"[95, 1989, 1156, 1838, 1983, 1540, 483, 460, 725, 519, 2054, 1993, 771, 1314, 1992, 
1848, 1127, 1980, 1671, 816, 335, 2006, 215, 1875, 456, 2007, 1574, 2611, 897, 45, 1880, 210, 1655, 2008, 68, 890, 2001, 718, 1285, 2027, 1628, 1121, 1998, 1995, 2002, 553, 1079, 1973, 899, 1131, 438, 2009, 391, 2133, 733, 858, 2131, 753, 2003, 1239, 1704, 239, 878, 2181, 2183, 2172, 201, 1224, 1652, 885, 366, 2005, 55, 2004, 1139, 1990, 476, 1981, 2136, 1558, 745, 1986, 1991, 792, 32, 1269, 661, 1987, 2034, 514, 1149, 1697, 490, 666, 787, 1979, 142, 52, 1812, 2227, 2182, 2071, 1929, 651, 1709, 2309, 950, 2137, 436, 504, 1039, 160, 71, 525, 1013, 1358, 1399, 437, 191, 2024, 1974, 1396, 1869, 2228, 2000, 2057, 347, 1985, 681, 1095, 2072, 1988, 968, 1434, 2232, 2395, 1873, 1859, 1421, 1978, 179, 1453, 1630, 2180, 2, 1994, 408, 1876, 151, 1023]",1,7.0,15.0,275.0,0.6621122599704579,, -"[1032, 587, 1032, 587, 1032, 587, 1032, 587, 1032, 587, 1032, 587, 1032, 587, 1032, 587, 1032, 587, 1032, 587]",0,7.0,20.0,200.0,0.0993353028064992,, -"[2003, 2001, 160, 1846, 2000, 1995, 745, 673, 366, 2002, 1993, 1874, 874, 743, 1859, 1992, 681, 1867, 1166, 1857, 1990, 391, 1127, 1709, 1434, 179, 1854, 1853, 2006, 68, 899, 1013, 792, 1558, 1704, 1996, 1989, 1866, 460, 231, 1858, 1991, 1986, 1574, 1998, 2, 1861, 1052, 1697, 232, 633, 1864, 532, 1479, 1997, 1863, 297, 1987, 869, 784, 1988, 1868, 1665, 1876, 261, 1473, 766, 24, 2707, 1870, 151, 1860, 201, 71, 1212, 165, 1636, 1440, 2004, 1994, 2005, 1701, 1799, 476, 1999, 205, 45, 1023, 962, 1869, 1149, 1812, 157, 1507, 1875, 989, 1453, 482, 519, 143, 1865, 1297, 845, 1808, 1877, 566, 1819, 973, 1814, 1100, 664, 968, 1862, 1343, 335, 1095, 1581, 729, 598, 1075, 1062, 790, 1576, 306, 416, 2046, 1823, 1645, 1299, 1331, 230, 467, 2048, 719, 1811, 1241, 547, 1809, 767, 13, 215, 316, 109, 1852, 549, 1337, 1822, 1121, 158, 308, 139, 1855, 1107, 226, 568, 1856, 1810, 835, 699, 1651, 343, 511, 1772, 1871, 1334, 216, 1057, 2409, 576, 1323, 341, 1003, 2045, 1815, 351, 2047, 1776, 1817, 27, 235, 1004, 563, 451, 1830, 1813, 1448, 1248, 1787, 1820, 
1872, 795, 1873, 133, 236, 1791, 935, 637, 716, 2026, 48]",1,7.0,20.0,225.0,0.6089364844903988,, -"[2185, 1500, 1526, 69, 1538, 1920, 1882, 2252, 814, 1121, 2186, 1351, 1133, 1224, 1908, 1741, 1848, 1655, 1930, 2155, 300, 32, 1279, 1397, 661, 2178, 121, 2282, 2189, 586, 2287, 2251, 610, 1395, 2015, 2293, 737, 589, 2382, 893, 527, 60, 88, 2014, 552, 299, 1625, 2099, 2364, 1131, 2335, 808, 838, 711, 2280, 1850, 242, 280, 1158, 1879, 1981, 1013, 61, 81, 1484, 1677, 2383, 1909, 1309, 1535, 826, 2292, 588, 924, 306, 2528, 901, 851, 1669, 510, 1926, 1051, 1376, 1527, 572, 731, 210, 1079, 1851, 707, 302, 1973, 1232, 815, 1847, 1481, 1118, 2332, 1916, 2291, 436, 861, 836, 415, 1983, 2020, 55, 1288, 2201, 323, 1843, 2264, 1285, 2103, 1080, 1416, 2419, 204, 746, 897, 74, 423, 995, 2620, 2051, 667, 2199, 2283, 787, 1892, 1015, 1174, 2072, 651, 544, 2281, 910, 578, 2418, 624, 887, 347, 2231, 1156, 1338, 1644, 2102, 1104, 1215, 725, 1302, 763, 2330, 490, 2180, 1580, 1303, 1925, 2163, 2052, 57, 2233, 1293, 636, 771, 334, 2068, 2021, 475, 6, 1274, 818, 239, 1602, 1635, 979, 1759, 1841, 65, 95, 315, 966, 593, 525, 456, 1026, 1683, 2219, 1006, 2347, 2073, 1482, 2200, 1125, 2183, 693, 1220, 619, 1588, 543, 408, 751, 2576, 1838, 1139, 2054, 1628, 1680, 2172, 1047, 2074, 2182, 2333, 736, 52, 497, 984, 2055, 1204, 2056, 1042, 597, 100, 680, 2202, 2232, 734, 858, 1388, 2311, 555, 373, 2277, 1463, 2108, 2053, 2197, 2499, 2132, 485, 2234, 257, 142, 1198, 2133, 1117, 2502, 1515, 1524, 2057]",1,7.0,25.0,275.0,0.6573116691285081,, -"[1737, 1745, 1739, 1742, 1740, 1743, 1709, 1725, 1209, 1734, 1483, 1714, 1070, 1738, 1072, 1154, 1733, 616, 1169, 1284, 1723, 1721, 1711, 1604, 174, 1730, 1724, 1568, 1103, 1726, 853, 796, 1735, 1727, 970, 748, 1546, 53, 957, 1620, 1145, 1729, 902, 1041, 764, 580, 687, 689, 466, 552]",0,9.0,5.0,150.0,0.163589364844904,, -"[563, 1346, 519, 1337, 1701, 318, 459, 236, 153, 289, 426, 1072, 610, 1986, 109, 175, 1919, 779, 1820, 41, 306, 778, 2388, 1743, 2450, 596, 
1483, 1237, 1455, 454, 69, 1441, 2016, 1978, 704, 1873, 1920, 955, 1592, 1184, 2101, 621, 803, 124, 1918, 1077, 671, 604, 1917, 506, 2100, 1676, 1675, 176, 1537, 115, 1464, 973, 1359, 514, 1954, 133, 389, 102, 2019, 359, 88, 905, 1914, 1958, 857, 277, 252, 735, 2153, 2184, 1142, 1526, 470, 193, 2094, 441, 682, 1879, 1583, 880, 630, 739, 490, 578, 676, 1097, 2235, 451, 70, 1309, 2285, 1525, 249, 603]",1,9.0,10.0,150.0,0.5797636632200887,, -"[2123, 2063, 316, 91, 2380, 1739, 652, 180, 267, 2378, 1347, 429, 8, 169, 327, 849, 157, 2044, 2379, 633, 1325, 1738, 2348, 2004, 2412, 2034, 1996, 1781, 191, 2001, 1997, 2003, 1, 2326, 598, 2301, 482, 1820, 1358, 2064, 1810, 1709, 306, 1994, 2381, 1701, 1894, 1993, 185, 109, 2339, 2000, 1998, 383, 218, 2025, 733, 2340, 1870, 695, 197, 1864, 2365, 1812, 1346, 297, 2045, 396, 869, 553, 1295, 314, 2121, 1191, 1522, 2002, 2477, 1583, 603, 231, 0, 285, 1862, 24, 2338, 746, 230, 2009, 818, 827, 2008, 574, 1197, 1858, 503, 298, 1171, 985, 2555, 1999]",1,9.0,10.0,200.0,0.5808714918759232,, -"[146, 2395, 724, 1072, 1421, 1351, 2024, 897, 733, 478, 1818, 514, 935, 2102, 305, 1420, 651, 504, 1884, 842, 438, 661, 1928, 1983, 69, 2190, 1142, 880, 1396, 1104, 118, 94, 120, 771, 1912, 1085, 2396, 453, 306, 1239, 2282, 1278, 2274, 2059, 2306, 454, 1416, 1156, 161, 2109, 2103, 2228, 1268, 634, 681, 2405, 2153, 1667, 571, 1927, 2283, 1842, 516, 74, 25, 1526, 2397, 1344, 1907, 1013, 2326, 2394, 1566, 1583, 1972, 1358, 1986, 1027, 1203, 277, 471, 1137, 604, 2155, 973, 645, 2071, 787, 60, 1402, 1882, 1914, 2450, 1570, 1338, 1399, 160, 2011, 725, 718]",1,9.0,10.0,275.0,0.6085672082717873,, -"[869, 297, 598, 1864, 2045, 1701, 24, 1870, 1784, 1772, 231, 139, 1869, 1776, 126, 1875, 153, 133, 215, 1782, 493, 201, 232, 1780, 2430, 733, 197, 899, 350, 2668, 1778, 1777, 2303, 1873, 910, 2667, 1453, 1859, 2003, 2691, 206, 1783, 1876, 332, 570, 1767, 519, 968, 1023, 1771, 335, 1773, 660, 1251, 102, 2335, 1358, 1561, 1775, 887, 2001, 459, 1781, 1583, 452, 
1623, 2008, 2004, 1127, 1144, 1995, 45, 384, 179, 696, 556, 608, 1994, 1768, 542, 581, 1769, 1779, 1149, 1774, 1709, 719, 980, 88, 973, 71, 1770, 1570, 112, 1560, 1998, 2025, 2002, 391, 1989, 1987, 1219, 1, 2, 1454, 1666, 151, 1988, 745, 792, 2236, 68, 366, 277, 566, 2238, 460, 1558, 553, 2007, 1574, 743, 314, 2606, 1986, 2009, 1239, 670, 673, 1997, 1166, 1991, 1697, 1999, 160, 1434, 267, 2000, 1992, 1704, 476, 1812, 681, 2373, 1990, 2005, 1993, 1095, 1996, 2006]",1,9.0,15.0,150.0,0.5579763663220089,, -"[1039, 2045, 1346, 1670, 603, 731, 1416, 1738, 327, 2340, 451, 2472, 1655, 836, 1013, 938, 2326, 2063, 1325, 2360, 429, 1017, 1446, 1171, 878, 1197, 436, 157, 8, 2582, 1042, 441, 849, 1864, 109, 1739, 2325, 191, 879, 665, 2568, 316, 2365, 642, 827, 2123, 2027, 1239, 2380, 1295, 2044, 91, 2412, 1894, 935, 306, 572, 671, 973, 818, 2034, 1820, 149, 2418, 2509, 2295, 2064, 2339, 598, 1445, 2096, 1996, 1810, 285, 746, 885, 298, 2026, 901, 215, 1358, 482, 1998, 1709, 681, 1166, 869, 1993, 1014, 591, 1066, 396, 1701, 24, 2301, 2430, 1991, 1279, 180, 445, 519, 1558, 1781, 2199, 1023, 230, 2555, 2550, 1999, 88, 1812, 487, 1912, 1095, 1870, 1873, 985, 2024, 332, 197, 651, 297, 576, 456, 2004, 553, 2001, 1987, 231, 218, 1583, 570, 743, 2170, 2338, 185, 2003, 733, 1997, 2381, 232, 2006, 201, 1120, 277, 1988, 2008, 416, 383, 1434]",1,9.0,15.0,275.0,0.6270310192023634,, -"[173, 687, 91, 2291, 1192, 377, 2380, 1062, 2035, 1265, 580, 609, 2034, 2063, 1020, 327, 59, 48, 1662, 90, 613, 326, 552, 246, 14, 154, 746, 686, 2075, 794, 1818, 333, 2123, 2378, 2268, 337, 2381, 155, 1682, 191, 524, 2122, 2077, 156, 862, 341, 289, 49, 1817, 8, 1160, 2615, 364, 678, 157, 1347, 180, 486, 2076, 665, 435, 616, 1810, 429, 482, 2477, 466, 695, 1325, 1996, 1197, 101, 2304, 2379, 2064, 733, 1358, 218, 73, 1666, 2006, 94, 759, 1329, 1894, 53, 2001, 2302, 684, 1739, 1171, 2667, 574, 626, 383, 1738, 606, 489, 2044, 2121, 818, 2668, 994, 401, 78, 1812, 332, 598, 1998, 109, 170, 30, 1820, 244, 1781, 2045, 
1870, 1997, 2011, 2365, 645, 1294, 1912, 285, 503, 935, 72, 652, 316, 306, 2348, 1701, 111, 346, 553, 1999, 603, 2025, 654, 1185, 174, 1583, 2412, 2002, 501, 357, 1709, 297, 34, 228, 1, 230, 2004, 1346, 1864, 2541, 2003, 231, 2009, 1873, 869, 1995, 2339, 2301, 185, 88, 827, 1219, 985, 1095, 1295, 1704, 366, 298, 197, 2340, 1191, 2253, 849, 519, 1858, 277, 2335, 1992, 2303, 1987, 215, 2326, 68, 160, 968, 1119, 232, 1453, 745, 1875, 1149, 24, 416, 1697]",1,9.0,20.0,275.0,0.6794682422451994,, -"[1065, 1096, 1952, 1031, 1072, 306, 2088, 1859, 1704, 1809, 1806, 104, 1046, 215, 792, 191, 1701, 1558, 1870, 460, 443, 476, 1203, 2336, 2077, 201, 2308, 179, 1697, 740, 232, 1990, 426, 1023, 1127, 1264, 1149, 1773, 1988, 1873, 899, 1999, 1998, 1989, 743, 2307, 2075, 1987, 519, 382, 159, 598, 2, 1681, 1574, 417, 2206, 2205, 1992, 1875, 1994, 1095, 160, 109, 1797, 401, 1876, 1453, 745, 1666, 948, 54, 788, 1662, 1993, 1654, 2668, 1810, 1107, 1002, 2009, 335, 33, 920, 2000, 2319, 1986, 1018, 407, 1580, 383, 2259, 2076, 1020, 248, 2155, 968, 1531, 576, 1261, 218, 962, 714, 1995, 48, 14, 1814, 2004, 1789, 1665, 1692, 1434, 1682, 2033, 695, 482, 366, 2381, 309, 141, 68, 2006, 775, 91, 2378, 783, 2262, 2376, 2348, 1894, 2007, 1790, 391, 151, 983, 1997, 1385, 1818, 1893, 1991, 45, 2121, 1062, 1817, 1125, 1808, 2040, 2291, 158, 1709, 673, 790, 1812, 430, 574, 2302, 503, 2120, 1042, 332, 2335, 588, 1051, 698, 1417, 286, 681, 1143, 2268, 1012, 49, 665, 566, 2130, 71, 377, 2008, 2380, 1996, 1265, 224, 1452, 2263, 2005, 912, 1892, 1474, 2127, 94, 733, 2123, 1166, 2379, 330, 1890, 2230, 897, 1015, 2249, 2001, 1278, 2332, 1788, 2003, 678, 2406, 1192, 2471, 185, 2002, 523, 2295, 2303, 2301, 1336, 949, 2043, 2041, 1493, 2035, 862, 180, 1481, 691, 627, 1110, 1655, 2122, 2011, 1807, 429, 1519, 1294, 791, 435, 863, 1983, 442, 794, 1329, 2304, 1669, 1889, 86, 759, 1891, 1068, 196, 1160, 2032]",1,9.0,25.0,275.0,0.6680206794682423,, -"[1919, 778, 1370, 1914, 578, 1224, 1402, 2485, 1525, 955, 2423, 
242, 2280, 838, 1592, 518, 2394, 304, 2123, 181]",0,3.0,2.0,150.0,0.10782865583456426,, -"[1181, 474, 1181, 474, 1181, 474, 1181, 474, 1181, 474, 1181, 474, 1181, 474, 1181, 474, 1181, 474, 1181, 474]",0,3.0,12.0,300.0,0.09785819793205318,, -"[1358, 985, 109, 231, 306, 1191, 1738, 1445, 1446, 2339, 1346, 2045, 1701, 2034, 1894, 197, 1295, 869, 1869, 1739, 1810, 2338, 481, 1864, 157, 1171, 277, 429, 1858, 38, 1583, 24, 863, 553, 206, 2365, 2691, 298, 297, 2077, 2335, 88, 482, 1703, 2306, 387, 1348, 465, 1692, 14, 2305, 1570, 1149, 2509, 2550, 232, 2238, 695, 2568, 416, 1812, 879, 2430, 962, 2340, 1820, 396, 681, 1966, 1817, 1558, 1278, 1014, 1, 2292, 10, 1324, 2001, 1062, 2555, 1590, 935, 1818, 1870, 603, 2236, 2613, 2035, 2233, 2076, 407, 1621, 1066, 1239, 1055, 1603, 1907, 2302, 493, 1666, 1987, 2011, 1023, 899, 1140, 759, 201, 1095, 862, 1873, 948, 2385, 598, 1670, 476, 1995, 1325, 715, 1399, 794, 391, 1134, 25, 71, 151, 1998, 934, 1268, 1043, 230, 68, 179, 1012, 743, 586, 195, 1192, 675, 366, 1876, 2532, 2291, 1382, 1265, 781, 2119, 352, 2304, 160, 1875, 673, 1859, 1160, 1418, 1294, 335, 377, 2357, 2263, 94, 968, 579, 1219, 2003, 570, 1800, 1709, 1988, 2309, 678, 2004, 2002, 2253, 994, 2, 218, 733, 460, 45, 2000]",1,3.0,18.0,250.0,0.6887001477104875,, -"[1628, 1851, 95, 2054, 118, 1538, 456, 2112, 454, 2172, 142, 1980, 2019, 1540, 1842, 1583, 2181, 1195, 1929, 651, 2165, 1133, 1410, 885, 2156, 1894, 2183, 2405, 2057, 682, 1500, 270, 1013, 880, 2034, 514, 115, 414, 436, 1614, 2287, 1178, 1373, 356, 306, 630, 1529, 2018, 973, 211, 2137, 1487, 835, 2153, 1625, 836, 1792, 1338, 644, 242, 2133, 711, 1810, 478, 1979, 176, 1909, 2485, 2528, 2113, 868, 1838, 1652, 748, 1224, 525, 2180, 995, 671, 1977, 52, 2395, 1912, 2111, 1142, 2103, 160, 1332, 1358, 479, 1121, 498, 842, 471, 518, 1072, 2423, 1370, 1655, 1982, 1671, 496, 279, 826, 1839, 2425, 838, 510, 1908, 539, 2099, 2384, 935, 1882, 1981, 437, 1139, 1526, 2050, 1884, 1885, 149, 1732, 1396, 502, 745, 516, 897, 277, 
610, 2280, 2136, 2016, 1285, 1741, 851, 2010, 1394, 76, 1463, 1464, 743, 252, 1239, 702, 1667, 2017, 1570, 2132, 733, 724, 408, 1637, 779, 1878, 1919, 571, 344, 1658, 1566, 1215, 841, 1883, 601, 1309, 2467, 2274, 1268, 1115, 1880, 1592, 2282, 2229, 1525, 2295, 1879, 1314, 966, 2014, 130, 2593, 815, 11, 2024, 1474, 822, 423, 490, 1174, 830, 2105, 1677, 204, 2015, 323, 1288, 32, 1399, 2178, 1085, 1482, 55, 661, 210, 1131, 60, 1973, 666, 1968, 1676, 1974, 1683, 2011, 1976, 1975, 955, 1986, 737, 2071, 97]",1,3.0,22.0,300.0,0.6676514032496307,, -"[1743, 1012, 704, 1336, 389, 2270, 695, 1512, 18, 1735, 2324, 2113, 121, 562, 2145, 565, 1546, 2327, 1610, 2269, 1282, 2375, 1583, 1509, 2251, 1283, 2252, 2107, 1773, 303, 1681, 1077, 118, 980, 2034, 244, 1780, 643, 589, 1790, 2075, 742, 1335, 1851, 935, 880, 530, 342, 1786, 1778, 1089, 2382, 1013, 945, 359, 399, 699, 969, 808, 1560, 1289, 341, 2082, 2143, 671, 1911, 231, 2322, 661, 1774, 1412, 1701, 317, 1777, 252, 1358, 814, 1465, 1871, 2326, 1167, 441, 685, 189, 1262, 2318, 1705, 1776, 1878, 1197, 2106, 384, 630, 218, 2081, 1768, 1015, 1499, 277, 1258, 2153, 1791, 1618, 314, 1785, 1616, 1622, 2094, 487, 1797, 960, 1624, 1142, 1810, 2256, 1783, 1798, 1311, 1670, 2016, 2329, 963, 1448, 741, 1799, 554, 1623, 1789, 1561, 1782, 1998, 1788, 660, 2091, 973, 387, 1620, 2598, 2388, 234, 191, 2494, 2478, 2083, 1045, 459, 1986, 1803, 2481, 1571, 2504, 1787, 1725, 1804, 2451, 1490, 139, 102, 1661, 728, 655, 2087, 754, 1771, 2080, 318, 20, 1337, 563, 133, 2090, 544, 176, 2079, 426, 452, 1722, 2092, 1772, 2095, 417, 905, 719, 1726, 112, 1767, 416, 114, 2086, 454, 505, 1144, 1779, 564, 1574, 329, 1072, 958, 1253, 573, 1770, 887, 1193, 1802, 2374, 2407, 610, 126, 308, 109, 556, 1196, 153, 239, 1775, 1483, 1800, 103, 1070, 2085, 337, 138, 124, 36, 2045, 236, 2047, 1313, 1470, 603, 2078, 215, 1249, 406, 656, 2089, 2019, 1584, 1656, 409, 1346, 1140, 1505, 682, 542, 1564, 1158, 1651, 1572, 859, 1740, 1367, 1459, 1551, 1856, 831, 1852, 115, 
1784, 2084, 1781, 1769, 598, 2048, 2288, 484, 581, 289, 2208, 350, 1792, 1248, 2026, 1733, 608, 910, 2096, 135, 1801, 1245]",1,3.0,28.0,300.0,0.7511078286558346,, -"[2339, 327, 2340, 1522, 935, 383, 2063, 109, 1864, 1325, 306, 869, 603, 1346, 2045, 1701, 230, 191, 1820, 1873, 598, 1119, 1858, 416, 695, 1191, 2509, 553, 2034, 1583, 2077, 1987, 335, 1697, 169, 733, 277, 2532, 1295, 1603, 2064, 968, 157, 2412, 1704, 1810, 2365, 570, 2004, 2430, 1781, 1738, 1912, 55, 1066, 387, 1197, 218, 267, 24, 482, 1983, 1171, 2253, 1998, 849, 2325, 460, 298, 1894, 1875, 332, 2667, 445, 1996, 231, 681, 818, 1494, 215, 1434, 2335, 1993, 1870, 519, 1670, 232, 1095, 1219, 297, 1876, 1800, 1558, 197, 1453, 1994, 1446, 1445, 1023, 2007, 201, 1, 1859, 1614, 2303, 1995, 2006, 1358, 164, 1997, 1079, 2568, 88, 2338, 366, 1812, 1156, 1709, 8, 2668, 1127, 285, 1999, 1739, 827, 2555, 45, 2326, 1991, 2008, 985, 2009, 476, 2613, 2003, 2000, 1574, 1014, 1185, 899, 1989, 792, 1347, 160, 2001, 1488, 2, 206, 2691, 391, 1149, 151, 1992, 745, 210, 2238, 396, 1671, 1986, 68, 879, 1152, 897, 71, 179, 1703, 1927, 1055, 743, 10, 1140, 1988, 1239, 2550, 2002, 2582, 836, 1410, 1570, 1454]",1,6.0,18.0,200.0,0.6997784342688331,, -"[332, 665, 349, 2003, 1023, 661, 2229, 868, 973, 1403, 896, 45, 2006, 1127, 1574, 792, 1844, 451, 1349, 465, 1558, 2123, 968, 1991, 335, 366, 2001, 1519, 306, 1870, 157, 266, 1805, 176, 68, 1661, 91, 1993, 1385, 426, 1704, 2259, 1812, 476, 638, 1634, 153, 899, 236, 441, 2383, 1474, 2008, 124, 1997, 924, 1859, 1149, 935, 2, 1532, 582, 1986, 318, 160, 2009, 553, 232, 1701, 519, 1787, 2005, 201, 2335, 1810, 673, 887, 2000, 1908, 344, 1772, 2040, 745, 743, 1988, 2323, 1434, 1885, 2120, 1999, 249, 1045, 1346, 2304, 2002, 566, 1481, 215, 289, 391, 415, 1166, 459, 409, 24, 1876, 1995, 588, 1873, 13, 1789, 1095, 39, 1992, 1051, 1453, 1875, 179, 2350, 143, 2094, 460, 2121, 2122, 1709, 109, 140, 277, 733, 94, 2045, 698, 1994, 1996, 1989, 1697, 2034, 2007, 2276, 809, 1990, 1015, 71, 1769, 548, 
681, 389, 1522, 1417, 946, 1569, 2004, 2547, 563, 808, 1998, 2357, 1987, 1884, 1336, 1869, 1705, 1779, 151, 1780, 2228, 1337, 196, 515, 696, 1102, 205, 2290, 798, 1505, 1969, 593, 271, 2336, 286]",1,6.0,18.0,300.0,0.689807976366322,, -"[1784, 389, 1012, 704, 1725, 1336, 359, 969, 1740, 859, 1735, 905, 2407, 1070, 2096, 1743, 1620, 1144, 133, 1583, 189, 1262, 1248, 36, 1412, 1448, 2079, 102, 1623, 1797, 1358, 277, 244, 562, 1251, 2327, 2016, 1282, 2376, 1289, 2045, 1061, 10, 671, 1773, 643, 1651, 754, 1776, 1313, 1733, 1490, 573, 1986, 2113, 2081, 1505, 1800, 1782, 1787, 1780, 2326, 1775, 318, 742, 1810, 1572, 33, 2107, 236, 1771, 2088, 911, 1077, 2085, 1803, 138, 1777, 598, 2019, 2026, 308, 887, 2086, 18, 505, 1245, 2089, 2034, 773, 416, 2094, 176, 2478, 271, 387, 1196, 476, 2143, 1670, 454, 1998, 655, 1483, 2252, 1781, 1346, 2251, 118, 1778, 973, 1622, 1574, 1616, 1072, 1140, 660, 1801, 1307, 2083, 1856, 519, 836, 1772, 126, 630, 1624, 581, 1726, 1534, 2093, 484, 2091, 1142, 302, 2383, 682, 563, 1769, 487, 1253, 1871, 1705, 542, 1802, 452, 139, 2047, 935, 1009, 661, 719, 1770, 1804, 2046, 1792, 910, 2078, 1478, 685, 153, 350, 1779, 1045, 588, 1143, 1640, 112, 1798, 1571, 2080, 239, 191, 608, 698, 1337, 945, 459, 1789, 252, 115, 1258, 2498, 783, 2387, 1367, 980, 1051, 1564, 289, 426, 109, 554, 2153, 802, 2048, 303, 1661, 124, 306, 880, 2322, 103, 406, 638, 2494, 1805, 893, 2318, 1158, 20, 417, 342, 299, 2382, 1484, 924, 2208, 1584, 2092]",1,6.0,22.0,250.0,0.7429837518463811,, -"[2355, 149, 861, 35, 2357, 895, 572, 1913, 1441, 1357, 880, 1416, 2135, 1330, 1145, 1309, 454, 578, 181, 1070, 935, 2068, 1103, 1013, 2028, 2394, 95, 553, 1850, 711, 2025, 1015, 2419, 2194, 528, 2263, 88, 440, 2376, 1842, 1927, 277, 671, 2608, 118, 1042, 1421, 2009, 1074, 1616, 1498, 1879, 1649, 1410, 2132, 510, 693, 2282, 1072, 1912, 366, 1127, 2110, 2195, 56, 934, 544, 1661, 2052, 1527, 279, 461, 447, 74, 2116, 1169, 2027, 2016, 306, 1953, 803, 1851, 2026, 2280, 539, 1358, 2480, 1618, 
1296, 661, 1972, 2274, 1143, 2490, 94, 783, 2105, 1966, 2099, 2109, 2262, 262, 1652, 725, 826, 1203, 1675, 2015, 1995, 1655, 666, 1142, 2396, 897, 496, 1926, 2217, 412, 1120, 1981, 1463, 2649, 630, 1555, 490, 1218, 1517, 737, 1190, 973, 891, 1974, 921, 1184, 175, 551, 1118, 252, 675, 1792, 1435, 324, 1470, 2189, 1849, 280, 1911, 502, 147, 1683, 748, 1909, 1501, 376, 1583, 1930, 382, 644, 2653, 945, 1374, 86, 1163, 32, 2422, 712, 1512, 2134, 2423, 1848, 1359, 2457, 728, 733, 1568, 210, 955, 586, 1152, 1917, 55, 195, 424, 1674, 838, 1464, 836, 1538, 242, 1550, 1905, 801, 1370, 2278, 504, 1525, 2050, 518, 1372, 1487, 1535, 1482, 1121, 1402, 2295, 2281, 1526, 651, 274, 850, 1824, 1920, 2485, 1910, 1975, 1840, 1617, 868, 1351, 1676]",1,6.0,22.0,300.0,0.7012555391432792,, -"[2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236]",0,9.0,2.0,200.0,0.08677991137370754,, -"[213, 144, 1165, 1593, 2157, 2192, 145, 537, 2158, 2159, 23, 92, 2161, 1327, 1835, 898, 1836, 495, 1698, 2622, 2160, 108, 1328, 1647, 2209, 1504, 108, 1647, 2157, 2160, 1835, 2158, 2209, 108, 1504, 1647, 2157, 2160, 1835, 2158, 2209, 108, 1504, 1647, 2157, 2160, 1835, 2158, 2209, 108, 1504, 1647, 2157, 2160, 1835, 2158, 2209, 108, 1504, 1647, 2157, 2160, 1835, 2158, 2209, 108, 1504, 1647, 2157, 2160, 1835, 2158, 2209, 108, 1504, 1647, 2157, 2160, 1835, 2158]",0,9.0,8.0,150.0,0.08677991137370754,, -"[1460, 2358, 1185, 792, 1453, 2000, 335, 1845, 1434, 1558, 672, 1859, 968, 476, 391, 519, 68, 179, 1692, 1574, 45, 756, 2349, 899, 743, 1023, 1847, 1850, 2346, 836, 1709, 1844, 1661, 306, 460, 1986, 2, 2155, 376, 1469, 1625, 1370, 1149, 1919, 1851, 1729, 2590, 324, 1675, 2154, 779, 778, 1697, 215, 160, 2045, 1464, 1660, 181, 2153, 2156, 1291, 1131, 681, 586, 673, 530, 1841, 1293, 1013, 454, 1239, 510, 1894, 2094, 1840, 151, 1118, 65, 74, 69, 1072, 1843, 1975, 214, 1848, 71, 1587, 277, 1849, 1704, 566, 1812, 220, 239, 1455, 1525, 1846, 70, 1410, 
897, 1095, 201, 2184, 2021, 2194, 1537, 1166, 232, 1121, 2195, 2193, 1926, 1842, 2295, 1924, 506, 2171, 880, 1224, 471, 30, 745, 908, 334, 1403, 1521, 1644, 441, 771, 973, 553, 935, 440, 1655, 682, 2282, 697, 738, 1995, 129, 11, 176, 1468, 2418, 1416, 2384, 2162, 487, 1921, 2019, 2136, 193, 6, 344, 305, 2170, 1080, 1839, 1583, 1602, 1127, 1925, 2016, 252, 61, 1142, 630, 2009, 1922, 86, 149, 366, 2285, 2152, 938, 1498, 718, 115, 1008]",1,9.0,18.0,200.0,0.7005169867060561,, -"[1738, 2338, 2555, 2365, 1346, 2340, 303, 1129, 2306, 1358, 420, 1287, 2305, 2339, 1948, 1295, 199, 2143, 1654, 1191, 218, 1739, 383, 836, 1670, 24, 332, 2351, 1800, 2309, 387, 2077, 1894, 2325, 2230, 579, 109, 297, 2582, 605, 306, 1709, 445, 2045, 879, 1126, 869, 2605, 2291, 1171, 2163, 2353, 416, 2308, 1858, 1666, 1558, 2034, 1806, 231, 1864, 465, 1950, 553, 792, 2243, 1488, 1131, 1223, 985, 1655, 1348, 197, 1665, 733, 1518, 598, 519, 665, 1063, 310, 1454, 2348, 1055, 570, 1134, 1446, 2352, 1704, 1116, 1445, 2, 277, 606, 1818, 2412, 394, 157, 935, 1624, 2136, 1869, 1149, 823, 1185, 899, 1583, 201, 906, 1107, 1701, 1991, 1434, 829, 171, 1823, 391, 849, 476, 316, 2430, 1787, 1166, 2667, 2003, 2668, 1347, 1410, 719, 206, 1819, 1014, 1119, 232, 603, 1497, 68, 482, 2246, 203, 2691, 1987, 2303, 481, 2001, 1807, 88, 681, 180, 169, 835, 1873, 1127, 8, 205, 191, 366, 179, 1998, 1870, 568, 1424, 1548, 1140, 160, 302, 2009, 1995, 2379, 1522, 1697, 493, 1809, 2568, 1992, 281, 1997, 1121, 2335, 1489]",1,9.0,18.0,300.0,0.6761447562776958,, -"[1616, 454, 1358, 868, 118, 2276, 1538, 126, 1914, 604, 412, 1676, 1713, 1160, 1741, 1131, 816, 1920, 1127, 306, 1732, 95, 383, 1818, 1074, 2405, 277, 1265, 1655, 1908, 1980, 236, 2357, 498, 1487, 1652, 56, 553, 2078, 2117, 1867, 2268, 420, 734, 736, 1352, 109, 737, 415, 2281, 657, 1376, 262, 2485, 447, 1909, 973, 712, 1203, 120, 401, 1351, 850, 1918, 2385, 1683, 153, 2427, 2014, 1979, 133, 419, 1926, 1224, 2008, 1849, 1568, 2189, 1171, 139, 2291, 1675, 794, 1701, 1572, 
469, 894, 1359, 708, 1844, 205, 1343, 377, 2012, 1677, 2178, 45, 1842, 1846, 199, 506, 408, 94, 693, 1810, 1110, 1062, 2010, 349, 1192, 69, 65, 678, 32, 2056, 2301, 2309, 366, 1464, 510, 1661, 1851, 696, 1729, 215, 2419, 851, 1527, 2045, 1505, 1930, 2071, 1869, 1309, 2303, 2017, 2304, 2016, 1995, 1682, 2015, 2094, 2018, 1299, 2418, 1848, 1845, 300, 1843, 733, 2025, 519, 203, 1583, 1294, 334, 1850, 1817, 181, 836, 1644, 216, 1875, 1841, 76, 1174, 2302, 1100, 1870, 143, 547, 2013, 157, 435, 1658, 1107, 1636, 1334, 1823, 1864, 88, 869, 841, 1413, 201, 877, 1293, 130, 530, 24, 772, 1435, 1821, 1935, 98, 1300, 471, 1847, 2394, 812, 1340, 162, 251, 862, 316, 1176, 402, 325, 1120, 880, 759, 908, 872, 1936, 1521, 845, 1940, 1941, 1840, 1931]",1,9.0,22.0,300.0,0.7119645494830132,, -"[88, 1320, 1789, 69, 1120, 2034, 1464, 1417, 2045, 1847, 2684, 1618, 1968, 481, 1729, 78, 205, 13, 1068, 201, 181, 1403, 880, 1050, 2262, 215, 1967, 334, 836, 158, 1452, 2393, 2233, 1385, 1625, 450, 2385, 963, 24, 407, 1850, 1324, 1521, 948, 471, 2189, 864, 1461, 2680, 82, 1425, 1810, 1842, 1732, 695, 1365, 465, 2305, 506, 1143, 1013, 2035, 1906, 1015, 1240, 1701, 1219, 1634, 1583, 1692, 1661, 429, 2306, 1349, 1481, 1699, 1569, 1110, 789, 1986]",1,12.0,8.0,250.0,0.6632200886262924,, -"[973, 1842, 1703, 910, 1660, 1291, 2281, 1121, 242, 2154, 1410, 682, 1141, 1583, 963, 452, 1351, 459, 2043, 436, 1919, 1655, 655, 897, 1222, 1702, 542, 2282, 1975, 112, 1909, 133, 461, 1370, 836, 1133, 1538, 490, 1916, 2617, 1782, 139, 838, 2280, 1417, 778, 1215, 1500, 779, 424, 153, 95, 1732, 1772, 2041, 544, 581, 1798, 719, 1517, 859, 2259, 1203, 306, 1970, 1848, 1131, 2185, 2587, 1587, 661, 350, 1525, 126, 2151, 887, 2080, 1851, 1623, 487, 1224, 245, 608, 2153, 109, 2052, 1535, 794, 426, 2294, 2283, 166, 484, 1042, 660, 1927, 2295, 1777, 86, 702, 1616, 2034, 1152, 863, 429, 382, 74, 1779, 1807, 1045, 1930, 102, 314, 1785, 2419, 1656, 1162, 1787, 1584, 1218, 1889, 1336, 2001, 1778, 921, 2335, 1780, 274, 1493, 
2044, 1046, 1474, 1669, 1110, 1705, 124, 38, 91, 2293, 1333, 151, 2156, 1416, 1624, 1118, 993, 1481, 586, 556, 2026, 705, 749, 384, 1144, 94, 2123, 1526, 980, 196, 2143, 317, 1367, 1341, 627, 1773, 589, 1789, 2152, 2121, 1452, 299, 523, 1571, 2155, 1158, 539, 2332, 510, 2122, 2490]",1,12.0,18.0,250.0,0.6831610044313147,, -"[826, 2287, 1681, 1012, 36, 530, 2324, 1283, 2106, 1015, 399, 337, 2251, 14, 158, 1552, 1620, 1560, 1618, 2113, 1743, 359, 1013, 1282, 969, 2143, 2096, 2288, 1270, 1740, 2075, 1089, 2252, 695, 1788, 1077, 2107, 2329, 134, 704, 741, 2382, 114, 2451, 1412, 661, 441, 960, 189, 2081, 963, 1336, 1248, 228, 1735, 1546, 1499, 1358, 544, 1505, 2505, 2504, 389, 1801, 671, 1701, 1784, 115, 2318, 742, 610, 1465, 808, 905, 409, 121, 831, 1699, 2094, 1786, 1726, 1878, 2322, 342, 1851, 556, 1571, 1564, 2326, 1852, 1776, 234, 277, 341, 2034, 86, 814, 2095, 754, 2256, 406, 1722, 603, 1725, 2145, 135, 2208, 1791, 880, 18, 303, 1804, 1583, 1262, 945, 239, 1483, 215, 1789, 1144, 1609, 859, 773, 935, 231, 1785, 1733, 1070, 454, 1661, 1769, 1197, 505, 1478, 1777, 562, 1009, 589, 1448, 1856, 1803, 1622, 2026, 1311, 1871, 980, 1624, 1158, 133, 1797, 384, 459, 1767, 1705, 1799, 1142, 1640, 1998, 1783, 1778, 1616, 118, 1781, 1045, 1787, 141, 699, 836, 2093, 416, 1623, 244, 2091, 1289, 317, 484, 289, 191, 2045, 1561, 1072, 102, 1651, 1790, 2086, 2083, 1337, 1367, 1313, 1773, 49, 2082, 1774, 318, 1986, 563, 973, 2481, 2016, 314, 887, 2425, 1335, 1779, 1196, 581, 2019, 218, 1772, 1782, 1670, 1780, 1251, 2080, 1805, 1768, 1798, 387, 176, 910]",1,12.0,22.0,300.0,0.7588626292466765,, -"[1919, 779, 778, 2154, 1370, 1660, 1291, 1587, 306, 1525, 1131, 1072, 586, 2153, 1625, 1729, 1926, 454, 74, 2282, 935, 1121, 1293, 2136, 487, 771, 65, 1118, 2155, 510, 1922, 836, 1008, 1224, 69, 129, 2295, 86, 1995, 2019, 2152, 193, 1924, 344, 115, 366, 1732, 2384, 2016, 1921, 1526, 2151, 1925, 630, 2344, 305, 897, 57, 1923, 2045, 1848, 718, 1537, 973, 2590, 2418, 1464, 738, 745, 880, 2170, 
1655, 1416, 553, 2162, 530, 441, 1986, 451, 1842, 671, 1110, 1975, 1845, 1080, 2094, 1661, 1583, 1839, 6, 2009, 1469, 572, 1847, 1844, 2026, 1410, 1455, 1498, 1840, 1851, 1120, 181, 1357, 506, 471, 908, 1403, 1521, 1644, 334, 682, 1013, 2021, 239, 61, 440, 11, 1602, 1894, 1468, 277, 30, 1127, 376, 176, 2285, 252, 1341, 1142, 1222, 2156, 1239, 2096, 160, 2194, 2184, 2349, 70, 2028, 2025, 149, 697, 1675, 1841, 1843, 1846, 2027, 2360, 938, 1521, 1625, 1644, 1661, 1675, 1729, 1464, 1521, 1625, 1644, 1661, 1675, 1729, 1840, 1841, 2025, 149, 572]",1,3.0,12.0,150.0,0.6635893648449039,, -"[231, 1701, 869, 306, 109, 277, 2045, 24, 553, 172, 598, 2303, 416, 2130, 197, 2302, 1891, 297, 733, 387, 240, 512, 2034, 2430, 48, 2349, 2346, 570, 2039, 1875, 2532, 1890, 2040, 2187, 862, 201, 1051, 1140, 1870, 2365, 1864, 1219, 2041, 788, 617, 10, 1336, 185, 1800, 2009, 232, 2345, 519, 476, 1358, 160, 745, 442, 206, 45, 2667, 215, 1692, 1889, 1023, 493, 2406, 1590, 1995, 291, 1893, 1876, 1514, 1998, 33, 1531, 179, 332, 2186, 1873, 1453, 2347, 665, 1127, 2238, 2259, 391, 1583, 1810, 366, 196, 756, 794, 1018, 2043, 2008, 1472, 460, 863, 38, 705, 2378, 2066, 86, 2387, 2122, 1510, 68, 2123, 1469, 1095, 429, 2668, 523, 2380, 1107, 743, 1493, 2044, 1149, 2691, 2003, 2336, 1807, 71, 91, 2004, 151, 503, 2348, 2335, 2001, 1987, 627, 574, 1160, 2379, 1989, 1143, 2000, 330, 2002, 1558, 962, 482, 1669, 792, 180, 1709, 1454, 1789, 1892, 576, 1812, 1, 2587, 1859, 1046, 2236, 335, 949, 1481, 1423, 1474, 1015, 2295, 1662, 1385]",1,3.0,12.0,200.0,0.6606351550960118,, -"[1224, 1131, 2017, 1013, 830, 1309, 779, 1850, 1341, 1882, 1849, 211, 514, 1914, 2137, 815, 1644, 1843, 1394, 2425, 838, 2178, 1975, 1344, 2099, 2105, 1885, 1463, 2015, 2434, 1884, 1677, 1487, 1028, 199, 130, 1883, 490, 277, 1732, 479, 807, 1636, 1701, 2707, 1870, 24, 201, 666, 1473, 157, 2399, 1847, 2016, 1878, 420, 1195, 205, 165, 935, 2110, 32, 2401, 1299, 661, 851, 1525, 1880, 826, 2010, 1881, 868, 1174, 604, 1844, 1314, 1558, 415, 
2340, 297, 1912, 766, 1741, 76, 2240, 1676, 1527, 988, 438, 818, 143, 1301, 724, 1864, 1573, 651, 29, 1297, 1191, 1448, 1869, 637, 255, 2339, 465, 1785, 890, 696, 854, 1251, 48, 845, 1675, 1908, 1246, 2014, 2396, 600, 966, 2335, 1420, 753, 857, 1560, 203, 1105, 1107, 406, 1821, 1027, 2400, 1227, 573, 162, 470, 1421, 598, 634, 504, 1779, 1269, 1097, 2182, 593, 2542, 578, 300, 843, 2276, 258, 1879, 1984, 963, 1474, 1288, 1074, 1703, 822, 2394, 505, 1115, 1630, 1333, 1178, 2138, 1266, 2395, 1304]",1,3.0,12.0,250.0,0.6676514032496307,, -"[1365, 2519, 1864, 1346, 109, 1191, 2339, 157, 143, 216, 197, 231, 8, 1358, 1701, 1583, 2244, 1119, 285, 2045, 1858, 1670, 869, 2338, 2245, 91, 2123, 269, 2114, 1894, 1781, 2412, 1665, 332, 314, 598, 1131, 2340, 416, 2126, 836, 2378, 1624, 2555, 2381, 281, 1738, 746, 1870, 1347, 1124, 24, 1382, 1666, 2352, 2509, 2243, 1948, 1064, 879, 1655, 2003, 669, 232, 191, 519, 2143, 2246, 1998, 1558, 158, 267, 303, 2001, 2477, 316, 2034, 1295, 1875, 306, 2380, 818, 2025, 1999, 215, 1287, 297, 179, 1055, 1787, 870, 985, 1023, 1570, 1623, 1995, 101, 1800, 823, 2253, 1326, 2004, 2579, 1127, 387, 1873, 1739, 1709, 2006, 310, 972, 1185, 185, 383, 665, 2353, 1859, 1410, 1907, 1129, 1820, 1987, 1950, 570, 2543, 1697, 1997, 2121, 2325, 603, 1453, 2568, 1807, 1171, 1876, 2044, 1197, 429, 2303, 2, 1445, 827, 2335, 1239, 2000, 2351, 1149, 2580, 1299, 180, 553, 2305, 899, 88, 169, 792, 968, 1704, 1823, 1912, 1522, 2667, 849, 476, 1121, 2136, 2306, 1996, 10, 366, 795, 1, 2354, 1645, 2077, 745, 2668, 1815, 503, 743, 2430, 2326, 205, 396, 681, 1140, 695, 2532, 2002, 577, 1992, 199, 1548, 1134, 1062, 1603, 45, 2247, 906, 1192, 2409, 673, 1994, 1991, 606, 1488, 2348, 935, 1822, 218, 1818, 1819, 829, 1816, 460, 68, 1993, 224, 1454, 1126, 1581, 1988, 2360, 2163, 566, 1518, 2236, 1811, 1086, 1989, 351, 574, 716, 928, 1217, 1331, 1595, 420, 1348, 1446, 481, 1576, 201, 733, 302, 719, 962, 1219, 277, 576, 2063, 171]",1,3.0,18.0,350.0,0.6746676514032496,, -"[33, 1820, 
442, 911, 1810, 1346, 203, 1051, 935, 306, 316, 1781, 2339, 109, 2340, 2412, 1325, 1858, 230, 196, 327, 1358, 2045, 2383, 482, 985, 1869, 1347, 298, 1873, 2276, 429, 416, 746, 1739, 1701, 2120, 593, 603, 24, 2044, 1670, 1738, 1997, 2077, 695, 1119, 854, 519, 344, 1095, 2253, 576, 2326, 297, 1529, 2348, 2230, 2040, 2064, 1445, 2338, 1171, 893, 598, 396, 2379, 1998, 1446, 1098, 2121, 2555, 1023, 836, 1697, 627, 857, 1115, 1884, 2613, 1246, 1197, 180, 1453, 197, 2568, 1875, 1522, 1709, 206, 2691, 2066, 1603, 681, 1704, 169, 2063, 215, 2637, 1988, 185, 1140, 1870, 1149, 2009, 27, 1066, 1800, 335, 1912, 849, 1558, 968, 879, 899, 1488, 277, 2004, 1295, 1574, 503, 2325, 2006, 71, 2335, 383, 566, 2002, 2025, 201, 1993, 476, 1994, 574, 387, 2122, 68, 792, 2123, 1991, 2236, 745, 1031, 2696, 2005, 714, 1989, 531, 1907, 470, 179, 1995, 2615, 1055, 2003, 157, 285, 1191, 1990, 1894, 2667, 2008, 267, 654, 818, 2007, 2668, 160, 1666, 232, 652, 1, 1127, 1986, 151, 2329, 231, 332, 827, 606, 314, 570, 1999, 2, 460, 1996, 49, 2380, 1096, 391, 1097, 2477, 809, 218, 2001, 2365, 48, 8, 2000, 330, 553, 869, 1864, 191, 1166, 1046, 2303, 1859, 2301, 1876, 2381, 397, 1987, 2041, 1185, 733, 91, 2509, 1812, 673, 1014, 1583, 10, 445, 2238, 2430, 1992, 2034, 2360, 2582, 2532, 1219, 1434, 493, 45, 665, 2205, 2361, 2206, 2471, 1454, 366, 1662, 743, 2378, 2550, 1570, 1239, 88, 2398, 2329, 33, 442, 2637, 330, 1046, 893, 911, 2383, 2, 652, 654, 332, 1454, 2329, 33, 442, 2637, 330, 1046, 893, 911, 2383, 2, 652, 654, 332, 1454, 1666, 1986, 1662, 2381, 45, 68, 71, 151, 2329, 33, 442, 2637, 330, 1046, 893, 911, 2383, 2, 652, 654, 332, 1454, 1666, 1986, 1662, 2381, 45, 68, 71, 151]",1,3.0,22.0,250.0,0.6842688330871491,, -"[962, 232, 230, 549, 224, 231, 1665, 1864, 661, 1870, 519, 1045, 563, 973, 2045, 215, 1986, 869, 160, 719, 351, 716, 568, 153, 176, 1403, 481, 1866, 236, 1868, 924, 1810, 1873, 1820, 1346, 426, 277, 482, 465, 553, 1778, 306, 2323, 133, 935, 548, 1440, 1777, 1859, 1998, 2383, 349, 696, 
102, 1805, 835, 318, 576, 1636, 1789, 1337, 896, 2034, 795, 633, 27, 1871, 2548, 157, 511, 638, 2092, 344, 2228, 1885, 384, 124, 158, 459, 2335, 809, 887, 1015, 109, 24, 2304, 1381, 1453, 2093, 266, 1788, 2549, 416, 2095, 1196, 1793, 1780, 13, 790, 1769, 798, 1786, 627, 1115, 415, 1799, 699, 1875, 2309, 289, 1097, 409, 1023, 335, 808, 2509, 271, 1661, 2519, 868, 610, 593, 151, 2122, 442, 1867, 582, 1474, 451, 1869, 1908, 1624, 1856, 140, 1844, 1479, 39, 854, 441, 470, 1787, 1365, 1794, 1863, 706, 2434, 2401, 1772, 210, 2435, 1417, 1858, 2547, 789, 2238, 375, 1907, 2229, 205, 201, 1795, 1585, 22, 203, 1452, 1087, 884, 1529, 529, 623, 1653, 1874, 1969, 43, 828, 2399, 874, 1970, 199, 1865, 547, 2400, 152, 2464, 761, 515, 1854, 1779, 1240, 584, 1862, 963, 2260, 1884, 1369, 1102, 2237, 1701, 1505, 2289, 1852, 1207, 2299, 2276, 784, 1846, 1705, 317, 1241, 805, 664, 1872, 1094, 1703, 1323, 1853, 89, 1530, 261, 1075, 1877, 1212, 226, 1401, 1443, 113, 1334, 767, 488, 1958, 389, 2094, 1299, 1876, 467, 532, 94, 1860, 589, 463, 343, 143, 1861, 249, 1569, 1336, 1141, 1857, 275, 340, 362, 1157, 652, 857, 2645, 1785, 1153, 420, 733, 2239, 1246, 1419, 2236, 2496, 444, 1702, 1967, 1796, 2465, 2240, 1968, 2290, 1178, 2297, 1906, 2350, 258, 2463, 1234, 29, 1350, 1966, 1671, 1052, 1266, 216, 729, 1971, 1964, 1984, 1855, 235, 759, 1100, 2294, 1010, 946, 443, 2059, 2155, 1874, 1875, 1876, 1877, 27, 224, 230, 351]",1,3.0,22.0,300.0,0.6835302806499262,, -"[1358, 61, 973, 1732, 1914, 1927, 454, 2162, 1152, 1068, 139, 779, 893, 210, 306, 356, 1079, 146, 1224, 1907, 1916, 2251, 1309, 1455, 1015, 1051, 1463, 814, 69, 323, 851, 121, 552, 2252, 70, 2184, 1537, 589, 586, 1351, 2163, 604, 1623, 211, 2085, 1759, 2155, 493, 1592, 1912, 2020, 671, 1908, 1279, 2189, 1677, 1847, 984, 498, 1174, 181, 2382, 1695, 1669, 815, 436, 2016, 2010, 731, 2186, 149, 1644, 1850, 569, 315, 60, 2109, 728, 1416, 2293, 1898, 2265, 437, 62, 1911, 1524, 2068, 441, 2131, 1843, 1624, 302, 2383, 344, 1013, 55, 738, 572, 
280, 842, 129, 257, 2335, 434, 1481, 2295, 1187, 1892, 1435, 2332, 1983, 2015, 1527, 1377, 1535, 1296, 1131, 1204, 2014, 415, 966, 2499, 300, 2408, 1388, 2133, 588, 818, 2364, 808, 88, 1920, 1972, 2202, 204, 1397, 755, 80, 751, 901, 1652, 895, 497, 334, 299, 1588, 2576, 861, 100, 734, 2201, 57, 965, 763, 1303, 2059, 1693, 746, 707, 2311, 1232, 1680, 680, 979, 2330, 408, 1026, 276, 2056, 1117, 2197, 2277, 1069, 1910, 1158, 2232, 440, 81, 1602, 2485, 2199, 2074, 897, 2049, 423, 2181, 725, 2179, 2418, 2071, 1118, 2053, 1895, 1137, 2333, 910, 1635, 1047, 1494, 52, 667, 1293, 2180, 1104, 1080, 2021, 787, 1580, 924, 325, 1484, 95, 2344, 2264, 1006, 693, 887, 2073, 2057, 597, 1851, 2094, 543, 35, 839, 193, 1277, 1467, 485, 142, 1566, 1410, 1628, 1139, 1838, 2178, 624, 1540, 525, 1848, 1156, 2419, 2231, 1840, 651, 1171, 239, 697, 2055, 527, 1909, 1199, 2305, 619, 1402, 2052, 964, 1913, 1926, 2347, 593, 1841, 1631, 347, 858, 2620, 2219, 373, 737, 736, 1285, 2196, 74, 2200, 544, 1515, 1376, 6, 2182, 2108, 1925, 2183, 1274, 1220, 456, 65, 2054, 2172, 2198, 2072, 771, 768, 2051, 1125, 2472, 1042, 1025, 2233, 1198, 1517, 1358, 211, 1131, 1171, 2305, 1068, 2265, 61, 1358, 211, 1131, 1171, 2305, 1068, 2265, 61, 973, 2010, 1623, 1624, 139, 2085, 493, 569, 768, 1199, 1732, 1895, 779, 2485, 1377, 842, 2109, 325, 1224, 1402, 1358, 211, 1131, 1171, 2305, 1068, 2265, 61, 973, 2010, 1623, 1624, 139, 2085, 493, 569, 768, 1199, 1732, 1895, 779, 2485, 1377, 842, 2109, 325, 1224, 1402, 1358, 211, 1131, 1171, 2305, 1068, 2265, 61, 973, 2010, 1623, 1624, 139, 2085, 493, 569, 768, 1199, 1732, 1895, 779, 2485, 1377, 842, 2109, 325, 1224, 1402]",1,3.0,28.0,300.0,0.6842688330871491,, -"[1912, 2034, 356, 995, 2001, 1338, 2002, 2003, 1812, 323, 331, 2018, 1268, 2102, 60, 55, 1982, 841, 211, 162, 2004, 2103, 1174, 68, 1704, 436, 366, 1668, 61, 490, 681, 2136, 1987, 1023, 1843, 437, 201, 45, 1870, 2014, 1988, 968, 1095, 1667, 476, 2005, 215, 1741, 1344, 1570, 1630, 1269, 1527, 2012, 498, 391, 2182, 
818, 1658, 525, 815, 1999, 519, 1309, 1998, 571, 76, 1574, 2394, 792, 1394, 1558, 1989, 130, 553, 851, 1986, 737, 2137, 460, 1908, 1494, 696, 415, 897, 860, 2009, 1127, 232, 1995, 745, 1625, 2178, 204, 1434, 1644, 277, 1974, 2015, 1847, 88, 71, 1873, 593, 966, 2010, 1677, 335, 2016, 743, 1876, 899, 1149, 2611, 2027, 1996, 1850, 1991, 634, 2, 1992, 1709, 1997, 179, 2007, 673, 1697, 1859, 1652, 2000, 2017, 1993, 566, 300, 1453, 1166, 2008, 1882, 1239, 151, 160, 1994, 1875, 1990, 2006, 1288, 2013, 1732, 2011, 1713, 61, 490, 995, 1338, 1625, 2102, 61, 490, 995, 1338, 1625, 2102, 2103, 211, 2137, 88, 160, 745]",1,6.0,12.0,150.0,0.7008862629246676,, -"[2046, 2048, 139, 306, 153, 236, 318, 1973, 1772, 215, 1776, 563, 1572, 109, 1701, 1337, 1787, 490, 1852, 734, 2295, 1856, 736, 1592, 126, 408, 2357, 1867, 94, 242, 1309, 2071, 415, 1215, 553, 2045, 308, 1127, 56, 2078, 1841, 838, 1847, 1995, 2094, 454, 1651, 120, 88, 1131, 1791, 436, 2056, 2117, 2047, 666, 32, 519, 1568, 498, 447, 699, 1482, 1644, 725, 1655, 1341, 1121, 1676, 133, 1616, 341, 1732, 95, 118, 868, 262, 2405, 816, 1846, 586, 2485, 1343, 973, 693, 1980, 412, 1358, 1293, 1975, 712, 1972, 1351, 1683, 603, 1784, 1248, 2026, 376, 1830, 497, 657, 1133, 1448, 1979, 1538, 1218, 2155, 1525, 366, 2281, 2317, 1203, 55, 779, 1521, 1505, 1661, 98, 2403, 2418, 1849, 1074, 1843, 1729, 1914, 850, 2276, 1842, 1110, 1583, 65, 74, 530, 836, 2189, 1652, 1625, 2394, 2013, 1850, 2025, 1851, 334, 1840, 1464, 1675, 510, 1526, 239, 1844, 708, 2419, 1370, 1919, 1403, 604, 1918, 1359, 2153, 787, 1487, 1920, 778, 2154, 682, 1909, 1013]",1,6.0,12.0,200.0,0.6746676514032496,, -"[1291, 1346, 1912, 2034, 733, 157, 815, 2691, 2412, 1738, 1894, 2063, 327, 570, 603, 109, 218, 206, 2011, 1677, 1660, 2155, 1781, 879, 2430, 2045, 191, 1014, 1864, 1358, 1820, 298, 1174, 2394, 383, 1325, 482, 1851, 2509, 197, 2064, 297, 1661, 2365, 2338, 1527, 471, 1670, 231, 306, 2026, 2010, 2326, 1701, 1729, 1347, 1812, 1713, 2154, 598, 1846, 869, 519, 1907, 2668, 
586, 841, 2303, 1526, 2156, 201, 2178, 1185, 285, 1224, 2077, 176, 1370, 1848, 2550, 2017, 1558, 2555, 387, 415, 1587, 985, 2339, 1222, 1341, 24, 1847, 396, 1850, 416, 1997, 1357, 1309, 2013, 2340, 818, 1191, 2335, 1131, 1295, 1704, 2238, 1171, 1739, 493, 300, 230, 1908, 673, 1525, 1998, 1996, 779, 149, 778, 1858, 1741, 1110, 503, 1859, 2667, 2611, 851, 1919, 1810, 2015, 1975, 1732, 71, 1445, 465, 2153, 1446, 1870, 2151, 1149, 1873, 2613, 1709, 1987, 681, 571, 2152, 391, 1023, 232, 1072, 1142, 445, 737, 2582, 2008, 252, 1666, 1197, 8, 1097, 1844, 1454, 215, 1849, 1095, 1521, 69, 1625, 696, 76, 1992, 792, 332, 162, 1570, 1999, 849, 205, 2016, 45, 1603, 1988, 1488, 1875, 335, 1055, 899, 566, 151, 68, 836, 2014, 179, 454, 88, 2001, 1993, 2028, 277, 1453, 2004, 2568, 510, 130, 2194, 2, 1403, 429, 1876, 1119, 1800, 968, 671, 2025, 91, 880, 1288, 2027, 1239, 1882, 160, 2007, 1574, 1990, 682, 2003, 1434, 1166, 2253, 476, 1869, 1013, 1843, 1644, 1697, 1066, 1522, 1140, 1658, 827, 2325, 973, 2236, 1120, 10, 1, 695, 1986, 169, 2000]",1,6.0,18.0,300.0,0.7566469719350074,, -"[2082, 505, 1448, 2096, 859, 20, 1142, 406, 2324, 1735, 2326, 18, 1800, 808, 118, 1878, 36, 804, 960, 189, 661, 2000, 1837, 337, 1283, 1776, 573, 2047, 1616, 1313, 2034, 1784, 935, 1282, 638, 2598, 2383, 1911, 2407, 2046, 2078, 1986, 542, 35, 1072, 1768, 1560, 581, 1773, 1499, 2256, 2026, 2085, 2318, 454, 1913, 1681, 294, 1146, 2048, 1799, 1045, 191, 1583, 682, 350, 484, 103, 308, 387, 1258, 115, 530, 643, 1490, 1245, 1140, 1571, 1307, 1790, 1561, 329, 318, 880, 416, 895, 1015, 1670, 302, 922, 1346, 1998, 2536, 1767, 1810, 1251, 314, 598, 1771, 2300, 1444, 476, 887, 176, 836, 1009, 1574, 2094, 1623, 1552, 440, 519, 2325, 973, 2107, 1640, 2045, 1483, 2088, 1792, 1656, 102, 544, 1856, 1144, 1311, 1871, 1253, 1775, 2087, 407, 893, 417, 399, 1789, 963, 1296, 1158, 1782, 773, 1781, 719, 289, 1910, 1786, 610, 1622, 303, 1797, 236, 1798, 2089, 133, 1774, 1783, 2081, 608, 1801, 2382, 252, 277, 554, 139, 487, 2327, 
2019, 958, 459, 1572, 1779, 563, 2153, 1769, 239, 814, 910, 1802, 1772, 1505, 1804, 1699, 671, 1770, 2016, 1805, 1780, 1778, 138, 1635, 556, 2079, 1289, 1785, 306, 2494, 2084, 10, 630, 1661, 426, 2090, 660, 409, 153, 1912, 656, 1564, 452, 2080, 802, 1787, 1788, 1337, 126, 112, 1196, 1459, 384, 109, 1777, 655, 2252, 2251, 685, 2086, 2143, 121, 980, 330, 1367, 943, 124, 1651, 589, 728, 1705, 1061, 2322, 1624, 2095, 2478, 1584, 1551, 317, 1193, 2091, 2093, 763, 1628, 2092, 935, 1142, 1583, 2016, 2019, 2153, 661, 1571, 20, 121, 980, 1158, 1624, 802, 935, 1142, 1583, 2016, 2019, 2153, 661, 1571, 20, 121, 980, 1158, 1624, 802, 1910, 2251, 109, 317, 384, 409, 887, 1196, 935, 1142, 1583, 2016, 2019, 2153, 661, 1571, 20, 121, 980, 1158, 1624, 802, 1910, 2251, 109, 317, 384, 409, 887, 1196]",1,6.0,22.0,250.0,0.7429837518463811,, -"[2045, 887, 1787, 175, 236, 955, 318, 103, 563, 524, 126, 1572, 910, 1656, 1624, 1367, 1705, 608, 596, 417, 1772, 1337, 1551, 139, 1770, 350, 1805, 1651, 1184, 2388, 1483, 2095, 905, 1358, 655, 1584, 656, 133, 2184, 1245, 1782, 359, 1674, 487, 1072, 2062, 803, 1785, 1775, 452, 124, 891, 308, 519, 153, 426, 41, 1920, 2269, 109, 1959, 1267, 904, 132, 302, 1701, 203, 1527, 1797, 1998, 1743, 610, 1592, 476, 2093, 979, 880, 857, 2092, 1079, 176, 1346, 2450, 289, 1619, 115, 1824, 603, 2100, 1773, 1009, 1661, 2016, 1986, 458, 604, 1537, 1789, 2207, 1193, 70, 542, 1251, 69, 1303, 138, 470, 1503, 500, 1142, 306, 682, 1779, 1640, 1915, 950, 1077, 2190, 1918, 506, 854, 704, 779, 719, 1898, 701, 102, 554, 578, 646, 1097, 1957, 459, 1919, 2188, 479, 389, 1914, 647, 1276, 1905, 440, 1781, 2101, 2153, 958, 193, 778, 2019, 1849, 392, 1444, 1676, 973, 1158, 541, 379, 112, 277, 2285, 645, 1769, 1455, 826, 88, 2287, 2241, 1839, 119, 1495, 1525, 441, 1485, 1370, 1675, 1826, 1490, 1539, 393, 1355, 1820, 1879, 1362, 1022, 344, 1464, 1360, 1415, 540, 252, 67, 2094, 2022, 2527, 1359, 2274, 1549, 1171, 490, 671, 129, 2630, 1896, 1771, 2286, 1526, 1873, 451, 747, 573, 118, 
1045, 454, 107, 1883, 1583, 735, 2189, 1571, 1700, 113, 2323, 676, 294, 630, 1899, 739, 1895, 830, 1309, 612, 2284, 1900, 1962, 581, 1414, 2256, 921, 1917, 329, 935, 2242, 1191, 2528, 1543, 1168, 2060, 1880, 1963, 1916, 1882, 2235, 1961, 1878, 2061, 1884, 410, 661, 2699, 1237, 1901, 371, 720, 1269, 514, 753, 1881, 2375, 516, 1354, 703, 801, 1885, 1441, 1553, 2644, 1958, 1954, 249, 1028, 1955, 97, 1353, 1956, 1226, 750, 1179, 2271, 648, 1707, 807, 241, 1128, 590, 2275, 50, 2272, 265, 1060, 2273, 1009, 1158, 1193, 1245, 1251, 1367, 1490, 1551]",1,6.0,22.0,300.0,0.6735598227474151,, -"[869, 563, 2117, 1440, 318, 24, 1508, 1636, 778, 1299, 1405, 1337, 1919, 1810, 2118, 232, 2405, 2154, 1592, 229, 770, 13, 2045, 1370, 2485, 211, 2305, 1849, 1120, 1171, 2034, 1494, 235, 1909, 779, 729, 1039, 1732, 382, 424, 1464, 1535, 2153, 1174, 88, 1583, 2178, 74, 1110, 1241, 143, 1982, 1661, 201, 835, 633, 1851, 1351, 158, 181, 1665, 1587, 1820, 1625, 1914, 280, 261, 2195, 539, 711, 483, 2394, 1421, 1979, 1410, 1453, 1675, 231, 471, 767, 1980, 816, 699, 1729, 586, 1852, 818, 1463, 215, 973, 498, 643, 1848, 1846, 1660, 664, 1479, 1323, 1894, 2010, 1291, 682, 885, 1402, 955, 335, 1839, 1927, 1172, 1701, 1842, 252, 519, 1100, 416, 2135, 1482, 11, 1403, 1853, 1521, 1222, 2293, 1118, 239, 737, 1203, 1003, 1149, 1908, 1333, 2295, 1977, 1224, 2152, 2384, 1999, 1052, 204, 532, 467, 1212, 1930, 2132, 747, 1309, 1075, 1152, 1616, 1676, 1121, 2312, 433, 2281, 436, 461, 2151, 2110, 702, 1840, 2109, 1683, 2014, 1677, 2155, 69, 1507, 1013, 1527, 2078, 1847, 1974, 547, 1690, 851, 300, 878, 708, 578, 454, 2423, 1133, 749, 2112, 738, 826, 2136, 1655, 93, 2396, 1538, 1332, 1975, 216, 2282, 993, 868, 2166, 2316, 988, 510, 1525, 536, 388, 554, 1526, 2403, 784, 1928, 575, 61, 205, 506, 880, 32, 242, 415, 157, 1382, 30, 1879, 446, 836, 1341, 1373, 2182, 2162, 874, 343, 226, 1334, 2283, 1799, 504, 1645, 1023, 1572, 1981, 118, 1049, 490, 1029, 1708, 304, 518, 1143, 2099, 1973, 1845, 1637, 2156, 697, 2185, 
1343, 822, 581, 255, 2164, 1500, 1844, 1644, 2276, 525, 1841, 668, 2294, 1131, 1978, 2030, 1850, 1080, 838, 908, 2343, 1843, 285, 149, 334, 530, 1293, 1195, 2280, 1215, 279, 848, 1857, 842, 2344, 771, 666, 593, 2105, 966, 2475, 270, 1058, 1976, 2165, 147, 502, 874, 1023, 1052, 1075, 1100, 1212, 1241, 1299, 869, 874, 1023, 1052, 1075, 1100, 1212, 1241, 547, 563, 633, 664, 699, 729, 767, 784, 231, 232, 235, 318, 335, 343, 416, 532, 143, 157, 158, 201, 869, 874, 1023, 1052, 1075, 1100, 1212, 1241, 547, 563, 633, 664, 699, 729, 767, 784, 231, 232, 235, 318, 335, 343, 416, 532, 143, 157, 158, 201, 869, 874, 1023, 1052, 1075, 1100, 1212, 1241, 547, 563, 633, 664, 699, 729, 767, 784, 231, 232, 235, 318, 335, 343, 416, 532, 143, 157, 158, 201]",1,6.0,28.0,300.0,0.6946085672082718,, -"[91, 2123, 2122, 2380, 2555, 2064, 2338, 482, 665, 2063, 185, 760, 2379, 2301, 256, 1110, 536, 327, 285, 2340, 1513, 2121, 572, 2325, 901, 2509, 2199, 885, 290, 445, 671, 544, 879, 576, 456, 1014, 314, 1120, 180, 157, 567, 1279, 218, 1810, 2326, 1858, 2028, 985, 1894, 1912, 316, 2044, 2339, 2378, 1325, 1996, 2003, 110, 1262, 2001, 2025, 332, 1558, 1488, 1820, 1119, 1991, 2008, 2034, 2026, 2027, 429, 733, 297, 1812, 849, 267, 827, 792, 1999, 396, 2186, 1197, 598, 2002, 1434, 2004, 383, 2000, 2347, 756, 1701, 1446, 2381, 149, 1583, 1987, 652, 306, 1870, 2568, 1738, 2348, 2045, 109, 1781, 1239, 1358, 1998, 1704, 1295, 1191]",1,9.0,8.0,250.0,0.6827917282127031,, -"[1567, 2668, 2500, 2667, 722, 1708, 1358, 14, 1710, 2045, 2335, 603, 935, 2002, 2314, 1662, 1800, 310, 2001, 2041, 605, 2034, 1666, 2236, 2691, 1172, 1950, 1072, 1239, 2309, 2164, 1810, 2582, 733, 1577, 88, 1894, 1140, 2568, 218, 230, 669, 2303, 2501, 417, 206, 2493, 973, 1446, 716, 109, 1106, 332, 1013, 873, 311, 1874, 2327, 2534, 687, 277, 553, 306, 199, 1, 2532, 1907, 2379, 304, 1583, 2698, 2023, 1055, 467, 2004, 699, 1149, 54, 45, 1603, 228, 151, 1465, 1171, 2206, 944, 1852, 1124, 1445, 175, 2359, 0, 420, 2388, 1159, 1570, 570, 1221, 
476, 2003, 1812, 1346, 962, 1488, 2, 743, 1241, 2372, 1873, 1147, 2117, 2471, 1709, 1337, 1542, 1869, 2217, 71, 1857, 1865, 1995, 681, 1095, 1856, 13, 68, 767, 158, 1821, 318, 102, 729, 1166, 1334, 920, 343, 1316, 180, 1097, 673, 2430, 1799, 215, 176, 1127, 1107, 2008, 566, 1862, 1998, 563, 2412, 1023, 363, 2706, 366, 1823, 745, 1987, 2009, 160, 1820, 1573, 2140, 17, 792, 48, 2138]",1,9.0,12.0,300.0,0.7104874446085672,, -"[231, 232, 2126, 1812, 1111, 179, 2125, 2509, 2034, 1235, 2124, 2129, 366, 181, 1625, 1583, 1464, 1989, 869, 529, 2127, 1479, 2009, 1987, 1875, 43, 2001, 275, 205, 201, 460, 2519, 1273, 2035, 2008, 1334, 334, 1644, 13, 1241, 506, 767, 2000, 2006, 1293, 1403, 1661, 2260, 2399, 375, 1988, 318, 1999, 1820, 532, 1995, 261, 563, 1998, 391, 2045, 1337, 24, 1729, 2007, 2003, 1876, 1643, 160, 870, 1530, 2122, 1120, 442, 1665, 745, 2130, 158, 1848, 908, 1440, 805, 1462, 1843, 271, 68, 1873, 2335, 1851, 519, 1094, 1127, 157, 2002, 235, 1149, 1119, 2004, 2, 699, 416, 2128, 1889, 467, 2291, 143, 2236, 2301, 1422, 1636, 792, 743, 1224, 2434, 1701, 627, 836, 1558, 1013, 1443, 1212, 1166, 1986, 1052, 215, 1870, 1968, 2400, 681, 1967, 1434, 1907, 2401, 1265, 258, 1818, 1675, 1842, 1417, 1671, 1574, 1529, 2645, 1365, 210, 566, 1100, 2268, 2005, 673, 874, 1663, 69, 152, 2350, 1847, 335, 1682, 1970, 963, 1452, 1023, 1709, 348, 2240, 1453, 151, 837]",1,9.0,12.0,350.0,0.7186115214180206,, -"[2113, 158, 704, 1681, 791, 1743, 562, 114, 969, 1735, 2042, 2288, 1583, 2025, 1784, 2318, 1560, 1077, 1336, 2130, 2252, 1499, 2407, 544, 1878, 1665, 1026, 826, 2287, 2039, 1767, 2326, 1701, 2145, 2256, 1983, 441, 2481, 1561, 1012, 1412, 1620, 14, 1807, 2300, 695, 1358, 1726, 135, 1282, 407, 935, 234, 644, 1889, 945, 1891, 880, 382, 1313, 389, 2096, 215, 682, 228, 2324, 610, 330, 671, 1013, 2, 337, 1089, 399, 2019, 1283, 1609, 943, 1564, 121, 1273, 905, 661, 1803, 742, 86, 691, 699, 1783, 2329, 1459, 2382, 1722, 359, 831, 1061, 1616, 191, 1812, 754, 2034, 2041, 189, 68, 741, 2038, 
2251, 314, 2095, 18, 1852, 1768, 1465, 231, 1552, 1142, 1002, 2208, 1483, 1196, 1791, 643, 151, 36, 1733, 2327, 1851, 617, 1262, 1070, 2107, 2036, 141, 638, 505, 2040, 218, 409, 1776, 454, 102, 341, 2075, 1801, 530, 2037, 980, 1740, 808, 1258, 1725, 2026, 1790, 2082, 1871, 1497, 1311, 1448, 1986, 573, 1654, 1774, 2106, 1998, 118, 1799, 1856, 2094, 416, 1769, 459, 224, 1546, 1968, 814, 1146, 133, 1929, 1623, 973, 1797, 1009, 1319, 1614, 244, 2035, 1505, 1618, 1270, 1015, 289, 1810, 2505, 2091, 841, 2153, 2143, 1782, 963, 1781, 1197, 1661, 581, 836, 1699, 1158, 1773, 1786, 2451, 2545, 2083, 1574, 859, 1772, 1335, 1778, 71, 1072, 1894, 406, 1251, 762, 556, 318, 1337, 2093, 960, 2086, 49, 563, 1248, 2045, 2080, 852, 589, 2495, 1622, 1490, 303, 277, 1779, 45, 10, 1640, 1789, 1030, 603, 2504, 1785, 1770, 1800, 1670, 2016, 630, 1777, 2425, 1788, 1780, 2090, 484, 608, 542, 1787, 1045, 1289, 452, 2087, 2494, 350, 1367, 1830, 1792, 1144, 387, 476, 417, 176, 134, 1804, 139, 773, 20, 2084, 1245, 138, 112, 2085, 1771, 308, 660, 1478, 153, 1775, 103, 126, 655, 384, 2081, 1802, 656, 1584, 2089, 487, 1346, 1199, 2322, 1651, 1798]",1,9.0,22.0,350.0,0.7548005908419497,, -"[95, 2183, 963, 2395, 146, 1152, 897, 2102, 2054, 203, 724, 1351, 2024, 733, 1869, 1421, 478, 1420, 514, 382, 1818, 673, 1923, 2385, 1072, 935, 405, 1097, 305, 1628, 1501, 1884, 651, 456, 1519, 1285, 504, 2276, 1104, 1838, 2059, 1043, 1268, 1139, 1239, 1928, 674, 69, 423, 842, 1021, 453, 661, 1983, 2133, 880, 2282, 2231, 438, 1396, 2396, 2228, 94, 1703, 2103, 1416, 118, 2181, 1912, 1526, 1142, 2274, 516, 2283, 120, 1085, 738, 1986, 681, 306, 2034, 347, 2306, 1540, 1907, 470, 142, 25, 1998, 1964, 2397, 771, 1137, 1203, 1278, 1929, 1927, 2180, 634, 1156, 769, 2109, 717, 277, 1515, 1338, 2172, 1968, 1667, 2309, 1027, 161, 2405, 809, 454, 52, 2326, 2190, 1972, 1435, 1344, 2153, 571, 2394, 1842, 408, 1399, 2467, 2001, 2057, 2198, 471, 74, 1999, 2450, 255, 2155, 151, 995, 994, 1115, 1402, 1013, 32, 604, 2131, 525, 529, 
2260, 191, 2182, 2071, 725, 1583, 1566, 1914, 566, 1882, 973, 1417, 2259, 1358, 2011, 1849, 608, 1974, 2336, 2335, 1924, 718, 787, 645, 1906, 1978, 2305, 1570, 1977, 160, 1655, 1224, 60, 1850, 1494, 461, 743, 666, 65, 249, 1984, 1985, 1481, 630, 1385, 2281, 2232, 61, 745, 682, 816, 827, 885, 1332, 1538, 2419, 1625, 2072, 55, 1092, 1981, 696, 687, 1898, 1487, 2021, 748, 2178, 1452, 586, 1979, 440, 1671, 300, 252, 2016, 93, 1495, 1732, 2010, 887, 2022, 424, 2185, 1309, 483, 1527, 2136, 910, 2013, 1980, 627, 950, 115, 2015, 1930, 841, 1614, 818, 1079, 737, 1644, 510, 1630, 1215, 1843, 1759, 1500, 858, 1741, 162, 671, 1376, 2315, 600, 1133, 1791, 1171, 1848, 176, 2019, 1266, 836, 1269, 1905, 1131, 2017, 2359, 2233, 2280, 1847, 1677, 756, 2386, 2137, 2418, 878, 1713, 1652, 1121, 239, 1174, 1474, 415, 325, 1295, 593, 851, 437, 649, 433, 619, 2593, 1039, 2020, 1218, 210, 490, 1288, 242, 1908, 838, 621, 2261, 436, 1982, 1394, 1069, 1220, 1529, 498, 309, 2018, 88, 2350, 1274, 130, 323, 1355, 204, 356, 2342, 1259, 211, 1668, 966, 815, 1855, 1696, 2014, 164, 1626, 668, 331, 76, 1178, 860, 1909, 2012, 1658, 272, 2116, 666, 52, 95, 858, 1139, 1285, 1515, 1838, 2054, 2057, 2172, 2180, 2181, 2183, 666, 52, 95, 858, 1139, 1285, 1515, 1838, 2054, 2057, 2172, 2180, 2181, 2183, 1999, 32, 1849, 142, 347, 408, 423, 456, 1540, 1628, 674, 717, 461, 994]",1,9.0,28.0,350.0,0.7496307237813885,, -"[2358, 1460, 1185, 792, 1453, 1844, 1434, 672, 1558, 476, 335, 391, 68, 519, 179, 1692, 1661, 1859, 968, 1574, 756, 2349, 899, 1849, 743, 2000, 1023, 836, 2346, 45, 306, 1709, 460, 1846, 1729, 2, 2155, 1843, 1851, 1919, 1986, 1370, 2154, 779, 454, 1625, 778, 376, 324, 1469, 1660, 1291, 1149, 1697, 1131, 2590, 215, 2153, 1464, 2156, 586, 2045, 181, 239, 1840, 530, 681, 673, 1013, 1239, 65, 69, 510, 1587, 1072, 1975, 2021, 1841, 151, 74, 1525, 160, 2094, 1118, 214, 1847, 1521, 2194, 1537, 1894, 71, 1293, 2282, 1848, 220, 277, 2195, 1121, 2193, 1644, 2184, 487, 70, 1224, 566, 1850, 1455, 1845, 1704, 
880, 1812, 745]",1,12.0,8.0,200.0,0.6621122599704579,, -"[1077, 389, 699, 1546, 1013, 341, 1583, 2107, 1313, 1311, 2327, 1336, 36, 133, 1871, 754, 1784, 1743, 905, 1986, 969, 1358, 1574, 1661, 1791, 2407, 1800, 289, 1735, 359, 1249, 1852, 1740, 1701, 2113, 2034, 1505, 191, 459, 2019, 118, 1012, 2326, 2096, 115, 1777, 1801, 630, 1142, 1248, 704, 773, 661, 1620, 1616, 742, 332, 2318, 1282, 215, 176, 2494, 1725, 671, 252, 505, 880, 1776, 2208, 2478, 1448, 1459, 643, 610, 1412, 91, 1465, 1483, 409, 1998, 603, 1337, 2066, 454, 2081, 935, 239, 384, 1781, 573, 1726, 1571, 973, 1070, 562, 417, 2451, 836, 102, 1670, 2045, 244, 1804, 1733, 682, 2083, 563, 476, 484, 2093, 2094, 1856]",1,12.0,8.0,300.0,0.6776218611521418,, -"[2125, 2405, 1535, 1111, 74, 1152, 1416, 2178, 2501, 1851, 1979, 171, 120, 280, 1309, 1927, 1072, 2309, 1907, 1106, 1797, 483, 1119, 2293, 1351, 48, 1548, 1919, 2126, 210, 2025, 1013, 2099, 1494, 2117, 316, 498, 1980, 1924, 816, 310, 598, 1118, 2124, 733, 1110, 1975, 146, 88, 1587, 1630, 1583, 415, 1039, 885, 1370, 2118, 1219, 1879, 2129, 2152, 2453, 1508, 1080, 180, 229, 2151, 2033, 2521, 118, 1950, 778, 2308, 2424, 429, 1291, 705, 461, 1424, 1526, 1149, 1192, 868, 779, 109, 1012, 1332, 2154, 826, 1303, 829, 149, 186, 1978, 1666, 519, 1329, 1973, 2164, 740, 1572, 1848, 2075, 770, 306, 1273, 1405, 1806, 1204, 1269, 1097, 586]",1,12.0,8.0,350.0,0.6698670605612999,, -"[2436, 989, 1224, 1525, 1826, 1919, 2485, 1829, 1131, 778, 472, 1133, 2699, 415, 779, 903, 88, 1824, 1500, 2185, 360, 1842, 1975, 1370, 1645, 87, 161, 645, 1914, 56, 1182, 2153, 682, 1851, 1507, 1583, 1543, 836, 252, 1538, 1825, 1828, 1885, 1408, 1792, 2630, 1409, 661, 331, 842, 1183, 685, 2242, 1827, 1899, 2010, 2112, 2061, 1013, 2016, 701, 1839, 1527, 2030, 446, 1579, 807, 860, 2062, 69, 1503, 500, 1881, 388, 2241, 2497, 1549, 1616, 935, 119, 854, 2207, 2450, 344, 1343, 735, 1191, 554, 979, 1668, 306, 1226, 2286, 109, 129, 255, 60, 524, 1926, 1884, 1171, 739, 1072, 1901, 643, 128, 1682, 1537, 
2235, 676, 880, 2189, 2094, 1266, 20, 118, 2270, 282, 1303, 1661, 847, 176, 747, 1355, 370, 1690, 2269, 1395, 392, 1455, 1955, 1956, 2316, 2060, 1237, 441, 2165, 2374, 973, 2274, 440, 1553, 2628, 671, 1354, 2284, 1414, 67, 2184, 193, 113, 1917, 2166, 2457, 1957, 1415, 70, 1619, 581, 323, 277, 1029, 1168, 1179, 115, 2375, 630, 1958]",1,12.0,12.0,200.0,0.6787296898079763,, -"[1139, 2182, 2172, 2054, 2011, 2039, 142, 1909, 2042, 2057, 525, 25, 604, 69, 596, 1842, 95, 1665, 1658, 1849, 454, 356, 1487, 1807, 1635, 382, 1448, 1869, 1703, 2116, 1540, 130, 724, 1220, 2137, 2040, 955, 1674, 1889, 2388, 2053, 1274, 1042, 1270, 506, 921, 1713, 1539, 2394, 1676, 1203, 437, 2130, 120, 2041, 2405, 2059, 543, 2467, 887, 1917, 1376, 1293, 1677, 2012, 86, 2295, 1675, 211, 1915, 691, 1336, 1628, 41, 1918, 1891, 1359, 57, 2056, 1920, 239, 910, 175, 2190, 1741, 417, 791, 1654, 14, 1394, 1485, 1497, 224, 572, 1914, 1968, 718, 697, 1848, 1464, 1069, 743, 146, 2017, 1927, 2019, 158, 2055, 566, 945, 1399, 2034, 619, 300, 141, 1982, 2018, 1985, 1152, 1652, 1026, 415, 1131, 2418, 1759, 1344, 1410, 696, 2155, 2315, 1002, 88, 1975, 1110, 2022, 1309, 816, 1906, 461, 1592, 1928, 1979, 1417, 851, 1929, 2036, 2015, 498, 483, 738, 65, 2419, 2035, 1980, 2450, 1908, 61, 779, 1288, 2162, 1021, 1919, 424, 2309, 2010, 1370, 2016, 1174]",1,12.0,12.0,300.0,0.6765140324963073,, -"[327, 2063, 91, 2044, 2339, 2123, 429, 2412, 1358, 2303, 2340, 553, 2064, 2380, 1325, 1295, 2582, 722, 1119, 733, 1013, 206, 1522, 1197, 314, 766, 1221, 1738, 2501, 827, 197, 465, 2667, 879, 1739, 2691, 2509, 2500, 1171, 1124, 1583, 2381, 2121, 493, 1106, 191, 1191, 960, 1996, 387, 396, 228, 1950, 1055, 1577, 2534, 218, 574, 230, 2573, 1894, 2335, 10, 1567, 985, 1097, 1907, 2001, 383, 54, 1140, 45, 1219, 2668, 2338, 1697, 310, 2117, 1603, 2325, 1, 1666, 836, 2348, 2365, 1072, 277, 199, 1346, 1149, 1558, 2217, 1709, 1454, 603, 1166, 1874, 1987, 669, 8, 71, 2555, 482, 1997, 873, 1995, 0, 306, 298, 1066, 2045, 1708, 1998, 2532, 
1127, 1781, 2003, 1159, 2009, 417, 366, 1999, 445, 68, 1147, 1862, 109, 2379, 2000, 1615, 476, 160, 2008, 2077, 1095, 420, 1542, 1988, 1347, 1075, 316, 849, 784, 1488, 391, 2, 2034, 1810, 1846, 745, 519, 566, 88, 285, 1670, 1877, 1239, 2357, 1445, 968, 729, 14, 792, 598, 673, 681, 1869, 1446, 297, 1241, 151, 695, 1812, 1868, 899, 1334, 818, 1991, 570, 743, 267, 332, 1800, 2253, 2550, 179, 2002, 1855, 1434, 767, 1574, 231, 1479, 2326, 503, 2238, 1704, 2613, 1465, 102, 1941, 926, 2378, 1856, 962, 1867, 304, 1014, 2236, 2430, 1993, 1871, 176, 1865, 460, 2004, 1665, 1337, 2327, 699, 169, 1799, 1864, 261, 318, 1912, 1570, 563, 1857, 2314, 1185, 1299, 1873, 1172, 216, 1636, 1870, 1820, 1335, 1852, 2096, 2309, 201, 13, 180, 215, 2139, 2141, 203, 467, 1994, 869, 158, 935, 311, 157, 547, 1858, 1992, 1875, 143, 1986, 24, 2568, 633, 1100, 1863, 232, 551, 416, 343, 1876, 1989, 1859, 1023, 532, 2493, 1440, 1701, 1866, 874, 1854, 205, 2007, 1853, 1453, 235, 1212, 1323, 17, 2005, 664, 2006, 226, 1990, 1872, 335, 1860, 1052, 1861, 1197, 1781, 2326, 285, 818, 827, 267, 314]",1,12.0,22.0,300.0,0.6746676514032496,, -"[2, 332, 1983, 277, 45, 55, 2004, 74, 1703, 1421, 1987, 748, 733, 68, 1702, 286, 973, 6, 1085, 2228, 471, 1420, 436, 842, 1051, 1906, 1993, 30, 724, 1358, 1399, 665, 2072, 681, 1602, 1709, 149, 1396, 179, 1515, 22, 1924, 1869, 698, 1990, 706, 2240, 1558, 566, 2274, 2005, 1925, 196, 1023, 2237, 2120, 1875, 118, 2395, 950, 152, 476, 2133, 1013, 1969, 2008, 805, 210, 232, 787, 968, 600, 1848, 1844, 673, 963, 1838, 516, 391, 1118, 1239, 1171, 588, 1926, 1127, 2260, 2040, 2034, 1994, 25, 897, 1452, 201, 2231, 160, 2385, 651, 1923, 792, 2009, 899, 1982, 1344, 442, 2000, 272, 60, 519, 1898, 1131, 745, 1812, 146, 1652, 1095, 161, 1042, 2394, 1995, 1566, 2238, 1149, 1989, 1468, 1416, 2396, 1979, 868, 48, 1487, 2011, 1986, 1224, 1574, 1704, 460, 2102, 2071, 323, 571, 2122, 1259, 1998, 1453, 255, 2236, 1732, 529, 1922, 1988, 1435, 1008, 2013, 95, 1668, 1667, 514, 1417, 405, 2131, 
366, 2137, 2233, 2059, 2054, 438, 71, 1996, 2016, 696, 2003, 743, 1494, 2229, 1625, 1713, 382, 1419, 2593, 2012, 1139, 809, 504, 1570, 1741, 995, 2182, 2103, 1015, 854, 714, 1338, 498, 191, 1909, 203, 1882, 666, 1628, 2232, 2006, 1912, 437, 1434, 815, 1527, 266, 1166, 1997, 608, 857, 1992, 335, 1876, 1843, 1644, 1985, 215, 429, 2276, 2198, 1097, 356, 1850, 2024, 1847, 162, 2362, 2397, 456, 1870, 2010, 2116, 52, 2017, 2007, 2015, 490, 1268, 1991, 1791, 2239, 1873, 1697, 331, 1859, 841, 2172, 858, 164, 1027, 627, 130, 142, 151, 572, 1540, 2018, 2181, 1495, 860, 305, 1999, 2123, 1285, 1921, 2450, 239, 1984, 2183, 2066, 1288, 1394, 470, 1385, 2305, 1428, 548, 1309, 91, 1534, 61, 718, 2335, 1696, 88, 2467, 2057, 1266, 211, 2259, 737, 1630, 1519, 1269, 2524, 1658, 1202, 2380, 2002, 76, 525, 147, 2180, 1174, 2640, 32, 1974]",1,12.0,22.0,350.0,0.7492614475627769,, -"[1337, 1616, 563, 109, 56, 412, 415, 1045, 88, 447, 539, 124, 2056, 1248, 139, 1402, 1772, 604, 126, 850, 736, 1651, 308, 318, 935, 341, 280, 120, 306, 603, 2110, 1787, 236, 94, 2109, 734, 748, 366, 153, 1118, 1918, 1914, 2357, 1701, 2274, 1152, 712, 95, 1535, 830, 1847, 693, 465, 1482, 2024, 1867, 1270, 973, 2293, 1676, 1974, 133, 2013, 1105, 2094, 1849, 1810, 408, 1568, 1448, 2394, 2136, 1652, 749, 2418, 1289, 2045, 1843, 593, 1644, 1421, 1880, 1592, 1882, 1358, 1309, 454, 97, 1881, 1572, 894, 747, 1359, 1927, 1841, 2485, 1494, 2071, 149, 1622, 498, 324, 2189, 1351, 519, 2117, 1353, 65, 651, 506, 1920, 1352, 1846, 1878, 2194, 1791, 1505, 1732, 98, 1661, 2135, 553, 578, 2405, 74, 1850, 2078, 69, 2025, 376, 118, 1975, 93, 1464, 1218, 1675, 483, 699, 661, 215, 1074, 334, 1332, 835, 2156, 239, 816, 1776, 1926, 2155, 2419, 1403, 1884, 885, 1784, 1851, 1293, 1583, 1885, 1919, 1845, 1566, 1842, 1487, 1973, 772, 1857, 586, 880, 682, 266, 2153, 1080, 1121, 1525, 1521, 1840, 1995, 1980, 1370, 262, 1120, 778, 530, 779, 1729, 1377, 1683, 2282, 1979, 1127, 1028, 1376, 1655, 708, 2151, 2305, 1538, 471, 908, 1848, 2294, 
325, 809, 702, 1203, 1410, 807, 181, 1894, 1930, 2317, 1110, 11, 925, 993, 2281, 1883, 1839, 344, 666, 2295, 32, 878, 836, 1222, 1039, 1382, 657, 1143, 1909, 424, 1171, 826, 1291, 2423, 490, 1224, 1660, 2283, 436, 518, 1013, 1976, 2154, 1879, 304, 279, 1343, 1133, 575, 510, 270, 2403, 1195, 643, 2164, 1625, 1215, 1844, 1058, 2276, 470, 2344, 1587, 2475, 1981, 2280, 668, 2185, 1131, 2165, 1341, 502, 1172, 1097, 2099, 868, 1115, 285, 211, 1463, 1500, 2105, 2384, 433, 2343, 1978, 1982, 838, 988, 2152, 2425, 203, 242, 1529, 2426, 2385, 1526, 1977, 139, 153, 215, 236, 306, 308, 318, 341, 139, 153, 215, 236, 306, 308, 318, 341, 2357, 56, 412, 447, 1487, 1616, 126, 133, 734, 736, 2056, 120, 894, 1701, 1867, 2117, 109, 454, 1074, 657, 139, 153, 215, 236, 306, 308, 318, 341, 2357, 56, 412, 447, 1487, 1616, 126, 133, 734, 736, 2056, 120, 894, 1701, 1867, 2117, 109, 454, 1074, 657, 139, 153, 215, 236, 306, 308, 318, 341, 2357, 56, 412, 447, 1487, 1616, 126, 133, 734, 736, 2056, 120, 894, 1701, 1867, 2117, 109, 454, 1074, 657]",1,12.0,28.0,300.0,0.6816838995568686,, -"[498, 139, 103, 2045, 154, 1358, 989, 519, 88, 1367, 1368, 1705, 1624, 2015, 1726, 1070, 1284, 1338, 1914, 350, 1645, 426, 138, 302, 910, 1572, 1193, 2107, 644, 2436, 711, 655, 2102, 1998, 887, 1470, 2553, 308, 1527, 1309, 969, 487, 1500, 379, 2113, 1158, 1849, 1505, 2485, 836, 417, 1620, 1131, 1115, 2264, 1357, 995, 323, 447, 1735, 1842, 1174, 1188, 1592, 562, 435, 133, 1013, 1133, 2388, 126, 563, 1022, 719, 1656, 608, 2497, 1282, 2029, 124, 2185, 1789, 236, 1226, 1584, 56, 318, 685, 1787, 1651, 1224, 1769, 412, 1609, 656, 693, 2083, 1785, 289, 161, 945, 490, 2316, 1772, 539, 597, 604, 87, 2009, 2026, 1444, 1307, 1848, 1337, 2027, 1498, 153, 2050, 742, 69, 1549, 1912, 1104, 643, 1072, 452, 1773, 1911, 121, 1251, 459, 1623, 252, 1851, 573, 306, 2103, 728, 1507, 2285, 1840, 754, 1538, 113, 1781, 1346, 1959, 1805, 1956, 112, 1551, 102, 1146, 2274, 704, 1955, 2450, 1483, 1779, 1552, 963, 1245, 747, 514, 1089, 1110, 
1882, 1187, 1743, 1901, 1962, 1640, 1652, 2010, 542, 1792, 1120, 109, 530, 516, 842, 1571, 1412, 830, 1616, 1526, 753, 1343, 905, 2093, 203, 1009, 1583, 958, 388, 2112, 119, 1917, 2095, 2025, 1269, 847, 2016, 2071, 554, 1097, 2086, 329, 540, 1954, 646, 359, 470, 1543, 1566, 1402, 880, 2153, 36, 1704, 1661, 682, 807, 1972, 1953, 1869, 2256, 1077, 1435, 1246, 1555, 1045, 1490, 2275, 2271, 1276, 294, 2208, 2404, 479, 1957, 1880, 903, 755, 1884, 1899, 1128, 1249, 578, 1879, 1926, 2100, 2092, 572, 2363, 590, 249, 857, 581, 1873, 279, 1878, 2273, 2022, 1910, 255, 476, 2028, 2189, 1266, 725, 854, 446, 2060, 277, 125, 1237, 2030, 2644, 1885, 2165, 735, 389, 1441, 739, 1958, 935, 2062, 35, 610, 720, 2061, 371, 454, 118, 1913, 652, 671, 1296, 1707, 265, 701, 2188, 1395, 2272, 1881, 1137, 648, 50, 176, 1690, 241, 393, 1619, 895, 193, 750, 1060, 129, 973, 1700, 149, 2179, 451, 97, 1353, 661, 1682, 2094, 440, 2106, 2457, 2190, 458, 2166, 441, 1029, 630, 2184, 1142, 344, 2019, 1537, 115, 70, 2323, 1883, 1122, 1028, 1455, 711, 279, 103, 112, 126, 138, 139, 294, 447, 539, 1402, 1849, 2050, 693, 711, 279, 103, 112, 126, 138, 139, 294, 447, 539, 1402, 1849, 2050, 693, 1224, 2485, 747, 1131, 1133, 1500, 1842, 2185, 56, 412, 87, 161, 836, 1645]",1,12.0,28.0,350.0,0.7082717872968981,, -"[1375, 2586, 1375, 2586, 1375, 2586, 1375, 2586, 1375, 2586]",0,3.0,22.0,300.0,0.09785819793205318,, -"[910, 1805, 1776, 556, 1775, 2045, 306, 887, 660, 1251]",0,6.0,2.0,50.0,0.09010339734121123,, -"[158, 14, 1628, 2001, 1907, 2295, 435, 1880, 1848, 1655, 2003, 1152, 1131, 1889, 2130, 95, 1968, 164, 553, 456, 1224, 897, 2131, 1996, 210, 1909, 1891, 2057, 1973, 56, 146, 661, 1166, 1434, 725, 2008, 2071, 1873, 68, 753, 1540, 681, 408, 2022, 1758, 1156, 1463, 2021, 2116, 1410, 1314, 1420, 1992, 1121, 1023, 1875, 1416, 771, 842, 2009, 1079, 1574, 1894, 424, 382, 2016, 1997, 673, 2037, 2136, 2024, 1269, 1665, 1991, 2054, 1983, 2002, 224, 391, 792, 2039, 651, 1039, 1149, 1870, 1985, 86, 2133, 2020, 
724]",1,6.0,18.0,350.0,0.6665435745937962,, -"[2174, 2173, 939, 2174, 2174, 2173, 2174, 2173, 2174, 2173]",0,9.0,2.0,150.0,0.08677991137370754,, -"[1343, 1954, 720, 1507, 1690, 1884, 2010, 2112, 252, 1128]",0,9.0,2.0,200.0,0.11152141802067947,, -"[108, 2160, 2157, 537, 2209, 1698, 1647, 898, 1835, 2158, 2161, 2159, 1165, 1504, 1593, 1328, 92, 1836, 145, 2622, 23, 2192, 1327, 144, 495, 213, 144, 2192, 537, 145, 1593, 1327, 1328, 1165, 1698, 2622, 2192, 144, 213, 495, 537, 145, 1593, 1327, 1328, 1165, 1698, 2622, 2192, 144, 213, 495, 537, 145, 1593, 1327, 1328, 1165, 1698, 2622]",0,9.0,12.0,150.0,0.08677991137370754,, -"[1483, 2405, 1980, 704, 1358, 885, 1013, 1535, 878, 1416, 1203, 1743, 2485, 1843, 1420, 270, 1085, 1847, 1851, 1351, 1979, 1039, 1644, 1842, 181, 279, 816, 1118, 424, 1732, 359, 2288, 514, 1810, 1370, 1729, 1402, 1978, 149, 2024, 1421, 539, 118, 2017, 483, 1869, 1908, 454, 1080, 1894, 600, 835, 211, 1394, 1077, 2396, 905, 2450, 116, 2293, 1215, 841, 1839, 2426, 1222, 708, 1163, 74, 239, 2403, 280, 1572, 1587, 204, 1474, 471, 114, 70, 518, 1592, 1464, 2481, 1332, 1195, 2195, 2312, 1848, 1291, 1174, 1676, 724, 234, 2182, 461, 389, 815, 502, 2110, 130, 1713, 1919, 2071, 2162, 1410, 2109, 1958, 1178, 11, 61, 711, 2153, 162, 2281, 436, 1343, 1912, 1487, 2350, 113, 1027, 32, 2155, 306, 510, 836, 702, 610, 1583, 2394, 778, 2154, 2385, 434, 666, 1625, 993, 838, 2280, 1850, 1975]",1,9.0,28.0,350.0,0.6997784342688331,, -"[1705, 139, 1551, 656, 1798, 2046, 215, 2090, 581, 1772]",0,12.0,2.0,100.0,0.09010339734121123,, -"[1032, 587, 1032, 587, 1032, 587, 1032, 587, 1032, 587]",0,12.0,8.0,200.0,0.09785819793205318,, -"[1593, 213, 2209, 144, 1165, 145, 1647, 537, 2192, 1504, 92, 898, 2622, 108, 1698, 1328, 2161, 1836, 2160, 1327, 495, 1835, 2158, 2157, 23, 2159, 2157, 2160, 108, 1835, 2158, 898, 1328, 1647, 1698, 2161, 2159, 23, 2157, 2160, 108, 1835, 2158, 898, 1328, 1647, 1698, 2161, 2159, 23, 2157, 2160, 108, 1835, 2158, 898, 1328, 1647, 1698, 
2161]",0,12.0,12.0,300.0,0.08677991137370754,, -"[1628, 95, 2054, 2183, 1995, 2057, 1985, 2180, 1127, 391, 2009, 1870, 1973, 366, 553, 438, 1558, 2020, 1574, 890, 1139, 215, 1540, 1671, 1838, 456, 1929, 179, 1259, 2232, 1285, 1121, 1880, 68, 792, 519, 2611, 1999, 1453, 604, 681, 32, 460, 2, 45, 745, 2133, 1314, 2172, 151, 142, 335, 1227, 666, 1704, 1869, 753, 2019, 1986, 2072, 2181, 1655, 2233, 718, 1859, 1709, 2116, 504, 1115, 525, 1974, 52, 2182, 1269, 1149, 968, 1131, 2309, 1848, 490, 1023, 2136, 661, 578, 160, 1515, 651, 1652, 232, 1399]",1,12.0,18.0,250.0,0.6613737075332349,, -"[1782, 1798, 2080, 2090, 1775, 910, 350, 608, 1656, 958, 487, 655, 656, 103, 1778, 1072]",0,3.0,2.0,150.0,0.09010339734121123,, -"[109, 487, 426, 2080, 1782, 1998, 236, 1772, 2046, 1798, 608, 519, 1062, 655, 1811, 112]",0,3.0,2.0,350.0,0.08899556868537666,, -"[498, 88, 1919, 778, 56, 2485, 779, 447, 2016, 252, 1402, 701, 2050, 524, 1975, 412, 1370, 415, 279, 2062, 1303, 682, 1882, 1682, 676, 842, 2235, 1525, 1224, 1072, 1537, 505, 87, 989, 119, 109, 1549, 1070, 979, 1455, 176, 2153, 129, 1616, 2094, 1168, 630, 668, 2436, 2165, 1839, 2061, 878, 2019, 711, 1507, 70, 1978, 433, 1142, 203, 1313, 440, 1851, 2699, 2184, 973, 1881, 807, 1955, 323, 2286, 2207, 735, 1583, 1395, 2060, 621, 161, 441, 436, 1045, 344, 693, 671, 573, 1926, 1849, 1171, 20, 193, 1655, 685, 1980, 1503, 1355, 2630, 1914, 880, 1956, 277, 1543, 1167, 1029, 2166, 128, 2405, 2189, 2457, 120, 739, 1404, 857, 118, 2375, 830, 838, 554, 446, 2275, 392, 1128, 1661, 1883, 1878, 1917, 1825, 1707, 1880, 1028, 241, 242, 539, 306, 1645, 1343, 2273, 1842, 50, 265, 483, 1958, 1827, 2010]",1,3.0,18.0,250.0,0.6783604135893648,, -"[2154, 1498, 1500, 2280, 778, 1133, 2162, 1914, 1930, 779, 1525, 1660, 524, 242, 1079, 2185, 838, 1618, 1370, 2267, 2312, 1017, 133, 1505, 2025, 1655, 2490, 1587, 1291, 1039, 651, 11, 738, 1333, 1080, 2266, 1909, 1623, 836, 1131, 239, 733, 2357, 1072, 2263, 1425, 1169, 1919, 1035, 1538, 2358, 1916, 1013, 1927, 
510, 2282, 55, 1121, 1143, 146, 2355, 576, 1966, 2107, 2376, 2590, 94, 827, 1695, 885, 1463, 2059, 276, 192, 948, 2024, 1152, 1801, 437, 1789, 109, 52, 591, 1376, 61, 1110, 1385, 1469, 2262, 399, 1218, 1467, 2281, 878, 1682, 1583, 1889, 1395, 2108, 81, 1089, 552, 2349, 1519, 1358, 979, 438, 711, 771, 2283, 1769, 436, 934, 2180, 1293, 1015, 2386, 2472, 1452, 675, 2001, 2418, 1851, 149, 2163, 1788, 2116, 429, 1848, 2021, 1841, 1839, 2575, 763, 619, 2343, 942, 847, 347, 1309, 490, 2348, 756, 2364]",1,3.0,18.0,350.0,0.6827917282127031,, -"[117, 259, 2537, 117, 259, 2537, 117, 259, 2537, 117, 259, 2537, 117, 259, 2537, 117, 259, 2537, 117, 259, 2537, 117, 259, 2537]",0,6.0,8.0,300.0,0.09785819793205318,, -"[2173, 939, 2174, 2173, 939, 2174, 2173, 939, 2174, 2173, 939, 2174, 2173, 939, 2174, 2173, 939, 2174, 2173, 939, 2174, 2173, 939, 2174]",0,6.0,12.0,200.0,0.08677991137370754,, -"[711, 139, 2485, 2050, 121, 1869, 1787, 1131, 1070, 412, 1789, 2103, 1158, 350, 644, 56, 1624, 1773, 2388, 1309, 393, 719, 747, 490, 252, 903, 1914, 1956, 755, 113, 945, 1955, 452, 2113, 1337, 236, 1954, 1849, 1133, 1848, 302, 540, 1623, 487, 539, 645, 1470, 277, 203, 958, 1592, 132, 656, 1500, 2404, 1358, 910, 1224, 1972, 1146, 1851, 887, 1805, 2026, 742, 1962, 2185, 426, 1357, 652, 836, 435, 1842, 103, 1543, 138, 1898, 2436, 126, 1957, 963, 590, 1435, 1013, 2045, 1174, 2027, 1552, 516, 2010, 1583, 88, 2030, 2644, 1128, 479, 109, 989, 2016, 1769, 1402, 2015, 1089, 1412, 1022, 308, 102, 739, 1785, 1620, 1115, 447, 1338, 685, 289, 2102, 112, 176, 1726, 1226, 1251, 2009, 1343, 1490, 880, 1645, 87, 133, 36, 1640, 1505, 608, 1072, 995, 542, 2553, 563, 578, 655, 1880, 1779, 2093, 1772, 35, 359, 161, 1527, 1616, 728, 2316, 1526, 1566, 153, 498, 1538, 604, 318, 1346, 1269, 1910, 2061, 693, 753, 1193, 2286, 2071, 1704, 1266, 895, 1682, 1840, 720, 1899, 1307, 610, 1735]",1,6.0,22.0,350.0,0.6610044313146234,, -"[1367, 487, 2090, 2080, 2089, 1771, 1681, 399, 118, 426, 1779, 2079, 1805, 556, 306, 
505]",0,9.0,2.0,100.0,0.11853766617429838,, -"[2054, 525, 2155, 2201, 586, 2200, 18, 530, 95, 1786, 1735, 441, 2324, 2186, 406, 2472, 505, 1926, 114, 1118, 454, 2026, 1197, 1042, 1788, 1258, 1282, 667, 1416, 1289, 1851, 1552, 1517, 2288, 484, 1015, 1499, 901, 2145, 118, 1986, 1505, 1283, 2096, 1830, 1560, 142, 215, 826, 1270, 1692, 1013, 808, 2287, 1791, 638, 2034, 36, 337, 1564, 2000, 741, 1483, 2082, 2199, 74, 456, 239, 1852, 1358, 121, 1622, 341, 1776, 1661, 589, 573, 1574, 1448, 2094, 1616, 1580, 2143, 303, 1618, 487, 1705, 2047, 1701, 731, 2318, 831, 2327, 2052, 2326, 1998, 699, 836, 581, 2504, 1681, 1367, 103, 1251, 2080, 2325, 973, 1248, 476, 1778, 680, 1856, 191, 610, 1798, 1782, 542, 1072, 1158, 1009, 1779, 2046, 387, 228, 2106, 1775, 1088, 139, 350, 603, 2107, 1773, 980, 554, 231, 409, 417, 1784, 2085, 416, 695, 887, 2086, 1624, 910, 1871, 1245, 1768, 1089, 960, 2078, 1640, 1670, 2095, 2045, 1799, 1196, 1781, 1490, 1789, 608, 302, 452, 958, 294, 655, 1892, 1777, 1584, 112, 1651, 519, 1792, 1144, 2084, 1780]",1,9.0,22.0,250.0,0.7134416543574594,, -"[335, 1701, 2303, 377, 1508, 2295, 1871, 2291, 897, 48, 45, 1814, 95, 835, 1160, 1692, 1655, 1624, 1502, 1479, 1196, 2117, 2268, 2033, 745, 1666, 383, 1810, 1303, 2021, 401, 740, 525, 2668, 2011, 232, 1477, 302, 215, 622, 1098, 2667, 160, 1505, 2163, 1558, 1119, 460, 743, 1062, 426, 102, 1488, 1892, 2550, 1265, 55, 261, 430, 94, 1682, 201, 2230, 438, 2302, 142, 2075, 519, 1893, 1405, 790, 2126, 2045, 2, 2172, 366, 306, 1474, 2640, 88, 339, 566, 2249, 678, 2118, 1096, 788, 109, 2124, 2336, 1952, 879, 1111, 962, 1531, 1251, 2054, 1890, 589, 770, 2032, 2125, 1324, 2494, 1072, 1983, 549, 2127, 65, 1818, 2129, 1012, 1002, 281, 2307, 2057, 2490, 1671, 1817, 916, 2301, 2053, 1788, 733, 1540, 1510, 2240, 1797, 1812, 693, 1986, 505, 2155, 293, 210, 695, 714, 1894, 482, 229, 476, 417, 2008, 179, 792, 391, 1329, 68, 317, 1665, 2251, 1192, 180, 1273, 1046, 1348, 1382, 224, 1889, 503, 330, 1336, 2076, 2066, 2348, 49, 2077, 
1149, 574, 1020, 71, 813, 2041, 1654, 38, 1448]",1,9.0,22.0,350.0,0.6646971935007385,, -"[2185, 1538, 1655, 2282, 1526, 510, 1171, 1844, 1013, 1500, 1909, 1133, 1810, 446, 2045, 1820, 24, 482, 2123, 429, 2099, 1701, 603, 2155, 1121, 2340, 2339, 1507, 2555, 2121, 2303, 1446, 1864, 2379, 1739, 91, 1800, 2378, 2044, 2550, 1894, 416, 388, 1373, 570, 297, 2025, 2380, 2253, 454, 1224, 467, 869, 191, 1066, 1445, 383, 1191, 1119, 2276, 2613, 879, 255, 988, 1583, 109, 157, 2338, 327, 1295, 230, 1812, 215, 581, 868, 1131, 681, 118, 554, 836, 2348, 2668, 1708, 445, 306, 201, 733, 1203, 1873, 1341, 1644, 598, 185, 2001, 231, 1325, 1843, 1358, 695, 1858, 2430, 1453, 1876, 2063, 1055, 2064, 335, 1859, 1197, 2034, 218, 298, 1857, 935, 1847, 1738, 1095, 2532, 1697, 553, 1343, 1875, 387, 1870, 316, 1998, 1219, 232, 1346, 1709, 1023, 1185, 1558, 2003, 2301, 2381, 206, 2667, 2002, 45, 1912, 1781, 519, 1574, 1996, 1677, 197, 1239, 2365, 1930, 2009, 899, 2412, 1434, 985, 2156, 490, 1850, 746, 1666, 1975, 476, 518, 1149, 8, 2326, 2691, 1704, 1995, 1670, 1127, 1215, 779, 88, 1174, 1525, 1309, 792, 1992, 2151, 1029, 1, 2283, 277, 267, 2008, 2281, 842, 2582, 745, 743, 2178, 160, 460, 2335, 332, 1172, 180, 1347, 851, 68, 2, 2006, 366, 2000, 1997, 1908, 2004, 968, 1907, 1522, 737, 503, 436, 179, 1454, 396, 2568, 300, 71, 574, 2165, 2477, 2014]",1,9.0,28.0,300.0,0.7012555391432792,, -"[2127, 229, 2249, 1268, 788, 1479, 1890, 1797, 2032, 2122, 1531, 1405, 2118, 2274, 1119, 2124, 48, 2117, 261, 1892, 1986, 1251, 1012, 415, 306, 770, 2120, 33, 1149, 2045, 1508, 1701, 505, 2129, 860, 482, 2126, 2033, 430, 1810, 1111, 2125, 1324, 2041, 1952, 2040, 2309, 1527, 1894, 1072, 102, 331, 1889, 2130, 1703, 1929, 733, 1049, 695, 1666, 835, 1336, 962, 162, 1812, 429, 1871, 1448, 88, 566, 1174, 1121, 2035, 536, 1273, 1215, 673, 549, 1807, 1668, 1203, 740, 1655, 1927, 2427, 180, 2075, 912, 498, 1261, 1190, 65, 2155, 1968, 1665, 2668, 1906, 885, 1002, 983, 705, 622, 2039, 436, 158, 37, 417, 1654, 1452, 1671, 
1529, 1893, 2418, 2077, 2017, 841, 437, 239, 2024, 224, 1891, 1218, 2550, 1410, 2059, 916, 722, 791, 1628, 1266, 1567, 1026, 833, 1152, 14, 86, 1178, 2531, 382, 1303, 141, 1468, 2037, 2295]",1,12.0,18.0,200.0,0.6672821270310192,, -"[109, 1863, 532, 1787, 1856, 1785, 563, 1864, 244, 1337, 633, 1869, 318, 417, 1868, 124, 1358, 1781, 1245, 2045, 1623, 1636, 205, 1656, 1665, 317, 1740, 1804, 1871, 215, 2080, 1798, 1870, 231, 1262, 869, 2203, 664, 335, 1867, 2330, 232, 887, 2451, 1820, 1780, 24, 1779, 2331, 1797, 1853, 1212, 1479, 1866, 1196, 409, 1861, 306, 1299, 1241, 1725, 91, 2094, 1035, 1852, 236, 1100, 608, 589, 1854, 1089, 1708, 1873, 1778, 216, 1803, 2034, 1784, 2095, 1846, 139, 1865, 467, 157, 1875, 2601, 2143, 1805, 384, 980, 2122, 1483, 1070, 189, 2106, 261, 1701, 350, 1802, 1453, 542, 465, 1777, 1624, 699, 201, 1733, 1440, 2123, 547, 2591, 1013, 958, 1584, 2144, 1857, 873, 1986, 1799, 1023, 2048, 1367, 13, 1810, 962, 1072, 1055, 342, 945, 2086, 1552, 158, 1801, 1542, 2329, 399, 1800, 1506, 1859, 2046, 407, 1640, 36, 2313, 1012, 729, 874, 2609, 1775, 332, 1705, 973, 1015, 137, 1334, 330, 1558, 2091, 1075, 2107, 2314, 1054, 1221, 1855, 1577, 695, 1874, 1099, 2357, 1488, 103, 1465, 1862, 1323, 2305, 792, 1790, 1786, 530, 1860, 1146, 226, 343, 1278, 2306, 1858, 784, 665, 1052, 1872, 143, 20, 416, 235, 2263, 1348, 948, 576, 2355, 39, 879, 1788, 481, 1219, 2319, 1818, 309, 429, 1020, 994, 2667, 45, 482, 2254, 1704, 1817, 2081, 2268, 78, 38, 2668, 2356, 1062, 863]",1,12.0,28.0,300.0,0.6746676514032496,, -"[160, 2001, 873, 1558, 2002, 1975, 366, 1894, 2009, 1995, 236, 899, 2000, 1812, 792, 1045, 2109, 2034, 133, 669, 745, 1911, 1171, 661, 2003, 304, 1624, 1772, 68, 1779, 570, 262, 297, 1616, 1370, 1577, 968, 1074, 1670, 1149, 1095, 1708, 306, 180, 1072, 124, 2004, 1810, 1352, 1709, 1172, 681, 1542, 2117, 1574, 181, 1950, 1296, 1583, 310, 2070, 2553, 1787, 743, 816, 1993, 962, 491, 1124, 71, 1997, 1988, 56, 0, 153, 1996, 2045, 1351, 2649, 2568, 2582, 1501, 2493, 
1805, 2019, 610, 2134, 1683, 2016, 391, 151, 1652, 1769, 2430, 426, 935, 1697, 1465, 1479, 842, 1221, 277, 14, 2314, 261, 1127, 598, 1998, 1346, 1987, 836, 2653, 1941, 289, 2327, 102, 2069, 1013, 682, 711, 1655, 1159, 412, 1799, 199, 252, 447, 2534, 1615, 630, 1676, 1334, 1857, 1858, 1986, 1224, 1070, 1973, 1661, 2485, 342, 1434, 1665, 1855, 1166, 1785, 702, 767, 729, 454, 2008, 671, 850, 2457, 604, 476, 945, 712, 158, 109, 1535, 519, 1241, 2139, 2068, 1299, 1704, 1856, 2141, 1989, 316, 2071, 201, 1862, 1869, 1377, 467, 1675, 190, 973, 1464, 311, 1402, 1525, 1131, 157, 1915, 699, 506, 1840, 460, 2189, 830, 907, 45, 13, 1023, 2024, 1636, 1789, 459, 2412, 673, 1846, 1849, 2608, 891, 880, 2310, 2282, 1870, 1871, 2132, 2100, 766, 1335, 2007, 1820, 960, 1487, 566, 547, 2006, 1865]",1,12.0,28.0,350.0,0.7156573116691285,, -"[454, 1441, 1538, 1884, 371, 454, 1441, 1538, 1884, 371, 454, 1441, 1538, 1884, 371, 454, 1441, 1538, 1884, 371, 454, 1441, 1538, 1884, 371, 454, 1441, 1538, 1884, 371, 454, 1441, 1538, 1884, 371, 454, 1441, 1538, 1884, 371, 454, 1441, 1538, 1884, 371, 454, 1441, 1538, 1884, 371, 454, 1441, 1538, 1884, 371, 454, 1441, 1538, 1884, 371]",0,,5.0,150.0,0.09785819793205318,0.1, -"[2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 898, 145, 1165, 1836, 2158, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 898, 145, 1165, 1836, 2158, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 898, 145, 1165, 1836, 2158, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 898, 145, 1165, 1836, 2158, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 898, 145, 1165, 1836, 2158, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 898, 145, 1165, 1836, 2158, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 898, 145, 1165, 1836, 2158, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 898, 145, 1165, 1836, 2158, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 898, 145, 1165, 1836, 2158, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 
898, 145, 1165, 1836, 2158, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 898, 145, 1165, 1836, 2158, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 898, 145, 1165, 1836, 2158]",0,,15.0,250.0,0.08677991137370754,1.0, -"[1787, 306, 656, 1774, 476, 1337, 1346, 887, 124, 1158, 1787, 306, 656, 1774, 476, 1337, 1346, 887, 124, 1158, 1787, 306, 656, 1774, 476, 1337, 1346, 887, 124, 1158, 1787, 306, 656, 1774, 476, 1337, 1346, 887, 124, 1158, 1787, 306, 656, 1774, 476, 1337, 1346, 887, 124, 1158, 1787, 306, 656, 1774, 476, 1337, 1346, 887, 124, 1158, 1787, 306, 656, 1774, 476, 1337, 1346, 887, 124, 1158, 1787, 306, 656, 1774, 476, 1337, 1346, 887, 124, 1158, 1787, 306, 656, 1774, 476, 1337, 1346, 887, 124, 1158, 1787, 306, 656, 1774, 476, 1337, 1346, 887, 124, 1158, 1787, 306, 656, 1774, 476, 1337, 1346, 887, 124, 1158, 1787, 306, 656, 1774, 476, 1337, 1346, 887, 124, 1158]",0,,10.0,150.0,0.08936484490398819,5.0, -"[2160, 1328, 1647, 2157, 2192, 1836, 537, 213, 145, 1165, 495, 898, 1698, 2159, 92, 108, 2209, 2158, 144, 1593, 2161, 1327, 1835, 1504, 23, 2622]",0,,10.0,100.0,0.08677991137370754,,0.8 -"[117, 259, 2537]",0,,10.0,250.0,0.09785819793205318,,0.8 -"[376, 1127, 2056, 120, 816, 1538, 1979, 2357, 118, 133, 306, 308, 1448, 1784, 1787, 1856, 2046, 2047, 1163, 252, 1333, 1637, 1976, 711, 1291, 553, 1309, 1980, 2281, 2405, 2276, 1701, 2117, 126, 603, 1248, 1791, 1830, 1463, 1416, 2051, 1682, 96, 231, 2164, 498, 1421, 2110, 682, 1341, 366, 1995, 109, 1358, 1655, 1867, 88, 325, 1909, 32, 666, 139, 318, 1337, 1651, 1772, 1776, 504, 822, 1974, 1258, 847, 1395, 1396, 1222, 65, 465, 2317, 98, 415, 657, 408, 734, 736, 1583, 153, 215, 1841, 341, 563, 699, 1973, 2189, 2026, 490, 2177, 1042, 2052, 2023, 725, 1914, 1013, 69, 1568, 1674, 1849, 1918, 868, 894, 908, 1120, 2419, 1625, 1843, 236, 2045, 1402, 1926, 496, 1852, 2048, 2293, 190, 586, 1527, 1464, 2403, 2194, 175, 596, 1525, 1915, 1919, 334, 836, 1840, 1842, 1847, 1848, 1851, 2135, 2217, 702, 661, 55, 2071, 
2608, 1975, 1487, 2282]",1,,25.0,250.0,0.6451255539143279,,0.8 -"[2287, 963, 1015, 530, 2106, 115, 2329, 2504, 1560, 337, 2096, 960, 189, 134, 1740, 2251, 2143, 359, 36, 1248, 2081, 1465, 399, 1681, 826, 1851, 1197, 441, 135, 252, 1571, 1499, 2324, 244, 773, 141, 158, 121, 980, 905, 969, 1743, 2322, 2326, 1336, 1701, 2451, 556, 1013, 1830, 277, 671, 342, 49, 1070, 945, 1358, 1505, 2382, 754, 1249, 1735, 2083, 1546, 603, 1791, 409, 218, 2075, 1699, 695, 2256, 2082, 1722, 234, 384, 1786, 1618, 831, 2145, 630, 1767, 1768, 880, 2425, 1283, 228, 86, 1725, 1733, 1784, 1803, 814, 2252, 1077, 1564, 2107, 1852, 1311, 114, 2288, 544, 808, 239, 682, 1142, 1270, 2016, 14, 1478, 1635, 1144, 1448, 389, 704, 742, 1282, 2113, 1012, 1335, 215, 341, 1561, 1878, 231, 2481, 317, 1788, 1616, 191, 303, 1810, 314, 935, 1774, 1583, 655, 2153, 661, 20, 1787, 2045, 1412, 1620, 2088, 2208, 1289, 2318, 1089, 1609]",1,,25.0,300.0,0.7019940915805022,,0.8 -"[1798, 1780, 1623, 1782, 1624, 1770, 1705, 236, 958, 1072, 2046, 1245, 1799, 910, 2089, 153, 887, 542, 1772, 1787, 2045, 1561, 1805, 2080, 350, 1584, 139, 581, 1337, 1856, 2085, 1797, 487, 2090, 2078, 426, 1251, 1193, 112, 2048, 519, 126, 1804, 1367, 1769, 1802, 1779, 656, 1448, 308, 563, 1131, 1781, 329, 302, 138, 1743, 459, 1785, 1572]",0,3.0,60.0,300.0,0.09010339734121123,, -"[117, 259, 2537]",0,3.0,180.0,200.0,0.09785819793205318,, -"[106, 2461]",0,6.0,30.0,100.0,0.08677991137370754,, -"[2209, 108, 1647, 1504, 898, 1835, 2157, 2160, 2159, 1328, 1836, 92, 2158, 495, 1327, 1698, 23, 144, 2161, 213, 1593, 2192, 537, 145, 2622, 1165]",0,6.0,30.0,250.0,0.08677991137370754,, -"[1798, 2080, 1656, 1782, 1245, 487, 2089, 989, 329, 2436, 1705, 1770, 1224, 1296, 1045, 2085, 2045, 1826, 1787, 655, 958, 1367, 112, 1781, 412, 2084, 36, 1913, 1251, 1772]",0,6.0,30.0,300.0,0.0982274741506647,, -"[1593, 213, 144, 1165, 2161, 145, 1504, 537, 2192, 2209, 1647, 1327, 2622, 898, 23, 1698, 495, 1328, 2160, 108, 92, 1836, 2159, 1835, 2158, 
2157]",0,12.0,100.0,200.0,0.08677991137370754,, -"[587, 1032]",0,12.0,220.0,250.0,0.09785819793205318,, -"[2209, 2159, 1328, 108, 1835, 2160, 1698, 2161, 1327, 2157, 23, 898, 1836, 1647, 1504, 537, 1165, 495, 92, 1593, 2192, 2158, 145, 144, 213, 2622]",0,3.0,30.0,50.0,0.08677991137370754,, -"[1780, 1776, 1778, 1773, 980, 589, 1788, 139, 1783, 2080, 2291, 1767, 2083, 1784, 1180, 2321, 1144, 406, 350, 1304, 1798, 2046, 1505, 1775, 608, 1245, 660, 2090, 1452, 103, 1777, 910, 1560, 2078, 2045, 2094, 1251, 2086, 887, 2048, 656, 1623, 843, 126, 1998, 138, 1786, 2089, 1774, 1656, 236, 1790, 733, 1768, 1682, 314, 1624, 308, 1045, 1781, 1483, 519, 112, 2092, 505, 610, 542, 317, 958, 2093, 1448, 153, 329, 102, 1772, 695, 1337, 318, 1346, 1640, 1856, 2084, 1336, 109, 459, 1551, 1367, 719, 409, 407, 452, 176, 1072, 2319, 1782, 1012, 581, 1805, 124, 2085]",0,3.0,100.0,150.0,0.09379615952732644,, -"[2537, 117, 259]",0,3.0,140.0,200.0,0.09785819793205318,, -"[2209, 2161, 108, 2157, 1327, 898, 2160, 1836, 1647, 1835, 495, 1328, 1504, 2158, 2159, 92, 1698, 537, 1593, 1165, 2622, 2192, 23, 145, 213, 144]",0,3.0,250.0,250.0,0.08677991137370754,, -"[307, 991]",0,6.0,30.0,300.0,0.08677991137370754,, -"[2158, 2157, 108, 2209, 145, 144, 2159, 1647, 1835, 537, 1593, 1504, 1698, 213, 2192, 2622, 2160, 1165, 898, 23, 92, 1836, 2161, 495, 1328, 1327]",0,6.0,60.0,300.0,0.08677991137370754,, -"[108, 1647, 2209, 537, 898, 2157, 1698, 2160, 1328, 2159, 1835, 2158, 1504, 495, 1327, 2192, 92, 23, 1836, 2161, 1165, 1593, 145, 213, 144, 2622]",0,9.0,90.0,350.0,0.08677991137370754,, -"[213, 144, 1165, 108, 1593, 2209, 2192, 145, 537, 1647, 92, 1836, 1504, 495, 2622, 2161, 1327, 1328, 1698, 898, 2160, 2157, 2158, 2159, 23, 1835]",0,9.0,220.0,350.0,0.08677991137370754,, -"[189, 1262, 1782, 1772, 2045, 109, 2046, 2080, 426, 1798, 2048, 2085, 487, 2090, 112, 2089, 2088, 519, 350, 1805, 1787, 945, 1367, 1770, 1884, 1245, 656, 1584, 138, 1656, 236, 153, 1346, 244, 910, 1776, 1885, 1797, 1705, 329, 139, 
1651, 655, 973, 2078, 126, 1193, 302, 1775, 308, 124, 1856, 1572, 608, 1779, 1490, 958, 2084, 887, 935, 1640, 470, 103, 1097, 857, 773, 306, 1072, 719, 542, 854, 1070, 2094, 294, 1158, 1045, 2324, 1283, 1455, 2086, 97, 1620, 1551, 1358, 1537, 454, 1771, 671, 711, 129]",0,15.0,90.0,150.0,0.0930576070901034,, -"[1012, 1336, 441, 948, 2288, 1013, 2418, 2021, 1701, 2287, 880, 1197, 215, 695, 1620, 454, 65, 114, 1035, 1851, 2123, 1791, 1743, 671, 1682, 843, 341, 826, 1358, 1852, 699, 91, 2106, 2591, 505, 234, 742, 1180, 1986, 1759, 1733, 1784, 1412, 1740, 399, 1560, 2382, 231, 1304, 1725, 118, 1722, 831, 314, 619, 2383, 610, 1998, 1069, 1661, 1909, 1871, 661, 1561, 407, 1681, 741, 330, 2096, 1635, 544, 630, 36, 1335, 1783, 935, 416, 2320, 1313, 2107, 1274, 1774, 1773, 1051, 905, 1856, 468, 239, 836, 2256, 2094, 2113, 1077, 1670, 2262, 244, 1546, 1070, 563, 133, 1337, 1616, 1312, 1789, 1767, 2153, 2326, 102, 1142, 1552, 135, 2045, 2504, 1251, 1618, 1089, 1769, 802, 318, 1878]",1,3.0,60.0,350.0,0.6872230428360414,, -"[36, 1282, 180, 831, 1701, 429, 1552, 705, 114, 1146, 2107, 695, 699, 1012, 1735, 704, 562, 2106, 1952, 2082, 905, 741, 1743, 544, 1733, 399, 158, 1358, 341, 969, 1681, 215, 1665, 791, 826, 234, 1412, 622, 2327, 2130, 2288, 530, 556, 2145, 1784, 740, 2041, 231, 661, 2668, 416, 1546, 1725, 1015, 407, 1878, 1089, 1002, 2113, 2329, 2025, 610, 1199, 671, 916, 1781, 935, 1249, 1740, 2256, 244, 2077, 808, 1726, 2039, 1986, 1224, 682, 598, 121, 2287, 303, 1889, 762, 18, 14, 1583, 2504, 1812, 1871, 1061, 1618, 1336, 141, 638, 742, 1699, 2481, 490, 859, 1030, 133, 2251, 1571, 1483, 86, 1810, 1778, 1830, 1654, 2451, 2075, 1505, 1851, 1879, 2096, 2037, 1070, 660, 1013, 1640, 1335, 1998, 563, 1776, 754, 1574, 1804, 691, 318, 1635, 389, 1311, 963, 2094, 1773, 1337, 1248, 2153, 484, 980, 88, 1788, 454, 102, 2087, 1799, 2208, 1666, 836, 2042, 441, 617, 1769, 330, 1790, 841, 2095, 1026, 2033, 1140, 2026, 2143, 1203, 2083, 1722, 2093, 1775, 2300, 1661, 1465, 1459, 387, 1251, 
880, 542, 1801, 589, 1623, 2382, 1045, 519, 1670, 2318, 224, 1785, 1780, 1158, 1620, 1651, 1797, 409, 1077, 603, 1196, 2045, 138, 1807, 814, 1771, 1448, 1561, 49, 459, 2046, 608, 382, 1789, 103, 1786, 359, 1852, 1572, 189, 2080, 773, 476, 1490, 384, 406, 452, 2099, 2048, 277, 958, 1803, 10, 317, 2040, 1258, 1782, 1197, 1584, 1777, 1787, 973, 1072, 2085, 1564, 719, 655, 2088, 1981, 1341, 350, 1891, 1262, 1770, 417, 2090, 1977, 2407, 1805, 139, 868, 191, 1142, 2326, 910, 1798, 342, 2494, 1497, 289, 2252, 1526, 943, 1779, 1614, 1367, 1624, 643, 1929, 302, 236, 1856, 2034, 20, 581, 1792]",1,3.0,140.0,350.0,0.7112259970457903,, -"[563, 1928, 1927, 1983, 2045, 2071, 318, 699, 553, 651, 143, 231, 1840, 869, 1566, 146, 963, 1592, 467, 510, 1729, 745, 232, 2020, 771, 1134, 1156, 593, 216, 973, 664, 897, 2021, 1178, 1529, 843, 1979, 215, 2136, 252, 1296, 2024, 55, 1661, 204, 1218, 24, 1850, 1701, 880, 1583, 2059, 277, 1410, 262, 2315, 334, 416, 201, 673, 412, 45, 1359, 1703, 1570, 1986, 1344, 13, 203, 525, 2404, 160, 842, 1131, 335, 2102, 1338, 571, 725, 181, 779, 235, 579, 1851, 1609, 343, 1474, 261, 424, 205, 532, 226, 211, 661, 479, 157, 733, 1671, 1808, 1843, 896, 633, 1879, 60, 1329, 1906, 1918, 1616, 767, 1968, 1810, 465, 415, 158, 2304, 784, 854, 1660, 323, 118, 547, 1291, 1358, 586, 1120, 25, 382, 2156, 1846, 857, 436, 1732, 356, 1919, 2034, 56, 1526, 2485, 966, 1464, 2276, 995, 349, 729, 666, 1535, 2011, 1667, 514, 674, 1912, 1362, 1842, 1521, 1523, 1964, 78, 32, 548, 1844, 1110, 2182, 2016, 437, 1625, 748, 1841, 787, 1898, 2018, 1704, 191, 724, 76, 1907, 1929, 1525, 447, 1658, 1848, 1908, 1587, 2154, 818, 435, 1696, 161, 1013, 1644, 2134, 1403, 1215, 2155, 1105, 1916, 907, 836, 199, 2151, 1115]",1,6.0,100.0,350.0,0.6713441654357459,, -"[2173, 939, 2174, 2173, 939, 2174]",0,9.0,30.0,100.0,0.08677991137370754,, -"[1181, 474, 1181, 474]",0,12.0,100.0,300.0,0.09785819793205318,, -"[239, 1110, 779, 1848, 2112, 1690, 1592, 181, 2045, 1846, 1909, 467, 1675, 2418, 1849, 
1889, 1914, 1338, 1708, 1026, 1538, 382, 1303, 1665, 1500, 1661, 74, 2419, 1701, 619, 1729, 1975, 674, 1448, 255, 581, 2015, 506, 1974, 53, 749, 14, 1851, 1464, 1002, 1894, 880, 49, 94, 1336, 158, 530, 72, 2316, 1616, 2026, 2102, 1120, 1683, 1203, 1118, 1906, 791, 1008, 1842, 454, 896, 1224, 141, 1270, 147, 2130, 1891, 1348, 1583, 2039, 2030, 1487, 2035, 1807, 1637, 572, 2294, 2185, 1133, 1840, 1319, 1999, 334, 671, 1912, 717, 69, 2295, 2166, 2021, 86, 1925, 1341, 1507, 2178, 388, 11, 1525, 2099, 1333, 111, 2136, 1922, 1373, 1013, 490, 1625, 118, 2036, 1293, 1841, 1839, 737, 1121, 988, 1645, 1869, 2163, 1843, 1628, 1526, 2041, 2281, 733, 2034, 224, 1172, 2014, 1029, 1482, 1952, 65, 1073, 1857, 586, 2042, 842, 2276, 2025, 1358, 1844, 446, 691, 1973, 1343, 1907, 1215, 1655, 436, 1845, 617, 2103, 1309, 995, 1614, 2071, 1502, 285, 1654, 2155, 2417, 1410, 1416, 210, 465, 2309, 836, 349, 2137, 1131, 2153, 868, 59, 2028, 554, 1976, 2016, 1926, 2059, 2027, 34, 415, 1644, 1732, 1357, 68, 6, 1602, 539, 2010, 1882, 2009, 2384, 2293]",1,12.0,100.0,350.0,0.7101181683899557,, -"[123, 2604, 99, 2455, 127, 26, 122, 2454, 2455, 26, 99, 122, 123, 2454, 2604, 127]",0,12.0,160.0,350.0,0.08677991137370754,, -"[179, 197, 1560, 1773, 118, 215, 189, 484, 18, 91, 303, 232, 2143, 1780, 181, 1913, 2123, 1851, 1258, 1986, 244, 406, 895, 2025, 2022, 1144, 2425, 1270, 589, 603, 1583, 1849, 1852, 1725, 699, 1800, 1262, 119, 1296, 1768, 1120, 836, 1803, 314, 1464, 1740, 416, 2327, 980, 854, 2188, 505, 1830, 1786, 2034, 510, 1701, 2189, 384, 1699, 1441, 387, 2179, 2190, 1842, 1403, 1065, 1729, 1625, 506, 2329, 660, 231, 2145, 1843, 317, 1618, 69, 1622, 341, 1070, 1841, 2026, 471, 252, 1015, 330, 2504, 1035, 554, 139, 556, 440, 476, 551, 20, 1358, 2318, 2082, 389, 2096, 1229, 454, 2323, 1846, 1733, 1848, 831, 1248, 741, 1013, 344, 1574, 1789, 1367, 1868, 1072, 135, 458, 1775, 581, 2203, 1537, 2046, 542, 1289, 1767, 2089, 1777, 1478, 103, 2095, 1681, 1245, 1670, 1623, 1804, 487, 1045, 1845, 1783, 
452, 973, 519, 1856, 1624, 1158, 2451, 1088, 417, 2078, 228, 1784, 2045, 719, 1776, 671, 1490, 1782, 661, 334, 1483, 1801, 2080, 1798, 350, 2085, 1572, 1791, 191, 880, 1564, 1448, 1788, 1871, 342, 115, 1799, 1705, 1917]",1,15.0,90.0,300.0,0.7307976366322009,, -"[2209, 2161, 108, 2157, 1327, 898, 2160, 495, 1836, 1647, 1835, 1328, 1504, 2158, 2159, 2192, 92, 1698, 537, 1165, 213, 23, 145, 1593, 144, 2622, 1593, 144, 145, 213, 537, 1165, 2192, 2622, 1698, 495, 2160, 1327, 1328, 898, 1647, 1835, 2157, 2159, 2161, 92, 1836, 108, 1504, 2209, 2158, 23, 1593, 144, 145, 213, 537, 1165, 2192, 2622, 1698, 495, 2160, 1327, 1328, 898, 1647, 1835, 2157, 2159, 2161, 92, 1836, 108, 1504, 2209, 2158, 23]",0,3.0,30.0,100.0,0.08677991137370754,, -"[1889, 2130, 2178, 1812, 2039, 791, 1891, 851, 88, 1908, 1979, 1152, 1980, 1174, 1986, 300, 691, 2042, 2405, 1527, 1810, 519, 2014, 841, 1927, 211, 2295, 816, 158, 1847, 1309, 1270, 1983, 1729, 593, 1929, 848, 818, 224, 1529, 74, 897, 86, 1732, 415, 14, 2037, 985, 2305, 69, 2010, 1625, 1677, 280, 2041, 1358, 204, 1026, 2040, 1928, 1003, 1999, 2036, 2035, 483, 1110, 1614, 1968, 1118, 471, 2038, 1351, 835, 1708, 2293, 2034, 737, 749, 1909, 1622, 586, 1843, 617, 2155, 2045, 1097, 1497, 747, 1002, 708, 239, 873, 470, 1846, 1930, 1848, 1587, 1273, 1507, 1373, 1535, 1382, 1842, 885, 530, 1319, 1143, 966, 1675, 1645, 2115, 334, 1644, 525, 498, 1840, 2309, 181, 446, 2153, 643, 382, 1572, 842, 386, 1807, 809, 191, 1293, 993, 908, 2403, 539, 1857, 836, 461, 1333, 120, 554, 682, 2152, 1039, 1849, 1463, 1115, 1701, 1053, 2156, 578, 1538, 878, 1222, 1850, 2475, 2078, 1013, 2426, 506, 880, 1203, 2154, 1120, 1845, 1974, 1976, 1683, 147, 1919, 2283, 1879, 1661, 661, 454, 229, 1215, 1526, 1637, 502, 1343, 285, 255, 118, 779, 2165, 252, 504, 1121, 1224, 666, 1341, 1448, 467, 518, 141, 1654, 2118, 1973, 1844, 2276, 1525, 2425, 1336, 32, 2030, 1482, 1665, 1690, 1981, 1171, 1195, 490, 1058, 2185, 1029, 1500, 770, 1977, 304, 1149, 1131, 279, 2344, 1494, 424, 
1867, 2343, 1464, 270, 2423, 49, 11, 868, 1508, 2136, 1894, 575, 2117, 2280, 2294, 417, 2112, 2281, 567, 2135, 2610, 1405, 697, 1616, 1851, 1583, 866, 436, 1978, 536, 2166, 778, 2282, 30, 1370, 668, 771, 203, 93, 1410, 1521, 1403, 1479, 388, 2151, 826]",1,3.0,90.0,300.0,0.7296898079763663,, -"[587, 1032, 587, 1032, 587, 1032]",0,3.0,100.0,350.0,0.09785819793205318,, -"[2017, 841, 76, 1288, 1713, 2178, 1810, 1394, 1741, 716, 1675, 815, 2434, 1732, 17, 1880, 258, 344, 1850, 661, 205, 2137, 1644, 2182, 416, 1013, 2399, 88, 737, 2013, 957, 1094, 1882, 1557, 130, 1314, 1914, 890, 650, 43, 988, 1685, 2034, 1658, 935, 1440, 743, 1975, 899, 604, 1843, 1676, 498, 1309, 2706, 752, 603, 1869, 2014, 471, 1316, 525, 696, 2261, 706, 2110, 1527, 1195, 1847, 2140, 2430, 1884, 1701, 2168, 1443, 1849, 1526, 2318, 2011, 963, 2243, 547, 363, 1157, 1, 1550, 1076, 836, 2240, 1483]",1,6.0,30.0,300.0,0.6798375184638109,, -"[2185, 1500, 838, 2280, 1224, 1677, 2282, 1133, 851, 242, 2155, 831, 1538, 415, 1655, 603, 1525, 276, 2145, 1882, 88, 1741, 995, 36, 510, 741, 1637, 2133, 1930, 1505, 384, 1526, 135, 505, 779, 1564, 1552, 2132, 2047, 1695, 2143, 1338, 252, 2178, 2318, 1851, 1248, 1131, 1197, 1776, 1013, 2131, 1618, 1121, 2283, 1482, 191, 1683, 963, 1309, 504, 1015, 530, 1625, 666, 589, 1288, 1791, 661, 2015, 1463, 317, 1788, 1973, 2093, 2095, 1786, 1778, 294, 2106, 573, 836, 1810, 1358, 1986, 61, 1574, 1215, 1681, 699, 1773, 215, 1640, 1779, 1781, 1367, 2078, 1572, 1782, 1701, 1798, 454, 2080, 822, 2086, 487, 608, 2099, 1661, 1998, 973, 1981, 138, 695, 1879, 1780, 1448, 910, 719, 1787, 1852, 1251, 1789, 2085, 350, 441, 1072, 1333, 2103, 1624, 1777, 1785, 1772, 1651, 2094, 1158, 1623, 32, 409, 416, 2281, 436, 112, 581, 139, 2326, 1771, 2026, 555, 2591, 958, 1775, 1770, 2084, 103, 1909, 1196, 1245, 1871, 2045, 1799, 2102, 519, 1656, 302, 476, 1045, 1705, 417, 556, 1584, 655, 329, 2089, 1483, 1903, 1856, 1878, 126, 2504]",1,9.0,60.0,250.0,0.6702363367799113,, -"[1998, 519, 1133, 2326, 
2185, 753, 1500, 456, 2003, 2232, 1628, 1880, 2054, 1314, 1399, 1376, 382, 1574, 890, 1558, 460, 2004, 1351, 1987, 69, 1538, 2021, 1983, 1396, 347, 408, 1448, 1273, 2001, 391, 201, 1992, 454, 2057, 1453, 95, 1023, 325, 1095, 215, 1891, 1929, 191, 1421, 2611, 1139, 2280, 142, 1540, 792, 239, 1416, 2309, 164, 1149, 2109, 1989, 2315, 1654, 2059, 899, 1889, 232, 2, 1402, 1870, 45, 1614, 68, 120, 1269, 2009, 1671, 242, 2133, 553, 1285, 158, 366, 14, 1838, 2395, 2002, 1336, 1137, 2227, 86, 2027, 2172, 1002, 836, 2342, 1434, 787, 438, 1994, 210, 1203, 1270, 1995, 1973, 1227, 1986, 141, 791, 838, 2034, 2228, 1972, 1239, 2008, 1515, 52, 645, 423, 1928, 1115, 514, 661, 525, 1869, 604, 1104, 1999, 1127, 2019, 1842, 2190, 718, 617, 1914, 1812, 118, 2181, 973, 1085, 666, 1026, 179, 1215, 478, 2282, 1709, 1996, 1697, 1859, 2131, 733, 691, 827, 504, 2182, 1131, 71, 745, 2183, 621, 1358, 2405, 1909, 897, 93, 1420, 1655, 1848, 1344, 335, 2006, 2000, 1268, 61, 1079, 224, 1319, 1876]",1,9.0,60.0,350.0,0.6709748892171344,, -"[1336, 1851, 2000, 1283, 118, 1282, 1986, 695, 1499, 1791, 2324, 215, 791, 741, 2026, 1735, 638, 1013, 1797, 1197, 337, 2042, 86, 2130, 2039, 808, 191, 1270, 18, 655, 1889, 121, 2090, 2096, 36, 831, 1656, 317, 1812, 2325, 2040, 1616, 1799, 409, 1681, 1968, 2107, 2041, 1804, 1705, 1505, 1891, 691, 1497, 1574, 2145, 234, 963, 476, 141, 1158, 341, 958, 554, 1552, 2086, 382, 1273, 1786, 1983, 1358, 49, 487, 1618, 454, 1776, 1830, 530, 1788, 826, 1998, 1929, 2143, 1856, 1661, 1483, 1622, 2048, 2080, 1785, 1319, 314, 2035, 484, 836, 2318, 573, 1798, 1072, 589, 303, 1002, 114, 1015, 2093, 2034, 2082, 603, 1873, 2327, 2106, 1564, 158, 656, 1367, 1640, 2088, 2329, 2287, 224, 1009, 1768, 1560, 1193, 1248, 581, 387, 1810, 1180, 2288, 14, 1781, 699, 1304, 2084, 1701, 610, 980, 2083, 1584, 1624, 2037, 2085, 1773, 2046, 2045, 841, 308, 302, 1778, 350, 608, 1784, 2094, 490, 519, 843, 1026, 1490, 103, 112, 1651, 1670, 505, 1665, 2095, 88, 1802, 1782, 2047, 1775, 1852, 1196, 2089, 
399, 1837, 306, 2326, 1245, 416, 719, 1772, 231, 1807, 139, 1779, 973, 1572, 1789, 1309, 1958, 1894, 2078, 406, 135, 441, 329, 2091, 2504, 544, 739, 1770, 417, 2038, 2036, 239, 735, 1258, 1777, 1787, 960, 236, 1805, 542, 289, 426, 1654, 389, 1722, 1614, 138, 1955, 124, 1882, 1771, 910, 294, 459, 1956, 1543, 854, 153, 1346, 1337, 1226, 1623, 2079, 384, 452, 887, 109, 318, 1914, 2092, 935, 807, 1780, 1792, 556, 1769, 133, 2481, 617, 830, 516, 1561, 1767, 598, 1783, 1880, 1884, 1881, 1885, 1448, 1144, 1774, 753, 344, 1269, 1028]",1,9.0,90.0,300.0,0.7115952732644018,, -"[240, 172, 454, 519, 512, 1309, 1469, 1592, 1174, 572, 2485, 109, 1472, 851, 1131, 2418, 2292, 2332, 1890, 524, 2394, 973, 850, 1171, 2034, 2291, 2187, 1655, 2107, 1839, 2295, 771, 1683, 910, 1047, 2130, 2346, 1801, 133, 993, 657, 2178, 120, 88, 2349, 1169, 708, 2282, 619, 1110, 818, 1505, 1089, 2294, 2189, 553, 1224, 1198, 2333, 1623, 885, 1995, 1922, 712, 204, 1358, 1891, 305, 61, 1025, 1909, 2025, 2207, 772, 429, 2071, 2051, 1677, 816, 1923, 2419, 1538, 1517, 2039, 1979, 756, 1127, 2045, 1125, 1514, 2094, 1008, 81, 366, 1503, 543, 737, 1481, 887, 1395, 510, 1676, 636, 2153, 257, 1156, 1661, 979, 55, 1588, 118, 1423, 256, 1926, 262, 1568, 1889, 65, 1583, 1692, 1669, 836, 552, 1118, 399, 1069, 149, 2348, 1074, 1914, 966, 1464, 1220, 239, 1759, 2014, 1842, 376, 2013, 30, 586, 1908, 1848, 2233, 1463, 1527, 1073, 1729, 2155, 1920, 2231, 2383, 1416, 1769, 1695, 1918, 591, 1359, 415, 1625, 1925, 1376, 1293, 373, 74, 300, 1502, 908, 1143, 2162, 181, 880, 1042, 1352, 1343, 1351, 2134, 276, 1026, 80, 1680, 2180, 2021, 98, 1572, 1675, 1120, 1468, 2403, 1851, 921, 434, 1693, 471, 2317, 2203, 506, 69, 485, 2364, 423, 2502, 1467, 1017, 2531, 1204, 1531, 2133, 1362, 574, 1482, 604, 347, 1972, 2078, 1847, 1921, 503, 2052, 475, 1104, 749, 642, 530, 2219, 2576, 539, 1274, 2194, 2234, 2511, 1403, 1893, 2163, 718, 964, 1635, 1521, 1117, 2232, 465, 2055, 2329, 1203, 858, 2056, 1849, 2072, 2499, 2302, 2131, 291, 624, 711, 
2198, 1158, 839, 2311, 2345, 916, 315, 52, 1388, 597, 1840, 525, 2172, 1916, 2472, 1515]",1,9.0,90.0,350.0,0.7385524372230429,, -"[921, 2046, 2306, 1416, 2134, 2048, 1818, 1042, 306, 2052, 139, 1118, 2099, 1517, 210, 41, 687, 1973, 596, 280, 1500, 815, 318, 1394, 1981, 1985, 1983, 153, 1628, 175, 563, 1908, 490, 2357, 1362, 1776, 2021, 771, 1566, 1278, 718, 955, 2282, 681, 215, 2359, 910, 1337, 830, 440, 1402, 2293, 437, 2185, 453, 56, 1079, 1977, 164, 1905, 1535, 1215, 94, 2295, 2425, 1482, 544, 749, 1787, 1879, 1156, 543, 74, 1421, 1274, 1701, 447, 1133, 993, 2283, 236, 1759, 55, 436, 887, 93, 88, 1856, 2078, 1926, 308, 2423, 649, 651, 1572, 868, 666, 32, 126, 1616, 438, 109, 1526, 1667, 2182, 2280, 2117, 2109, 2110, 2305, 838, 2024, 1655, 1984, 1772, 65, 1855, 1351, 2071, 1538, 2294, 211, 323, 1651, 1358, 412, 2020, 1592, 1914, 2155, 1171, 816, 415, 2151, 1915, 1485, 95, 2045, 1867, 1525, 1975, 519, 239, 748, 702, 1732, 1683, 973, 539, 242, 619, 1203, 779, 315, 1846, 2394, 341, 1980, 2485, 2056, 1309, 1131, 1919, 1791, 60, 2405, 1121, 498, 1849, 2047, 2418, 1218, 2232, 725, 1979, 133, 118, 1972, 1248, 1784, 309, 1341, 1568, 408, 2156, 734, 1674, 603, 1069, 1852, 699, 1370, 324, 907, 2276, 1847, 1110, 778, 454, 604, 1295, 2026, 2013, 1092, 2274, 1842, 120, 1851, 1676, 1220, 586, 1661, 736, 553, 356, 1918, 1539, 1152, 1104, 1729, 787, 2153, 1355, 2281, 1359, 2025, 2419, 1127, 2094, 1376, 1995, 1843, 1583, 880, 1137, 2194, 1840, 1143, 2385, 1487, 1625, 1844, 510, 1830, 1850, 1841, 1920, 506, 1930, 2154, 708, 693, 1909, 682, 366, 262, 1291, 1464, 1660, 1120, 1644, 894, 2152, 334, 1224, 471, 1343, 497, 1845, 1403, 1675, 2317, 657, 530, 1013, 325, 1293, 1848, 69, 712, 376, 1587, 1074, 181, 2189, 1377, 1222, 1448, 465, 1505, 850, 925, 1352, 836, 1652, 1521, 2403, 908, 98, 772]",1,9.0,100.0,300.0,0.6979320531757754,, -"[1624, 1956, 306, 1779, 1785, 807, 1955, 426, 249, 1844, 1574, 1789, 2394, 506, 2212, 2024, 2276, 2162, 1080, 304, 1772, 318, 191, 2071, 1163, 1507, 356, 
2182, 2010, 74, 1661, 1787, 651, 1346, 1840, 153, 1092, 908, 1850, 2045, 830, 1028, 2054, 1986, 1805, 124, 69, 563, 1402, 2426, 2359, 2216, 2344, 955, 1464, 236, 2095, 1914, 5, 1841, 471, 841, 687, 1353, 1120, 1729, 147, 2506, 289, 2135, 1421, 478, 1998, 1884, 621, 1592, 1226, 2165, 525, 1045, 2396, 2423, 1622, 1670, 2093, 1878, 1769, 575, 1174, 2016, 681, 2012, 1645, 2018, 1675, 518, 133, 120, 2385, 76, 109, 539, 1337, 1912, 239, 2164, 204, 118, 2156, 519, 973, 2591, 266, 1358, 1880, 97, 211, 2485, 1118, 2110, 516, 1416, 1978, 514, 1351, 1013, 1676, 2326, 323, 2078, 1572, 181, 2013, 708, 1776, 598, 1355, 1171, 1482, 661, 504, 666, 711, 102, 1332, 1849, 897, 61, 993, 1927, 1855, 1851, 459, 880, 1343, 2109, 1195, 32, 2405, 696, 149, 334, 279, 1885, 1583, 466, 885, 2402, 935, 2425, 2546, 2133, 2295, 2103, 498, 1152, 815, 1644, 162, 995, 416, 1110, 530, 1919, 1976, 2131, 2094, 586, 2178, 1527, 1908, 255, 702, 1394, 114, 234, 2282, 2593, 868, 1403, 252, 1338, 2312, 1842, 743, 1295, 176, 756, 779, 2102, 1683, 1105, 415, 1677, 816, 2011, 1658, 2132, 778, 1882, 2155, 578, 1625, 1847, 1845, 88, 2152, 1293, 2283, 649, 1975, 2015, 2017, 1521, 737, 1370, 1652, 344, 130, 1535, 387, 483, 628, 2386, 1741, 502, 1713, 1930, 1333, 2281, 1843, 2294, 2403, 434, 749, 682, 270, 1979, 2316, 1980, 2092, 1909, 878, 280, 1848, 510, 2151, 1525, 2153, 437, 1883, 818, 1410, 1846, 1203, 2195, 1881, 433, 851, 1637, 2288, 1538, 1494, 300, 1810, 1732, 838, 1133, 2185, 242, 822, 610, 1500, 1039, 2287, 1879, 966, 952, 2293, 1982]",1,9.0,100.0,350.0,0.6842688330871491,, -"[611, 2690, 611, 2690, 611, 2690]",0,9.0,110.0,350.0,0.08677991137370754,, -"[1351, 1773, 2282, 179, 1410, 1070, 1655, 1842, 644, 1776, 1916, 1127, 2309, 1337, 745, 1358, 563, 1624, 1570, 1258, 743, 1733, 2099, 197, 702, 1780, 1777, 748, 2109, 1778, 1986, 1218, 1676, 1505, 2425, 1879, 2110, 1402, 1623, 484, 2092, 1998, 822, 2281, 610, 1909, 2190, 836, 1592, 1015, 1977, 1784, 510, 133, 465, 1914, 461, 414, 1785, 1789, 1927, 2326, 
1725, 2009, 1121, 2034, 1871, 387, 2093, 1285, 504, 2416, 1421, 1637, 95, 2185, 2079, 598, 2094, 203, 2419, 1701, 1995, 289, 2113, 1652, 1538, 1333, 1500, 783, 102, 1740, 32, 651, 2111, 505, 661, 1262, 2025, 2598, 1616, 205, 2274, 1670, 10, 1133, 232, 490, 2262, 318, 897, 1526, 643, 1013, 571, 868, 1622, 1618, 88, 1769, 1482, 2026, 2078, 1779, 711, 1042, 1535, 1976, 1869, 2024, 452, 2086, 773, 366, 2022, 1572, 1781, 1810, 2376, 519, 2280, 274, 1732, 666, 1771, 2052, 160, 459, 887, 382, 660, 1574, 244, 86, 693, 436, 420, 1441, 1801, 1251, 1483, 1045, 199, 1775, 993, 416, 1873, 542, 406, 1131, 2000, 1930, 842, 854, 231, 1787, 2071, 2045, 1143, 109, 138, 1203, 1341, 696, 1152, 553, 838, 1975, 1566, 1868, 1848, 1981, 476, 1974, 1917, 1120, 2010, 1782, 417, 1973, 189, 925, 921, 1367, 581, 118, 544, 2046, 719, 2095, 573, 749, 329, 242, 2189, 1802, 441, 126, 779, 1517, 1583, 1772, 2050, 1705, 139, 2112, 1072, 1097, 112, 426, 487, 350, 1770, 608, 2295, 2156, 539, 2478, 910, 153, 1142, 1919, 74, 1856, 1416, 308, 1140, 1346, 103, 880, 1253, 496, 1222, 1799, 1804, 778, 935, 1798, 2016, 1525, 1661, 124, 2080, 342, 236, 1800, 1110, 18, 424, 1683]",1,12.0,90.0,350.0,0.7570162481536189,, -"[1885, 661, 588, 698, 745, 2123, 1127, 665, 286, 2000, 160, 2004, 2040, 1337, 2003, 792, 1705, 2007, 1558, 2008, 1772, 332, 2229, 2383, 133, 1522, 868, 1787, 924, 1844, 553, 2120, 1051, 2009, 1995, 673, 1778, 1958, 1346, 2228, 113, 743, 232, 1166, 1777, 91, 176, 1884, 1661, 2001, 681, 266, 415, 1574, 1805, 476, 1532, 389, 566, 71, 1859, 124, 2045, 366, 2122, 1697, 215, 548, 1045, 1992, 442, 1453, 335, 306, 973, 809, 563, 426, 1987, 1993, 2006, 391, 1095, 2005, 1991, 201, 409, 2121, 1986, 2093, 344, 1780, 45, 519, 1701, 935, 289, 896, 2276, 899, 2, 1810, 1907, 1789, 2034, 109, 277, 1015, 1999, 1624, 384, 1875, 2066, 638, 733, 1876, 1519, 2259, 39, 68, 1989, 808, 1908, 1812, 2002, 1998, 1481, 1873, 2362, 460, 236, 593, 441, 1474, 196, 1779, 465, 318, 1434, 153, 2335, 1870, 2357, 1023, 1505, 1788, 
1569, 1149, 887, 2094, 451, 1385, 1709, 1196, 249, 1996, 610, 1990, 179, 349, 1988, 1349, 589, 1869, 1634, 1419, 1704, 968, 2092, 1965, 1994, 2323, 94, 1529, 102, 271, 1097, 1964, 627, 1786, 210, 1997, 1906, 1785, 1971, 205, 854, 1365, 1966, 1968, 696, 2509, 1178, 1702, 459, 1671, 463, 1115, 1769, 470, 203, 317, 1234, 1703, 2309, 1970, 2095, 1452, 857, 22, 1336, 529, 1010, 443, 151, 1984, 420, 2336, 963, 1417, 805, 199, 1141, 2260, 2519, 2350, 2434, 2238, 2645, 652, 1443, 761, 1266, 759, 275, 2236, 2237, 706, 1967, 2401, 1157, 1350, 375, 1969, 362, 946, 1240, 2059, 1401, 89, 789, 2400, 798, 1381, 1794, 1094, 828, 2240, 1795, 29, 1585, 2239, 258, 1087, 2294, 884, 1793, 1246, 2289, 2463, 444, 2299, 2435, 584, 152, 623, 488, 2399, 1796, 2464, 1653, 2290, 2155, 43, 140, 2547, 1530, 2297, 1153, 1102, 1369, 1207, 515, 340, 2298, 2496, 582, 2549, 2465, 2548]",1,12.0,100.0,300.0,0.6875923190546529,, -"[1293, 2418, 65, 1376, 1012, 650, 1552, 2324, 1725, 1283, 2326, 337, 2025, 1282, 2021, 1070, 1848, 1089, 1616, 671, 1759, 1840, 1550, 121, 1873, 613, 937, 1801, 1583, 1846, 1909, 1574, 2107, 1644, 1661, 1557, 610, 189, 2094, 957, 1847, 2256, 935, 1685, 2318, 1412, 1464, 2182, 1733, 682, 218, 1142, 661, 1681, 2322, 544, 1120, 407, 1851, 704, 118, 1735, 1358, 2023, 1067, 2016, 752, 36, 2451, 773, 1878, 359, 1389, 1986, 1789, 814, 1841, 1743, 1871, 685, 1262, 2208, 389, 619, 1729, 1849, 1140, 2113, 960, 1499, 2075, 399, 69, 181, 905, 1821, 530, 2542, 2168, 20, 1792, 2092, 239, 137, 969, 1701, 1336, 1740, 695, 1620, 2083, 454, 2096, 506, 2329, 1505, 1625, 1726, 859, 2091, 2252, 367, 1013, 1249, 2251, 2382, 716, 2505, 742, 2339, 1776, 1521, 1998, 341, 1635, 1810, 643, 1274, 2086, 1289, 581, 554, 562, 843, 1571, 1546, 1843, 1077, 1069, 2000, 134, 1483, 945, 603, 795, 1197, 973, 1622, 1830, 79, 1845, 2153, 471, 1335, 510, 598, 2026, 2143, 1158, 548, 589, 889, 244, 277, 1180, 1304, 215, 2478, 1797, 409, 880, 1512, 1072, 406, 2407, 102, 836, 476, 1844, 1842, 980, 2331, 289, 1850, 
1403, 1769, 1781, 2504, 1778, 1856, 1513, 416, 303, 1800, 1045, 1782, 1705, 115, 1804, 754, 1773, 1785, 2144, 573, 109, 135, 1313, 699, 908, 644, 519, 1675, 1670, 1787, 2087, 2327, 334, 1311, 318, 563, 1852, 1337, 176, 487, 1779, 1220, 2034, 426, 1837, 2079, 2078, 1791, 1009, 1799, 1367, 2080, 124, 1459, 1609, 1803, 191, 252, 1640, 719, 2325, 459, 638, 1784, 1448, 630, 505, 1258, 1798, 236, 1768, 1788, 417, 2090, 2093, 1478, 1772, 1572, 1624, 112, 1245, 317, 126, 1777, 2095, 1805, 342, 2019, 1699, 608, 387, 1656, 306, 887, 1802, 1584, 1490, 1790, 2046, 1248, 308, 1346, 2085, 2045, 153, 314, 1564, 1251, 958, 1770, 350, 2082, 655, 138, 2089, 1551, 1196, 1193, 133, 302, 2084, 329, 1465, 452, 18, 660, 2598, 2088, 10, 1560, 1780, 2081, 1771, 1253, 1786, 1651]",1,12.0,110.0,350.0,0.7548005908419497,, -"[323, 1882, 1879, 1660, 2020, 2153, 97, 1309, 1353, 1526, 771, 2154, 2155, 1291, 498, 2101, 1884, 1907, 993, 318, 2021, 1732, 1045, 787, 88, 1906, 325, 2405, 1614, 935, 586, 490, 1203, 1671, 1885, 711, 252, 341, 2394, 816, 109, 2281, 1337, 2048, 1878, 651, 55, 1538, 164, 1980, 868, 1396, 2023, 1701, 815, 661, 1421, 1979, 1881, 1587, 2046, 65, 2070, 699, 1131, 133, 215, 1880, 1787, 1856, 231, 120, 1791, 210, 1772, 153, 2419, 1258, 553, 2276, 1883, 1842, 1248, 2152, 1110, 2045, 1652, 1028, 725, 1651, 1983, 702, 415, 1156, 2177, 1184, 1847, 1830, 1848, 190, 1358, 56, 1127, 777, 2151, 1224, 973, 1218, 1625, 344, 1616, 1085, 2094, 2013, 1844, 2026, 807, 2047, 412, 2418]",1,3.0,30.0,300.0,0.6613737075332349,, -"[2252, 1812, 65, 1811, 1822, 2251, 1095, 1062, 2021, 1158, 835, 962, 1821, 1121, 1851, 1986, 1507, 1343, 1820, 2485, 1809, 160, 2034, 1869, 1819, 255, 2112, 441, 795, 224, 2083, 619, 603, 2086, 447, 1576, 695, 1849, 382, 1483, 121, 790, 407, 482, 1823, 2189, 183, 230, 2096, 1917, 351, 1818, 1817, 1335, 2418, 1810, 1856, 1661, 1815, 2190, 1004, 511, 1814, 446, 1910, 859, 1505, 576, 1701, 1690, 2094, 476, 843, 1348, 1009, 973, 2048, 802, 2030, 568, 2188, 1816, 1029, 1107, 
27, 2166, 2046, 1248, 1304, 302, 1791, 1677, 1402, 1837, 294, 267, 1253, 454, 2026, 2000, 417, 1282, 1808, 176, 1337, 481, 191, 1813, 505, 409, 426, 1564, 716, 1735, 948, 519, 589, 573, 1012, 2107, 2165, 1572, 1581, 1045, 2385, 1331, 2078, 317, 1871, 1788, 563, 1699, 2084, 1789, 138, 638, 1773, 2090, 2319, 1656, 1802, 539, 1584, 549, 2143, 1785, 960, 303, 1772, 487, 980, 1089, 1852, 610, 2022, 1670, 56, 1790, 1144, 1616, 1651, 1782, 308, 228, 1777, 1792, 1779, 1805, 1245, 719, 1283, 306, 2047, 2091, 13, 236, 1448, 133, 1251, 655, 1561, 1180, 1786, 1776, 958, 1490, 1367, 2079, 660, 1768, 2088, 1784, 412, 2050, 1909, 1640, 1770, 109, 554, 1013, 1376, 2325, 556, 399, 1299, 2081, 329, 1774, 2318, 1804, 1783, 153, 1780, 350, 661, 1193, 1781, 1767, 459, 406, 542, 2327, 112, 452, 1623, 1560, 102, 118, 1538, 2010, 1771, 608, 215, 1681, 384, 1220, 1072, 1998, 239, 139]",1,3.0,60.0,300.0,0.723781388478582,, -"[239, 2228, 2021, 1421, 1133, 2185, 1396, 619, 1986, 1507, 1500, 1072, 1909, 306, 771, 1420, 2199, 36, 838, 2009, 55, 1156, 2395, 1346, 242, 149, 1855, 681, 1580, 1376, 2280, 2396, 2153, 153, 1645, 1628, 1370, 236, 215, 1538, 682, 1975, 2182, 456, 2046, 1930, 139, 2106, 2048, 1399, 1501, 2195, 2200, 1552, 65, 2608, 2316, 563, 349, 1670, 822, 1085, 1042, 945, 1416, 109, 1131, 1787, 2394, 671, 60, 891, 1121, 1295, 2107, 441, 2216, 1572, 868, 2051, 1624, 1146, 2078, 1772, 1358, 1343, 1174, 1655, 763, 1995, 2155, 2212, 2418, 1776, 2197, 2016, 318, 1333, 973, 2054, 118, 504, 366, 2181, 1701, 847, 1616, 2359, 2402, 2283, 2274, 1574, 2405, 324, 454, 2056, 860, 56, 1498, 708, 1998, 2422, 2108, 816, 1856, 702, 1980, 1309, 308, 1027, 2109, 1652, 2025, 734, 553, 126, 415, 115, 1218, 630, 1224, 1682, 2282, 1568, 2281, 1926, 2403, 2094, 1357, 1074, 2028, 1676, 465, 490, 2164, 277, 1127, 1203, 2276, 1142, 1637, 120, 88, 1979, 1171, 416, 2019, 1079, 600, 1801, 214, 778, 325, 543, 2052, 1852, 1525, 2194, 280, 1651, 736, 1215, 341, 255, 1674, 1842, 1535, 2189, 2165, 1668, 728, 408, 
1337, 572, 1372, 1110, 412, 252, 1527, 1848, 1844, 2653, 687, 210, 2027, 2293, 1972, 992, 2217, 1915, 510, 2135, 773, 133, 1402, 1919, 836, 2388, 2485, 1120, 1843, 2450, 830, 2295, 1920, 699, 2201, 1729, 657, 2100, 2397, 1917, 1675, 41, 1841, 649, 436, 2086, 1485, 693, 98, 496, 1849, 596, 1788, 551, 1395, 176, 2045, 2026, 1539, 1918, 1362, 661, 447, 175, 1583, 1914, 1248, 604, 935, 1521, 1625, 2649, 1661, 1847, 2310, 1293, 519, 1973, 1851, 376, 1791, 603, 2047, 1916, 1463, 1784, 880, 955, 724, 1303, 69, 2071, 1377, 644, 1602, 1448, 1403, 95, 1505, 2074, 387, 2193, 2317, 471, 1351, 1355, 1830, 1487]",1,3.0,75.0,350.0,0.6639586410635155,, -"[2054, 1628, 1529, 1540, 1115, 1178, 2018, 1139, 2133, 1616, 2136, 2418, 456, 2276, 1538, 693, 2015, 65, 1995, 1926, 734, 30, 95, 2057, 604, 1500, 388, 1658, 995, 1979, 738, 1297, 2274, 1029, 76, 1494, 1592, 1701, 2071, 2185, 1849, 842, 2194, 553, 1842, 2109, 2013, 297, 1402, 1358, 239, 1914, 1421, 1838, 1572, 2030, 2153, 760, 118, 1074, 2010, 1871, 1713, 1351, 771, 1851, 1676, 1919, 201, 2131, 446, 2112, 142, 1882, 1299, 2102, 708, 1463, 2110, 778, 1474, 2162, 498, 1073, 1894, 1573, 2485, 2281, 985, 1668, 1120, 109, 1912, 2152, 2232, 1721, 1920, 211, 2394, 2154, 2189, 2016, 581, 366, 2396, 1473, 1655, 955, 436, 1133, 2045, 1269, 1149, 1121, 1999, 2025, 1352, 2172, 1341, 2166, 408, 1864, 149, 815, 1301, 593, 1909, 1821, 1690, 454, 1644, 285, 376, 1928, 1359, 1222, 2099, 1847, 2707, 988, 712, 2450, 181, 2312, 52, 2231, 1515, 1908, 2094, 2183, 657, 255, 1630, 262, 69, 24, 2317, 316, 1507, 2475, 1293, 1741, 697, 61, 1568, 1291, 415, 554, 331, 471, 1526, 1127, 1675, 637, 1394, 525, 850, 1527, 818, 2011, 2182, 547, 1100, 143, 322, 1918, 519, 2181, 1636, 851, 1870, 634, 1370, 737, 467, 228, 334, 869, 162, 1875]",1,6.0,50.0,350.0,0.6787296898079763,, -"[1583, 2245, 1293, 318, 1847, 2243, 143, 39, 94, 869, 1820, 1464, 316, 1337, 1675, 1661, 69, 2355, 1995, 1120, 158, 563, 1644, 633, 2357, 1432, 1521, 1403, 1127, 235, 366, 1868, 2246, 
1440, 1873, 181, 2045, 2009, 1864, 1851, 1866, 1856, 2356, 1013, 160, 1855, 1299, 1863, 1850, 1842, 298, 1853, 743, 334, 1241, 1849, 2610, 873, 201, 24, 745, 1095, 8, 767, 1729, 1848, 1998, 1334, 1625, 22, 1100, 880, 664, 2003, 532, 1997, 205, 180, 908, 1841, 471, 729, 231, 784, 2244, 269, 1574, 1799, 1872, 669, 467, 1840, 1869, 232, 1865, 1595, 1843, 343, 1124, 1049, 1844, 506, 519, 1365, 530, 1636, 1845, 836, 1212, 2551, 157, 1969, 1874, 1852, 335, 1042, 1453, 2509, 1993, 510, 1877, 418, 789, 759, 699, 1075, 963, 1992, 2008, 1991, 391, 45, 1434, 1967, 2007, 1996, 1701, 1347, 327, 2063, 1857, 1450, 2115, 1858, 866, 2001, 1171, 416, 2000, 1325, 2034, 1052, 2519, 321, 1665, 2543, 547, 1000, 1655, 1870, 1417, 179, 1240, 899, 2238, 2004, 1703, 624, 68, 1225, 1086, 1966, 261, 215, 1986, 1479, 1846, 746, 2247, 1234, 567, 281, 2006, 972, 2002, 1053, 1854, 13, 1861, 1323, 1987, 386, 1702, 1871, 460, 1989, 1862, 2580, 1083, 2579, 229, 1860, 1968, 536, 216, 1295, 681, 226, 1859, 874, 1875, 2117, 2114, 1876, 673, 792, 1812, 1023, 1405, 1558, 1970, 101, 1971, 1382, 151, 463, 2118, 1867, 1894, 71, 1906, 1508, 770, 1215, 2, 1697, 1232, 476, 1990, 1704]",1,6.0,60.0,250.0,0.7112259970457903,, -"[1445, 1446, 24, 236, 1784, 218, 1864, 2349, 417, 1805, 1583, 387, 2045, 48, 306, 598, 1787, 482, 230, 2346, 1810, 1428, 1160, 1469, 1346, 2303, 231, 317, 1779, 512, 869, 958, 1640, 756, 1095, 1483, 893, 681, 1797, 570, 733, 973, 2187, 2302, 384, 1799, 1219, 1789, 1143, 1666, 1798, 1472, 109, 1089, 2532, 1807, 1670, 862, 172, 638, 1202, 2524, 1780, 911, 1804, 2034, 1705, 962, 240, 2009, 2555, 399, 1127, 519, 1072, 244, 2120, 553, 1488, 2451, 180, 442, 2430, 1802, 1701, 2615, 887, 1171, 1013, 1997, 10, 197, 1590, 1070, 1785, 1988, 1994, 792, 794, 179, 1107, 1519, 1987, 617, 232, 2130, 2086, 2383, 1891, 88, 1812, 2254, 1893, 201, 271, 1624, 1875, 1907, 1015, 2668, 1989, 2295, 1870, 45, 1454, 2039, 2332, 1, 1196, 1023, 1336, 277, 1858, 1662, 1777, 493, 589, 1055, 1570, 1889, 1740, 2, 460, 
416, 1803, 1995, 1725, 1065, 20, 1358, 1478, 899, 2238, 71, 215, 2691, 945, 1890, 2667, 476, 397, 2004, 366, 206, 1786, 1434, 1552, 342, 1474, 566, 2378, 1018, 1558, 297, 949, 673, 484, 968, 1035, 2568, 1781, 38, 1510, 1733, 2186, 36, 1618, 1262, 2041, 1692, 189, 863, 836, 1051, 743, 1800, 160, 1778, 1873, 1986, 332, 33, 1876, 2365, 745, 2008, 1531, 1859, 1892, 2336, 335, 1998, 1162, 86, 1514, 1709, 1453, 2094, 391, 1481, 1425, 1699, 2406, 1801, 1406, 2040, 409, 1146, 299, 2591, 1574, 330, 2387, 665, 1149, 1166, 773, 1423, 1506, 1704]",1,6.0,60.0,300.0,0.7171344165435746,, -"[1798, 1245, 1358, 1773, 2080, 1782, 1656, 289, 2089, 487, 1780, 1045, 1070, 1224, 329, 36, 1770, 2085, 1705, 989, 1973, 887, 1144, 1483, 530, 1251, 1975, 2436, 910, 2485, 1919, 2084, 519, 1525, 958, 2106, 1804, 1781, 412, 1296, 112, 1725, 524, 1640, 189, 1849, 2050, 56, 69, 1740, 2090, 725, 682, 2326, 1127, 645, 1733, 1651, 109, 1926, 2045, 350, 1682, 1913, 1772, 1505, 1914, 903, 1013, 1787, 1395, 308, 1995, 1826, 102, 2071, 1792, 88, 1769, 1776, 389, 1193, 1799, 1226, 553, 643, 1367, 1856, 807, 426, 2048, 2189, 244, 655, 452, 376, 2242, 126, 880, 542, 2155, 2286, 1842, 1538, 124, 980, 1829, 779, 2016, 1262, 2699, 446, 133, 1801, 2235, 895, 1549, 676, 1956, 1584, 1825, 1767, 1803, 778, 87, 2091, 415, 2323, 231, 1346, 1158, 417, 556, 1237, 1131, 139, 236, 252, 1805, 153, 459, 1402, 1824, 1583, 306, 2046, 1120, 608, 472, 2241, 1572, 1142, 458, 2078, 447, 2088, 2094, 500, 696, 836, 656, 1779, 1370, 1072, 128, 1191, 1784, 1777, 440, 1778, 302, 1490, 1133, 1623, 1851, 1958, 539, 366, 1500, 1775, 1303, 630, 1797, 2185, 847, 1783, 70, 1551, 1507, 2153, 773, 160, 2284, 1561, 176, 1168, 711, 1179, 1903, 1645, 2010, 393, 138, 2457, 573, 2188, 1616, 1955, 103, 1543, 1899, 661, 842, 2450, 2272, 719, 2086, 294, 1800, 344, 1404, 1503, 1901, 2207, 2630, 1661, 360, 1873, 1560, 1771, 2030, 476, 1917, 193, 2112, 2025, 1183, 739, 161, 1408, 1827, 2179, 277, 2184, 1802, 735, 1881, 119, 1182, 660, 979, 973, 1409, 
1009, 2061, 1828, 392, 701, 1527, 1885, 2062, 935, 370, 1869, 331, 2270, 747, 1478, 1343, 118, 1884, 581, 384, 685, 451, 129, 314, 1839, 388, 2269, 1579, 854, 454, 1768, 2497, 441, 342, 2087, 945, 554, 20, 1171, 1774, 671, 1690, 2018, 860, 2375, 2316, 255]",1,6.0,75.0,350.0,0.6694977843426884,, -"[59, 580, 154, 831, 671, 1733, 552, 364, 819, 1258, 18, 337, 441, 1336, 783, 794, 179, 2153, 303, 754, 609, 2143, 489, 1725, 1801, 524, 796, 505, 2335, 882, 970, 1248, 1289, 1800, 853, 764, 2034, 744, 94, 196, 1560, 2019, 758, 173, 342, 53, 1807, 111, 945, 687, 2096, 2376, 684, 326, 1564, 2329, 1571, 1889, 963, 880, 244, 984, 86, 2640, 2490, 2318, 90, 1511, 2332, 863, 893, 1767, 1534, 1786, 929, 68, 73, 155, 442, 2107, 156, 2145, 271, 741, 1740, 36, 682, 20, 466, 197, 797, 2155, 191, 876, 2001, 1070, 1493, 215, 1270, 523, 588, 147, 630, 661, 2387, 2504, 1776, 530, 645, 2082, 34, 1803, 1519, 357, 1013, 695, 1358, 935, 384, 613, 1669, 1505, 286, 823, 1142, 1791, 1986, 33, 1783, 1481, 603, 1768, 1417, 973, 1009, 978, 232, 573, 189, 1583, 2093, 1774, 589, 980, 2263, 638, 859, 429, 1072, 476, 2041, 72, 1452, 454, 1483, 38, 1622, 2016, 484, 2026, 1781, 1681, 2044, 1797, 2326, 1262, 1448, 773, 1868, 1385, 1042, 1068, 1651, 2095, 1856, 708, 626, 698, 170, 341, 2287, 139, 1045, 2288, 231, 2094, 1701, 1367, 1143, 1784, 1830, 2106, 115, 228, 1779, 1705, 556, 1640, 719, 748]",1,9.0,50.0,350.0,0.725258493353028,, -"[213, 144, 1165, 1593, 2192, 1698, 537, 145, 108, 1327, 495, 2209, 1504, 2161, 2622, 2157, 1647, 2158, 92, 1836, 1328, 2160, 2159, 1835, 898, 23, 2160, 898, 1328, 1647, 1698, 1835, 2157, 2159, 2161, 92, 1836, 495, 1327, 108, 1504, 2209, 537, 2158, 23, 2192, 145, 1165, 1593, 144, 213, 2622, 2160, 898, 1328, 1647, 1698, 1835, 2157, 2159, 2161, 92, 1836, 495, 1327, 108, 1504, 2209, 537, 2158, 23, 2192, 145, 1165, 1593, 144, 213, 2622, 2160, 898, 1328, 1647, 1698, 1835, 2157, 2159, 2161, 92, 1836, 495, 1327, 108, 1504, 2209, 537, 2158, 23, 2192, 145, 1165, 1593, 144, 213, 
2622]",0,9.0,60.0,250.0,0.08677991137370754,, -"[794, 2200, 95, 731, 1160, 1039, 1265, 2045, 1042, 2073, 603, 1192, 1416, 327, 1655, 429, 435, 1738, 1670, 2326, 2340, 1346, 1580, 1013, 836, 377, 2063, 2472, 878, 451, 1325, 1197, 2203, 157, 938, 759, 680, 1062, 436, 2582, 1739, 1171, 1017, 8, 441, 1446, 2568, 1912, 1864, 2412, 401, 879, 2325, 2201, 109, 2123, 2360, 2011, 827, 191, 316, 849, 1820, 2365, 2199, 642, 665, 149, 1894, 1810, 2380, 862, 2295, 91, 901, 396, 572, 306, 1295, 2064, 678, 973, 935, 671, 2027, 733, 2418, 180, 1294, 1818, 2339, 1996, 1239, 298, 2096, 2044, 598, 456, 1817, 2034, 285, 88, 2026, 2509, 885, 1998, 1445, 2555, 1358, 215, 746, 818, 482, 2430, 1709, 1066, 1166, 869, 1279, 230, 1701, 1329, 681, 24, 1682, 2301, 591, 1991, 1781, 487, 197, 519, 1558, 1023, 1993, 695, 1014, 2338, 1873, 1095, 1870, 1988, 2004, 1999, 2024, 1812, 231, 985, 651, 218, 570, 1997, 553, 2001, 576, 297, 1080, 1583, 2003, 332, 2381, 445, 232, 383, 544, 1987, 201, 185, 277, 1120, 2550, 2006, 1224, 743, 1191, 2170, 2008, 416, 2378, 1875, 1704, 314, 2253, 433, 2194, 1078, 1149, 2009, 387, 1995, 2379, 94, 2000, 1858, 2477, 1110, 792, 1434, 1574, 391, 1453, 366, 968, 1859, 745, 536, 760, 1697, 1522, 160, 2002, 1570, 267, 2386, 179, 1357, 335, 2077, 2186, 1666, 2348, 1127, 2302, 1876, 169, 1347, 2613, 493, 2303, 574, 2025, 1119, 503, 2347, 2121, 460, 376, 2532, 1994, 1986]",1,9.0,60.0,300.0,0.6905465288035451,, -"[519, 1574, 306, 739, 1543, 718, 438, 1977, 391, 1975, 604, 735, 1989, 2054, 1810, 1421, 1085, 109, 2256, 2109, 1526, 1226, 835, 1676, 878, 2281, 1985, 1127, 461, 1410, 1955, 1812, 1956, 179, 1203, 725, 1876, 52, 1894, 504, 1023, 424, 102, 1614, 476, 32, 2282, 1914, 460, 1285, 2116, 2384, 553, 1958, 724, 2233, 2232, 366, 2104, 1848, 1992, 68, 1474, 95, 1976, 1095, 1869, 1538, 1131, 1149, 1224, 1139, 566, 1929, 118, 2009, 1333, 1881, 994, 2034, 2156, 668, 885, 950, 968, 1652, 2172, 2133, 215, 1074, 1515, 1981, 2, 1525, 1704, 973, 2396, 1709, 822, 1558, 210, 379, 1875, 
1628, 456, 779, 1121, 792, 1021, 1959, 437, 600, 1995, 1968, 807, 1997, 1986, 1420, 252, 1878, 745, 1178, 1683, 1540, 651, 232, 1973, 423, 1022, 904, 2309, 2180, 1870, 1655, 854, 436, 45, 1667, 2397, 1045, 11, 1671, 516, 681, 2182, 2000, 2183, 899, 1358, 1039, 1344, 897, 2181, 1842, 160, 1850, 2071, 1529, 1987, 277, 1838, 1453, 1839, 2394, 1972, 666, 1990, 25, 142, 1013, 1859, 593, 673, 1980, 1501, 2131, 2198, 816, 2611, 356, 433, 1988, 335, 1898, 1697, 1166, 2019, 201, 1396, 1991, 1115, 132, 1996, 2024, 2057, 2405, 748, 2020, 842, 1332, 525, 935, 347, 1994, 1268, 1884, 733, 55, 2395, 1873, 1879, 1999, 71, 601, 490, 344, 1434, 1487, 1993, 441, 151, 2010, 1908, 548, 113, 451, 1732, 1912, 1259, 809, 1495, 1043, 389, 2323, 1239, 2011, 2295, 2229, 830, 1566]",1,9.0,60.0,350.0,0.7271048744460856,, -"[737, 160, 1677, 130, 745, 1974, 815, 1908, 1843, 1842, 2178, 1982, 415, 471, 2405, 2136, 2017, 1362, 1979, 2026, 181, 743, 2010, 1174, 162, 519, 300, 2011, 483, 1980, 1494, 2013, 1850, 1713, 2045, 2078, 1394, 11, 1741, 1810, 1351, 1986, 643, 816, 1839, 1359, 2070, 2009, 2388, 708, 433, 1332, 1343, 76, 666, 822, 530, 1637, 1846, 1309, 211, 826, 696, 334, 1845, 1644, 2028, 88, 578, 661, 270, 1841, 1847, 777, 835, 1625, 851, 2015, 1622, 1882, 1441, 885, 2069, 277, 2281, 306, 1171, 1464, 1195, 2403, 836, 1527, 897, 149, 733, 1683, 118, 2014, 2012, 1358, 1485, 2016, 279, 2024, 366, 1844, 1729, 1410, 2426, 2071, 1121, 1013, 1655, 553, 1840, 702, 436, 1127, 32, 878, 1995, 69, 604, 1039, 1203, 830, 175, 1288, 1538, 693, 1463, 2276, 2357, 935, 86, 596, 2194, 454, 2274, 2282, 2135, 1674, 2027, 1792, 1973, 2101, 1535, 239, 1572, 1592, 672, 1416, 1879, 2025, 575, 490, 506, 510, 1482, 2425, 1917, 955, 1909, 1382, 1521, 498, 1918, 838, 747, 651, 1566, 1920, 41, 2050, 1914, 1675, 2423, 2283, 2295, 1215, 1072, 2109, 2280, 1894, 1118, 572, 2485, 2293, 1661, 1293, 841, 2105, 1658, 868, 921, 908, 1120, 424, 1652, 1152, 1976, 1042, 324, 376, 973, 242, 2305, 74, 252, 1851, 1978, 280, 2155, 880, 
1376, 1421, 993, 1403, 2099, 1927, 2153, 1583, 682, 1930, 1732, 1975, 1133, 1131, 1517, 1498, 668, 1849, 749, 1977, 1539, 2052, 1525, 1526, 2110, 1218, 115, 2185, 1333, 2355, 1916, 1079, 2164, 2263, 671, 1110, 220, 1370, 630, 504, 1676, 1143, 2384, 2419, 539, 2490, 176, 2019, 586, 1142, 1587, 1368, 2294, 2151, 1981, 2116, 1341, 1500, 544, 748, 1224, 93, 779, 1402, 1357, 988, 2156, 2154, 1848, 94, 1291, 325, 925, 1966, 1919, 1222, 1915, 1905, 2152, 778, 1649, 934, 1660, 195, 675, 2385]",1,9.0,75.0,300.0,0.696824224519941,, -"[1624, 1772, 1787, 417, 133, 236, 185, 1072, 1221, 153, 1950, 873, 310, 297, 2117, 1800, 1661, 880, 2534, 1779, 1045, 2582, 945, 1013, 1708, 1894, 383, 2381, 454, 2500, 2045, 2380, 426, 603, 2044, 598, 1172, 304, 1577, 2077, 199, 1097, 277, 2378, 1738, 1670, 2139, 671, 88, 2238, 620, 1567, 2338, 2063, 722, 124, 1159, 115, 1295, 420, 2379, 0, 2314, 1, 733, 306, 2034, 2327, 1805, 1570, 91, 203, 1542, 14, 2094, 1785, 2141, 682, 2025, 429, 252, 2335, 218, 459, 2550, 2253, 327, 1603, 396, 2123, 1142, 1325, 2016, 1358, 1666, 1241, 818, 729, 2304, 1191, 1869, 1346, 1124, 1479, 836, 1789, 465, 1334, 109, 261, 1119, 1522, 2568, 2325, 1185, 973, 669, 316, 1583, 827]",1,12.0,30.0,350.0,0.6632200886262924,, -"[1095, 65, 2021, 1062, 1822, 1811, 1812, 160, 2251, 1986, 2252, 1507, 962, 1343, 1809, 230, 1158, 2418, 1851, 835, 2046, 1819, 1335, 255, 2048, 446, 1029, 619, 482, 2086, 2166, 603, 1909, 2190, 1821, 441, 576, 2485, 2088, 1818, 1483, 1376, 948, 1917, 1791, 1910, 1121, 795, 1012, 2081, 1808, 859, 1581, 1823, 2010, 1538, 2022, 716, 317, 2000, 407, 1820, 2112, 2182, 2189, 2030, 224, 549, 1576, 1813, 121, 695, 1849, 2165, 2083, 1331, 1009, 1810, 476, 1814, 802, 351, 447, 118, 1004, 56, 183, 215, 1402, 2034, 1505, 1013, 1584, 2087, 239, 1699, 1856, 412, 2094, 1311, 2188, 1797, 790, 454, 2096, 511, 1817, 843, 2047, 1253, 267, 382, 1089, 1690, 568, 1852, 1815, 2320, 2050, 1661, 13, 2026, 1304, 1681, 1869, 1248, 409, 1701, 1180, 1998, 1072, 484, 554, 1788, 
1616, 1837, 2319, 519, 1348, 302, 1705, 191, 1785, 1299, 1802, 1790, 589, 294, 1792, 2090, 1282, 973, 1798, 958, 505, 1735, 1220, 1787, 2045, 1830, 1804, 236, 487, 1624, 306, 1799, 1107, 27, 1441, 1816, 563, 417, 1759, 36, 842, 699, 1367, 1552, 1786, 2385, 308, 573, 1651, 1274, 318, 426, 481, 655, 1490, 2107, 1069, 2143, 2145, 2080, 1656, 1337, 539, 1045, 1459, 388]",1,12.0,50.0,300.0,0.7119645494830132,, -"[95, 1095, 816, 753, 1980, 2004, 142, 1989, 1671, 1628, 1880, 771, 1149, 1859, 1812, 651, 2183, 1156, 1929, 787, 1978, 1848, 2181, 1873, 718, 1870, 1079, 2180, 1314, 1810, 483, 519, 210, 899, 1983, 2006, 2008, 1997, 1139, 1993, 2072, 566, 2020, 2007, 1987, 1332, 215, 1990, 460, 2131, 1574, 438, 456, 1115, 1023, 203, 2022, 232, 858, 201, 408, 890, 950, 885, 1994, 2021, 1701, 621, 1072, 878, 1142, 2153, 2054, 1998, 792, 1838, 1039, 2034, 514, 1876, 2001, 1121, 1558, 725, 55, 2024, 239, 1977, 2002, 347, 2116, 391, 424, 1131, 897, 968, 2003, 1986, 436, 437, 306, 1224, 1991, 1655, 1540, 1269, 1973, 2133, 52, 733, 2172, 68, 14, 45, 935, 1495, 1988, 1227, 2309, 1652, 525, 1667, 668, 1358, 1875, 1709, 454, 1979, 2136, 2057, 666, 1097, 2182, 1453, 1992, 335, 423, 661, 2, 1697, 1985, 1268, 2233, 1013, 32, 323, 2227, 1399, 2137, 252, 476, 1583, 1981, 490, 1529, 151, 1396, 2228, 1704, 593, 630, 880, 504, 2395, 2071, 973, 205, 1974, 366, 1996, 1972, 1127, 1995, 1166, 179, 1869, 682, 191, 2009, 2005, 71, 1999, 1421, 2315, 604, 356, 199, 433, 1928, 1434, 1850, 745, 2231, 673, 2027, 553, 420, 748, 724, 681, 2000, 118, 1420, 160, 1344, 634, 2019, 2396, 2611, 1239, 1732, 1630, 471, 211, 2018, 842, 161, 2394, 1982, 1566, 498, 671, 1487, 1085, 1912, 578, 60, 1625, 1644, 1843, 2274, 2017, 737, 2232, 25, 815, 1847, 2178, 1713, 2198, 2102, 1259, 2010, 995, 2015, 2103, 743, 1474, 1394, 1908, 1178, 2011, 2016, 600, 1285, 130, 1668, 2467, 1527, 1515, 860, 841, 277, 115, 1898, 1027, 2450, 176, 1696, 1741, 571, 2397, 1494, 1338, 851, 818, 1570, 162, 465, 516, 1882, 61, 1658, 1677, 331, 
2012, 696, 272, 1309, 300, 88, 2013, 1288, 1174, 415, 76, 2593, 966, 204, 2014]",1,12.0,75.0,300.0,0.7211964549483013,, -"[1258, 589, 252, 711, 189, 661, 121, 505, 1262, 2153, 935, 682, 88, 1448, 1196, 2251, 2316, 406, 2407, 1725, 1505, 244, 880, 1253, 289, 1313, 1358, 2252, 277, 563, 1670, 1785, 2026, 2093, 1224, 630, 191, 814, 318, 598, 1624, 1070, 10, 1801, 1740, 1013, 1790, 2019, 20, 115, 1986, 1645, 2096, 279, 2436, 2598, 773, 2016, 1583, 1810, 989, 303, 980, 2382, 1998, 2034, 973, 2094, 416, 1131, 573, 539, 1158, 2143, 836, 747, 1733, 643, 1574, 1337, 1560, 2086, 1622, 2095, 133, 2485, 859, 671, 2045, 519, 1787, 1251, 1072, 1777, 1045, 1784, 308, 1346, 2185, 1500, 1133, 102, 441, 1779, 2010, 476, 2047, 452, 1842, 1849, 18, 1343, 1571, 1367, 1778, 484, 109, 1780, 2112, 945]",1,15.0,30.0,250.0,0.6816838995568686,, -"[831, 1505, 2407, 2145, 18, 643, 1788, 741, 1313, 1790, 1248, 36, 191, 1358, 1786, 1564, 1791, 68, 745, 1784, 603, 544, 1871, 530, 1448, 1561, 695, 1800, 556, 454, 1773, 2009, 1989, 1095, 1140, 566, 1851, 118, 1253, 1258, 1991, 2034, 1704, 2143, 1616, 1812, 2329, 303, 899, 1810, 1709, 661, 505, 1878, 2288, 2256, 2082, 699, 366, 1997, 1996, 484, 1623, 1127, 2003, 573, 114, 2106, 1013, 2093, 681, 1722, 1780, 2, 1015, 2005, 2478, 1859, 859, 441, 2287, 160, 460, 341, 2001, 1777, 1149, 391, 1681, 1995, 2095, 1776, 743, 384, 1571, 1873, 102, 2006, 399, 252, 673, 2326, 708, 1622, 685, 2047, 1821, 1197, 476, 1196, 581, 1778, 151, 2504, 1992, 1990, 135, 660, 719, 2008, 409, 1769, 1661, 1023, 1045, 1434, 826, 2086, 1251, 289, 1781, 1651, 406, 2004, 973, 1072, 1624, 554, 1343, 2002, 1640, 1618, 297, 139, 103, 1986, 1367, 452, 2094, 1830, 1299, 417, 1775, 610, 1987, 234, 1998, 1483, 1823, 1701, 608, 1107, 542, 232, 1782, 133, 294, 124, 1772, 2000, 2318, 316, 1789, 887, 1771, 1805, 2045, 1787, 239, 2078, 10, 910, 1798, 487, 115, 1864, 2079, 792, 963, 201, 1876, 302, 231, 317, 2481, 459, 1572, 869, 308, 2085, 1558, 2046, 1770, 1870, 1779, 2403, 1852, 350, 836, 
766, 1453, 1656, 215, 24, 1988, 335, 1785, 1705, 2084, 2026, 112, 416, 1636, 143, 1875, 1297, 138, 426, 845, 1490, 589, 2080, 2007, 1131, 329, 1166, 1670, 157, 1066, 1473, 1799, 179, 71, 176, 126, 2707, 2598, 1802, 1574, 808, 2048, 165, 45, 1100, 656, 387, 1193, 1792, 1584, 1999, 2089, 958, 547, 1993, 519, 1994, 153, 1697, 1856, 1245, 1158, 1804, 2087, 2090, 306, 236, 2494, 1346, 318, 109, 1797, 655, 2191, 598, 968, 1337, 563, 1551, 1009, 2088, 2138, 2091, 2092, 1301, 521, 637, 1573, 1003, 48, 480]",1,15.0,75.0,300.0,0.7134416543574594,, -"[2197, 745, 236, 736, 504, 109, 2395, 1616, 1388, 563, 1566, 2162, 1421, 571, 160, 126, 470, 65, 2418, 2317, 1701, 2396, 1538, 2071, 734, 2046, 435, 447, 1655, 733, 553, 412, 308, 809, 1487, 2021, 25, 318, 356, 81, 527, 748, 315, 1080, 514, 1396, 1867, 1314, 544, 1239, 2228, 1337, 2094, 1171, 1570, 712, 890, 1131, 277, 1420, 1704, 215, 133, 1463, 2419, 1097, 1121, 153, 661, 1467, 56, 2137, 306, 2347, 1651, 118, 1787, 1227, 724, 272, 1986, 842, 600, 1602, 1085, 1973, 2485, 285, 1027, 2404, 2056, 2202, 850, 1676, 2074, 2593, 1995, 1880, 2397, 366, 2472, 1127, 1482, 1879, 603, 2194, 1652, 2199, 743, 1772, 1580, 239, 1074, 1609, 753, 2394, 1852, 1399, 1416, 519, 436, 1909, 2276, 1568, 696, 708, 1671, 1856, 2019, 2227, 2026, 1376, 454, 341, 1248, 1344, 1849, 2049, 1293, 257, 2405, 2105, 55, 1358, 2048, 61, 120, 1115, 1572, 1269, 423, 1042, 2078, 699, 2232, 1841, 490, 1732, 95, 1784, 2011, 1635, 2181, 2117, 1846, 510, 161, 2281, 973, 667, 2136, 1982, 621, 1980, 816, 1661, 2261, 2045, 498, 578, 323, 2357, 868, 2055, 408, 456, 2016, 1912, 1981, 211, 1224, 2012, 1848, 2047, 1999, 1667, 325, 2099, 1359, 2467, 2131, 1842, 1791, 2018, 1882, 2025, 1840, 1628, 1979, 2133, 2034, 1110, 1851, 604, 2017, 2015, 2312, 1914, 1926, 52, 1117, 1675, 1296, 1403, 139, 1741, 1974, 1448, 2172, 1683, 666, 32, 347, 1464, 2198, 2189, 1343, 897, 2180, 2054, 1120, 836, 142, 1474, 826, 1494, 2072, 203, 1844, 2335, 1630, 1583, 1776, 60, 2010, 2053, 693, 827, 634, 
162, 1394, 841, 1918, 1920, 1898, 1139, 815, 530, 1713, 2196, 98, 1729, 772, 1529, 181, 1351, 506, 516, 1352, 262, 334, 130, 1838, 597, 69, 593, 1377, 1178, 880, 191, 995, 1830, 1203, 1338, 1521, 2103, 2102, 1908, 657, 737]",1,15.0,75.0,350.0,0.7049483013293943,, -"[106, 2461, 106, 2461, 106, 2461, 106, 2461, 106, 2461]",0,3.0,5.0,50.0,0.08677991137370754,, -"[2107, 426, 2045, 1772, 530, 399, 2046, 1705, 773, 1805, 487, 1776, 109, 505, 303, 1856, 2081, 1158, 1146, 133, 1781, 1045, 1773, 2088, 1777]",0,3.0,5.0,150.0,0.09490398818316101,, -"[144, 145, 537, 1698, 1593, 1165, 2192, 2622, 213, 898, 495, 1328, 1835, 2160, 23, 92, 1327, 1836, 2159, 2161, 2209, 1504, 2157, 1647, 2158, 108, 2160, 1835, 2159, 898, 108, 1647, 2157, 2209, 1504, 2160, 1835, 2158, 2159, 898, 108, 1647, 2157, 2209, 1504, 2160, 1835, 2158, 2159, 898]",0,3.0,10.0,250.0,0.08677991137370754,, -"[236, 553, 1572, 1616, 1448, 1224, 779, 1568, 118, 1592, 277, 1358, 454, 447, 2078, 2405, 1919, 816, 366, 1248, 1676, 65, 1525, 1538, 2155, 56, 1980, 412, 2419, 2485, 1979, 2276, 2046, 2034, 1072, 2189, 1127, 868, 603, 1784, 2048, 153, 873, 766, 1674, 973, 1074, 306, 133, 1776, 417, 1651, 316, 539, 2112, 88, 1351, 1851, 1309, 102, 1110, 1849, 736, 408, 126, 1791, 722, 1583, 2194, 519, 199, 693, 2418, 1995, 228, 1567, 1926, 1787, 1362, 1106, 772, 1482, 734, 376, 2071, 1917, 1842, 262, 2045, 2501, 778, 95, 1708, 1729, 1874, 139, 1914, 109, 1675, 1683, 2500, 1070, 1487, 1915, 1973, 420, 1844, 1464, 2026, 2013, 708, 1661, 830, 2025, 1577, 1377, 308, 69, 1918, 1221, 1359, 1124, 2135, 1848, 604, 1986, 1841, 1120, 1655, 341, 452, 1920, 1293, 252, 1856, 175, 669, 1916, 1875, 32, 2096, 1505, 666, 203, 836, 2394, 921, 1868, 712, 1485]",1,3.0,30.0,350.0,0.6850073855243722,, -"[1507, 962, 1343, 230, 1808, 989, 549, 1809, 1062, 482, 790, 1851, 2324, 1625, 1282, 1297, 1735, 1095, 1299, 1868, 835, 845, 1121, 1819, 181, 1581, 1645, 484, 224, 1729, 1283, 766, 1814, 1499, 2034, 511, 505, 1464, 1583, 143, 1661, 889, 1869, 1197, 
2098, 598, 1998, 530, 975, 836, 576, 1769, 1779, 337, 1843, 1609, 2094, 1348, 316, 1823, 1812, 1623, 1335, 1782, 1624, 1670, 471, 459, 1574, 2025, 1785, 69, 1842, 1107, 1846, 1775, 1004, 1644, 522, 2097, 1830, 13, 1067, 1847, 1841, 351, 1810, 1483, 887, 510, 1815, 1073, 542, 399, 519, 1513, 1403, 1546, 416, 1986, 1564, 568, 1776, 1817, 1850, 1013, 1675, 1057, 1798, 1844, 1781, 487, 387, 1772, 1251, 1701, 334, 506, 1813, 1367, 1840, 1816, 1521, 1856, 1448, 795, 1705, 960, 417, 610, 1852, 699, 2096, 1346, 2092, 1791, 1120, 1804, 910, 1656, 289, 1802, 1584, 103, 2079, 1072, 1797, 1787, 215, 2087, 1799, 79, 109, 126, 124, 1337, 350, 133, 112, 1358, 608, 2046, 2327, 2045, 573, 1248, 176, 2048, 2086, 1771, 426, 1821, 153, 1873, 603, 563, 191, 2082, 2088, 1640, 308, 1193, 1576, 27, 139, 656, 1331, 1045, 655, 1651, 2409, 1777, 341, 1551, 1780, 1245, 2026, 1818, 935, 102]",1,3.0,40.0,250.0,0.6739290989660266,, -"[553, 2009, 366, 2054, 1628, 1574, 519, 142, 95, 1127, 1139, 1995, 456, 2181, 1540, 2183, 1121, 1985, 45, 1558, 1583, 651, 1131, 792, 1980, 2172, 836, 885, 2057, 745, 1074, 816, 68, 1039, 1676, 2180, 578, 753, 673, 160, 878, 525, 232, 201, 179, 1023, 483, 1149, 423, 277, 1986, 334, 52, 2, 1838, 604, 973, 733, 1675, 2229, 681, 151, 1704, 968, 266, 71, 2116, 435, 1851, 347, 950, 1529, 1981, 510, 1929, 2000, 1652, 1166, 1661, 1495, 460, 1403, 490, 1972, 2019, 437, 1120, 55, 822, 566, 666, 210, 1115, 69, 809, 1095, 1671, 436, 215, 621, 2024, 890, 252, 32, 1453, 1296, 476, 1848, 1178, 880, 335, 1869, 2109, 2136, 1637, 433, 1521, 2404, 1979, 1974, 424, 60, 725, 391, 1975, 1977, 2034, 1269, 118, 181, 1293, 1227, 1729, 1655, 1625, 1850, 779, 1667, 1333, 1696, 908, 1973, 1332, 1609, 548, 438, 995, 2467, 1908, 1849, 2102, 1464, 1912, 2103, 1494, 1501, 1013, 897, 506, 2011, 1844, 2110, 2020, 530, 116, 1982, 2018, 2450, 356, 514, 1845, 1314, 1978, 860, 130, 1525, 2045, 2137, 1843, 498, 1846, 1880, 1999, 2017, 1421, 1396, 61, 1268, 2274, 1630, 1288, 1085, 471, 1847, 1677, 2010, 
737, 696, 815, 1842, 841, 2395, 1394, 2016, 25, 2013, 743, 1527, 2228, 1309, 2315, 1224, 239, 1370, 1683, 1474, 1713, 1027, 1399, 2397, 1043, 718, 88, 2027, 724, 2072, 2394, 1882, 2133, 76, 2232, 1482, 571, 1741, 2182, 504, 899, 1434, 1358, 1420, 162, 1515, 408, 1487, 415, 2611, 2198, 2233, 1239, 668, 1259, 1021, 1344, 211, 1566, 2231, 2022, 323, 1928, 2012, 593, 2309, 1976, 2178, 2227, 818, 1570, 2071, 1898, 1338, 661, 191, 1658, 851, 1644, 331, 842, 204, 1174, 966, 1668, 2593, 1841, 2131, 2014, 516, 2396, 1840, 600, 272, 161, 2015, 1732, 1285, 858, 300, 461, 994, 748, 634]",1,3.0,60.0,300.0,0.689807976366322,, -"[563, 153, 109, 734, 236, 2405, 1787, 318, 126, 1337, 95, 1980, 2025, 408, 2048, 1879, 736, 519, 2281, 2078, 1909, 514, 868, 2026, 1772, 693, 1979, 1867, 816, 973, 118, 120, 2056, 651, 215, 1655, 1572, 1526, 2317, 1701, 2155, 1882, 306, 133, 65, 1677, 1856, 1810, 1343, 2117, 415, 2136, 2071, 25, 2276, 1568, 1842, 2608, 211, 1131, 2045, 1127, 2485, 1852, 454, 1841, 2419, 772, 850, 2153, 2135, 1846, 1358, 2046, 1713, 737, 490, 891, 1851, 1171, 1448, 1784, 699, 2011, 2357, 1309, 2094, 1908, 581, 1661, 55, 2653, 376, 702, 1741, 1995, 2418, 578, 2178, 60, 1120, 1314, 2310, 836, 470, 945, 1529, 2016, 2010, 1097, 1732, 2217, 1505, 2457, 1791, 1583, 1592, 2156, 851, 341, 1538, 1840, 907, 1203, 1660, 1110, 1341, 1376, 2047, 1377, 2015, 712, 1351, 1850, 21, 2154, 1403, 2014, 822, 880, 1644, 1776, 1482, 1974, 2194, 308, 1843, 277, 1847, 2278, 300, 479, 1848, 1174, 753, 262, 1844, 1293, 175, 139, 908, 2388, 2012, 164, 1830, 1402, 1880, 1535, 1729, 510, 1105, 465, 586, 1917, 890, 815, 1625, 728, 682, 1291, 1269, 1907, 1906, 1115, 2189, 1674, 551, 1527, 1928, 553, 530, 2638, 325, 841, 1362, 2422, 41, 1394, 661, 1920, 2100, 88, 2403, 1248, 2050, 1227, 1464, 162, 1919, 1370, 1637, 1651, 1671, 1288, 1937, 601, 1916, 1614, 2018, 1926, 779, 199, 894, 778, 323, 1372, 955, 1626, 1912, 32, 657, 2293, 2013, 447, 1968, 2134, 1079, 1683, 1658, 252, 1352, 2110, 1494, 210, 420, 
2109, 644, 2138, 2649, 596, 2480, 471, 1587, 1869, 579, 2229, 1219, 1630, 1976, 992, 1329, 2227, 1013, 1845, 830, 1975, 1616, 1485, 539, 603, 1525, 1905, 2228, 593, 634, 181, 708, 334, 1929, 498, 1396, 2137, 604, 266, 1043, 724, 1218, 56, 1399, 994, 1224, 412, 1914, 696, 205, 2468, 809, 2395, 1344, 504]",1,3.0,60.0,350.0,0.706794682422452,, -"[565, 1743, 687, 73, 1610, 389, 876, 764, 819, 155, 1620, 154, 90, 797, 1077, 1546, 742, 1767, 613, 364, 1282, 341, 228, 905, 1012, 173, 796, 1336, 1167, 1512, 118, 59, 156, 643, 1783, 2113, 684, 337, 564, 1616, 580, 359, 2083, 2407, 501, 111, 1313, 728, 758, 333, 744, 609, 1412, 1735, 68, 1725, 935, 326, 708, 1470, 1311, 244, 671, 2252, 2270, 72, 505, 1560, 741, 1070, 1358, 748, 562, 2107, 53, 1089, 661, 686, 2318, 1768, 2075, 2269, 2251, 1726, 2451, 1851, 2388, 544, 2145, 121, 399, 1483, 101, 1571, 110, 626, 2096, 1465, 1681, 2327, 1871, 567, 1262, 303, 945, 880, 36, 1740, 552, 346, 215, 218, 466, 853, 246, 1258, 1701, 1986, 342, 1144, 814, 704, 170, 1829, 1249, 189, 2026, 231, 1784, 1142, 1635, 2106, 277, 823, 689, 2143, 252, 695, 699, 30, 1776, 1583, 1774, 969, 454, 1623, 836, 406, 1013, 980, 1448, 1801, 1804, 1878, 1911, 1561, 1574, 1505, 973, 2382, 682, 317, 581, 1670, 174, 1733, 1791, 630, 1777, 1797, 416, 314, 1564, 489, 1253, 1158, 135, 1778, 2016, 831, 573, 2287, 524, 441, 1140, 530, 1769, 1810, 2091, 126, 554, 2256, 2494, 1251, 719, 1622, 1770, 2019, 20, 2288, 387, 685, 1792, 1800, 1572, 963, 476, 1790, 2375, 1852, 859, 1788, 2504, 1781, 10, 2034, 1015, 102, 2153, 115, 2079, 826, 302, 1799, 1618, 34, 1722, 1335, 2085, 1289, 1771, 2080, 2326, 1782, 112, 1786, 2086, 308, 1072, 1803, 2046, 1509, 1785, 2481, 660, 384, 1248, 773, 357, 808, 655, 1490, 1009, 2047, 598, 2093, 608, 191, 754, 1045, 757, 2478, 176, 2082, 616, 133, 2089, 2090, 1197, 556, 1551, 2045, 2095, 452, 1780, 1245, 236, 18, 1640, 645, 1196, 350, 2048, 1779, 1478, 519, 487, 426, 1624, 1346, 1830, 417, 459, 873, 563, 910, 589, 2081, 318, 1651, 2087, 1789, 
1772, 103, 2088, 2322, 153, 1337, 610, 1787, 2374, 887, 1773, 409, 2208, 484, 114, 603, 1856, 1459, 1367, 294, 2094, 958, 124]",1,3.0,65.0,350.0,0.6986706056129985,, -"[2455, 99, 123, 122, 26, 127, 2604, 2454, 122, 99, 122, 26, 99, 2454, 2455, 122, 26, 99, 2454, 2455, 122, 26, 99, 2454, 2455]",0,6.0,5.0,250.0,0.08677991137370754,, -"[2209, 108, 1647, 2161, 2160, 1328, 1504, 2157, 1327, 2159, 898, 1835, 495, 2158, 23, 1698, 1836, 2192, 92, 1165, 537, 1593, 144, 213, 145, 2622, 2192, 1165, 495, 1698, 144, 145, 213, 1593, 2192, 537, 1165, 2622, 495, 1698, 144, 145, 213, 1593, 2192, 537, 1165, 2622, 495, 1698]",0,6.0,10.0,300.0,0.08677991137370754,, -"[1567, 2500, 2668, 2164, 1708, 722, 1358, 2667, 1, 1710, 230, 2691, 2314, 1810, 2045, 2002, 14, 2335, 332, 2582, 603, 1800, 228, 1662, 2309, 1812, 935, 2532, 2023, 1095, 605, 1666, 2, 2034, 2001, 1894, 2217, 733, 206, 88, 460, 1140, 420, 2379, 1072, 0, 2041, 1577, 277, 310, 199, 1166, 1862, 716, 2008, 160, 1570, 1172, 1989, 175, 973, 553, 2501, 1446, 218, 2359, 2236, 1106, 2388, 1097, 687, 417, 1950, 1869, 1239, 311, 1583, 1874, 1852, 1013, 2003, 1221, 109, 873, 1907, 1171, 13, 2303, 96, 203, 2357, 551, 1995, 1454, 2534, 669, 1821, 521, 306, 1335]",1,6.0,20.0,300.0,0.6938700147710487,, -"[2291, 377, 154, 1265, 364, 2122, 994, 1329, 1020, 2380, 91, 2268, 2123, 613, 341, 876, 326, 73, 59, 111, 819, 616, 155, 2338, 2076, 180, 2199, 1682, 552, 2555, 2541, 401, 687, 764, 2667, 285, 901, 544, 2063, 109, 1662, 2075, 2615, 48, 970, 289, 665, 1346, 290, 231, 156, 482, 1294, 72, 2045, 689, 332, 2379, 306, 758, 2365, 2301, 1160, 1192, 2064, 580, 797, 197, 2668, 1171, 536, 2238, 1858, 90, 919, 796, 524, 445, 609, 606, 78, 416, 678, 2303, 1701, 179, 486, 387, 1670, 1583, 24, 14, 1119, 456, 2008, 1812, 885, 256, 218, 1875, 2077, 185, 1999, 1038, 2325, 2002, 827, 2430, 553, 173, 1062, 2121, 757, 327, 570, 1014, 951, 760, 836, 2509, 733, 2340, 357, 2034, 708, 1279, 759, 2326, 574, 1191, 1110, 2550, 957, 466, 572, 1120, 157, 435, 34, 298, 
2304, 818, 1697, 2026, 899, 576, 1347, 244, 383, 1666, 591, 1325, 1739, 2381, 1358, 2339, 1522, 2035, 1513, 695, 968, 151, 999, 744, 567, 346, 1894, 1907, 794, 2253, 1810, 2378, 1219, 1603, 2186, 94, 671, 1781, 1055, 1912, 489, 101, 49, 1820, 2003, 1818, 1738, 519, 2691, 1558, 598, 2001, 873, 2028, 1800, 314, 1140, 823, 2236, 297, 493, 1295, 333, 215, 879, 10, 1434, 53, 2412, 1996, 985, 2025, 1991, 2044, 2347, 110, 1262, 476, 603, 429, 2011, 201, 1995, 316, 228, 1870, 1817, 501, 1876, 1998, 1423, 853, 1127, 792, 1488, 366, 869, 45, 1864, 1859, 1709, 1023, 335, 756, 1453, 645, 2027, 337, 882, 1997]",1,6.0,50.0,350.0,0.6994091580502215,, -"[2080, 608, 112, 426, 487, 1789, 302, 887, 1777, 1779, 910, 102, 1780, 329, 1775, 2095, 476, 1251, 1798, 103, 973, 958, 656, 722, 1782, 2501, 420, 1805, 417, 581, 655, 2082, 350, 1624, 1623, 1245, 1367, 719, 1770, 2094, 2357, 1705, 1781, 1771, 935, 1072, 1584, 1158, 542, 199]",0,9.0,10.0,250.0,0.10819793205317578,, -"[596, 175, 41, 1464, 69, 1359, 604, 921, 955, 1362, 1013, 1042, 1072, 2293, 2109, 454, 880, 306, 1118, 506, 2355, 2490, 2357, 1485, 2052, 2263, 277, 252, 1661, 1927, 553, 1914, 1535, 830, 2016, 1079, 1652, 1152, 118, 1517, 88, 1218, 280, 1421, 1358, 1416, 2110, 544, 2485, 2024, 2274, 651, 671, 973, 2295, 366, 1966, 1655, 74, 1127, 325, 510, 1995, 2099, 993, 94, 2282, 2116, 836, 1851, 1538, 868, 2155, 1203, 935, 115, 1930, 2419, 630, 1848, 1525, 1142, 176, 748, 1975, 1583, 2019, 2156, 1120, 702, 1973, 2185, 2294, 1500, 733, 490, 1649, 934, 1981, 1592, 675, 2425, 1732, 1133, 195, 2281, 838, 1370, 749, 436, 1341, 2153, 682, 2423, 1676, 1977, 2025, 539, 1121, 324, 1143, 1526, 1909, 93, 586, 2151, 1215, 1131, 1566, 2280, 1919, 1110, 1879, 242, 693, 376, 1224, 779, 1376, 778, 2152, 2385, 2154, 1291, 1402, 1587, 1660, 1222, 2283, 925]",1,9.0,30.0,150.0,0.6735598227474151,, -"[259, 117, 2537, 259, 117, 2537, 259, 117, 2537, 259, 117, 2537, 259, 117, 2537]",0,9.0,30.0,250.0,0.09785819793205318,, -"[306, 1980, 1226, 1955, 1139, 
651, 1956, 1421, 1975, 1929, 1839, 2282, 885, 1115, 1614, 2231, 11, 1894, 109, 423, 347, 621, 1810, 816, 1332, 1131, 2172, 2405, 973, 1842, 149, 1482, 1628, 2396, 2198, 1515, 1540, 52, 379, 1959, 2054, 1976, 1420, 2034, 779, 272, 904, 2156, 210, 1022, 95, 807, 1838, 835, 456, 1224, 483, 2071, 1914, 1085, 2397, 436, 249, 1333, 1655, 1529, 854, 1525, 408, 2024, 1667, 1045, 1979, 666, 1671, 252, 1121, 2182, 2295, 1973, 504, 1285, 55, 132, 142, 2133, 733, 593, 32, 525, 1410, 1268, 1013, 2016, 120, 1683, 1999, 1637, 935, 842, 2057, 809, 1566, 1850, 2181, 490, 1978, 2183, 2012, 2018, 1396, 1873, 897, 161, 1898, 2131, 118, 344, 2394, 2180, 1884, 1968, 389, 1986, 1474, 471, 277, 441, 858, 239, 2323, 451, 2100, 724, 830, 1487, 1843, 1571, 818, 2015, 1178, 2011, 1105, 1027, 2072, 1908, 1358, 2309, 25, 1344, 2010, 2395, 160, 2233, 1625, 1732, 2450, 2232, 1644, 745, 2137, 1494, 1982, 356, 822, 1912, 748, 743, 578, 2017, 1879, 1501, 76, 851, 323, 815, 498, 1399, 2103, 1570, 1658, 211, 60, 995, 478, 2384, 479, 2178, 1974, 266, 2013, 1847, 97, 1353, 600, 2102, 1338, 2229, 415, 514]",1,9.0,40.0,250.0,0.6650664697193501,, -"[1567, 2668, 2500, 2667, 722, 1708, 1358, 1710, 14, 2045, 2335, 603, 1662, 935, 2314, 2002, 1800, 310, 2041, 2001, 605, 1666, 2034, 2236, 2691, 1172, 1950, 1072, 1810, 1239, 2309, 2164, 2582, 733, 1577, 88, 1894, 1140, 2568, 218, 230, 669, 2303, 2501, 417, 206, 973, 2493, 1446, 716, 109, 1106, 332, 1013, 873, 311, 1874, 687, 2327, 2534, 2698, 277, 553, 199, 1, 2379, 306, 2532, 1907, 2206, 304, 1583, 2023, 1603, 1055, 467, 699, 2004, 54, 45, 1149, 228, 1465, 1171, 151, 2359, 1852, 1124, 1445, 175, 0, 420, 2388, 1159, 1570, 1221, 570, 944, 2471, 476, 962, 2372, 2003, 1812, 1488, 1241, 743, 2, 1346, 1147, 1873, 2117, 1337, 1709, 1869, 1542, 1857, 1865, 2217, 71, 1995, 1856, 681, 13, 1095, 767, 1107, 158, 1821, 68, 318, 102, 729, 1334, 920, 343, 180, 1823, 1166, 1097, 48, 1573, 1799, 2430, 673, 215, 1127, 1862, 1316, 563, 176, 2412, 2008, 566, 480, 1023, 2706, 1299, 
366, 1820, 745, 160, 1998, 2009, 1641, 17, 1987, 1126, 792, 363, 1479, 2365, 2357, 460, 2140, 2555, 1846, 1855, 2238, 1941, 261, 2006, 2138, 203, 1871, 2141, 1665, 1454, 2139, 968, 2191, 10, 1670, 548, 2096, 1997, 1219, 493, 899, 185]",1,9.0,40.0,300.0,0.6931314623338257,, -"[1775, 2088, 102, 656, 2046, 1798, 484, 1656, 1797, 302, 1158, 1624, 910, 1784, 1651, 2048, 1772, 1782, 133, 1337, 1785, 563, 719, 610, 2090]",0,12.0,5.0,100.0,0.09010339734121123,, -"[2255, 225, 2255, 225, 2255, 225, 2255, 225, 2255, 225]",0,12.0,10.0,300.0,0.09010339734121123,, -"[156, 1560, 155, 505, 215, 1733, 603, 671, 831, 1248, 90, 189, 880, 228, 1791, 232, 980, 484, 2106, 773, 2096, 179, 303, 68, 197, 341, 2107, 2153, 935, 18, 1725, 630, 1852, 2145, 1262, 1258, 1986, 1142, 1197, 72, 73, 277, 36, 1740, 441, 252, 741, 945, 1013, 2026, 1564, 1505, 2019, 1767, 573, 1070, 1801, 342, 551, 1810, 2217, 59, 1144, 2329, 1701, 1358, 973, 1788, 808, 191, 1786, 231, 1009, 2326, 2143, 244, 530, 1583, 1871, 103, 1783, 2093, 589, 661, 1856, 719, 1651, 1775, 1851, 1367, 1781, 695, 101, 1072, 454, 1830, 1776, 135, 1803, 2094, 1868, 2095, 2034, 1778, 1448, 2288, 2080, 1782, 53, 1773, 1015, 1270, 1251, 542, 836, 1798, 1681, 2287, 2425, 2046, 487, 1779, 2045, 2082, 1289, 682, 1661, 139, 1670, 1245, 1804, 111, 1784, 1800, 399, 1998, 1705, 2085, 660, 2086, 114, 655, 20, 1780, 608, 1478, 417, 350, 476, 1799, 1089, 416, 699, 1640, 1572, 826, 34, 1772, 1622, 1623, 1787, 115, 2016, 910, 2318, 1770, 170, 1584, 409, 1656, 314, 1574, 2078, 1785, 1618, 1483, 1771, 958, 581, 154, 2047, 1789, 329, 112, 30, 2089, 610, 126, 544, 1045, 138, 2084, 1193, 2504, 1490, 1797, 2090, 1624, 1802, 306, 452, 656, 563, 308, 302, 887, 519, 294, 2079, 387, 963, 318, 1337, 1346, 1551, 459, 289, 426, 1777, 1878, 1158, 554, 2048, 1805, 1722, 176, 153, 1196, 236, 1769, 2088, 317, 109, 124, 556, 2481, 1768, 2092, 133, 1571, 234, 1774, 2091, 102, 1561, 384, 2087, 598, 2256, 2478]",1,12.0,50.0,250.0,0.7097488921713442,, -"[210, 164, 2003, 
438, 2001, 1131, 1987, 2004, 673, 718, 2130, 1538, 1336, 86, 460, 691, 1704, 1907, 2172, 1889, 1909, 1285, 2039, 1876, 787, 1319, 2131, 968, 1614, 1655, 771, 1039, 249, 791, 899, 2008, 1989, 1990, 1026, 578, 1848, 1121, 1709, 1891, 2295, 1807, 1983, 1410, 2009, 792, 2385, 1838, 417, 2002, 1203, 1463, 1984, 1079, 1259, 1812, 604, 1985, 681, 2021, 2198, 1993, 1894, 2037, 2042, 725, 435, 1495, 2020, 2312, 2105, 878, 756, 1978, 1156, 1515, 1152, 2309, 1296, 1697, 1671, 324, 71, 1991, 2005, 1665, 566, 1988, 120, 2007, 239, 2133, 553, 141, 1416, 146, 1628, 1928, 1002, 151, 382, 1558, 433, 1995, 2386, 391, 1227, 55, 37, 2034, 2057, 733, 179, 897, 2041, 1652, 1986, 2181, 2232, 1974, 519, 885, 1998, 1149, 2035, 1224, 366, 1080, 52, 1875, 2281, 95, 2182, 1973, 437, 1879, 68, 191, 1574, 483, 1870, 1968, 816, 424, 2040, 490, 1667, 1869, 1115, 2054, 1139, 1127, 14, 158, 1929, 436, 1912, 1497, 1842, 2231, 211, 745, 826, 160, 668, 45, 2136, 1654, 118, 1453, 621, 995, 514, 49, 1358, 2000, 1529, 1540, 1338, 142, 1609, 215, 2022, 2102, 1434, 2036, 2103, 2027, 60, 2162, 1399, 2227, 356, 61, 2071, 525, 2156, 1178, 323, 890, 1448, 2183, 423, 456, 1013, 1166, 1273, 1269, 2404, 2, 2405, 617, 277, 1344, 504, 1314, 1994, 1859, 1729, 32, 651, 666, 478, 1997, 1992, 1977, 1396, 2228, 232, 2038, 2233, 1268, 858, 1873, 1880, 2180, 1239, 950, 2116, 753, 2395, 201, 1095, 224, 2137, 724]",1,12.0,50.0,350.0,0.7485228951255539,, -"[1358, 1889, 1891, 2045, 1026, 985, 1002, 708, 1303, 1846, 1807, 1660, 779, 524, 1986, 2099, 141, 1973, 1850, 181, 1732, 1614, 1448, 2039, 868, 1464, 14, 1879, 519, 1848, 1729, 1273, 1625, 2042, 2035, 158, 1171, 1291, 1907, 617, 1841, 1701, 691, 1968, 1851, 2152, 575, 1957, 1156, 1341, 2293, 1526, 2036, 885, 1661, 1149, 873, 1382, 1665, 2026, 1152, 1787, 668, 749, 2153, 146, 2305, 2154, 1330, 2037, 118, 1535, 1521, 280, 2009, 1838, 239, 1370, 1671, 2034, 778, 1919, 1849, 1525, 1869, 1309, 1929, 1927, 1847, 1844, 791, 1975, 1572, 2038, 2294, 1909, 2283, 1812, 498, 1983, 
417, 1343, 751, 2178, 1095, 979, 1013, 962, 1039, 1840, 482, 993, 1497, 878, 866, 1845, 1818, 841, 771, 69, 13, 1204, 1332, 30, 897, 506, 1224, 1336, 1299, 530, 1820, 539, 510, 490, 2610, 2155, 270, 1351, 567, 1587, 1293, 279, 836, 1270, 747, 2041, 305, 1468, 1981, 549, 2207, 1049, 514, 1479, 1926, 1675, 2040, 737, 1319, 49, 1977, 1222, 261, 382, 1403, 2117, 1924, 334, 1053, 908, 1432, 483, 1503, 436, 1930, 838, 1923, 586, 86, 1809, 738, 1644, 1978, 1843, 471, 1583, 2078, 880, 1062, 1814, 1808, 230, 1821, 424, 790, 511, 1979, 1576, 1120, 826, 2344, 1811, 1822, 74, 536, 354, 1819, 120, 1331, 224, 61, 1581, 1203, 1118, 716, 195, 433, 211, 1654, 1810, 2280, 1008, 461, 697, 1494, 1405, 1215, 2424, 229, 2453, 2403, 816, 1538, 2025, 1110, 1922, 1416, 2136, 1004, 1602, 1348, 1823, 2162, 572, 149, 2405, 1410, 1842, 2027, 1080, 1498, 1813, 795, 1925, 2151, 1107, 1815, 27, 2185, 1839, 2282, 1357, 671, 1463, 718, 719, 1121, 1816, 1817, 1500, 2135, 1143, 2156, 351, 2028, 481, 1133, 568, 576, 1980, 2295, 6, 242, 770, 1508, 1134, 835, 1921, 1982, 682, 2164, 1894, 2384, 1655, 11, 2105, 643, 1131, 2118, 2281]",1,12.0,60.0,300.0,0.7370753323485968,, -"[1543, 739, 306, 1985, 553, 735, 438, 1421, 1812, 792, 885, 1226, 2256, 1676, 604, 878, 2009, 1975, 651, 1955, 1810, 1980, 109, 1956, 1839, 519, 476, 1139, 2109, 460, 1989, 423, 2282, 1526, 1614, 1894, 366, 2000, 11, 347, 102, 461, 566, 2022, 725, 1434, 1993, 2003, 1149, 1115, 621, 1929, 424, 335, 1332, 1869, 1558, 1995, 1482, 950, 1914, 816, 1997, 1998, 1958, 1515, 2001, 2172, 973, 718, 1131, 1095, 2396, 1976, 994, 1420, 1990, 272, 1127, 899, 1881, 2405, 2034, 2198, 52, 1166, 1085, 1628, 1540, 1842, 1574, 1074, 2, 149, 1981, 1667, 2071, 1039, 2397, 1021, 2231, 1259, 379, 2054, 1977, 433, 210, 1333, 2002, 1959, 160, 1972, 151, 1224, 779, 807, 745, 1876, 456, 1652, 2020, 68, 1986, 1838, 391, 437, 1709, 2156, 277, 249, 1022, 95, 1655, 904, 2309, 1878, 408, 2024, 436, 733, 1704, 1870, 45, 1875, 483, 2019, 2611, 1988, 854, 835, 1525, 
1023, 1529, 1671, 1045, 55, 666, 2295, 1848, 1979, 252, 323, 71, 2116, 1358, 1973, 179, 1396, 1991, 1999, 1637, 2016, 525, 1285, 1121, 2182, 593, 2012, 142, 1268, 1453, 1683, 132, 504, 1501, 1994, 356, 120, 1410, 935, 1987, 968, 32, 2057, 1013, 1239, 842, 681, 809, 1992, 1884, 490, 1996, 1566, 2133, 1879, 2027, 1859, 1978, 191, 118, 2018, 1474, 1873, 161, 2183, 601, 1495, 1898, 2394, 1043, 215, 1850, 897, 344, 441, 1968, 2181, 1697, 673, 2100, 232, 389, 471, 1105, 548, 201, 830, 1487, 451, 1912, 668, 818, 743, 2180, 498, 113, 2229, 2131, 858, 239, 2010, 1908, 1732, 1178, 1399, 2015, 2323, 748, 1843, 1027, 2395, 2011, 266, 116, 1625, 1571, 578, 76, 2384, 724, 479, 2450, 1773, 822, 851, 2233, 211, 2178, 1885, 2137, 1227, 995, 2103, 2013, 25, 2644, 2102, 514, 2072, 1338, 2232, 2227, 1880, 60, 2228, 1713, 1677, 2136, 1982]",1,12.0,60.0,350.0,0.7348596750369276,, -"[240, 1692, 1472, 1842, 621, 512, 1013, 2018, 1655, 743, 2186, 2187, 478, 490, 242, 1262, 1120, 514, 323, 2156, 189, 172, 2282, 1973, 1070, 1336, 2034, 1558, 130, 2010, 2155, 118, 1709, 88, 1780, 249, 1358, 2093, 1538, 1987, 2280, 232, 1224, 1677, 1203, 1526, 1875, 1989, 2094, 1873, 792, 20, 1725, 1909, 2022, 71, 120, 215, 2047, 1023, 2190, 45, 1740, 55, 1812, 1800, 745, 68, 838, 2281, 827, 231, 897, 2025, 510, 1127, 1505, 553, 2478, 2004, 441, 842, 1453, 289, 1670, 2095, 2188, 1704, 2009, 356, 1978, 968, 1133, 2185, 160, 1930, 1661, 681, 1469, 256, 756, 484, 880, 1990, 2485, 2006, 460, 598, 2385, 1733, 2405, 1500, 387, 2001, 1981, 1995, 1166, 1434, 1682, 244, 1999, 1012, 1784, 1121, 451, 1149, 1776, 773, 1991, 1131, 2178, 2007, 36, 1215, 2000, 2019, 2003, 391, 1583, 1681, 2086, 382, 696, 1801, 1332, 1996, 836, 2024, 2153, 2008, 2189, 366, 816, 2319, 2107, 416, 1986, 671, 1494, 935, 2071, 2016, 151, 2002, 252, 498, 2079, 554, 294, 1870, 1790, 2283, 299, 1982, 1624, 407, 179, 948, 899, 1977, 682, 1072, 454, 1979, 566, 2045, 133, 1803, 1773, 1652, 277, 1797, 417, 1980, 476, 103, 376, 1483, 201, 18, 581, 651, 
695, 519, 342, 1869, 1346, 1871, 1561, 711, 2026, 854, 2078, 1804, 191, 661, 1791, 483, 945, 1574, 2046, 1572, 139, 1785, 176, 102, 424, 109, 1988, 124, 314, 318, 459, 563, 1994, 1775, 138, 2, 1997, 542, 1367, 126, 308, 1560, 1984, 1802, 1039, 1197, 2080, 1697, 1045, 1876, 2386, 1337, 1253, 1778, 2092, 2023, 1787, 1564, 236, 1777, 608, 673, 1859, 1769, 1779, 437, 1705, 1770, 153, 1667, 1772, 1640, 1082, 1993, 958, 2048, 1245, 1623, 1622, 2085, 1998, 1805, 973, 2005, 306, 1768, 335, 887, 1992, 384, 1781, 1789, 1798, 1656, 1095, 1251, 1651, 302, 910, 1783, 1584, 1193, 655, 1551, 329, 1771, 1856, 2088, 2090, 452, 1490, 2326, 1767, 2089, 719, 656, 1142, 1799, 980, 1917, 1478, 112, 610]",1,12.0,65.0,350.0,0.7463072378138847,, -"[426, 1782, 1725, 1740, 487, 1070, 1505, 655, 2046, 153, 1358, 350, 103, 1789, 201, 719, 2090, 1262, 112, 1856, 24, 1772, 302, 236, 1245, 542, 316, 189, 2262, 1705, 109, 1805, 1804, 139, 608, 910, 2080, 2086, 2085, 1143, 157, 1798, 1385, 1656, 124, 519, 1703, 430, 2091, 2084, 480, 656, 1801, 1068, 1779, 1775, 165, 2127, 1110, 958, 138, 2089, 773, 329, 1802, 409, 2118, 845, 1367, 20, 766, 2326, 1483, 547, 521, 1788, 973, 2155, 963, 143, 297, 2048, 2045, 1346, 459, 452, 1733, 1770, 968, 1640, 598, 983, 244, 1651, 2260, 887, 1268, 1425, 1490, 1799, 308, 1584, 1072, 1787, 1701, 306, 1618, 126, 1986, 1572, 1158, 1790, 912, 1193, 438, 2078, 1015, 714, 33, 2117, 1405, 2320, 637, 529, 229, 2318, 476, 1045, 1149, 554, 1417, 2295, 698, 945, 1508, 2120, 1810, 342, 2124, 1622, 2075, 1609, 1968, 1998, 505, 1119, 1800, 2350, 1529, 1478]",1,15.0,30.0,300.0,0.6831610044313147,, -"[2130, 1889, 1891, 791, 1980, 851, 1979, 1812, 1152, 816, 2178, 885, 300, 2405, 1358, 1986, 224, 1927, 2293, 691, 1270, 985, 2039, 280, 1026, 1535, 1846, 1174, 593, 2040, 1729, 1039, 1810, 878, 1908, 88, 2036, 204, 211, 1675, 2037, 1351, 2305, 841, 1527, 737, 2295, 1807, 835, 2041, 966, 747, 1843, 1529, 539, 1677, 897, 120, 1929, 818, 525, 1309, 415, 873, 158, 483, 519, 1849, 530, 2403, 
2136, 86, 239, 1002, 1840, 2042, 1149, 1867, 1003, 334, 471, 848, 1847, 2034, 2135, 2309, 1999, 708, 1622, 11, 1625, 1654, 69, 2038, 2014, 14, 2035, 1842, 1732, 1448, 2045, 749, 1097, 1968, 141, 1983, 908, 2115, 1930, 1572, 386, 1616, 1614, 1909, 1463, 1143, 1928, 643, 1494, 1336, 2282, 74, 498, 2155, 191, 1115, 1708, 738, 49, 1497, 1053, 809, 30, 1665, 2117, 1118, 1373, 1839, 1701, 470, 1110, 1013, 436, 1121, 461, 1538, 181, 836, 2010, 1333, 2426, 2281, 1644, 1432, 1382, 1273, 1464, 1974, 1683, 1370, 1655, 1661, 2610, 1319, 575, 2283, 1049, 417, 2294, 778, 866, 2154, 1919, 1405, 490, 779, 261, 586, 1851, 617, 1120, 2162, 61, 1479, 578, 1508, 229, 382, 446, 2078, 1583, 1171, 554, 1660, 2343, 1879, 1291, 661, 1507, 770, 1848, 118, 252, 504, 1410, 1894, 1525, 826, 1845, 880, 2185, 2112, 1500, 203, 454, 285, 2425, 1482, 1587, 581, 1973, 668, 2276, 1224, 1975, 536, 518, 506, 567, 1844, 2030, 255, 1343, 1341, 2105, 1690, 2423, 388, 467, 2118, 1133, 1841, 1131, 2384, 433, 2151, 2165, 1215, 510, 424, 993, 697, 1857, 1645, 1293, 842, 32, 1058, 1521, 1403, 868, 2344, 304, 1203, 502, 1080, 2153, 1195, 1850, 149, 2166, 822, 1029, 2099, 1637, 1976, 2316, 2164, 147, 666, 2280, 1982, 682, 2152, 93, 2156, 1222, 279, 1526, 1981, 1332, 1172, 270, 988, 1978, 2475, 1977, 838, 771, 242]",1,15.0,60.0,300.0,0.7481536189069424,, -"[452, 656, 306, 1670, 887, 452, 656, 306, 1670, 887, 452, 656, 306, 1670, 887, 452, 656, 306, 1670, 887, 452, 656, 306, 1670, 887, 452, 656, 306, 1670, 887]",0,,5.0,100.0,0.09010339734121123,0.5, -"[99, 2455, 2604, 127, 26, 99, 2455, 2604, 127, 26, 99, 2455, 2604, 127, 26, 99, 2455, 2604, 127, 26, 99, 2455, 2604, 127, 26, 99, 2455, 2604, 127, 26]",0,,5.0,50.0,0.08677991137370754,1.0, -"[109, 459, 306, 656, 1251, 109, 459, 306, 656, 1251, 109, 459, 306, 656, 1251, 109, 459, 306, 656, 1251, 109, 459, 306, 656, 1251, 109, 459, 306, 656, 1251]",0,,5.0,200.0,0.09010339734121123,1.0, -"[2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 145, 1165, 898, 1836, 2158, 
2157, 213, 1327, 2192, 1647, 1835, 2159, 495, 144, 2209, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 145, 1165, 898, 1836, 2158, 2157, 213, 1327, 2192, 1647, 1835, 2159, 495, 144, 2209, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 145, 1165, 898, 1836, 2158, 2157, 213, 1327, 2192, 1647, 1835, 2159, 495, 144, 2209, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 145, 1165, 898, 1836, 2158, 2157, 213, 1327, 2192, 1647, 1835, 2159, 495, 144, 2209, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 145, 1165, 898, 1836, 2158, 2157, 213, 1327, 2192, 1647, 1835, 2159, 495, 144, 2209, 2160, 1593, 1328, 23, 92, 2622, 537, 108, 1504, 1698, 145, 1165, 898, 1836, 2158, 2157, 213, 1327, 2192, 1647, 1835, 2159, 495, 144, 2209]",0,,25.0,250.0,0.08677991137370754,1.0, -"[505, 1557, 1775, 2168, 303, 1782, 121, 589, 814, 1635, 2251, 2382, 1158, 109, 139, 329, 554, 581, 910, 958, 1193, 1245, 1584, 1651, 1770, 1787, 1799, 2048, 484, 409]",0,,30.0,150.0,0.0915805022156573,,0.2 -"[153, 294, 329, 350, 426, 476, 655, 887, 910, 958, 1045, 1193, 1245, 1251, 1346, 1490, 1640, 1651, 1656, 1705, 1772, 1775, 1779, 1781, 1782, 1787, 1802, 1804, 2045, 2048, 2080, 2084, 2086, 2087, 2088, 2089, 2091, 133, 314, 384, 459, 660, 980, 1144, 1767, 1768, 1773, 1774, 1776, 1777, 1778, 2164, 2403, 795, 282, 242, 436, 510, 1215, 1655]",0,,60.0,250.0,0.09342688330871492,,0.2 -"[109, 306, 1490, 126, 236, 1770, 655, 973, 1998, 2045]",0,,10.0,100.0,0.09711964549483014,,0.8 -"[127, 2604, 99, 123, 26, 122, 2454, 2455]",0,,10.0,250.0,0.08677991137370754,,0.8 -"[1543, 735, 739, 1881, 2029, 701, 1237, 1958, 2060, 2062, 393, 661, 472, 1579, 1441, 1884, 1954, 344, 807, 935, 1880, 1882, 1885, 559, 1182, 1183, 441, 454, 590, 648]",0,,30.0,50.0,0.09785819793205318,,0.8 -"[1362, 1485, 2388, 777, 822, 1637, 2070, 1351, 1974, 2026, 2028, 433, 483, 1332, 1494, 1982, 826, 1171, 519, 708, 1343, 2078, 2403, 1810, 2136, 643, 1842, 11, 1839, 745, 1359, 1464, 1674, 2101, 2071, 32, 666, 1482, 1976, 1357, 
86, 424, 885, 1039, 1978, 2105, 1572, 211, 239, 2164, 575, 988, 1844, 2276, 1622, 2426, 1527, 1792, 2050, 1986, 596, 830, 921, 1539, 1675, 1905, 1916, 504, 1333, 2194, 149, 572, 2009, 897, 279, 2274, 498, 668, 878, 1979, 270, 835, 1410, 1894, 2305, 2405, 2135, 1382, 1195, 1441, 175, 1370, 604, 955, 1915, 1218, 1917, 1918, 1920, 702, 1368, 2295, 1683, 1655, 1909, 1498, 2281, 2282, 2027, 2485, 1013, 1127, 748, 1376, 2110, 2384, 2263, 578, 661, 1927, 935, 1660, 41, 1222, 1525, 2151, 2153, 1914, 1120, 436, 510, 836, 1133, 925, 868, 1981, 2099, 376, 88, 1358, 2419, 1661, 1402, 2109, 280, 993, 1152, 2294, 1517, 2355]",1,,30.0,250.0,0.6750369276218612,,0.2 -"[2008, 588, 698, 665, 2040, 133, 2123, 389, 661, 1885, 1337, 1958, 1661, 1772, 2228, 924, 1705, 1522, 1532, 160, 335, 476, 673, 681, 745, 1127, 1166, 2000, 2004, 2007, 2362, 153, 2121, 441, 652, 451, 1769, 1779, 1787, 1844, 553, 1805, 696, 349, 887, 1349, 1965, 71, 179, 743, 1023, 1095, 1558, 1697, 1704, 1812, 1859, 1876, 1987, 1989, 1481, 1519, 2336, 286, 1051, 2120, 176, 289, 973, 1998, 2045, 2092, 2094, 638, 384, 1786, 215, 366, 899, 968, 1873, 1875, 1992, 1993, 1997, 1999, 2001, 2002, 2005, 2006, 196, 332, 124, 2066, 236, 318, 459, 113, 249, 415, 266, 2229, 896, 2383, 409, 589, 1505, 1778, 1780, 1788, 45, 391, 566, 792, 1709, 1870, 1988, 1990, 1996, 2003, 2009, 1569, 91, 2034, 271, 102, 306, 2323, 426, 563, 610, 1045, 2549, 548, 868, 1777, 2645, 1634, 2, 2237, 68, 201, 275, 460, 1434, 1453, 1574, 1991, 277, 1995]",1,,30.0,300.0,0.6174298375184638,,0.2 -"[109, 1239, 298, 1325, 2176, 1894, 2582, 1738, 687, 1909, 603, 1917, 1884, 1370, 2091, 2119, 411, 2329, 1583, 1800, 1670, 88, 1858, 1603, 2283, 383, 1295, 935, 1820, 1414, 2325, 1185, 2190, 739, 2284, 2286, 665, 1096, 332, 2691, 1073, 1119, 2077, 2550, 157, 2359, 1415, 2375, 2527, 645, 2235, 2413, 2601, 1454, 2667, 206, 2236, 2335, 297, 1701, 1346, 1445, 1348, 1553, 118, 441, 973, 1881, 1226, 733, 1465, 961, 2238, 231, 24, 869, 493, 2365, 10, 306, 416, 1570, 1055, 
985, 2162, 1066, 605, 1522, 1912, 1739, 849, 1661, 2022, 2188, 67, 1168, 2599, 1666, 553, 197]",1,,20.0,300.0,0.6152141802067946,,0.5 -"[1616, 907, 1487, 2649, 1501, 56, 447, 1975, 412, 197]",0,,2.0,150.0,0.08677991137370754,,0.8 -"[133, 1337, 1661, 1772, 924, 1705, 1778, 1522, 1532, 160, 232, 335, 476, 673, 681, 745, 792, 1127, 1166, 2000, 153, 1769, 1779, 1787, 1805, 887, 1634, 2357, 179, 201, 968, 1434, 1453, 1704, 1812, 1859, 1873, 1875, 1990, 1992, 124, 176, 289, 1998, 2045, 2092, 2094, 638, 384, 1788, 2, 366, 460, 1095, 1149, 1987, 1988, 1994, 1995, 2001, 236, 306, 426, 519, 409, 589, 1196, 1777, 1786, 1349, 1965, 215, 899, 1023, 1558, 1574, 1989, 1991, 1993, 1996, 1569, 2034, 271, 318, 459, 610, 1346, 1703, 1780, 2645, 45, 71, 391, 566, 1697, 1986, 1870, 1876, 1997, 1999]",1,,20.0,250.0,0.6026587887740029,,0.8 -"[1149, 1873, 1875, 1991, 1992, 1797, 1995, 1996, 1998, 185, 1002, 33, 1270, 1319, 1018, 2037, 286, 332, 665, 2376, 1580, 790, 1814, 2230, 2308, 1264, 430, 681, 968, 1127, 1988, 2032, 1994, 1997, 2000, 2005, 1273, 1448, 1497, 1666, 598, 1654, 714, 1031, 1110, 2362, 1068, 783, 109, 2307, 248, 443, 1261, 2249, 330, 576, 2, 160, 179, 215, 1558, 1574, 1709, 1870, 1876, 1987, 2033, 49, 2006, 1665, 1812, 1894, 1968, 2036, 2040, 1051, 1773, 1125, 1808, 1809, 71, 151, 366, 391, 566, 673, 743, 745, 1023, 1095, 1859, 1990, 1452, 1993, 14, 158, 382, 2007, 2009, 1474, 2259, 1107, 2205, 2038, 2042, 897, 2155, 627, 698, 2120, 1788, 159, 2004, 1806, 1046, 68, 201, 232, 519, 899, 1166, 733, 1692, 1989, 465, 469, 2385, 141, 1999, 224, 691, 791, 841, 862, 2336, 1662, 588, 1892, 2066, 1519, 2262, 426, 1986, 2003, 775, 912, 983, 2127, 335, 476]",1,,30.0,300.0,0.6185376661742984,,0.8 -"[1077, 389, 359, 18, 603, 530, 695, 1262, 1248, 880, 1197, 1780, 1142, 2145, 2407, 773, 189, 704, 562, 1013, 1258, 935, 244, 1791, 1616, 215, 643, 1070, 2016, 1253, 969, 118, 2251, 121, 115, 741, 905, 682, 1778, 2019, 1725, 1788, 1583, 1505, 1790, 10, 252, 630, 2252, 1803, 303, 1740, 1478, 
980, 973, 1313, 1776, 2153, 1483, 660, 831, 1851, 1358, 2082, 454, 1733, 589, 1786, 661, 505, 754, 2106, 2096, 239, 573, 2598, 139, 1681, 484, 36, 671, 191, 1072, 1801, 742, 1986, 2326, 135, 384, 441, 1773, 2382, 1158, 1635, 581, 1998, 814, 1701, 1144, 1623, 836, 1651, 1784, 1661, 2093, 2094, 1771, 1624, 945, 1367, 476, 452, 1670, 1015, 308, 302, 1856, 2078, 2095, 1800, 719, 2045, 406, 2143, 342, 2288, 277, 2287, 329, 887, 554, 1196, 1448, 1852, 699, 1775, 1810, 350, 1009, 859, 1249, 138, 1804, 1830, 1781, 1572, 341, 910, 2046, 409]",1,12.0,30.0,250.0,0.6853766617429837,, -"[1772, 487, 306, 1782, 160, 236, 1705, 1892, 1889, 215, 476, 1805, 318, 736, 1580, 1623, 734, 2045, 102, 153, 1807, 2200, 2051, 2182, 743, 530, 745, 1558, 1998, 529, 1769, 2265, 2199, 1484, 2198, 2034, 2094, 176, 586, 2095, 1787, 578, 6, 2197, 1777, 814, 45, 1072, 519, 887, 195, 1602, 1926, 330, 566, 1704, 2086, 1778, 1198, 2350, 280, 2073, 2054, 118, 885, 456, 792, 2033, 2181, 426, 1628, 1127, 861, 1859, 1061, 2333, 733, 1779, 1510, 968, 1013, 366, 1045, 1346, 934, 813, 563, 86, 299, 124, 1797, 68, 973, 773, 133, 2153, 2043, 232, 2074, 638, 794, 523, 1801, 2055, 963, 1125, 705, 1421, 1118, 181, 2201, 1838, 2383, 1337, 391, 36, 675, 2109, 1812, 901, 1574, 289, 1552, 2041, 384, 1635, 109, 695, 459, 95, 1780, 1785, 1453, 1303, 1773, 1149, 74, 2260, 373, 1876, 1535, 1879, 1047, 1661, 1042, 1196, 863, 1986, 2386, 1464, 1417, 1529, 1336, 317, 94, 1095, 2110, 2052, 984, 993, 429, 1851, 1434, 1419, 1025, 1505, 2355, 2165, 48, 1224, 1974, 2155, 2357, 2295, 1703, 2092, 460, 783, 893, 1012, 1925, 38, 1624, 2040, 1146, 2107, 1046, 899, 2376, 2520, 1966, 1669, 1517, 1919, 1975, 673, 2123, 610, 1709, 1202]",1,12.0,40.0,350.0,0.6994091580502215,, -"[756, 240, 869, 1864, 512, 553, 973, 24, 2376, 206, 2691, 1583, 172, 924, 783, 2520, 48, 2349, 1797, 2383, 1429, 306, 1089, 277, 1143, 1472, 399, 244, 2187, 1469, 2430, 598, 1787, 911, 197, 808, 1160, 2186, 862, 384, 1423, 1506, 1015, 1799, 2532, 1785, 231, 1780, 
1346, 2130, 1725, 945, 962, 1484, 2346, 2045, 10, 482, 733, 1701, 1, 387, 2365, 1891, 232, 1098, 617, 2303, 1065, 342, 166, 1670, 1798, 893, 1534, 1070, 1262, 2668, 570, 2347, 1810, 1140, 409, 476, 1773, 1995, 201, 1127, 1425, 71, 1454, 236, 109, 1358, 366, 1805, 1804, 1483, 245, 1789, 2302, 1531, 297, 1800, 958, 1802, 1779, 1705, 2667, 1692, 887, 1336, 1640, 1013, 2086, 1662, 1890, 1778, 922, 417, 189, 1740, 291, 1477, 2386, 317, 2009, 519, 38, 638, 2034, 2203, 1428, 1970, 2236, 1666, 1807, 299, 2065, 179, 1519, 1875, 1709, 45, 1072, 416, 1781, 1385, 1107, 1870, 1889, 2254, 1481, 1777, 1699, 160, 2498, 484, 1812, 2041, 1051, 460, 1552, 215, 1784, 2, 1624, 2332, 1618, 271, 1146, 794, 2066, 745, 1162, 33, 391, 2451, 1788, 180, 705, 2039, 1876, 1892, 1873, 2008, 1406, 2120, 863, 335, 968, 836, 1023, 1453, 1733, 1149, 1859, 2615, 185, 1202, 1893, 1998, 2094, 1669, 1095, 2107, 68, 442, 899, 1219, 20, 1035, 1801, 1196, 2406, 2040, 2004, 493, 576, 949, 1590, 151, 523, 2238, 36, 2106, 86, 2381, 1786, 627, 2348, 2000, 1558, 743, 530, 503, 574, 792, 1574, 196, 1803, 1478, 332, 2591, 788, 2121, 1514, 2387, 589, 1697]",1,12.0,50.0,300.0,0.6861152141802068,, -"[611, 2690, 611, 2690, 611, 2690, 611, 2690, 611, 2690]",0,12.0,60.0,300.0,0.08677991137370754,, -"[1500, 27, 606, 2185, 2358, 1156, 1787, 95, 436, 2059, 1448, 1460, 2034, 1930, 1185, 2578, 229, 1358, 1628, 838, 1133, 405, 279, 426, 1999, 1508, 827, 1856, 2004, 1980, 1998, 341, 2117, 2074, 1405, 55, 1992, 1782, 2405, 1275, 816, 2118, 2280, 1538, 2054, 1987, 373, 109, 112, 318, 1979, 1709, 1810, 242, 2424, 1309, 792, 770, 737, 1870, 1845, 2051, 1852, 968, 1042, 146, 2230, 2475, 1927, 437, 1772, 335, 1988, 100, 2000, 2261, 1996, 2281, 1337, 2360, 308, 655, 1203, 1812, 1434, 563, 490, 315, 2005, 2163, 1479, 120, 68, 2007, 979, 672, 2135, 1994, 1791, 2001, 1453, 1215, 2080, 1859, 2052, 153, 718, 236, 519, 261, 1894, 1921, 2073, 603, 86, 771, 1830, 1784, 334, 1149, 1798, 1204, 2575, 133, 1558, 179, 1732, 1993, 306, 2189, 
2344, 897, 2003, 126, 1131, 1602, 1072, 476, 1729, 1842, 1049, 1909, 45, 2046, 2002, 544, 1920, 2349, 2343, 1846, 2045, 1839, 835, 1248, 1008, 1873, 324, 1517, 391, 1926, 6, 460, 1468, 1844, 1847, 1625, 1023, 536, 1351, 1095, 2590, 1986, 2096, 69, 2419, 1875, 487, 1009, 1850, 899, 65, 2021, 1651, 2048, 738, 1990, 305, 1661, 30, 176, 1692, 1469, 1876, 618, 2136, 2282, 756, 1574, 2, 1989, 743, 139, 2453, 619, 2346, 1848, 2094, 2611, 1924, 2155, 1671, 376, 74, 1923, 1239, 454, 1410, 61, 1675, 1925, 1697, 938, 239, 836, 160, 1118, 438, 2162, 1705, 1121, 2008, 2072, 681, 673, 151, 2283, 1991, 1997, 2295, 1013, 937, 2006, 1840, 1851, 935, 859, 1655, 1849, 215, 1370, 1841, 1080, 942, 2312, 1416, 2154, 1919, 277, 671, 2153, 1704, 973, 553, 2156, 1498, 699, 2170, 440, 441, 779, 778, 510, 1357, 566, 214, 2171, 1660, 71, 1166, 181, 11, 1464, 697, 1843, 586, 232, 1293, 252, 530, 1291, 543, 1142, 220, 2016, 129, 2047, 1773, 1975, 880, 149]",1,12.0,60.0,350.0,0.766248153618907,, -"[1829, 1505, 1557, 1788, 1801, 389, 1903, 1901, 2256, 2168, 650, 1825, 2252, 367, 905, 2251, 1878, 793, 119, 1826, 341, 2288, 752, 1550, 387, 2451, 957, 2287, 448, 603, 121, 1842, 1183, 562, 1077, 980, 1618, 969, 709, 695, 742, 441, 556, 1786, 1015, 1726, 134, 2107, 1113, 189, 215, 704, 1681, 2153, 1743, 244, 808, 2598, 231, 1685, 1335, 1784, 971, 234, 1735, 277, 114, 416, 1699, 2016, 1564, 1620, 399, 1276, 1962, 1144, 1336, 2096, 1358, 2083, 2218, 2505, 1478, 826, 1851, 1733, 831, 454, 2113, 36, 1013, 1465, 1133, 479, 1897, 1262, 1012, 1701, 2185, 2081, 1902, 1574, 671, 773, 1131, 1500, 264, 1070, 359, 589, 1740, 814, 548, 1282, 2075, 610, 1168, 409, 598, 1963, 1412, 1546, 1871, 1495, 2481, 2621, 685, 1248, 1725, 1616, 1824, 118, 1635, 1670, 741, 544, 384, 1650, 406, 1810, 2322, 107, 1483, 1158, 1624, 937, 18, 102, 1856, 1852, 844, 1538, 2542, 1470, 1899, 2382, 645, 1986, 1561, 661, 682, 2087, 1583, 1791, 133, 1896, 859, 2318, 1661, 1022, 2019, 880, 317, 1959, 1789, 1196, 699, 1785, 644, 843, 950, 
1609, 1089, 1072, 973, 754, 303, 1623, 1459, 1960, 329, 1197, 2026, 1799, 1776, 1790, 2326, 379, 1998, 2086, 1780, 1804, 1773, 2094, 836, 519, 2082, 2494, 1777, 2034, 1722, 1769, 1142, 2143, 132, 2091, 2045, 630, 1781, 554, 581, 1778, 1898, 2090, 1705, 484, 1009, 1805, 459, 945, 2339, 963, 2093, 2327, 417, 360, 191, 1797, 1772, 1367, 1787, 747, 1245, 2208, 719, 1900, 891, 563, 935, 1337, 1895, 1640, 139, 1961, 318, 2491, 1830, 1560, 289, 176, 1448, 1779, 218, 1258, 1782, 1267, 426, 2078, 2047, 1180, 1771, 126, 1651, 958, 1304, 112, 660, 487, 1251, 252, 308, 20, 103, 505, 2084, 1167, 542, 1798, 1346, 1775, 1551, 452, 1360, 910, 1904, 342, 115, 608, 294, 1289, 887, 302, 1792, 1572, 1803, 306, 656, 2085, 2046, 153, 1802, 1045, 1770, 1490, 541, 350, 1800, 1249, 138, 236, 904, 1312, 1193, 1656, 109]",1,12.0,65.0,350.0,0.7234121122599705,, -"[108, 2157, 2160, 2159, 2158, 2209, 898, 495, 1698, 1647, 23, 1835, 1328, 1504, 2622, 2192, 1327, 537, 2161, 92, 1836, 1165, 145, 1593, 213]",0,15.0,5.0,50.0,0.08677991137370754,, -"[1900, 891, 1903, 645, 950, 1953, 1824, 1898, 1895, 1470, 1495, 1956, 448, 1555, 1899, 1902, 1826, 753, 709, 1128, 1954, 1441, 1269, 2275, 755]",0,15.0,5.0,100.0,0.10376661742983752,, -"[1149, 1894, 771, 1080, 1614, 1891, 2130, 791, 753, 1671, 1889, 95, 1095, 1980, 142, 718, 2004, 1929, 651, 1880, 1812, 1156, 2180, 816, 1859, 1628, 2183, 2039, 1978, 1879, 2131, 899, 1497, 1870, 1848, 2312, 1873, 210, 1314, 2181, 1416, 1989, 2042, 1810, 1079, 14, 2006, 1990, 2008, 1993, 2162, 1319, 566, 1142, 1983, 158, 519, 1139, 1332, 1968, 2022, 1463, 435, 1729, 858, 483, 787, 215, 224, 2072, 950, 1987, 203, 878, 2040, 1994, 2020, 1115, 1655, 1023, 1997, 1574, 885, 201, 1072, 621, 438, 890, 456, 1701, 1977, 691, 460, 49, 2021, 1665, 408, 1876, 1039, 514]",1,15.0,20.0,350.0,0.6931314623338257,, -"[1919, 2154, 1370, 778, 1660, 779, 1291, 1587, 1975, 2155, 1525, 1131, 1661, 586, 836, 1851, 2045, 1729, 1625, 510, 1464, 1847, 181, 2002, 2003, 2153, 88, 1732, 1987, 306, 2001, 
454, 1848, 1224, 1110, 2016, 519, 1570, 506, 1013, 2004, 696, 1840, 880, 1072, 1845, 215, 672, 220, 476, 935, 2156, 1521, 1644, 1841, 2006, 671, 908, 334, 1846, 1293, 1873, 2152, 376, 1675, 471, 682, 673, 530, 1998, 1574, 1844, 1849, 1403, 1850, 1842, 1988, 1843, 69, 1023, 1994, 973, 1434, 1870, 2000, 201, 1859, 1142, 2151, 1120, 1095, 1989, 1986, 2025, 115, 1999, 1869, 743, 2, 2019, 1812, 1558, 1166, 460, 1993, 1990, 151, 391, 68, 1991, 1992, 252, 792, 1526, 968, 1875, 899, 1697, 232, 1583, 1453, 335, 1997, 2007, 1149, 2005, 45, 681, 2008, 1996, 1709, 1222, 277, 1876, 1127, 566, 1704, 179, 630, 176, 71, 1341, 160, 2194, 1995, 366, 2009, 553, 1239, 745]",1,15.0,30.0,150.0,0.6949778434268833,, -"[2287, 826, 344, 2288, 695, 389, 661, 969, 1743, 905, 359, 231, 1412, 1560, 1012, 1282, 704, 1455, 1878, 1197, 114, 1620, 2083, 121, 1358, 1077, 1262, 384, 1722, 1786, 1015, 341, 505, 699, 1618, 193, 2082, 754, 1735, 317, 1681, 189, 1448, 1725, 603, 118, 1810, 2113, 1701, 1537, 2208, 2252, 671, 1478, 1546, 880, 1791, 1336, 610, 1459, 70, 836, 1248, 1065, 1229, 18, 134, 1851, 1013, 2081, 742, 1070, 387, 1803, 935, 1483, 277, 2251, 2256, 441, 2184, 1144, 2143, 234, 1564, 2327, 1740, 115, 2481, 1289, 980, 215, 589, 1998, 406, 598, 2505, 1196, 244, 562, 2123, 1311, 1726, 289, 1790, 1733, 239, 1624, 91, 1783, 129, 1789, 409, 1609, 454, 1616, 2203, 314, 2318, 416, 1852, 191, 2034, 102, 1787, 176, 20, 1035, 1251, 1776, 2016, 1335, 2326, 1767, 1785, 1699, 2075, 1583, 303, 1784, 1871, 859, 1788, 1158, 133, 1777, 1856, 2096, 1661, 544]",1,15.0,30.0,300.0,0.7134416543574594,, -"[1415, 1884, 392, 2269, 735, 1909, 1370, 687, 1092, 113, 1414, 1295, 1917, 739, 2022, 1881, 2527, 1237, 441, 281, 2176, 2359, 747, 2247, 2190, 2189, 11, 1549, 676, 645, 1655, 2188, 2162, 118, 2286, 119, 1358, 1171, 1619, 581, 746, 1987, 2274, 454, 1348, 1958, 598, 1810, 1073, 540, 2379, 2001, 1169, 2002, 552, 1354, 510, 2284, 1901, 371, 2006, 2009, 1955, 1875, 1553, 1382, 2163, 2424, 745, 720, 1839, 1870, 649, 
2004, 1709, 1812, 2003, 201, 519, 1527, 968, 1095, 2453, 1149, 2244, 1995, 67, 1661, 1855, 160, 2375, 2476, 743, 973, 1107, 1998, 95, 681, 68, 1127, 1989, 366, 1204, 2000, 1026, 218, 1574, 1303, 505, 524, 460, 215, 548, 2283, 2542, 843, 2246, 2, 1986, 1994, 391, 151, 45, 1956, 1595, 1999, 566, 2555, 476, 335, 1023, 899, 792, 2271, 1997, 1434, 1697, 232, 179, 1704, 1166, 1441, 1128, 1991, 2275, 1993, 1700, 2008, 2272, 265, 1992, 1453, 1996, 1488, 1876, 2007, 648, 2005, 673, 1859, 1060, 2338, 71, 1873, 1988, 1990, 590, 1954, 1502, 979, 241, 50, 2243, 1503, 2699, 2273, 1558, 2339, 1707, 1355, 1191, 2630, 2632, 750, 2245, 879, 605, 397, 2414, 2340, 2539, 1200, 237, 1014, 1957, 2540, 2207, 2242, 2241, 500]",1,15.0,40.0,200.0,0.6613737075332349,, -"[738, 405, 816, 893, 1929, 911, 30, 638, 2355, 2357, 1980, 2034, 483, 2405, 270, 11, 1602, 1789, 1425, 2025, 120, 118, 1894, 1013, 1922, 2263, 1039, 885, 1840, 1452, 1907, 1120, 429, 581, 747, 624, 1481, 2136, 195, 1343, 496, 498, 2417, 149, 769, 1839, 1382, 575, 1171, 146, 412, 1025, 2116, 2490, 2195, 1979, 2312, 61, 1983, 1125, 414, 1628, 191, 2055, 1618, 901, 878, 1966, 1925, 1332, 1927, 6, 1008, 2073, 803, 1842, 305, 1309, 1928, 1015, 2054, 1923, 510, 86, 1583, 1519, 2112, 382, 94, 1810, 1385, 1470, 519, 572, 675, 373, 2283, 279, 2099, 783, 698, 1215, 399, 934, 1851, 1202, 239, 56, 1849, 2050, 1428, 1921, 65, 1788, 868, 1655, 1468, 1416, 436, 2416, 835, 1616, 2403, 2113, 1926, 2376, 95, 1333, 728, 1110, 1061, 643, 838, 2281, 2282, 2199, 826, 644, 1675, 69, 1981, 2305, 596, 1909, 1121, 1042, 836, 1464, 1156, 447, 506, 1976, 711, 1538, 456, 2280, 2071, 2198, 1435, 1769, 490, 1848, 897, 1526, 2388, 1068, 1879, 2051, 1296, 1377, 813, 1218, 1580, 1920, 666, 32, 2553, 2111, 2105, 2485, 1482, 1351, 544, 945, 1977, 2100, 41, 175, 1649, 252, 2135, 1924, 2164, 748, 1410, 1975, 668, 1572, 211, 1930, 1341, 1978, 1494, 2384, 1973, 604, 242, 2347, 1732, 433, 2052, 1047, 1676, 1463, 2333, 955, 1683, 1914, 1152, 973, 1362, 2309, 1131, 
733, 1525, 2156, 1402, 2151, 708, 682, 2068, 1974, 1592, 1198, 1224, 1285, 485, 274, 778, 1911, 586, 1517, 2154, 718, 539, 1982, 74, 1359, 830, 2078]",1,15.0,50.0,300.0,0.6617429837518464,, -"[2075, 14, 435, 218, 669, 2268, 1160, 962, 306, 2077, 1072, 2076, 109, 1062, 310, 1124, 994, 1020, 2291, 1192, 482, 1325, 486, 1781, 1941, 766, 94, 794, 2309, 304, 1221, 722, 678, 1577, 1159, 1346, 2668, 2501, 2412, 1950, 417, 465, 1106, 2064, 377, 2063, 1329, 383, 1810, 2011, 2493, 1265, 2335, 1335, 17, 836, 420, 2141, 401, 327, 2139, 316, 746, 2615, 1191, 49, 1682, 1542, 396, 2304, 1055, 1014, 78, 2667, 1818, 2613, 2509, 873, 2534, 1295, 34, 203, 297, 2122, 1817, 102, 1185, 849, 1140, 2357, 1097, 1708, 176, 1567, 54, 2500, 2327, 1662, 429, 2541, 10, 1912, 1739, 729, 1172, 2034, 733, 1197, 1147, 553, 2302, 695, 2253, 1013, 199, 1670, 1334, 1799, 48, 1738, 2381, 73, 298, 1869, 1874, 53, 1241, 267, 1446, 180, 1856, 2123, 91, 2378, 1857, 1219, 606, 2339, 1800, 985, 1666, 665, 935, 1358, 158, 1868, 2379, 2121, 197, 185, 2117, 2045, 476, 2348, 2314, 230, 2573, 2380, 88, 1812, 311, 332, 2025, 2555, 1865, 1998, 1095, 2035, 1337, 24, 1075, 1997, 652, 1488, 960, 2550, 2238, 1636, 1665, 2044, 318, 1465, 1988, 598, 1583, 519, 1171, 169, 767, 1119, 792, 1894, 1445, 2340, 228, 277, 2365, 1996, 157, 1987, 563, 699, 2001, 1704, 2303, 1992, 151, 1993, 2006, 1299, 13, 1846, 1871, 30, 1709, 2338, 1479, 143, 1558, 759, 1697, 1820, 467, 1877, 1, 899, 261, 503, 862, 72, 2004, 2009, 654, 59, 681, 603, 784, 1867, 2096, 1347, 2301, 2003, 1995, 2000, 2217, 191, 1149, 343, 1294, 1994]",1,15.0,50.0,350.0,0.6635893648449039,, -"[1032, 587, 1032, 587, 1032, 587, 1032, 587, 1032, 587]",0,18.0,5.0,50.0,0.09785819793205318,, -"[1779, 306, 426, 1624, 1956, 1955, 318, 1772, 1785, 807, 2212, 1080, 2394, 2162, 1163, 1844, 1574, 2024, 1789, 830, 506, 1346, 2071, 153, 1840, 1402, 1661, 2276, 2045, 356, 1120, 563, 2182, 1787, 191, 5, 249, 2095, 908, 289, 2344, 2216, 651, 1986, 2426, 1507, 2359, 1805, 2093, 124, 
687, 236, 2165, 2010, 1841, 1421, 2054, 1914, 74, 1670, 1878, 69, 2012, 1850, 304, 471, 1464, 1174, 1092, 1998, 133, 2396, 841, 1045, 1675, 1729, 2164, 525, 2385, 681, 575, 1912, 518, 2110, 1592, 204, 1353, 621, 478, 2423, 2016, 2591, 1226, 1622, 2506, 109, 120, 2135, 1645, 1337, 955, 1884, 323, 2018, 2013, 2485, 211, 1358, 973, 519, 1028, 147, 993, 118, 102, 516, 239, 2156, 76, 266, 1195, 2326, 1769, 1676, 1644, 1013, 1355, 708, 1855, 334, 2402, 2078, 1572, 897, 1394, 1880, 539, 2109, 1332, 1851, 1583, 1776, 2295, 2405, 181, 1403, 1110, 97, 1927, 1105, 162, 1171, 1849, 61, 416, 514, 1343, 149, 1527, 598, 1351, 1118, 815, 459, 504, 1338, 1919, 995, 711, 1482, 1978, 1416, 661, 702, 885, 2103, 935, 880, 2102, 666, 255, 1908, 2593, 2312, 32, 1152, 696, 2546, 2011, 279, 234, 498, 252, 466, 2425, 743, 1845, 1842, 2133, 868, 2131, 176, 2152, 530, 2386, 1885, 2094, 2178, 415, 586, 130, 387, 778, 1847, 2017, 2282, 1293, 578, 114, 779, 1975, 2132, 2403, 1683, 1370, 88, 1976, 1625, 2015, 816, 737, 1295, 483, 1677, 818, 2283, 1713, 756, 1652, 1979, 2294, 1980, 270, 1535, 1521, 1882, 682, 2155, 649, 1741]",1,18.0,50.0,350.0,0.6875923190546529,, -"[2159, 2157, 23, 108, 898, 2158, 2209, 92, 1835, 2160, 1647, 2192, 1836, 1165, 537, 1504, 495, 1327, 1328, 1698, 2161, 1593, 2622, 213, 144]",0,22.0,5.0,100.0,0.08677991137370754,, -"[1583, 2055, 2198, 6, 1628, 1120, 1517, 792, 1558, 483, 2000, 1977, 1984, 1669, 816, 1989, 2333, 2026, 1987, 567, 196, 1602, 1110, 1980, 1127, 373, 1998, 476, 1981, 2051, 1475, 1995, 1047, 2004, 149, 366, 1814, 705, 1979, 2027, 460, 498, 424, 519, 192, 485, 1709, 566, 1453, 2178, 478, 1820, 1876, 572, 1816, 2025, 1481, 1493, 1873, 68, 1357, 1889, 1926, 1991, 899, 1993, 1813, 160, 827, 1982, 1988, 1978, 1667, 1666, 2326, 1807, 1042, 1494, 1999, 391, 1925, 745, 1704, 2028, 2009, 1662, 1385, 179, 436, 55, 1025, 1859, 1125, 673, 631, 1697, 2, 2007, 38, 1131]",1,22.0,20.0,300.0,0.6646971935007385,, -"[505, 1142, 1448, 2326, 2096, 2082, 1735, 36, 1800, 859, 
1878, 18, 2000, 2324, 2318, 2034, 416, 935, 1313, 1986, 2026, 661, 1913, 2256, 35, 1283, 1146, 118, 808, 1282, 337, 2047, 638, 20, 2407, 1616, 406, 1258, 1911, 895, 544, 387, 1560, 1583, 1574, 573, 454, 598, 1998, 1140, 1499, 1837, 960, 440, 277, 2598, 252, 519, 2094, 191, 2107, 1009, 476, 2019, 1670, 530, 1311, 804, 303, 1015, 1251, 399, 643, 1776, 1681, 973, 1871, 1552, 1856, 1483, 682, 2153, 836, 1622, 2045, 102, 1571, 2300, 1779, 1784, 2383, 922, 407, 1072, 1640, 1786, 1296, 1810, 893, 1782, 1781, 314, 554, 1790, 671, 294, 417, 2081, 1789, 487, 115, 1798, 880, 630, 581, 2016, 1307, 1289, 2079, 2078, 329, 2536, 1045, 103, 1773, 236, 133, 139, 1912, 1768, 685, 773, 563, 1769, 719, 189, 1661, 426, 2046, 1444, 318, 1346, 308, 1505, 2322, 1572, 1802, 1805, 1772, 1337, 1459, 2085, 239, 10, 1564, 608, 176, 1801, 1775, 958, 2048, 943, 1561, 1788, 126, 409, 350, 910, 1705, 484, 2086, 1196, 2327, 1245, 138, 2494, 1787, 1785, 1656, 153, 1797, 459, 2090, 2080, 2084, 1490, 2325, 2089, 1799, 1061, 124, 2143, 814, 1651, 109, 1804, 112, 1778, 2382, 656]",1,22.0,40.0,250.0,0.7175036927621861,, -"[742, 1743, 969, 2113, 1258, 1620, 704, 562, 1077, 1878, 1726, 1616, 2382, 814, 1583, 859, 754, 2251, 1335, 1735, 836, 389, 359, 10, 2252, 2208, 1412, 905, 1701, 118, 589, 277, 2016, 2096, 1311, 2327, 121, 2256, 2326, 505, 1358, 1196, 416, 1871, 661, 671, 1483, 2451, 1012, 1574, 303, 2083, 1336, 1725, 1282, 1013, 36, 643, 191, 2107, 699, 1740, 2087, 2034, 409, 454, 1142, 1856, 1313, 1505, 1140, 1801, 2081, 682, 1998, 1564, 1158, 2407, 2143, 2075, 1546, 1986, 1624, 1790, 630, 476, 1249, 1852, 1803, 598, 581, 973, 1289, 1571, 215, 935, 1779, 1070, 1670, 102, 2478, 1448, 484, 1778, 980, 1804, 1792, 573, 452, 563, 2318, 1337, 252, 2026, 1009, 115, 2090, 1776, 1661, 406, 880, 1622, 318, 2078, 519, 1572, 1705, 2094, 1781, 341, 2080, 1791, 1490, 1089, 1367, 1784, 1560, 1800, 1777, 2091, 1797, 1799, 554, 244, 1733, 1045, 417, 289, 1782, 2086, 2322, 773, 133, 487, 1251, 218, 1780, 1459, 887, 
1785, 1787, 542, 1773, 1772, 239, 2084, 103, 1798, 2019, 608, 1640, 1810, 1656, 124, 176, 1805, 719, 2153, 685, 18, 384, 2494, 1770, 1465, 1789, 1072, 1245, 556, 426, 1775, 1635, 189, 1346, 1262, 109, 1584, 387, 1248, 910, 1193, 1771, 112, 1623, 1767, 1561, 2085, 1783, 308, 1253, 655, 139, 329, 603, 350, 138, 302, 2045, 660, 2093, 2088, 2598, 610, 1774, 1768, 2046, 1651, 306, 2095, 1144, 958, 2047, 656, 2089, 1802, 20, 1769, 459, 2092, 314, 126, 342, 945, 1478, 153, 2079, 1830, 294, 2048, 236, 1551]",1,22.0,50.0,250.0,0.7426144756277696,, -"[1448, 1370, 1337, 779, 232, 1778, 1676, 816, 1777, 1624, 505, 1873, 1525, 95, 1701, 603, 420, 1074, 412, 484, 56, 120, 743, 459, 1868, 563, 734, 2034, 1623, 215, 657, 1975, 2155, 1980, 133, 745, 179, 1538, 2418, 199, 1258, 1733, 2309, 415, 1773, 2326, 1914, 1780, 408, 1801, 1986, 699, 1171, 1979, 1616, 447, 1592, 1784, 2111, 2405, 712, 850, 2013, 2093, 1351, 2394, 504, 191, 2112, 102, 693, 1769, 2094, 644, 65, 1998, 203, 406, 604, 539, 1740, 2281, 1224, 571, 387, 1570, 643, 598, 197, 854, 2095, 366, 18, 736, 610, 859, 2009, 1909, 1248, 1251, 1871, 88, 1110, 660, 262, 416, 1505, 1779, 2056, 1789, 318, 1131, 2050, 1574, 1309, 1785, 1333, 1127, 1771, 1852, 2079, 205, 1675, 2407, 1203, 1377, 822, 1670, 1867, 452, 1842, 231, 2113, 2026, 1775, 1849, 1140, 1869, 542, 1976, 1776, 887, 2025, 334, 189, 1995, 118, 490, 10, 1359, 1097, 1293, 1781, 471, 1367, 1791, 1358, 1847, 1973, 1644, 1782, 342, 1843, 2086, 98, 573, 608, 1920, 711, 2485, 109, 519, 1787, 2317, 2419, 1841, 581, 476, 1142, 2153, 842, 530, 1918, 1313, 160, 103, 1725, 376, 153, 2047, 1652, 719, 1917, 510, 139, 2045, 1655, 1568, 2000, 2494, 696, 1810, 1846, 1850, 2276, 289, 1464, 553, 1856, 2092, 126, 69, 441, 1729, 1705, 1799, 2046, 1770, 1772, 112, 350, 325, 1683, 2071, 935, 868, 2010, 2190, 2478, 487, 2357, 417, 1790, 1262, 1045, 1851, 426, 1840, 2117, 32, 910, 1926, 1483, 1848, 1804, 2080, 773, 1521, 1070, 1583, 341, 1478, 1802, 244, 1072, 2598, 496, 2403, 1403, 1792, 
1651, 308, 236, 294, 1253, 506, 1798, 1120, 2022, 551, 2016, 682, 2189, 666, 2078, 1346, 708, 465, 1343, 1285, 685, 1974, 302, 124, 1572, 1844, 181, 1625, 661, 2085, 329, 554, 1805, 1637, 138, 1800, 1482, 655, 414, 1584, 2089, 1656, 836, 1622, 2217, 1245, 454, 1487, 958, 2090, 1830, 1376, 1797, 671, 306, 894, 176, 656, 2084, 772, 1640, 2048, 252, 1661, 2088]",1,22.0,65.0,350.0,0.7644017725258493,, -"[2080, 487, 1772, 1705, 1367, 2081, 655, 608, 719, 239, 1787, 1782, 133, 1770, 1584, 1798, 1656, 2089, 112, 102, 695, 2085, 1797, 887, 1771]",0,24.0,5.0,100.0,0.10487444608567208,, -"[31, 1594, 31, 1594, 31, 1594, 31, 1594, 31, 1594]",0,24.0,10.0,150.0,0.11521418020679468,, -"[611, 2690, 611, 2690, 611, 2690, 611, 2690, 611, 2690]",0,24.0,10.0,350.0,0.08677991137370754,, -"[1279, 2291, 229, 2267, 2034, 1203, 86, 1224, 1847, 1527, 659, 146, 1810, 527, 2021, 2292, 2118, 306, 835, 1479, 2280, 2405, 175, 816, 1980, 586, 1677, 1803, 88, 1026, 300, 2266, 2155, 1448, 2203, 2135, 738, 1538, 2241, 1405, 2014, 2117, 1644, 544, 1772, 142, 1991, 1635, 2162, 731, 2311, 1169, 1775, 1358, 1376, 81, 897, 1502, 1927, 276, 261, 2071, 1110, 1850, 1517, 204, 1930, 109, 2418, 120, 280, 737, 1117, 1979, 1198, 593, 1309, 679, 1508, 1759, 1916, 1355, 851, 1908, 1851, 436, 1695, 2189, 719, 1773, 2178, 62, 1351, 2059, 1149, 490, 69, 1770, 2057, 597, 1215, 838, 680, 2185, 1500, 242, 1769, 1503, 1200, 2243, 1842, 2576, 1133, 1441, 2295, 2186, 1914, 1013, 2163, 1174, 1395, 1080, 415, 1779, 354, 2051, 770, 94, 1463, 510, 65, 1894, 1777, 2575, 1251, 552, 1843, 608, 542, 1781, 112, 763, 126, 1957, 581, 1782, 1954, 1771, 2282, 2476]",1,24.0,30.0,350.0,0.6639586410635155,, -"[2130, 1889, 1891, 2039, 118, 691, 1675, 2396, 1332, 2059, 738, 1420, 1812, 454, 748, 842, 791, 973, 1979, 1540, 1224, 142, 2042, 2102, 566, 2054, 1026, 1270, 1909, 1986, 745, 277, 1848, 478, 490, 887, 718, 604, 1039, 1139, 1927, 779, 436, 1676, 1652, 1918, 2217, 1396, 1042, 1421, 504, 160, 2162, 149, 733, 1916, 621, 1919, 2070, 
1013, 2040, 57, 673, 1152, 1928, 146, 2172, 471, 1027, 61, 2071, 878, 994, 2395, 1849, 1625, 910, 1069, 1085, 955, 1592, 514, 2228, 597, 175, 211, 596, 1566, 2053, 69, 1358, 2295, 1410, 191, 1021, 161, 543, 158, 1399, 506, 1512, 571, 2035, 1929, 1732, 2041, 1376, 1912, 778, 1635, 1920, 86, 95, 224, 525, 1974, 2394, 1002, 586, 2055, 1628, 2069, 2190, 1570, 1879, 2019, 2016, 1917, 1759, 1080, 2034, 1968, 14, 1370, 2103, 382, 756, 2386, 1487, 2023, 1982, 724, 1269, 935, 1703, 437, 1474, 1239, 743, 2419, 1441, 1293, 1266, 2304, 1898, 2182, 2388, 885, 272, 2344, 2010, 1894, 141, 1525, 619, 696, 1630, 1654, 1336, 1885, 2036, 356, 1497, 2011, 2024, 1696, 1713, 239, 41, 1344, 1338, 1309, 1529, 617, 1268, 2038, 1869, 1403, 995, 993, 25, 1464, 897, 2057, 1914, 1906, 49, 2022, 325, 2156, 2309, 1907, 702, 1758, 1644, 1843, 1284, 498, 634, 1620, 2467, 1908, 2418, 2013, 1884, 1850, 164, 1178, 1539, 737, 2137, 2178, 754, 2015, 516, 1677, 1743, 65, 2012, 600, 1394, 1880, 1985, 1448, 2315, 1741, 1218, 1847, 593, 2116, 1807, 1964, 1494, 1671, 822, 1984, 704, 324, 438, 1882, 1359, 2037, 389, 2018, 2017, 1174, 1319, 851, 697, 1274, 969, 1220, 300, 1412, 2056, 2136, 2113, 417, 1665, 130, 777, 344, 76, 1117, 97, 1353, 2021, 1637, 921, 572, 1368, 1273, 162, 841, 562, 465, 1983, 88, 815, 415, 1043, 661, 1614, 1077, 2593, 717, 674, 1674, 2450, 1288]",1,24.0,60.0,350.0,0.7197193500738552,, -"[779, 1592, 778, 1362, 1676, 1359, 1675, 69, 1539, 604]",0,2.0,5.0,50.0,0.08677991137370754,, -"[144, 1593, 145, 537, 1698, 1165, 213, 108, 2161, 1327, 2622, 2209, 1647, 2192, 1328, 2160, 495, 1504, 2157, 1836]",0,2.0,10.0,50.0,0.08677991137370754,, -"[1782, 350, 2080, 2045, 2085, 426, 2046, 2089, 1367, 910, 302, 2048, 139, 1013, 1772, 650, 109, 519, 608, 308]",0,2.0,10.0,350.0,0.09010339734121123,, -"[2553, 1415, 1555, 1953, 559, 1414, 1470, 647, 1579, 1182, 2029, 371, 370, 1354, 1070, 472, 844, 1829, 1495, 1441, 940, 1183, 1408, 891, 1899, 1267, 1901, 1902, 801, 392, 264, 950, 645, 720, 1553, 1828, 
1826, 1898, 1900, 238]",0,2.0,20.0,50.0,0.09785819793205318,, -"[389, 359, 1740, 2251, 121, 2252, 2096, 1725, 905, 1358, 704, 589, 1448, 980, 1070, 1784, 1505, 1077, 2143, 661, 2382, 935, 303, 505, 1801, 562, 189, 773, 814, 1777, 1733, 754, 2026, 118, 244, 1262, 2407, 454, 969, 1986, 115, 859, 1624, 2034, 1313, 1773, 1483, 1670, 102, 1778, 133, 1158, 1780, 406, 1776, 1616, 1009, 277, 2045, 1998, 1856, 1574, 1623, 1787, 643, 1196, 1251, 289, 836, 1258, 1871, 191, 1804, 1810, 2016, 1072, 973, 573, 945, 880, 671, 2046, 581, 239, 2086, 1337, 742, 1800, 1779, 2019, 1771, 630, 1367, 342, 1785, 2153, 1803, 2078, 1583, 1142]",1,2.0,50.0,200.0,0.6783604135893648,, -"[2080, 1772, 1367, 487, 2085, 1782, 655, 1705, 2089, 112]",0,3.0,5.0,150.0,0.09010339734121123,, -"[1798, 1584, 1367, 1782, 1656, 487, 350, 2080, 318, 1705]",0,3.0,5.0,300.0,0.09010339734121123,, -"[519, 236, 1998, 109, 1410, 1121, 1367, 1107, 308, 124]",0,3.0,5.0,350.0,0.09194977843426884,, -"[1593, 2159, 213, 2158, 2157, 1835, 145, 23, 898, 144, 537, 1165, 2160, 1504, 1836, 2192, 2622, 2161, 92, 2209, 1647, 108, 1698, 1327, 1328, 495, 1328, 495, 1327, 2160, 2192, 2161, 898, 1647, 1698, 1835, 2157, 2159, 144, 213, 537, 92, 1836, 108, 1504, 2209, 2158, 23, 145, 1593, 1165, 2622]",0,3.0,60.0,150.0,0.08677991137370754,, -"[2209, 898, 1647, 108, 2160, 2159, 1836, 1504, 1327, 1835, 495, 2157, 1328, 92, 2161, 23, 144, 2192, 1698, 2158, 1593, 213, 537, 145, 1165, 2622, 537, 145, 1165, 1593, 1698, 2192, 144, 213, 2622, 2160, 495, 898, 1328, 1647, 1835, 2157, 2159, 2161, 1327, 92, 1836, 108, 1504, 2209, 2158, 23]",0,3.0,65.0,200.0,0.08677991137370754,, -"[1787, 2045, 1651, 1670, 139, 2048, 2080, 109, 1805, 519, 1346, 1656, 1772, 133, 1245, 1798, 329, 153, 1193, 958]",0,4.0,10.0,100.0,0.09010339734121123,, -"[26, 2455, 2454, 122, 123, 99, 2604, 127, 2604, 99, 123, 127, 26, 122, 2454, 2455]",0,4.0,30.0,250.0,0.08677991137370754,, -"[1476, 404, 1170, 1476, 404, 1170]",0,4.0,40.0,350.0,0.08677991137370754,, -"[126, 2045, 
308, 1337, 139, 487, 112, 138, 426, 350]",0,6.0,5.0,50.0,0.09010339734121123,, -"[1476, 404, 1170, 1476, 404, 1170]",0,6.0,5.0,100.0,0.08677991137370754,, -"[2158, 2157, 108, 2209, 145, 144, 2159, 1647, 1835, 537, 1593, 1504, 1698, 213, 2192, 2622, 2160, 1165, 898, 23, 92, 1836, 2161, 495, 1328, 1327, 1327, 495, 1328, 2161, 2192, 2160, 144, 213, 537, 898, 1647, 1698, 1835, 2157, 2159, 145, 1593, 1165, 92, 1836, 108, 1504, 2209, 2158, 23, 2622]",0,6.0,65.0,150.0,0.08677991137370754,, -"[153, 910, 109, 887, 302, 236, 426, 519, 350, 1538]",0,8.0,5.0,50.0,0.09010339734121123,, -"[1784, 1783, 176, 133, 1777, 1776, 1767, 102, 1448, 1780]",0,8.0,5.0,150.0,0.09010339734121123,, -"[1032, 587, 1032, 587]",0,8.0,5.0,350.0,0.09785819793205318,, -"[1412, 1735, 1620, 36, 1505, 1743, 742, 359, 1013, 1282, 505, 880, 969, 244, 589, 704, 814, 562, 2107, 2327, 2143, 1733, 754, 1358, 2083, 773, 1289, 1142, 389, 1725, 905, 1448, 762, 1740, 1583, 699, 1249, 2113, 671, 277, 1801, 2322, 2382, 1313, 1701, 1791, 661, 121, 1483, 2326, 630, 1661, 454, 1070, 406, 1564, 341, 1258, 2252, 2318, 1546, 1776, 935, 215, 1077, 2094, 303, 416, 836, 2451, 2251, 2407, 1336, 2016, 1622, 1852, 1830, 573, 1012, 1465, 1640, 1335, 603, 1624, 685, 1726, 1158, 2075, 1248, 118, 980, 2086, 945, 1779, 859, 1560, 2096, 1705, 1778, 1781, 191, 409, 1251, 2034, 973, 1787, 2081, 1804, 1777, 1490, 1072, 1856, 1799, 2045, 1998, 1986, 1311, 318, 1871, 563]",1,8.0,60.0,250.0,0.6835302806499262,, -"[1396, 2109, 1538, 842, 571, 2281, 724, 2136, 118, 1570, 745, 771, 1399, 1156, 2395, 1917, 2156, 1278, 30, 596, 1421, 593, 2024, 453, 2282, 2182, 2306, 1351, 2102, 1625, 1115, 438, 1842, 1818, 2350, 2021, 2274, 2071, 2103, 1137, 1358, 2396, 681, 787, 1275, 454, 306, 1487, 1268, 995, 1912, 2228, 1085, 1850, 1916, 1269, 41, 2405, 1986, 887, 1539, 1592, 160, 2034, 645, 478, 65, 649, 2397, 1655, 1500, 2326, 1566, 1668, 504, 161, 1420, 510, 2162, 1667, 120, 1013, 1133, 490, 1674, 204, 1121, 1630, 910, 2305, 2309, 2315, 651, 2418, 525, 
2185, 277, 836, 1974, 1468, 2335, 1266, 1626, 942, 1417, 1080, 687, 1671, 696, 661, 1984, 2394, 309, 514, 1983, 175, 748, 1849, 1759, 1975]",1,8.0,60.0,350.0,0.7333825701624815,, -"[1594, 31, 1594, 31, 1594, 31, 1594, 31]",0,20.0,10.0,300.0,0.11004431314623338,, -"[2158, 2157, 108, 2209, 898, 1504, 1647, 92, 2159, 1835, 2160, 1836, 2622, 23, 537, 1327, 1328, 145, 1165, 1698, 1593, 2161, 495, 2192, 213, 144, 1165, 145, 2622, 1698, 2160, 1327, 1328, 898, 1647, 537, 1835, 2157, 2159, 92, 1593, 144, 145, 213, 537, 1165, 2192, 2622, 1698, 495, 2160, 1327, 1328, 898, 1647, 1835, 2157, 2159, 2161, 92, 1593, 144, 145, 213, 537, 1165, 2192, 2622, 1698, 495, 2160, 1327, 1328, 898, 1647, 1835, 2157, 2159, 2161, 92]",0,20.0,20.0,150.0,0.08677991137370754,, -"[2461, 106, 2461, 106, 2461, 106, 2461, 106]",0,20.0,40.0,350.0,0.08677991137370754,, -"[1133, 1903, 793, 2185, 2168, 1557, 1852, 1500, 1825, 957, 118, 215, 1713, 709, 1131, 1289, 650, 1622, 1733, 454, 1070, 1685, 341, 1183, 1829, 1901, 1801, 1826, 448, 1701, 943, 1358, 1725, 752, 1282, 1549, 189, 2318, 773, 244, 1616, 1564, 1262, 1089, 1784, 1335, 1726, 2096, 409, 704, 2082, 2075, 1013, 1412, 2092, 121, 2113, 971, 1735, 1743, 2451, 399, 1897, 318, 644, 937, 1538, 2079, 589, 342, 1142, 1061, 1168, 1550, 1113, 239, 1199, 359, 762, 1842, 2083, 1505, 1986, 1552, 1276, 1546, 969, 264, 1470, 603, 1962, 1620, 115, 389, 1140, 742, 1871, 1902, 2153, 1740, 836, 1248, 1661, 905, 36, 1824, 18, 2252, 1878, 1963, 1196, 2094, 814, 699, 661, 1856, 1411, 2326, 646, 935, 2081, 2107, 1495, 1583, 1896, 2385, 2251, 859, 367, 755, 176, 1880, 1898, 2208, 2300, 562, 880, 1624, 645, 2016, 107, 1776, 1249, 1650, 133, 1336, 119, 2019, 1769, 563, 2256, 2047, 2087, 1077, 950, 844, 1899, 2091, 1483, 581]",1,24.0,40.0,350.0,0.6949778434268833,, -"[2157, 2158, 1165, 108, 145, 537, 1593, 2209, 2159, 1835, 1836, 144, 213, 1504, 1647, 2192, 898, 2622, 1698, 92, 2160, 1327, 23, 2161, 495, 1328, 1327, 495, 1328, 2161, 2192, 2160, 144, 213, 537, 898, 1647, 
1698, 1835, 2157, 2159, 145, 1593, 1165, 92, 1836, 108, 1504, 2209, 2158, 23, 2622, 1327, 495, 1328, 2161, 2192, 2160, 144, 213, 537, 898, 1647, 1698, 1835, 2157, 2159, 145, 1593, 1165, 92, 1836, 108, 1504, 2209, 2158, 23, 2622, 1327, 495, 1328, 2161, 2192, 2160, 144, 213, 537, 898, 1647, 1698, 1835, 2157, 2159, 145, 1593, 1165, 92, 1836, 108, 1504, 2209, 2158, 23, 2622]",0,24.0,50.0,200.0,0.08677991137370754,, -"[2288, 114, 1743, 2113, 969, 2287, 1681, 704, 1735, 1583, 2025, 1784, 1620, 826, 1878, 695, 2252, 135, 562, 1560, 1012, 1767, 2256, 1089, 407, 1701, 1282, 905, 1013, 544, 2326, 1726, 1358, 1609, 121, 1336, 1077, 330, 2407, 2318, 234, 1412, 610, 215, 1561, 2145, 389, 441, 399, 699, 2096, 1283, 2481, 682, 661, 742, 1313, 2019, 741, 359, 880, 754, 831, 1851, 671, 1768, 935, 1070, 638, 1552, 2329, 808, 1791, 2382, 1783, 980, 1722, 1852, 231, 2107, 1803, 2251, 1733, 2095, 189, 36, 1725, 1740, 2300, 314, 2208, 191, 505, 530, 1483, 337, 2082, 1616, 943, 1061, 2327, 1459, 18, 341, 1262, 1142, 1998, 102, 2106, 1801, 643, 1790, 1564, 1776, 1546, 1856, 945, 2026, 416, 1448, 133, 2083, 454, 1774, 814, 218, 118, 409, 1197, 2094, 1769, 963, 1311, 1661, 2034, 1871, 573, 1781, 1196, 1146, 1505, 1623, 1699, 1015, 1574, 1986, 1618, 244, 762, 459, 2451, 2153, 289, 2093, 589, 556, 836, 859, 1799, 1465, 960, 563, 1337, 973, 318, 1270, 1158, 2504, 2045, 2086, 2143, 1030, 1797, 1248, 1778, 1009, 277, 1773, 1072, 603, 1785, 1782, 1335, 1789, 1251, 1786, 581, 406, 1258, 387, 630, 1779, 1804, 2091, 303, 1622, 1777, 1772, 1670, 484]",1,24.0,50.0,300.0,0.7171344165435746,, -"[109, 306, 1149, 1809, 962, 13, 1358, 795, 661, 2216, 2212, 236, 1869, 719, 1787, 1882, 69, 1346, 1808, 1299, 610, 1029, 973, 1701, 1914, 827, 1995, 2010, 1095, 553, 1645, 1974, 1107, 351, 1998, 511, 1999, 261, 2112, 1592, 1695, 1821, 2045, 224, 1331, 554, 2117, 2015, 230, 1986, 2287, 506, 27, 1855, 2118, 1652, 598, 1288, 2034, 1819, 2166, 461, 681, 1355, 578, 2025, 2103, 356, 2102, 1843, 687, 581, 1814, 94, 482, 
2485, 737, 708, 1729, 481, 88, 621, 366, 229, 549, 1812, 1683, 1127, 1479, 1822, 1405, 376, 1928, 748, 770, 851, 2178, 2274, 1062, 1376, 1849, 702, 1464, 1508, 2326, 1846, 382, 1741, 276, 1654, 1616, 1661, 1576, 1507, 2133, 181, 555, 716, 1581, 1847, 1810, 1583, 2359, 1670, 1708, 1574, 1677, 1820, 880, 1572, 30, 1811, 1841, 252, 415, 738, 514, 2078, 649, 1675, 519, 790, 1690, 2030, 1309, 334, 478, 1293, 2131, 1004, 1120, 1644, 1351, 416, 1894, 454, 2132, 249, 1338, 995, 682, 1171, 1845, 1295, 498, 1637, 2024, 1823, 471, 1049, 1815, 1732, 1463, 211, 1348, 1813, 1927, 779, 255, 1879, 1848, 1482, 467, 925, 2162, 1521, 1013, 993, 2425, 826, 2475, 120, 1039, 822, 1218, 239, 1625, 1978, 149, 1332, 130, 2402, 1919, 1776, 1857, 1851, 1203, 2153, 118, 1818, 778, 446, 1110, 93, 576, 586, 285, 1370, 437, 504, 697, 651, 897, 835, 539, 908, 885, 1525, 2405, 1979, 1494, 878, 1842, 61, 1115, 1222, 568, 1092, 1816, 1152]",1,24.0,60.0,350.0,0.7204579025110783,, -"[1956, 1904, 1902, 807, 1954, 645, 1880, 468, 1267, 1226, 1960, 119, 1650, 1961, 1955, 1901, 646, 1549, 971, 1898, 647, 1360, 1713, 1168, 676, 2491, 793, 1179, 479, 1113, 1537, 107, 755, 1022, 1959, 379, 2621, 801, 1276, 1962]",0,26.0,10.0,50.0,0.09785819793205318,, -"[1593, 537, 2192, 213, 1698, 144, 1165, 145, 495, 1327, 2160, 1328, 1835, 2161, 898, 2622, 108, 2209, 1647, 2159, 2157, 1504, 23, 2158, 1836, 92, 898, 92, 1835, 1836, 2160, 2157, 1328, 1647, 1698, 2159, 2161, 108, 2158, 495, 1327, 1504, 2209, 537, 23, 2192, 145, 1165, 1593, 144, 213, 2622, 898, 92, 1835, 1836, 2160, 2157, 1328, 1647, 1698, 2159, 2161, 108, 2158, 495, 1327, 1504, 2209, 537, 23, 2192, 145, 1165, 1593, 144, 213, 2622, 898, 92, 1835, 1836, 2160, 2157, 1328, 1647, 1698, 2159, 2161, 108, 2158, 495, 1327, 1504, 2209, 537, 23, 2192, 145, 1165, 1593, 144, 213, 2622]",0,26.0,30.0,350.0,0.08677991137370754,, -"[123, 2604, 99, 2455, 127, 26, 122, 2454, 2455, 26, 99, 122, 123, 2454, 2604, 127, 2455, 26, 99, 122, 123, 2454, 2604, 127, 2455, 26, 99, 122, 
123, 2454, 2604, 127]",0,26.0,40.0,200.0,0.08677991137370754,, -"[1772, 563, 519, 318, 426, 1337, 1701, 1624, 1785, 1779, 1346, 236, 153, 1077, 459, 1986, 306, 109, 1072, 176]",0,28.0,5.0,150.0,0.09785819793205318,, -"[1735, 1743, 1412, 562, 969, 704, 505, 742, 1620, 2107, 359, 244, 1852, 1701, 1358, 1505, 36, 880, 699, 215, 1740, 2083, 1448, 1725, 1282, 589, 389, 754, 1830, 1013, 2327, 773, 814, 416, 2113, 2075, 1583, 454, 1313, 2143, 2252, 1733, 905, 1335, 1791, 121, 1070, 661, 1546, 1726, 1289, 1483, 341, 1336, 1249, 1258, 406, 1801, 2382, 118, 277, 1311, 1564, 1142, 1248, 2251, 2407, 671, 2034, 218, 1856, 2096, 1012, 1077, 2326, 836, 1871, 1459, 2016, 603, 2091, 935, 685, 1661, 1776, 409, 1986, 630, 1787, 2318, 573, 1560, 1624, 1574, 303, 2045, 1337, 2087, 1251, 191, 318, 1781, 1779, 239, 563, 1158, 2094, 980, 973, 945, 859, 1622, 2322, 1778, 2019, 1699, 2451, 342, 1998, 289, 2026, 1804, 1616, 115, 1640, 1777, 1799, 1262, 1072, 554, 2086, 1478, 1784, 102, 1772, 1490, 189, 1045, 581, 2080, 1797, 1670, 1705, 2090, 643, 1651, 519, 133, 1196, 1773, 1810, 1009, 2046, 1782, 1367, 887, 1780, 1789, 2047, 2078, 1572, 1571, 655, 2208, 2153, 252, 1798, 2084, 387, 1785, 1065, 1771, 487, 2048, 417, 1089, 384, 1465, 682, 1800, 426, 910, 2079, 1635, 20, 1803, 236, 1656, 308, 1561, 608, 598, 452, 1792, 112, 139, 1584, 2494, 1802, 1770]",1,28.0,50.0,250.0,0.7378138847858198,, -"[1472, 172, 48, 1725, 306, 236, 1779, 1891, 512, 958, 1740, 244, 399, 2130, 2346, 240, 1358, 2187, 1797, 2094, 756, 1640, 2106, 2349, 814, 1425, 2302, 733, 1781, 1804, 1789, 1469, 2303, 2186, 887, 109, 317, 1483, 2039, 1098, 1035, 2065, 1893, 1045, 1787, 973, 2451, 1890, 2008, 1810, 962, 1892, 1070, 1623, 2337, 180, 911, 417, 1798, 1065, 1785, 945, 1777, 342, 714, 1733, 2347, 1780, 2120, 1799, 1805, 1635, 2345, 788, 1514, 1705, 1802, 384, 1784, 2006, 1889, 397, 1089, 2041, 1662, 1692, 1336, 487, 2382, 482, 2034, 442, 1666, 1477, 1160, 1908, 1590, 2086, 1015, 1423, 2406, 1419, 1196, 2203, 1072, 862, 2617, 
1624, 1618, 2066, 409, 1778, 2004, 2107, 33, 476, 45, 1803, 1061, 2259, 1998, 1773, 1807, 1801, 2005, 1812, 36, 519, 2236, 1478, 2261, 813, 949, 1709, 1436, 782, 794, 2332, 588, 1149, 1699, 2254, 484, 885, 1107, 1506, 893, 1051, 523, 1702, 1669, 1703, 1095, 86, 1143, 2238, 2009, 1800, 2383, 185, 20, 1219, 2640, 617, 2108, 1997, 2295, 2000, 530, 68, 2376, 1454, 1474, 2591, 1042, 2040, 863, 665, 160, 1552, 2043, 1262, 166, 745, 189, 1481, 1, 1531, 1146, 1519, 2, 1534, 245, 576, 38, 1082, 151, 1511, 808, 1987]",1,28.0,50.0,300.0,0.6927621861152142,, -"[1885, 2123, 665, 588, 698, 661, 231, 232, 2040, 869, 1337, 160, 1846, 2008, 745, 1772, 2003, 868, 1787, 332, 1440, 2007, 286, 467, 2120, 2383, 2000, 24, 176, 874, 2004, 1844, 2229, 261, 133, 1127, 1479, 553, 664, 924, 1820, 1323, 1636, 743, 673, 91, 792, 1995, 201, 681, 2009, 1854, 1778, 1522, 566, 1166, 1051, 1403, 2001, 2228, 476, 1558, 2357, 389, 1958, 2122, 2045, 266, 215, 1777, 1856, 1799, 157, 1884, 335, 973, 1805, 306, 563, 124, 896, 2121, 1857, 899, 426, 1346, 548, 366, 1532, 1852, 1045, 2066, 442, 1858, 343, 809, 1859, 2034, 1705, 1780, 179, 1870, 1987, 1665, 113, 1574, 1453, 1873, 1299, 1907, 1697, 344, 2276, 143, 289, 729, 1701, 1810, 1095, 1991, 1862, 277, 415, 1789, 45, 391, 633, 2259, 699, 733, 784, 1986, 935, 1875, 519, 465, 1999, 1661, 1993, 2, 1015, 158, 1876, 71, 441, 236, 109, 638, 2005, 384, 1519, 1052, 1992, 2362, 1989, 2304, 39, 1241, 409, 196, 2006, 68, 808, 2002, 1624, 1569, 1812, 153, 1855, 318, 1481, 1998, 416, 1853, 2335, 2093, 13, 1474, 349, 1779, 593, 94, 1908, 1505, 1788, 2094, 460, 1334, 1434, 887, 1385, 1861, 767, 1709, 226, 451, 1212, 1075, 249, 216]",1,28.0,50.0,350.0,0.6835302806499262,, -"[1072, 2280, 1085, 945, 2021, 838, 2185, 681, 239, 2153, 619, 1801, 1174, 242, 153, 1784, 671, 308, 1986, 2109, 687, 891, 2395, 1501, 436, 1146, 2200, 236, 2199, 115, 2155, 702, 1637, 1670, 2086, 1421, 1507, 1788, 2228, 1333, 2054, 456, 2182, 773, 1487, 2009, 2027, 935, 1909, 661, 2649, 341, 600, 
771, 210, 1998, 55, 1163, 2195, 1370, 36, 1602, 1838, 1580, 2074, 1500, 215, 1396, 1295, 1156, 724, 2608, 598, 2359, 1372, 2419, 2457, 1552, 441, 2295, 1133, 2107, 2016, 1628, 252, 416, 1624, 2293, 65, 1218, 496, 896, 2165, 2283, 1358, 412, 1527, 1505, 1869, 563, 1975, 1787, 1376, 2164, 1399, 306, 2217, 1346, 133, 214, 973, 1574, 2394, 1131, 1645, 280, 1699, 2418, 644, 1416, 2046, 1928, 330, 1042, 847, 2048, 88, 2316, 2181, 1121, 543, 1171, 277, 118, 1538, 2019, 126, 603, 149, 2106, 324, 2485, 2653, 519, 572, 1420, 728, 1674, 2193, 2201, 109, 2094, 1337, 1701, 1079, 2194, 630, 868, 1027, 551, 822, 1224, 1215, 649, 2025, 415, 1343, 95, 2108, 2197, 1616, 1930, 318, 465, 2051, 992, 1525, 1979, 2047, 2396, 56, 1203, 349, 1905, 682, 1127, 1995, 1655, 1980, 1915, 2422, 2212, 2357, 2388, 2216, 816, 1498, 2045, 366, 1791, 2397, 454, 779, 1535, 2317, 57, 255, 331, 1830, 490, 861, 2071, 1092, 1848, 447, 1248, 1074, 504, 1856, 1855, 1448, 1976, 1676, 830, 1572, 1362, 2078, 1926, 1402, 25, 387, 1849, 553, 1110, 2189, 1917, 596, 778, 1973, 708, 2052, 1919, 699, 2304, 736, 2028, 693, 2282, 1309, 2405, 2132, 1772, 1920, 734, 176, 139, 2026, 1395, 1867, 41]",1,28.0,65.0,350.0,0.6857459379615952,, -"[1346, 1245, 542, 1640, 124, 350, 1367, 608, 1772, 103, 1656, 487, 452, 719, 1705, 1651, 1770, 1158, 302, 426]",0,30.0,5.0,100.0,0.09010339734121123,, -"[2479, 1236, 2479, 1236, 2479, 1236, 2479, 1236]",0,30.0,40.0,350.0,0.08677991137370754,, -"[1156, 1494, 897, 743, 126, 2046, 236, 787, 1149, 1856, 30, 1072, 1873, 878, 1358, 306, 771, 502, 1922, 1876, 323, 6, 1239, 483, 868, 1784, 2419, 160, 1979, 1976, 1337, 1925, 74, 1997, 1983, 1701, 391, 1990, 1651, 438, 498, 1852, 2005, 176, 201, 1772, 1870, 2021, 335, 133, 68, 325, 2048, 935, 745, 1558, 2096, 1989, 460, 45, 2047, 1118, 1980, 673, 603, 1602, 681, 1867, 792, 756, 1469, 1468, 1008, 162, 65, 1776, 1453, 718, 408, 2020, 894, 1248, 341, 118, 56, 277, 88, 1572, 2078, 1709, 1923, 1812, 153, 55, 487, 441, 2003, 1079, 433, 1998, 2405, 
1095, 2004, 2001, 566, 412, 1616, 1023, 563, 2276, 109, 476, 815, 447, 2349, 2007, 2000, 415, 2232, 1416, 1080, 2281, 210, 1574, 1568, 232, 1992, 1996, 657, 816, 454, 1341, 86, 1987, 2485, 1986, 1266, 1875, 252, 1859, 436, 2002, 285, 519, 1377, 318, 1994, 139, 1309, 2008, 2178, 120, 1787, 274, 1974, 61, 1924, 308, 708, 490, 2045, 2117, 451, 2386, 734, 2590, 1732, 315, 2016, 2056, 1538, 630, 668, 885, 699, 2295, 215, 504, 1526, 2360, 60, 1844, 2009, 1984, 1729, 2155, 1845, 973, 1981, 2418, 95, 1376, 1652, 1171, 1505, 1676, 2, 1991, 1343, 1842]",1,30.0,50.0,350.0,0.7186115214180206,, diff --git a/pygip/models/defense/atom/csv_data/attack_PubMed.csv b/pygip/models/defense/atom/csv_data/attack_PubMed.csv deleted file mode 100644 index 8edd9edc..00000000 --- a/pygip/models/defense/atom/csv_data/attack_PubMed.csv +++ /dev/null @@ -1,200 +0,0 @@ -Sequence,Label,NCL,Query Budget,Num Sample Nodes,Fidelity,Gamma -"[7711, 2031, 2873, 7379, 11595, 18867, 4402, 10932, 2098, 8862, 8629, 1119, 3607, 17198, 18145, 13046, 3951, 4421, 9047, 12817, 656, 2602, 17332, 10308, 15498, 6691, 18897, 4638, 14756, 4130, 3240, 6578, 19125, 18812, 9087, 9574, 10769, 7659, 18623, 9358, 7926, 3831, 1398, 4405, 5941, 15944, 7712, 16747, 7997, 6586, 11396, 465, 3878, 13076, 16051, 5665, 13371, 11513, 18508, 17222, 10087, 8382, 13392, 3150, 6988, 18725, 3584, 3074, 4798, 17616, 6923, 18109, 5808, 9951, 9685, 10005, 17687, 514, 18121, 778, 16140, 17477, 6838, 17899, 9388, 11665, 7156, 2570, 12298, 14651, 16560, 2708, 7733, 17333, 8005, 1312, 12240, 953, 14821, 17988]",1,,,,, -"[101, 18806, 16229, 1539, 10063, 8537, 13024, 13980, 6349, 14130]",0,2,1,356,0.22249835167621848, -"[2142, 12649, 7780, 5500, 12150, 11386, 9943, 616, 16631, 14066, 7992, 2239, 11447, 8893, 18266, 11450, 8576, 11449, 1832, 13859, 11814, 2338, 7979, 17738, 18945, 17415, 3781, 11637, 13475, 5045]",1,3,3,419,0.6555256884921641, -"[5943, 2862, 18736, 1416, 17603, 17911, 10503, 2869, 16250, 2841, 16777, 5241, 8282, 7826, 8970, 5700, 
17266, 5755, 4497, 11746, 12262, 9063, 3663, 4540, 10796, 17952, 2019, 4924, 13761, 5148]",1,4,3,341,0.7286098290815033, -"[17600, 14815, 10332, 15385, 8893, 2802, 12882, 16921, 2994, 10854, 14997, 4299, 4278, 19171, 11024, 15074, 5500, 16757, 1309, 1437, 17922, 5944, 4238, 9035, 10184, 15446, 16106, 14647, 878, 19176, 14489, 16692, 645, 4026, 5890, 3803, 6396, 3697, 675, 3336]",1,4,4,419,0.6816959983770351, -"[5470, 17917, 4214, 2338, 3651, 6125, 7861, 9229, 2477, 9004]",0,5,1,356,0.22249835167621848, -"[4938, 16332, 11450, 18411, 1204, 18436, 2150, 10270, 12513, 546, 754, 2383, 16113, 1068, 912, 16229, 18439, 7666, 2352, 1242, 755, 8851, 194, 10181, 8435, 3756, 13444, 18963, 6908, 17672, 6664, 5541, 5686, 14943, 12128, 9712, 1794, 8447, 7258, 11914]",1,5,4,405,0.6664807019323427, -"[3667, 18545, 1706, 19151, 15324, 18080, 16551, 12597, 1423, 4294, 5434, 4194, 3667, 18545, 1706, 19151, 15324, 18080, 16551, 12597, 1423, 4294, 5434, 4194, 3667, 18545, 1706, 19151, 15324, 18080, 16551, 12597, 1423, 4294, 5434, 4194, 3667, 18545, 1706, 19151, 15324, 18080, 16551, 12597, 1423, 4294, 5434, 4194, 3667, 18545, 1706, 19151, 15324, 18080, 16551, 12597, 1423, 4294, 5434, 4194, 3667, 18545, 1706, 19151, 15324, 18080, 16551, 12597, 1423, 4294, 5434, 4194, 3667, 18545, 1706, 19151, 15324, 18080, 16551, 12597, 1423, 4294, 5434, 4194, 3667, 18545, 1706, 19151, 15324, 18080, 16551, 12597, 1423, 4294, 5434, 4194, 3667, 18545, 1706, 19151, 15324, 18080, 16551, 12597, 1423, 4294, 5434, 4194, 3667, 18545, 1706, 19151, 15324, 18080, 16551, 12597, 1423, 4294, 5434, 4194]",1,,12,355,0.5464320129837197,1.5 -"[7110, 6236, 2912, 7978, 16533, 3720, 10032, 3640, 13405, 2199, 5605, 14465, 7110, 6236, 2912, 7978, 16533, 3720, 10032, 3640, 13405, 2199, 5605, 14465, 7110, 6236, 2912, 7978, 16533, 3720, 10032, 3640, 13405, 2199, 5605, 14465, 7110, 6236, 2912, 7978, 16533, 3720, 10032, 3640, 13405, 2199, 5605, 14465, 7110, 6236, 2912, 7978, 16533, 3720, 10032, 3640, 13405, 2199, 5605, 14465, 7110, 
6236, 2912, 7978, 16533, 3720, 10032, 3640, 13405, 2199, 5605, 14465, 7110, 6236, 2912, 7978, 16533, 3720, 10032, 3640, 13405, 2199, 5605, 14465, 7110, 6236, 2912, 7978, 16533, 3720, 10032, 3640, 13405, 2199, 5605, 14465, 7110, 6236, 2912, 7978, 16533, 3720, 10032, 3640, 13405, 2199, 5605, 14465, 7110, 6236, 2912, 7978, 16533, 3720, 10032, 3640, 13405, 2199, 5605, 14465]",1,,12,405,0.6083582695136177,1.5 -"[592, 4837, 14484, 10842, 3683, 9249, 19159, 9042, 3969, 13550, 1142, 9523, 592, 4837, 14484, 10842, 3683, 9249, 19159, 9042, 3969, 13550, 1142, 9523, 592, 4837, 14484, 10842, 3683, 9249, 19159, 9042, 3969, 13550, 1142, 9523, 592, 4837, 14484, 10842, 3683, 9249, 19159, 9042, 3969, 13550, 1142, 9523, 592, 4837, 14484, 10842, 3683, 9249, 19159, 9042, 3969, 13550, 1142, 9523, 592, 4837, 14484, 10842, 3683, 9249, 19159, 9042, 3969, 13550, 1142, 9523, 592, 4837, 14484, 10842, 3683, 9249, 19159, 9042, 3969, 13550, 1142, 9523, 592, 4837, 14484, 10842, 3683, 9249, 19159, 9042, 3969, 13550, 1142, 9523, 592, 4837, 14484, 10842, 3683, 9249, 19159, 9042, 3969, 13550, 1142, 9523, 592, 4837, 14484, 10842, 3683, 9249, 19159, 9042, 3969, 13550, 1142, 9523]",1,,12,552,0.5240148095552062,1.5 -"[5336, 5918, 18830, 11356, 7137, 15916, 17248, 6682, 14743, 18297, 7969, 4297, 17830, 9355, 9059, 5336, 5918, 18830, 11356, 7137, 15916, 17248, 6682, 14743, 18297, 7969, 4297, 17830, 9355, 9059, 5336, 5918, 18830, 11356, 7137, 15916, 17248, 6682, 14743, 18297, 7969, 4297, 17830, 9355, 9059, 5336, 5918, 18830, 11356, 7137, 15916, 17248, 6682, 14743, 18297, 7969, 4297, 17830, 9355, 9059, 5336, 5918, 18830, 11356, 7137, 15916, 17248, 6682, 14743, 18297, 7969, 4297, 17830, 9355, 9059, 5336, 5918, 18830, 11356, 7137, 15916, 17248, 6682, 14743, 18297, 7969, 4297, 17830, 9355, 9059, 5336, 5918, 18830, 11356, 7137, 15916, 17248, 6682, 14743, 18297, 7969, 4297, 17830, 9355, 9059, 5336, 5918, 18830, 11356, 7137, 15916, 17248, 6682, 14743, 18297, 7969, 4297, 17830, 9355, 9059, 5336, 5918, 18830, 11356, 
7137, 15916, 17248, 6682, 14743, 18297, 7969, 4297, 17830, 9355, 9059, 5336, 5918, 18830, 11356, 7137, 15916, 17248, 6682, 14743, 18297, 7969, 4297, 17830, 9355, 9059]",1,,15,355,0.5116904194350054,1.5 -"[16989, 1832, 5336, 9764, 10754, 17003, 46, 5497, 6572, 351, 17192, 16780, 5639, 3267, 313, 14962, 2304, 8945, 14484, 4743, 12870, 17993, 18830, 3322, 3018, 16989, 1832, 5336, 9764, 10754, 17003, 46, 5497, 6572, 351, 17192, 16780, 5639, 3267, 313, 14962, 2304, 8945, 14484, 4743, 12870, 17993, 18830, 3322, 3018, 16989, 1832, 5336, 9764, 10754, 17003, 46, 5497, 6572, 351, 17192, 16780, 5639, 3267, 313, 14962, 2304, 8945, 14484, 4743, 12870, 17993, 18830, 3322, 3018, 16989, 1832, 5336, 9764, 10754, 17003, 46, 5497, 6572, 351, 17192, 16780, 5639, 3267, 313, 14962, 2304, 8945, 14484, 4743, 12870, 17993, 18830, 3322, 3018, 16989, 1832, 5336, 9764, 10754, 17003, 46, 5497, 6572, 351, 17192, 16780, 5639, 3267, 313, 14962, 2304, 8945, 14484, 4743, 12870, 17993, 18830, 3322, 3018, 16989, 1832, 5336, 9764, 10754, 17003, 46, 5497, 6572, 351, 17192, 16780, 5639, 3267, 313, 14962, 2304, 8945, 14484, 4743, 12870, 17993, 18830, 3322, 3018, 16989, 1832, 5336, 9764, 10754, 17003, 46, 5497, 6572, 351, 17192, 16780, 5639, 3267, 313, 14962, 2304, 8945, 14484, 4743, 12870, 17993, 18830, 3322, 3018, 16989, 1832, 5336, 9764, 10754, 17003, 46, 5497, 6572, 351, 17192, 16780, 5639, 3267, 313, 14962, 2304, 8945, 14484, 4743, 12870, 17993, 18830, 3322, 3018, 16989, 1832, 5336, 9764, 10754, 17003, 46, 5497, 6572, 351, 17192, 16780, 5639, 3267, 313, 14962, 2304, 8945, 14484, 4743, 12870, 17993, 18830, 3322, 3018, 16989, 1832, 5336, 9764, 10754, 17003, 46, 5497, 6572, 351, 17192, 16780, 5639, 3267, 313, 14962, 2304, 8945, 14484, 4743, 12870, 17993, 18830, 3322, 3018]",1,,25,355,0.6095754932291931,1.5 -"[1204, 11450, 7116, 16210, 10842, 351, 4149, 19366, 17066, 5567, 5639, 8717, 17401, 12440, 14525, 4332, 5661, 2304, 15841, 16652, 1493, 18830, 2951, 10575, 11099, 1204, 11450, 7116, 16210, 10842, 
351, 4149, 19366, 17066, 5567, 5639, 8717, 17401, 12440, 14525, 4332, 5661, 2304, 15841, 16652, 1493, 18830, 2951, 10575, 11099, 1204, 11450, 7116, 16210, 10842, 351, 4149, 19366, 17066, 5567, 5639, 8717, 17401, 12440, 14525, 4332, 5661, 2304, 15841, 16652, 1493, 18830, 2951, 10575, 11099, 1204, 11450, 7116, 16210, 10842, 351, 4149, 19366, 17066, 5567, 5639, 8717, 17401, 12440, 14525, 4332, 5661, 2304, 15841, 16652, 1493, 18830, 2951, 10575, 11099, 1204, 11450, 7116, 16210, 10842, 351, 4149, 19366, 17066, 5567, 5639, 8717, 17401, 12440, 14525, 4332, 5661, 2304, 15841, 16652, 1493, 18830, 2951, 10575, 11099, 1204, 11450, 7116, 16210, 10842, 351, 4149, 19366, 17066, 5567, 5639, 8717, 17401, 12440, 14525, 4332, 5661, 2304, 15841, 16652, 1493, 18830, 2951, 10575, 11099, 1204, 11450, 7116, 16210, 10842, 351, 4149, 19366, 17066, 5567, 5639, 8717, 17401, 12440, 14525, 4332, 5661, 2304, 15841, 16652, 1493, 18830, 2951, 10575, 11099, 1204, 11450, 7116, 16210, 10842, 351, 4149, 19366, 17066, 5567, 5639, 8717, 17401, 12440, 14525, 4332, 5661, 2304, 15841, 16652, 1493, 18830, 2951, 10575, 11099, 1204, 11450, 7116, 16210, 10842, 351, 4149, 19366, 17066, 5567, 5639, 8717, 17401, 12440, 14525, 4332, 5661, 2304, 15841, 16652, 1493, 18830, 2951, 10575, 11099, 1204, 11450, 7116, 16210, 10842, 351, 4149, 19366, 17066, 5567, 5639, 8717, 17401, 12440, 14525, 4332, 5661, 2304, 15841, 16652, 1493, 18830, 2951, 10575, 11099]",1,,25,454,0.6674950550286555,1.5 -"[11450, 9549, 1202, 10420, 17641, 15663, 16190, 2422, 18956, 8790, 555, 2806, 14437, 5610, 14096, 4935, 4297, 896, 14494, 17266, 17007, 19578, 1276, 8832, 16891, 11450, 9549, 1202, 10420, 17641, 15663, 16190, 2422, 18956, 8790, 555, 2806, 14437, 5610, 14096, 4935, 4297, 896, 14494, 17266, 17007, 19578, 1276, 8832, 16891, 11450, 9549, 1202, 10420, 17641, 15663, 16190, 2422, 18956, 8790, 555, 2806, 14437, 5610, 14096, 4935, 4297, 896, 14494, 17266, 17007, 19578, 1276, 8832, 16891, 11450, 9549, 1202, 10420, 17641, 15663, 16190, 2422, 
18956, 8790, 555, 2806, 14437, 5610, 14096, 4935, 4297, 896, 14494, 17266, 17007, 19578, 1276, 8832, 16891, 11450, 9549, 1202, 10420, 17641, 15663, 16190, 2422, 18956, 8790, 555, 2806, 14437, 5610, 14096, 4935, 4297, 896, 14494, 17266, 17007, 19578, 1276, 8832, 16891, 11450, 9549, 1202, 10420, 17641, 15663, 16190, 2422, 18956, 8790, 555, 2806, 14437, 5610, 14096, 4935, 4297, 896, 14494, 17266, 17007, 19578, 1276, 8832, 16891, 11450, 9549, 1202, 10420, 17641, 15663, 16190, 2422, 18956, 8790, 555, 2806, 14437, 5610, 14096, 4935, 4297, 896, 14494, 17266, 17007, 19578, 1276, 8832, 16891, 11450, 9549, 1202, 10420, 17641, 15663, 16190, 2422, 18956, 8790, 555, 2806, 14437, 5610, 14096, 4935, 4297, 896, 14494, 17266, 17007, 19578, 1276, 8832, 16891, 11450, 9549, 1202, 10420, 17641, 15663, 16190, 2422, 18956, 8790, 555, 2806, 14437, 5610, 14096, 4935, 4297, 896, 14494, 17266, 17007, 19578, 1276, 8832, 16891, 11450, 9549, 1202, 10420, 17641, 15663, 16190, 2422, 18956, 8790, 555, 2806, 14437, 5610, 14096, 4935, 4297, 896, 14494, 17266, 17007, 19578, 1276, 8832, 16891]",1,,25,601,0.6337170969214384,1.5 -"[5639, 4051, 17858, 7560, 5567, 5497, 11714, 351, 16579, 14757, 5003, 14525, 2951, 13779, 11894, 8717, 17401, 5336, 2304, 5402, 16652, 4743, 18830, 8608, 10575, 11099, 18009, 5639, 4051, 17858, 7560, 5567, 5497, 11714, 351, 16579, 14757, 5003, 14525, 2951, 13779, 11894, 8717, 17401, 5336, 2304, 5402, 16652, 4743, 18830, 8608, 10575, 11099, 18009, 5639, 4051, 17858, 7560, 5567, 5497, 11714, 351, 16579, 14757, 5003, 14525, 2951, 13779, 11894, 8717, 17401, 5336, 2304, 5402, 16652, 4743, 18830, 8608, 10575, 11099, 18009, 5639, 4051, 17858, 7560, 5567, 5497, 11714, 351, 16579, 14757, 5003, 14525, 2951, 13779, 11894, 8717, 17401, 5336, 2304, 5402, 16652, 4743, 18830, 8608, 10575, 11099, 18009, 5639, 4051, 17858, 7560, 5567, 5497, 11714, 351, 16579, 14757, 5003, 14525, 2951, 13779, 11894, 8717, 17401, 5336, 2304, 5402, 16652, 4743, 18830, 8608, 10575, 11099, 18009, 5639, 4051, 17858, 
7560, 5567, 5497, 11714, 351, 16579, 14757, 5003, 14525, 2951, 13779, 11894, 8717, 17401, 5336, 2304, 5402, 16652, 4743, 18830, 8608, 10575, 11099, 18009, 5639, 4051, 17858, 7560, 5567, 5497, 11714, 351, 16579, 14757, 5003, 14525, 2951, 13779, 11894, 8717, 17401, 5336, 2304, 5402, 16652, 4743, 18830, 8608, 10575, 11099, 18009, 5639, 4051, 17858, 7560, 5567, 5497, 11714, 351, 16579, 14757, 5003, 14525, 2951, 13779, 11894, 8717, 17401, 5336, 2304, 5402, 16652, 4743, 18830, 8608, 10575, 11099, 18009, 5639, 4051, 17858, 7560, 5567, 5497, 11714, 351, 16579, 14757, 5003, 14525, 2951, 13779, 11894, 8717, 17401, 5336, 2304, 5402, 16652, 4743, 18830, 8608, 10575, 11099, 18009, 5639, 4051, 17858, 7560, 5567, 5497, 11714, 351, 16579, 14757, 5003, 14525, 2951, 13779, 11894, 8717, 17401, 5336, 2304, 5402, 16652, 4743, 18830, 8608, 10575, 11099, 18009]",1,,27,355,0.7194299335598722,1.5 -"[185, 3531, 16026, 668, 10842, 10462, 19164, 16221, 3286, 13550, 15806, 13743, 3231, 7495, 1908, 2144, 4058, 3690, 4070, 99, 15852, 16214, 2767, 5661, 6680, 17593, 10441, 185, 3531, 16026, 668, 10842, 10462, 19164, 16221, 3286, 13550, 15806, 13743, 3231, 7495, 1908, 2144, 4058, 3690, 4070, 99, 15852, 16214, 2767, 5661, 6680, 17593, 10441, 185, 3531, 16026, 668, 10842, 10462, 19164, 16221, 3286, 13550, 15806, 13743, 3231, 7495, 1908, 2144, 4058, 3690, 4070, 99, 15852, 16214, 2767, 5661, 6680, 17593, 10441, 185, 3531, 16026, 668, 10842, 10462, 19164, 16221, 3286, 13550, 15806, 13743, 3231, 7495, 1908, 2144, 4058, 3690, 4070, 99, 15852, 16214, 2767, 5661, 6680, 17593, 10441, 185, 3531, 16026, 668, 10842, 10462, 19164, 16221, 3286, 13550, 15806, 13743, 3231, 7495, 1908, 2144, 4058, 3690, 4070, 99, 15852, 16214, 2767, 5661, 6680, 17593, 10441, 185, 3531, 16026, 668, 10842, 10462, 19164, 16221, 3286, 13550, 15806, 13743, 3231, 7495, 1908, 2144, 4058, 3690, 4070, 99, 15852, 16214, 2767, 5661, 6680, 17593, 10441, 185, 3531, 16026, 668, 10842, 10462, 19164, 16221, 3286, 13550, 15806, 13743, 3231, 7495, 
1908, 2144, 4058, 3690, 4070, 99, 15852, 16214, 2767, 5661, 6680, 17593, 10441, 185, 3531, 16026, 668, 10842, 10462, 19164, 16221, 3286, 13550, 15806, 13743, 3231, 7495, 1908, 2144, 4058, 3690, 4070, 99, 15852, 16214, 2767, 5661, 6680, 17593, 10441, 185, 3531, 16026, 668, 10842, 10462, 19164, 16221, 3286, 13550, 15806, 13743, 3231, 7495, 1908, 2144, 4058, 3690, 4070, 99, 15852, 16214, 2767, 5661, 6680, 17593, 10441, 185, 3531, 16026, 668, 10842, 10462, 19164, 16221, 3286, 13550, 15806, 13743, 3231, 7495, 1908, 2144, 4058, 3690, 4070, 99, 15852, 16214, 2767, 5661, 6680, 17593, 10441]",1,,27,405,0.5540396612060658,1.5 -"[3558, 14066, 13475, 5005, 12632, 4938, 14810, 11099, 9655, 16332, 2395, 10446, 11484, 5067, 896, 7983, 13824, 17194, 16724, 47, 17246, 16109, 5470, 13009, 11636, 14946, 9004, 1473, 13427, 7861, 8241, 11406, 9864, 4314, 3087, 7796, 3558, 14066, 13475, 5005, 12632, 4938, 14810, 11099, 9655, 16332, 2395, 10446, 11484, 5067, 896, 7983, 13824, 17194, 16724, 47, 17246, 16109, 5470, 13009, 11636, 14946, 9004, 1473, 13427, 7861, 8241, 11406, 9864, 4314, 3087, 7796, 3558, 14066, 13475, 5005, 12632, 4938, 14810, 11099, 9655, 16332, 2395, 10446, 11484, 5067, 896, 7983, 13824, 17194, 16724, 47, 17246, 16109, 5470, 13009, 11636, 14946, 9004, 1473, 13427, 7861, 8241, 11406, 9864, 4314, 3087, 7796, 3558, 14066, 13475, 5005, 12632, 4938, 14810, 11099, 9655, 16332, 2395, 10446, 11484, 5067, 896, 7983, 13824, 17194, 16724, 47, 17246, 16109, 5470, 13009, 11636, 14946, 9004, 1473, 13427, 7861, 8241, 11406, 9864, 4314, 3087, 7796, 3558, 14066, 13475, 5005, 12632, 4938, 14810, 11099, 9655, 16332, 2395, 10446, 11484, 5067, 896, 7983, 13824, 17194, 16724, 47, 17246, 16109, 5470, 13009, 11636, 14946, 9004, 1473, 13427, 7861, 8241, 11406, 9864, 4314, 3087, 7796, 3558, 14066, 13475, 5005, 12632, 4938, 14810, 11099, 9655, 16332, 2395, 10446, 11484, 5067, 896, 7983, 13824, 17194, 16724, 47, 17246, 16109, 5470, 13009, 11636, 14946, 9004, 1473, 13427, 7861, 8241, 11406, 9864, 
4314, 3087, 7796, 3558, 14066, 13475, 5005, 12632, 4938, 14810, 11099, 9655, 16332, 2395, 10446, 11484, 5067, 896, 7983, 13824, 17194, 16724, 47, 17246, 16109, 5470, 13009, 11636, 14946, 9004, 1473, 13427, 7861, 8241, 11406, 9864, 4314, 3087, 7796, 3558, 14066, 13475, 5005, 12632, 4938, 14810, 11099, 9655, 16332, 2395, 10446, 11484, 5067, 896, 7983, 13824, 17194, 16724, 47, 17246, 16109, 5470, 13009, 11636, 14946, 9004, 1473, 13427, 7861, 8241, 11406, 9864, 4314, 3087, 7796, 3558, 14066, 13475, 5005, 12632, 4938, 14810, 11099, 9655, 16332, 2395, 10446, 11484, 5067, 896, 7983, 13824, 17194, 16724, 47, 17246, 16109, 5470, 13009, 11636, 14946, 9004, 1473, 13427, 7861, 8241, 11406, 9864, 4314, 3087, 7796, 3558, 14066, 13475, 5005, 12632, 4938, 14810, 11099, 9655, 16332, 2395, 10446, 11484, 5067, 896, 7983, 13824, 17194, 16724, 47, 17246, 16109, 5470, 13009, 11636, 14946, 9004, 1473, 13427, 7861, 8241, 11406, 9864, 4314, 3087, 7796]",1,,36,454,0.6303190140487904,1.5 -"[4064, 5470, 10163, 10854, 677, 6454, 5868, 7517, 13815, 3648, 13703, 16376, 11304, 1477, 5822, 14044, 8479, 1700, 11637, 14731, 7356, 531, 47, 18951, 11529, 141, 14780, 834, 13734, 12261, 9849, 13009, 17602, 9558, 18419, 9278, 4064, 5470, 10163, 10854, 677, 6454, 5868, 7517, 13815, 3648, 13703, 16376, 11304, 1477, 5822, 14044, 8479, 1700, 11637, 14731, 7356, 531, 47, 18951, 11529, 141, 14780, 834, 13734, 12261, 9849, 13009, 17602, 9558, 18419, 9278, 4064, 5470, 10163, 10854, 677, 6454, 5868, 7517, 13815, 3648, 13703, 16376, 11304, 1477, 5822, 14044, 8479, 1700, 11637, 14731, 7356, 531, 47, 18951, 11529, 141, 14780, 834, 13734, 12261, 9849, 13009, 17602, 9558, 18419, 9278, 4064, 5470, 10163, 10854, 677, 6454, 5868, 7517, 13815, 3648, 13703, 16376, 11304, 1477, 5822, 14044, 8479, 1700, 11637, 14731, 7356, 531, 47, 18951, 11529, 141, 14780, 834, 13734, 12261, 9849, 13009, 17602, 9558, 18419, 9278, 4064, 5470, 10163, 10854, 677, 6454, 5868, 7517, 13815, 3648, 13703, 16376, 11304, 1477, 5822, 14044, 8479, 
1700, 11637, 14731, 7356, 531, 47, 18951, 11529, 141, 14780, 834, 13734, 12261, 9849, 13009, 17602, 9558, 18419, 9278, 4064, 5470, 10163, 10854, 677, 6454, 5868, 7517, 13815, 3648, 13703, 16376, 11304, 1477, 5822, 14044, 8479, 1700, 11637, 14731, 7356, 531, 47, 18951, 11529, 141, 14780, 834, 13734, 12261, 9849, 13009, 17602, 9558, 18419, 9278, 4064, 5470, 10163, 10854, 677, 6454, 5868, 7517, 13815, 3648, 13703, 16376, 11304, 1477, 5822, 14044, 8479, 1700, 11637, 14731, 7356, 531, 47, 18951, 11529, 141, 14780, 834, 13734, 12261, 9849, 13009, 17602, 9558, 18419, 9278, 4064, 5470, 10163, 10854, 677, 6454, 5868, 7517, 13815, 3648, 13703, 16376, 11304, 1477, 5822, 14044, 8479, 1700, 11637, 14731, 7356, 531, 47, 18951, 11529, 141, 14780, 834, 13734, 12261, 9849, 13009, 17602, 9558, 18419, 9278, 4064, 5470, 10163, 10854, 677, 6454, 5868, 7517, 13815, 3648, 13703, 16376, 11304, 1477, 5822, 14044, 8479, 1700, 11637, 14731, 7356, 531, 47, 18951, 11529, 141, 14780, 834, 13734, 12261, 9849, 13009, 17602, 9558, 18419, 9278, 4064, 5470, 10163, 10854, 677, 6454, 5868, 7517, 13815, 3648, 13703, 16376, 11304, 1477, 5822, 14044, 8479, 1700, 11637, 14731, 7356, 531, 47, 18951, 11529, 141, 14780, 834, 13734, 12261, 9849, 13009, 17602, 9558, 18419, 9278]",1,,36,601,0.523761221,1.5 -"[9965, 8576, 3634, 3517, 10223, 2148, 5152, 1802, 13668, 7670, 14857, 2284, 9965, 8576, 3634, 3517, 10223, 2148, 5152, 1802, 13668, 7670, 14857, 2284, 9965, 8576, 3634, 3517, 10223, 2148, 5152, 1802, 13668, 7670, 14857, 2284, 9965, 8576, 3634, 3517, 10223, 2148, 5152, 1802, 13668, 7670, 14857, 2284, 9965, 8576, 3634, 3517, 10223, 2148, 5152, 1802, 13668, 7670, 14857, 2284, 9965, 8576, 3634, 3517, 10223, 2148, 5152, 1802, 13668, 7670, 14857, 2284, 9965, 8576, 3634, 3517, 10223, 2148, 5152, 1802, 13668, 7670, 14857, 2284, 9965, 8576, 3634, 3517, 10223, 2148, 5152, 1802, 13668, 7670, 14857, 2284, 9965, 8576, 3634, 3517, 10223, 2148, 5152, 1802, 13668, 7670, 14857, 2284, 9965, 8576, 3634, 3517, 10223, 2148, 
5152, 1802, 13668, 7670, 14857, 2284]",1,,12,601,0.5072779834660446,2.5 -"[11816, 2753, 8005, 7791, 2891, 5084, 787, 10282, 10758, 10373, 12974, 13091, 13549, 8613, 9589, 11816, 2753, 8005, 7791, 2891, 5084, 787, 10282, 10758, 10373, 12974, 13091, 13549, 8613, 9589, 11816, 2753, 8005, 7791, 2891, 5084, 787, 10282, 10758, 10373, 12974, 13091, 13549, 8613, 9589, 11816, 2753, 8005, 7791, 2891, 5084, 787, 10282, 10758, 10373, 12974, 13091, 13549, 8613, 9589, 11816, 2753, 8005, 7791, 2891, 5084, 787, 10282, 10758, 10373, 12974, 13091, 13549, 8613, 9589, 11816, 2753, 8005, 7791, 2891, 5084, 787, 10282, 10758, 10373, 12974, 13091, 13549, 8613, 9589, 11816, 2753, 8005, 7791, 2891, 5084, 787, 10282, 10758, 10373, 12974, 13091, 13549, 8613, 9589, 11816, 2753, 8005, 7791, 2891, 5084, 787, 10282, 10758, 10373, 12974, 13091, 13549, 8613, 9589, 11816, 2753, 8005, 7791, 2891, 5084, 787, 10282, 10758, 10373, 12974, 13091, 13549, 8613, 9589, 11816, 2753, 8005, 7791, 2891, 5084, 787, 10282, 10758, 10373, 12974, 13091, 13549, 8613, 9589]",1,,15,405,0.5089516660749607,2.5 -"[13815, 15778, 1582, 7356, 13118, 12160, 2092, 19053, 6456, 11769, 10850, 7565, 14731, 141, 1700, 1358, 13703, 18330, 18063, 18839, 18419, 834, 7055, 2540, 4312, 13815, 15778, 1582, 7356, 13118, 12160, 2092, 19053, 6456, 11769, 10850, 7565, 14731, 141, 1700, 1358, 13703, 18330, 18063, 18839, 18419, 834, 7055, 2540, 4312, 13815, 15778, 1582, 7356, 13118, 12160, 2092, 19053, 6456, 11769, 10850, 7565, 14731, 141, 1700, 1358, 13703, 18330, 18063, 18839, 18419, 834, 7055, 2540, 4312, 13815, 15778, 1582, 7356, 13118, 12160, 2092, 19053, 6456, 11769, 10850, 7565, 14731, 141, 1700, 1358, 13703, 18330, 18063, 18839, 18419, 834, 7055, 2540, 4312, 13815, 15778, 1582, 7356, 13118, 12160, 2092, 19053, 6456, 11769, 10850, 7565, 14731, 141, 1700, 1358, 13703, 18330, 18063, 18839, 18419, 834, 7055, 2540, 4312, 13815, 15778, 1582, 7356, 13118, 12160, 2092, 19053, 6456, 11769, 10850, 7565, 14731, 141, 1700, 1358, 13703, 18330, 
18063, 18839, 18419, 834, 7055, 2540, 4312, 13815, 15778, 1582, 7356, 13118, 12160, 2092, 19053, 6456, 11769, 10850, 7565, 14731, 141, 1700, 1358, 13703, 18330, 18063, 18839, 18419, 834, 7055, 2540, 4312, 13815, 15778, 1582, 7356, 13118, 12160, 2092, 19053, 6456, 11769, 10850, 7565, 14731, 141, 1700, 1358, 13703, 18330, 18063, 18839, 18419, 834, 7055, 2540, 4312, 13815, 15778, 1582, 7356, 13118, 12160, 2092, 19053, 6456, 11769, 10850, 7565, 14731, 141, 1700, 1358, 13703, 18330, 18063, 18839, 18419, 834, 7055, 2540, 4312, 13815, 15778, 1582, 7356, 13118, 12160, 2092, 19053, 6456, 11769, 10850, 7565, 14731, 141, 1700, 1358, 13703, 18330, 18063, 18839, 18419, 834, 7055, 2540, 4312]",1,,25,355,0.5942587614748694,2.5 -"[1612, 2835, 11368, 17206, 14722, 10173, 3108, 13928, 2657, 16990, 1761, 6291, 16228, 19068, 15329, 10801, 18221, 15568, 18366, 16297, 10744, 7110, 17863, 5788, 6125, 1612, 2835, 11368, 17206, 14722, 10173, 3108, 13928, 2657, 16990, 1761, 6291, 16228, 19068, 15329, 10801, 18221, 15568, 18366, 16297, 10744, 7110, 17863, 5788, 6125, 1612, 2835, 11368, 17206, 14722, 10173, 3108, 13928, 2657, 16990, 1761, 6291, 16228, 19068, 15329, 10801, 18221, 15568, 18366, 16297, 10744, 7110, 17863, 5788, 6125, 1612, 2835, 11368, 17206, 14722, 10173, 3108, 13928, 2657, 16990, 1761, 6291, 16228, 19068, 15329, 10801, 18221, 15568, 18366, 16297, 10744, 7110, 17863, 5788, 6125, 1612, 2835, 11368, 17206, 14722, 10173, 3108, 13928, 2657, 16990, 1761, 6291, 16228, 19068, 15329, 10801, 18221, 15568, 18366, 16297, 10744, 7110, 17863, 5788, 6125, 1612, 2835, 11368, 17206, 14722, 10173, 3108, 13928, 2657, 16990, 1761, 6291, 16228, 19068, 15329, 10801, 18221, 15568, 18366, 16297, 10744, 7110, 17863, 5788, 6125, 1612, 2835, 11368, 17206, 14722, 10173, 3108, 13928, 2657, 16990, 1761, 6291, 16228, 19068, 15329, 10801, 18221, 15568, 18366, 16297, 10744, 7110, 17863, 5788, 6125, 1612, 2835, 11368, 17206, 14722, 10173, 3108, 13928, 2657, 16990, 1761, 6291, 16228, 19068, 15329, 10801, 18221, 
15568, 18366, 16297, 10744, 7110, 17863, 5788, 6125, 1612, 2835, 11368, 17206, 14722, 10173, 3108, 13928, 2657, 16990, 1761, 6291, 16228, 19068, 15329, 10801, 18221, 15568, 18366, 16297, 10744, 7110, 17863, 5788, 6125, 1612, 2835, 11368, 17206, 14722, 10173, 3108, 13928, 2657, 16990, 1761, 6291, 16228, 19068, 15329, 10801, 18221, 15568, 18366, 16297, 10744, 7110, 17863, 5788, 6125]",1,,25,405,0.5262463863670944,2.5 -"[5336, 15639, 7258, 7978, 10857, 11024, 16836, 12679, 3092, 2355, 6224, 4938, 2058, 6516, 10446, 15070, 17246, 12075, 15923, 8137, 5497, 9865, 2199, 206, 12971, 5336, 15639, 7258, 7978, 10857, 11024, 16836, 12679, 3092, 2355, 6224, 4938, 2058, 6516, 10446, 15070, 17246, 12075, 15923, 8137, 5497, 9865, 2199, 206, 12971, 5336, 15639, 7258, 7978, 10857, 11024, 16836, 12679, 3092, 2355, 6224, 4938, 2058, 6516, 10446, 15070, 17246, 12075, 15923, 8137, 5497, 9865, 2199, 206, 12971, 5336, 15639, 7258, 7978, 10857, 11024, 16836, 12679, 3092, 2355, 6224, 4938, 2058, 6516, 10446, 15070, 17246, 12075, 15923, 8137, 5497, 9865, 2199, 206, 12971, 5336, 15639, 7258, 7978, 10857, 11024, 16836, 12679, 3092, 2355, 6224, 4938, 2058, 6516, 10446, 15070, 17246, 12075, 15923, 8137, 5497, 9865, 2199, 206, 12971, 5336, 15639, 7258, 7978, 10857, 11024, 16836, 12679, 3092, 2355, 6224, 4938, 2058, 6516, 10446, 15070, 17246, 12075, 15923, 8137, 5497, 9865, 2199, 206, 12971, 5336, 15639, 7258, 7978, 10857, 11024, 16836, 12679, 3092, 2355, 6224, 4938, 2058, 6516, 10446, 15070, 17246, 12075, 15923, 8137, 5497, 9865, 2199, 206, 12971, 5336, 15639, 7258, 7978, 10857, 11024, 16836, 12679, 3092, 2355, 6224, 4938, 2058, 6516, 10446, 15070, 17246, 12075, 15923, 8137, 5497, 9865, 2199, 206, 12971, 5336, 15639, 7258, 7978, 10857, 11024, 16836, 12679, 3092, 2355, 6224, 4938, 2058, 6516, 10446, 15070, 17246, 12075, 15923, 8137, 5497, 9865, 2199, 206, 12971, 5336, 15639, 7258, 7978, 10857, 11024, 16836, 12679, 3092, 2355, 6224, 4938, 2058, 6516, 10446, 15070, 17246, 12075, 15923, 8137, 5497, 
9865, 2199, 206, 12971]",1,,25,454,0.6276309783435614,2.5 -"[5336, 47, 7297, 12413, 7318, 10818, 10446, 19391, 15036, 3306, 12160, 14857, 4938, 15596, 13151, 3720, 2912, 17192, 2059, 9865, 16079, 15329, 3278, 13824, 11651, 5336, 47, 7297, 12413, 7318, 10818, 10446, 19391, 15036, 3306, 12160, 14857, 4938, 15596, 13151, 3720, 2912, 17192, 2059, 9865, 16079, 15329, 3278, 13824, 11651, 5336, 47, 7297, 12413, 7318, 10818, 10446, 19391, 15036, 3306, 12160, 14857, 4938, 15596, 13151, 3720, 2912, 17192, 2059, 9865, 16079, 15329, 3278, 13824, 11651, 5336, 47, 7297, 12413, 7318, 10818, 10446, 19391, 15036, 3306, 12160, 14857, 4938, 15596, 13151, 3720, 2912, 17192, 2059, 9865, 16079, 15329, 3278, 13824, 11651, 5336, 47, 7297, 12413, 7318, 10818, 10446, 19391, 15036, 3306, 12160, 14857, 4938, 15596, 13151, 3720, 2912, 17192, 2059, 9865, 16079, 15329, 3278, 13824, 11651, 5336, 47, 7297, 12413, 7318, 10818, 10446, 19391, 15036, 3306, 12160, 14857, 4938, 15596, 13151, 3720, 2912, 17192, 2059, 9865, 16079, 15329, 3278, 13824, 11651, 5336, 47, 7297, 12413, 7318, 10818, 10446, 19391, 15036, 3306, 12160, 14857, 4938, 15596, 13151, 3720, 2912, 17192, 2059, 9865, 16079, 15329, 3278, 13824, 11651, 5336, 47, 7297, 12413, 7318, 10818, 10446, 19391, 15036, 3306, 12160, 14857, 4938, 15596, 13151, 3720, 2912, 17192, 2059, 9865, 16079, 15329, 3278, 13824, 11651, 5336, 47, 7297, 12413, 7318, 10818, 10446, 19391, 15036, 3306, 12160, 14857, 4938, 15596, 13151, 3720, 2912, 17192, 2059, 9865, 16079, 15329, 3278, 13824, 11651, 5336, 47, 7297, 12413, 7318, 10818, 10446, 19391, 15036, 3306, 12160, 14857, 4938, 15596, 13151, 3720, 2912, 17192, 2059, 9865, 16079, 15329, 3278, 13824, 11651]",1,,25,552,0.6193640006086119,2.5 -"[3634, 9965, 8576, 5152, 2182, 2284, 13855, 18048, 56, 7670, 18636, 14857, 16257, 18453, 2395, 11174, 4934, 18956, 19010, 4538, 16446, 11726, 13977, 16294, 12235, 2862, 1042, 3634, 9965, 8576, 5152, 2182, 2284, 13855, 18048, 56, 7670, 18636, 14857, 16257, 18453, 2395, 11174, 4934, 
18956, 19010, 4538, 16446, 11726, 13977, 16294, 12235, 2862, 1042, 3634, 9965, 8576, 5152, 2182, 2284, 13855, 18048, 56, 7670, 18636, 14857, 16257, 18453, 2395, 11174, 4934, 18956, 19010, 4538, 16446, 11726, 13977, 16294, 12235, 2862, 1042, 3634, 9965, 8576, 5152, 2182, 2284, 13855, 18048, 56, 7670, 18636, 14857, 16257, 18453, 2395, 11174, 4934, 18956, 19010, 4538, 16446, 11726, 13977, 16294, 12235, 2862, 1042, 3634, 9965, 8576, 5152, 2182, 2284, 13855, 18048, 56, 7670, 18636, 14857, 16257, 18453, 2395, 11174, 4934, 18956, 19010, 4538, 16446, 11726, 13977, 16294, 12235, 2862, 1042, 3634, 9965, 8576, 5152, 2182, 2284, 13855, 18048, 56, 7670, 18636, 14857, 16257, 18453, 2395, 11174, 4934, 18956, 19010, 4538, 16446, 11726, 13977, 16294, 12235, 2862, 1042, 3634, 9965, 8576, 5152, 2182, 2284, 13855, 18048, 56, 7670, 18636, 14857, 16257, 18453, 2395, 11174, 4934, 18956, 19010, 4538, 16446, 11726, 13977, 16294, 12235, 2862, 1042, 3634, 9965, 8576, 5152, 2182, 2284, 13855, 18048, 56, 7670, 18636, 14857, 16257, 18453, 2395, 11174, 4934, 18956, 19010, 4538, 16446, 11726, 13977, 16294, 12235, 2862, 1042, 3634, 9965, 8576, 5152, 2182, 2284, 13855, 18048, 56, 7670, 18636, 14857, 16257, 18453, 2395, 11174, 4934, 18956, 19010, 4538, 16446, 11726, 13977, 16294, 12235, 2862, 1042, 3634, 9965, 8576, 5152, 2182, 2284, 13855, 18048, 56, 7670, 18636, 14857, 16257, 18453, 2395, 11174, 4934, 18956, 19010, 4538, 16446, 11726, 13977, 16294, 12235, 2862, 1042]",1,,27,355,0.6483744991631587,2.5 -"[5336, 15639, 489, 10446, 7978, 11024, 1752, 12016, 3092, 15110, 12679, 6620, 13576, 6224, 2058, 17246, 15070, 4194, 9865, 18266, 15923, 2115, 5497, 12971, 8778, 7955, 18355, 5336, 15639, 489, 10446, 7978, 11024, 1752, 12016, 3092, 15110, 12679, 6620, 13576, 6224, 2058, 17246, 15070, 4194, 9865, 18266, 15923, 2115, 5497, 12971, 8778, 7955, 18355, 5336, 15639, 489, 10446, 7978, 11024, 1752, 12016, 3092, 15110, 12679, 6620, 13576, 6224, 2058, 17246, 15070, 4194, 9865, 18266, 15923, 2115, 5497, 12971, 
8778, 7955, 18355, 5336, 15639, 489, 10446, 7978, 11024, 1752, 12016, 3092, 15110, 12679, 6620, 13576, 6224, 2058, 17246, 15070, 4194, 9865, 18266, 15923, 2115, 5497, 12971, 8778, 7955, 18355, 5336, 15639, 489, 10446, 7978, 11024, 1752, 12016, 3092, 15110, 12679, 6620, 13576, 6224, 2058, 17246, 15070, 4194, 9865, 18266, 15923, 2115, 5497, 12971, 8778, 7955, 18355, 5336, 15639, 489, 10446, 7978, 11024, 1752, 12016, 3092, 15110, 12679, 6620, 13576, 6224, 2058, 17246, 15070, 4194, 9865, 18266, 15923, 2115, 5497, 12971, 8778, 7955, 18355, 5336, 15639, 489, 10446, 7978, 11024, 1752, 12016, 3092, 15110, 12679, 6620, 13576, 6224, 2058, 17246, 15070, 4194, 9865, 18266, 15923, 2115, 5497, 12971, 8778, 7955, 18355, 5336, 15639, 489, 10446, 7978, 11024, 1752, 12016, 3092, 15110, 12679, 6620, 13576, 6224, 2058, 17246, 15070, 4194, 9865, 18266, 15923, 2115, 5497, 12971, 8778, 7955, 18355, 5336, 15639, 489, 10446, 7978, 11024, 1752, 12016, 3092, 15110, 12679, 6620, 13576, 6224, 2058, 17246, 15070, 4194, 9865, 18266, 15923, 2115, 5497, 12971, 8778, 7955, 18355, 5336, 15639, 489, 10446, 7978, 11024, 1752, 12016, 3092, 15110, 12679, 6620, 13576, 6224, 2058, 17246, 15070, 4194, 9865, 18266, 15923, 2115, 5497, 12971, 8778, 7955, 18355]",1,,27,454,0.6448242633260638,2.5 -"[5612, 8299, 18838, 10646, 16754, 1817, 10857, 10926, 2871, 15373, 8364, 6124, 2753, 18358, 12452, 3853, 1700, 13576, 4369, 7215, 16003, 2593, 14499, 5852, 10754, 2114, 7444, 5612, 8299, 18838, 10646, 16754, 1817, 10857, 10926, 2871, 15373, 8364, 6124, 2753, 18358, 12452, 3853, 1700, 13576, 4369, 7215, 16003, 2593, 14499, 5852, 10754, 2114, 7444, 5612, 8299, 18838, 10646, 16754, 1817, 10857, 10926, 2871, 15373, 8364, 6124, 2753, 18358, 12452, 3853, 1700, 13576, 4369, 7215, 16003, 2593, 14499, 5852, 10754, 2114, 7444, 5612, 8299, 18838, 10646, 16754, 1817, 10857, 10926, 2871, 15373, 8364, 6124, 2753, 18358, 12452, 3853, 1700, 13576, 4369, 7215, 16003, 2593, 14499, 5852, 10754, 2114, 7444, 5612, 8299, 18838, 10646, 
16754, 1817, 10857, 10926, 2871, 15373, 8364, 6124, 2753, 18358, 12452, 3853, 1700, 13576, 4369, 7215, 16003, 2593, 14499, 5852, 10754, 2114, 7444, 5612, 8299, 18838, 10646, 16754, 1817, 10857, 10926, 2871, 15373, 8364, 6124, 2753, 18358, 12452, 3853, 1700, 13576, 4369, 7215, 16003, 2593, 14499, 5852, 10754, 2114, 7444, 5612, 8299, 18838, 10646, 16754, 1817, 10857, 10926, 2871, 15373, 8364, 6124, 2753, 18358, 12452, 3853, 1700, 13576, 4369, 7215, 16003, 2593, 14499, 5852, 10754, 2114, 7444, 5612, 8299, 18838, 10646, 16754, 1817, 10857, 10926, 2871, 15373, 8364, 6124, 2753, 18358, 12452, 3853, 1700, 13576, 4369, 7215, 16003, 2593, 14499, 5852, 10754, 2114, 7444, 5612, 8299, 18838, 10646, 16754, 1817, 10857, 10926, 2871, 15373, 8364, 6124, 2753, 18358, 12452, 3853, 1700, 13576, 4369, 7215, 16003, 2593, 14499, 5852, 10754, 2114, 7444, 5612, 8299, 18838, 10646, 16754, 1817, 10857, 10926, 2871, 15373, 8364, 6124, 2753, 18358, 12452, 3853, 1700, 13576, 4369, 7215, 16003, 2593, 14499, 5852, 10754, 2114, 7444]",1,,27,552,0.6282903078561647,2.5 -"[3517, 5513, 4858, 14519, 650, 10224, 1714, 233, 7670, 2552, 12612, 16572, 18434, 1484, 15560, 4505, 14915, 19010, 1733, 13003, 13285, 13475, 197, 6792, 2862, 9965, 10575, 5261, 8509, 440, 12741, 7222, 3702, 11536, 6319, 3794, 3517, 5513, 4858, 14519, 650, 10224, 1714, 233, 7670, 2552, 12612, 16572, 18434, 1484, 15560, 4505, 14915, 19010, 1733, 13003, 13285, 13475, 197, 6792, 2862, 9965, 10575, 5261, 8509, 440, 12741, 7222, 3702, 11536, 6319, 3794, 3517, 5513, 4858, 14519, 650, 10224, 1714, 233, 7670, 2552, 12612, 16572, 18434, 1484, 15560, 4505, 14915, 19010, 1733, 13003, 13285, 13475, 197, 6792, 2862, 9965, 10575, 5261, 8509, 440, 12741, 7222, 3702, 11536, 6319, 3794, 3517, 5513, 4858, 14519, 650, 10224, 1714, 233, 7670, 2552, 12612, 16572, 18434, 1484, 15560, 4505, 14915, 19010, 1733, 13003, 13285, 13475, 197, 6792, 2862, 9965, 10575, 5261, 8509, 440, 12741, 7222, 3702, 11536, 6319, 3794, 3517, 5513, 4858, 14519, 650, 10224, 
1714, 233, 7670, 2552, 12612, 16572, 18434, 1484, 15560, 4505, 14915, 19010, 1733, 13003, 13285, 13475, 197, 6792, 2862, 9965, 10575, 5261, 8509, 440, 12741, 7222, 3702, 11536, 6319, 3794, 3517, 5513, 4858, 14519, 650, 10224, 1714, 233, 7670, 2552, 12612, 16572, 18434, 1484, 15560, 4505, 14915, 19010, 1733, 13003, 13285, 13475, 197, 6792, 2862, 9965, 10575, 5261, 8509, 440, 12741, 7222, 3702, 11536, 6319, 3794, 3517, 5513, 4858, 14519, 650, 10224, 1714, 233, 7670, 2552, 12612, 16572, 18434, 1484, 15560, 4505, 14915, 19010, 1733, 13003, 13285, 13475, 197, 6792, 2862, 9965, 10575, 5261, 8509, 440, 12741, 7222, 3702, 11536, 6319, 3794, 3517, 5513, 4858, 14519, 650, 10224, 1714, 233, 7670, 2552, 12612, 16572, 18434, 1484, 15560, 4505, 14915, 19010, 1733, 13003, 13285, 13475, 197, 6792, 2862, 9965, 10575, 5261, 8509, 440, 12741, 7222, 3702, 11536, 6319, 3794, 3517, 5513, 4858, 14519, 650, 10224, 1714, 233, 7670, 2552, 12612, 16572, 18434, 1484, 15560, 4505, 14915, 19010, 1733, 13003, 13285, 13475, 197, 6792, 2862, 9965, 10575, 5261, 8509, 440, 12741, 7222, 3702, 11536, 6319, 3794, 3517, 5513, 4858, 14519, 650, 10224, 1714, 233, 7670, 2552, 12612, 16572, 18434, 1484, 15560, 4505, 14915, 19010, 1733, 13003, 13285, 13475, 197, 6792, 2862, 9965, 10575, 5261, 8509, 440, 12741, 7222, 3702, 11536, 6319, 3794]",1,,36,405,0.6862098696556271,2.5 -"[1423, 7712, 468, 1349, 2692, 1895, 17341, 6336, 1146, 218, 11335, 14256, 17855, 13675, 4955, 2674, 12573, 6947, 11847, 14821, 10087, 778, 944, 16138, 4421, 12561, 2571, 5396, 15063, 8051, 5081, 17305, 117, 17847, 10094, 15395, 1423, 7712, 468, 1349, 2692, 1895, 17341, 6336, 1146, 218, 11335, 14256, 17855, 13675, 4955, 2674, 12573, 6947, 11847, 14821, 10087, 778, 944, 16138, 4421, 12561, 2571, 5396, 15063, 8051, 5081, 17305, 117, 17847, 10094, 15395, 1423, 7712, 468, 1349, 2692, 1895, 17341, 6336, 1146, 218, 11335, 14256, 17855, 13675, 4955, 2674, 12573, 6947, 11847, 14821, 10087, 778, 944, 16138, 4421, 12561, 2571, 5396, 15063, 8051, 
5081, 17305, 117, 17847, 10094, 15395, 1423, 7712, 468, 1349, 2692, 1895, 17341, 6336, 1146, 218, 11335, 14256, 17855, 13675, 4955, 2674, 12573, 6947, 11847, 14821, 10087, 778, 944, 16138, 4421, 12561, 2571, 5396, 15063, 8051, 5081, 17305, 117, 17847, 10094, 15395, 1423, 7712, 468, 1349, 2692, 1895, 17341, 6336, 1146, 218, 11335, 14256, 17855, 13675, 4955, 2674, 12573, 6947, 11847, 14821, 10087, 778, 944, 16138, 4421, 12561, 2571, 5396, 15063, 8051, 5081, 17305, 117, 17847, 10094, 15395, 1423, 7712, 468, 1349, 2692, 1895, 17341, 6336, 1146, 218, 11335, 14256, 17855, 13675, 4955, 2674, 12573, 6947, 11847, 14821, 10087, 778, 944, 16138, 4421, 12561, 2571, 5396, 15063, 8051, 5081, 17305, 117, 17847, 10094, 15395, 1423, 7712, 468, 1349, 2692, 1895, 17341, 6336, 1146, 218, 11335, 14256, 17855, 13675, 4955, 2674, 12573, 6947, 11847, 14821, 10087, 778, 944, 16138, 4421, 12561, 2571, 5396, 15063, 8051, 5081, 17305, 117, 17847, 10094, 15395, 1423, 7712, 468, 1349, 2692, 1895, 17341, 6336, 1146, 218, 11335, 14256, 17855, 13675, 4955, 2674, 12573, 6947, 11847, 14821, 10087, 778, 944, 16138, 4421, 12561, 2571, 5396, 15063, 8051, 5081, 17305, 117, 17847, 10094, 15395, 1423, 7712, 468, 1349, 2692, 1895, 17341, 6336, 1146, 218, 11335, 14256, 17855, 13675, 4955, 2674, 12573, 6947, 11847, 14821, 10087, 778, 944, 16138, 4421, 12561, 2571, 5396, 15063, 8051, 5081, 17305, 117, 17847, 10094, 15395, 1423, 7712, 468, 1349, 2692, 1895, 17341, 6336, 1146, 218, 11335, 14256, 17855, 13675, 4955, 2674, 12573, 6947, 11847, 14821, 10087, 778, 944, 16138, 4421, 12561, 2571, 5396, 15063, 8051, 5081, 17305, 117, 17847, 10094, 15395]",1,,36,454,0.5497286605467363,2.5 -"[8221, 18362, 1439, 12679, 7054, 15003, 10580, 9013, 5618, 2786, 9355, 351, 14709, 14484, 2389, 5579, 9193, 5336, 14333, 16499, 2951, 19402, 8717, 46, 190, 14446, 19604, 5639, 2304, 3691, 8608, 1276, 16652, 10649, 2049, 5402, 8221, 18362, 1439, 12679, 7054, 15003, 10580, 9013, 5618, 2786, 9355, 351, 14709, 14484, 2389, 5579, 9193, 
5336, 14333, 16499, 2951, 19402, 8717, 46, 190, 14446, 19604, 5639, 2304, 3691, 8608, 1276, 16652, 10649, 2049, 5402, 8221, 18362, 1439, 12679, 7054, 15003, 10580, 9013, 5618, 2786, 9355, 351, 14709, 14484, 2389, 5579, 9193, 5336, 14333, 16499, 2951, 19402, 8717, 46, 190, 14446, 19604, 5639, 2304, 3691, 8608, 1276, 16652, 10649, 2049, 5402, 8221, 18362, 1439, 12679, 7054, 15003, 10580, 9013, 5618, 2786, 9355, 351, 14709, 14484, 2389, 5579, 9193, 5336, 14333, 16499, 2951, 19402, 8717, 46, 190, 14446, 19604, 5639, 2304, 3691, 8608, 1276, 16652, 10649, 2049, 5402, 8221, 18362, 1439, 12679, 7054, 15003, 10580, 9013, 5618, 2786, 9355, 351, 14709, 14484, 2389, 5579, 9193, 5336, 14333, 16499, 2951, 19402, 8717, 46, 190, 14446, 19604, 5639, 2304, 3691, 8608, 1276, 16652, 10649, 2049, 5402, 8221, 18362, 1439, 12679, 7054, 15003, 10580, 9013, 5618, 2786, 9355, 351, 14709, 14484, 2389, 5579, 9193, 5336, 14333, 16499, 2951, 19402, 8717, 46, 190, 14446, 19604, 5639, 2304, 3691, 8608, 1276, 16652, 10649, 2049, 5402, 8221, 18362, 1439, 12679, 7054, 15003, 10580, 9013, 5618, 2786, 9355, 351, 14709, 14484, 2389, 5579, 9193, 5336, 14333, 16499, 2951, 19402, 8717, 46, 190, 14446, 19604, 5639, 2304, 3691, 8608, 1276, 16652, 10649, 2049, 5402, 8221, 18362, 1439, 12679, 7054, 15003, 10580, 9013, 5618, 2786, 9355, 351, 14709, 14484, 2389, 5579, 9193, 5336, 14333, 16499, 2951, 19402, 8717, 46, 190, 14446, 19604, 5639, 2304, 3691, 8608, 1276, 16652, 10649, 2049, 5402, 8221, 18362, 1439, 12679, 7054, 15003, 10580, 9013, 5618, 2786, 9355, 351, 14709, 14484, 2389, 5579, 9193, 5336, 14333, 16499, 2951, 19402, 8717, 46, 190, 14446, 19604, 5639, 2304, 3691, 8608, 1276, 16652, 10649, 2049, 5402, 8221, 18362, 1439, 12679, 7054, 15003, 10580, 9013, 5618, 2786, 9355, 351, 14709, 14484, 2389, 5579, 9193, 5336, 14333, 16499, 2951, 19402, 8717, 46, 190, 14446, 19604, 5639, 2304, 3691, 8608, 1276, 16652, 10649, 2049, 5402]",1,,36,552,0.5822893949383781,2.5 -"[616, 5336, 17387, 13442, 9042, 5567, 351, 
2389, 1371, 16982, 6358, 5661, 12419, 46, 8840, 616, 5336, 17387, 13442, 9042, 5567, 351, 2389, 1371, 16982, 6358, 5661, 12419, 46, 8840, 616, 5336, 17387, 13442, 9042, 5567, 351, 2389, 1371, 16982, 6358, 5661, 12419, 46, 8840, 616, 5336, 17387, 13442, 9042, 5567, 351, 2389, 1371, 16982, 6358, 5661, 12419, 46, 8840, 616, 5336, 17387, 13442, 9042, 5567, 351, 2389, 1371, 16982, 6358, 5661, 12419, 46, 8840, 616, 5336, 17387, 13442, 9042, 5567, 351, 2389, 1371, 16982, 6358, 5661, 12419, 46, 8840, 616, 5336, 17387, 13442, 9042, 5567, 351, 2389, 1371, 16982, 6358, 5661, 12419, 46, 8840, 616, 5336, 17387, 13442, 9042, 5567, 351, 2389, 1371, 16982, 6358, 5661, 12419, 46, 8840, 616, 5336, 17387, 13442, 9042, 5567, 351, 2389, 1371, 16982, 6358, 5661, 12419, 46, 8840, 616, 5336, 17387, 13442, 9042, 5567, 351, 2389, 1371, 16982, 6358, 5661, 12419, 46, 8840]",1,,15,601,0.5386722117969265,3 -"[7110, 8447, 17754, 1315, 5622, 5461, 3642, 1310, 12774, 13142, 529, 8680, 2722, 9186, 9193, 3617, 3120, 15224, 2912, 301, 6125, 11847, 11949, 8652, 15603, 7110, 8447, 17754, 1315, 5622, 5461, 3642, 1310, 12774, 13142, 529, 8680, 2722, 9186, 9193, 3617, 3120, 15224, 2912, 301, 6125, 11847, 11949, 8652, 15603, 7110, 8447, 17754, 1315, 5622, 5461, 3642, 1310, 12774, 13142, 529, 8680, 2722, 9186, 9193, 3617, 3120, 15224, 2912, 301, 6125, 11847, 11949, 8652, 15603, 7110, 8447, 17754, 1315, 5622, 5461, 3642, 1310, 12774, 13142, 529, 8680, 2722, 9186, 9193, 3617, 3120, 15224, 2912, 301, 6125, 11847, 11949, 8652, 15603, 7110, 8447, 17754, 1315, 5622, 5461, 3642, 1310, 12774, 13142, 529, 8680, 2722, 9186, 9193, 3617, 3120, 15224, 2912, 301, 6125, 11847, 11949, 8652, 15603, 7110, 8447, 17754, 1315, 5622, 5461, 3642, 1310, 12774, 13142, 529, 8680, 2722, 9186, 9193, 3617, 3120, 15224, 2912, 301, 6125, 11847, 11949, 8652, 15603, 7110, 8447, 17754, 1315, 5622, 5461, 3642, 1310, 12774, 13142, 529, 8680, 2722, 9186, 9193, 3617, 3120, 15224, 2912, 301, 6125, 11847, 11949, 8652, 15603, 7110, 8447, 17754, 
1315, 5622, 5461, 3642, 1310, 12774, 13142, 529, 8680, 2722, 9186, 9193, 3617, 3120, 15224, 2912, 301, 6125, 11847, 11949, 8652, 15603, 7110, 8447, 17754, 1315, 5622, 5461, 3642, 1310, 12774, 13142, 529, 8680, 2722, 9186, 9193, 3617, 3120, 15224, 2912, 301, 6125, 11847, 11949, 8652, 15603, 7110, 8447, 17754, 1315, 5622, 5461, 3642, 1310, 12774, 13142, 529, 8680, 2722, 9186, 9193, 3617, 3120, 15224, 2912, 301, 6125, 11847, 11949, 8652, 15603]",1,,25,355,0.6041487041639194,3 -"[4930, 4912, 555, 14536, 2422, 17641, 1796, 14096, 4297, 1854, 17007, 10932, 5610, 8832, 13305, 12402, 16891, 1276, 6604, 1205, 1151, 18733, 874, 1987, 12939, 4930, 4912, 555, 14536, 2422, 17641, 1796, 14096, 4297, 1854, 17007, 10932, 5610, 8832, 13305, 12402, 16891, 1276, 6604, 1205, 1151, 18733, 874, 1987, 12939, 4930, 4912, 555, 14536, 2422, 17641, 1796, 14096, 4297, 1854, 17007, 10932, 5610, 8832, 13305, 12402, 16891, 1276, 6604, 1205, 1151, 18733, 874, 1987, 12939, 4930, 4912, 555, 14536, 2422, 17641, 1796, 14096, 4297, 1854, 17007, 10932, 5610, 8832, 13305, 12402, 16891, 1276, 6604, 1205, 1151, 18733, 874, 1987, 12939, 4930, 4912, 555, 14536, 2422, 17641, 1796, 14096, 4297, 1854, 17007, 10932, 5610, 8832, 13305, 12402, 16891, 1276, 6604, 1205, 1151, 18733, 874, 1987, 12939, 4930, 4912, 555, 14536, 2422, 17641, 1796, 14096, 4297, 1854, 17007, 10932, 5610, 8832, 13305, 12402, 16891, 1276, 6604, 1205, 1151, 18733, 874, 1987, 12939, 4930, 4912, 555, 14536, 2422, 17641, 1796, 14096, 4297, 1854, 17007, 10932, 5610, 8832, 13305, 12402, 16891, 1276, 6604, 1205, 1151, 18733, 874, 1987, 12939, 4930, 4912, 555, 14536, 2422, 17641, 1796, 14096, 4297, 1854, 17007, 10932, 5610, 8832, 13305, 12402, 16891, 1276, 6604, 1205, 1151, 18733, 874, 1987, 12939, 4930, 4912, 555, 14536, 2422, 17641, 1796, 14096, 4297, 1854, 17007, 10932, 5610, 8832, 13305, 12402, 16891, 1276, 6604, 1205, 1151, 18733, 874, 1987, 12939, 4930, 4912, 555, 14536, 2422, 17641, 1796, 14096, 4297, 1854, 17007, 10932, 5610, 8832, 13305, 
12402, 16891, 1276, 6604, 1205, 1151, 18733, 874, 1987, 12939]",1,,25,405,0.5492722016533955,3 -"[10276, 10441, 16168, 9256, 5174, 12523, 12160, 9765, 7500, 2460, 16760, 16703, 3702, 8822, 1239, 16579, 1558, 12253, 2361, 14940, 5090, 1316, 9042, 15658, 11509, 10276, 10441, 16168, 9256, 5174, 12523, 12160, 9765, 7500, 2460, 16760, 16703, 3702, 8822, 1239, 16579, 1558, 12253, 2361, 14940, 5090, 1316, 9042, 15658, 11509, 10276, 10441, 16168, 9256, 5174, 12523, 12160, 9765, 7500, 2460, 16760, 16703, 3702, 8822, 1239, 16579, 1558, 12253, 2361, 14940, 5090, 1316, 9042, 15658, 11509, 10276, 10441, 16168, 9256, 5174, 12523, 12160, 9765, 7500, 2460, 16760, 16703, 3702, 8822, 1239, 16579, 1558, 12253, 2361, 14940, 5090, 1316, 9042, 15658, 11509, 10276, 10441, 16168, 9256, 5174, 12523, 12160, 9765, 7500, 2460, 16760, 16703, 3702, 8822, 1239, 16579, 1558, 12253, 2361, 14940, 5090, 1316, 9042, 15658, 11509, 10276, 10441, 16168, 9256, 5174, 12523, 12160, 9765, 7500, 2460, 16760, 16703, 3702, 8822, 1239, 16579, 1558, 12253, 2361, 14940, 5090, 1316, 9042, 15658, 11509, 10276, 10441, 16168, 9256, 5174, 12523, 12160, 9765, 7500, 2460, 16760, 16703, 3702, 8822, 1239, 16579, 1558, 12253, 2361, 14940, 5090, 1316, 9042, 15658, 11509, 10276, 10441, 16168, 9256, 5174, 12523, 12160, 9765, 7500, 2460, 16760, 16703, 3702, 8822, 1239, 16579, 1558, 12253, 2361, 14940, 5090, 1316, 9042, 15658, 11509, 10276, 10441, 16168, 9256, 5174, 12523, 12160, 9765, 7500, 2460, 16760, 16703, 3702, 8822, 1239, 16579, 1558, 12253, 2361, 14940, 5090, 1316, 9042, 15658, 11509, 10276, 10441, 16168, 9256, 5174, 12523, 12160, 9765, 7500, 2460, 16760, 16703, 3702, 8822, 1239, 16579, 1558, 12253, 2361, 14940, 5090, 1316, 9042, 15658, 11509]",1,,25,601,0.5071258305015977,3 -"[16703, 12647, 5494, 1574, 9708, 8823, 12160, 4771, 13156, 833, 19071, 1852, 8406, 13694, 15851, 1987, 1909, 2460, 12311, 9999, 7445, 5081, 17172, 19445, 18625, 17365, 2962, 9181, 18733, 15993, 18544, 2422, 15986, 11869, 881, 15947, 16703, 12647, 
5494, 1574, 9708, 8823, 12160, 4771, 13156, 833, 19071, 1852, 8406, 13694, 15851, 1987, 1909, 2460, 12311, 9999, 7445, 5081, 17172, 19445, 18625, 17365, 2962, 9181, 18733, 15993, 18544, 2422, 15986, 11869, 881, 15947, 16703, 12647, 5494, 1574, 9708, 8823, 12160, 4771, 13156, 833, 19071, 1852, 8406, 13694, 15851, 1987, 1909, 2460, 12311, 9999, 7445, 5081, 17172, 19445, 18625, 17365, 2962, 9181, 18733, 15993, 18544, 2422, 15986, 11869, 881, 15947, 16703, 12647, 5494, 1574, 9708, 8823, 12160, 4771, 13156, 833, 19071, 1852, 8406, 13694, 15851, 1987, 1909, 2460, 12311, 9999, 7445, 5081, 17172, 19445, 18625, 17365, 2962, 9181, 18733, 15993, 18544, 2422, 15986, 11869, 881, 15947, 16703, 12647, 5494, 1574, 9708, 8823, 12160, 4771, 13156, 833, 19071, 1852, 8406, 13694, 15851, 1987, 1909, 2460, 12311, 9999, 7445, 5081, 17172, 19445, 18625, 17365, 2962, 9181, 18733, 15993, 18544, 2422, 15986, 11869, 881, 15947, 16703, 12647, 5494, 1574, 9708, 8823, 12160, 4771, 13156, 833, 19071, 1852, 8406, 13694, 15851, 1987, 1909, 2460, 12311, 9999, 7445, 5081, 17172, 19445, 18625, 17365, 2962, 9181, 18733, 15993, 18544, 2422, 15986, 11869, 881, 15947, 16703, 12647, 5494, 1574, 9708, 8823, 12160, 4771, 13156, 833, 19071, 1852, 8406, 13694, 15851, 1987, 1909, 2460, 12311, 9999, 7445, 5081, 17172, 19445, 18625, 17365, 2962, 9181, 18733, 15993, 18544, 2422, 15986, 11869, 881, 15947, 16703, 12647, 5494, 1574, 9708, 8823, 12160, 4771, 13156, 833, 19071, 1852, 8406, 13694, 15851, 1987, 1909, 2460, 12311, 9999, 7445, 5081, 17172, 19445, 18625, 17365, 2962, 9181, 18733, 15993, 18544, 2422, 15986, 11869, 881, 15947, 16703, 12647, 5494, 1574, 9708, 8823, 12160, 4771, 13156, 833, 19071, 1852, 8406, 13694, 15851, 1987, 1909, 2460, 12311, 9999, 7445, 5081, 17172, 19445, 18625, 17365, 2962, 9181, 18733, 15993, 18544, 2422, 15986, 11869, 881, 15947, 16703, 12647, 5494, 1574, 9708, 8823, 12160, 4771, 13156, 833, 19071, 1852, 8406, 13694, 15851, 1987, 1909, 2460, 12311, 9999, 7445, 5081, 17172, 19445, 
18625, 17365, 2962, 9181, 18733, 15993, 18544, 2422, 15986, 11869, 881, 15947]",1,,36,454,0.5819343713546685,3 -"[18957, 12944, 7720, 10350, 6073, 15096, 5596, 18906, 16652, 14433, 11652, 2526, 14431, 5871, 16462, 1068, 15860, 2041, 4849, 7888, 4917, 8730, 14654, 16552, 7108, 15377, 5395, 11450, 16100, 7740, 14445, 19005, 5946, 15342, 284, 6201, 19097, 11881, 12928, 8055, 18058, 2024, 8342, 510, 4099, 4298, 15851, 13857, 16637, 18733, 16958, 9368, 15783, 2229, 16028, 13235, 5029, 7857, 3713, 15207, 18773, 7137, 17015, 16585, 13616, 15189, 5089, 4297, 62, 13295, 18701, 4152, 9808, 3350, 88, 5221, 15498, 6609, 7956, 3331, 11449, 11390, 13123, 12022, 18199, 872, 18669, 9769, 5831, 5429, 16101, 4860, 8435, 8445, 1881, 4988, 8356, 7088, 8772, 16645]",1,2,10,302,0.6675457726834712, -"[5663, 18059, 10340, 10857, 18477, 531, 17952, 11762, 323, 2841, 16229, 13820, 1614, 957, 9314, 8447, 12235, 18766, 18791, 3327, 296, 2456, 10847, 2239, 7553, 17845, 5523, 12261, 17767, 17609, 4657, 17297, 15627, 17917, 12649, 6415, 3577, 670, 15663, 8864, 11141, 9186, 3526, 14679, 15884, 15329, 11024, 2886, 2182, 12612, 14105, 9831, 4929, 1830, 13405, 8203, 7861, 5470, 4404, 10863, 14943, 7295, 3665, 13009, 18839, 8130, 18616, 16727, 206, 1430, 6352, 9270, 16671, 13475, 6594, 11636, 10671, 8572, 806, 4924, 11864, 8748, 10173, 13246, 11484, 13741, 11264, 2583, 15236, 7992, 10623, 15095, 1752, 6949, 2338, 2177, 17496, 13668, 16170, 9692]",0,2,10,356,0.22249835167621848, -"[11449, 17415, 7780, 18893, 18050, 13369, 7398, 918, 11390, 17634, 2110, 7305, 5048, 7099, 1683, 1300, 17956, 1315, 18768, 16925, 12327, 2028, 4149, 15040, 5095, 15712, 7080, 14515, 2563, 11024, 8652, 5639, 7854, 16264, 14997, 17474, 2019, 5490, 3267, 4051, 5324, 17019, 3146, 17034, 11450, 18512, 8569, 16198, 3683, 16330, 4340, 17387, 14754, 2229, 1753, 6631, 9814, 2577, 4392, 5930, 19002, 14810, 12491, 8730, 11052, 15826, 3141, 6298, 2331, 18373, 17756, 16355, 11869, 1410, 11169, 16624, 16921, 1265, 4775, 4045, 18773, 
15682, 3281, 12256, 6037, 10431, 1854, 7596, 16214, 7243, 8509, 9559, 10110, 8306, 10037, 4356, 7238, 2352, 11296, 12440]",1,2,10,404,0.6986864127402749, -"[616, 17415, 14076, 18773, 17387, 12789, 17538, 9864, 17192, 17859, 17034, 18736, 11450, 16305, 5095, 15841, 8221, 14857, 7388, 12278, 5336, 17122, 11390, 17003, 19002, 5500, 9390, 1230, 13846, 10350, 1439, 18349, 1204, 7230, 1068, 9769, 4581, 11814, 13996, 14515, 2573, 11548, 918, 5507, 16318, 18752, 17401, 9723, 2821, 14420, 16982, 2383, 2352, 18754, 1242, 12479, 2428, 16650, 1265, 5324, 18837, 5003, 16989, 2457, 11989, 572, 5402, 19189, 18035, 912, 16416, 3136, 8707, 5730, 12142, 8692, 6936, 16770, 9712, 12304, 4051, 6201, 9025, 8093, 17446, 11840, 951, 1794, 3721, 17628, 18009, 11822, 12457, 13623, 12613, 3307, 17409, 5497, 1144, 6501, 9634, 14893, 3453, 17164, 8295, 7089, 10065, 444, 10361, 7377, 9477, 8424, 11067, 14484, 6548, 2035, 16938, 16397, 3187, 5639, 1986, 19402, 15448, 6164, 7168, 15542, 9424, 18717, 14525, 4450, 2767, 1962, 2304, 5661, 18847, 12884, 8470, 5430, 13630, 5111, 11168, 2111, 11923, 5019, 15761, 13311, 17061, 11024, 17858, 6690, 6000, 15808, 15455, 7465, 5363, 443, 8086, 18040, 11463, 18310, 2594, 18774, 1013, 4192, 754, 11944, 3971, 7560, 15683, 17077, 10254, 1427, 4783, 16593, 3963, 3385, 6180, 18743, 18830, 11735, 14556, 3809, 7094, 11099, 10915, 7136, 1881, 5453, 16268, 7523, 16443, 17226, 9172, 14147, 15891, 2783, 2951, 18819, 18653, 7088]",1,2,20,302,0.7229294517421514, -"[5673, 11536, 12024, 8621, 12077, 18826, 12134, 9256, 13076, 4133, 1690, 9685, 1565, 15565, 15325, 5994, 9181, 15845, 9884, 12737, 15711, 17522, 6684, 13879, 18895, 16560, 6686, 2128, 16033, 1730, 10441, 15513, 6141, 10796, 16996, 2039, 12979, 13756, 8372, 5790, 16051, 14491, 8857, 18625, 4702, 8105, 5698, 8862, 14194, 9078, 4911, 16994, 12354, 8885, 10276, 17947, 8005, 4173, 3270, 10375, 7696, 16637, 11407, 14353, 10931, 6650, 1701, 14970, 4876, 4111, 8608, 3201, 10516, 12997, 19020, 8561, 4181, 6656, 2170, 
7862, 16460, 13157, 13966, 15735, 3531, 13755, 4251, 3244, 13371, 14061, 1475, 12856, 5665, 17281, 17629, 4798, 1033, 4261, 16465, 14727, 1915, 11674, 14183, 14915, 7366, 12253, 5612, 6680, 10285, 2419, 15769, 518, 14686, 7113, 228, 7638, 16703, 12003, 10431, 19342, 18747, 9607, 1660, 15013, 10778, 917, 12019, 4548, 6340, 13463, 16, 16731, 18055, 2699, 3240, 357, 2414, 13300, 15231, 11605, 15057, 17939, 14021, 17399, 7219, 19021, 14578, 2707, 1091, 760, 13223, 3943, 4939, 18841, 1236, 17266, 15852, 18121, 13307, 1272, 11552, 12175, 10535, 16211, 12349, 1546, 453, 4103, 16705, 15604, 18558, 13126, 8198, 6923, 16244, 12489, 18724, 15426, 4411, 766, 16169, 11542, 13527, 18185, 1753, 8062, 2658, 168, 9134, 15479, 15505, 14011, 11523, 2411, 19335, 9965, 2599, 10969, 4913, 3342, 13934, 18767, 16812, 11934, 13227, 17087, 9107, 1914, 19426, 9074, 6903, 15046, 5079, 9304, 9505, 9017, 1152, 18816, 18972, 3695, 16480, 12490, 6115, 7027, 11641, 19124, 12224, 13369, 14644, 4102, 16826, 7782, 9258, 2304, 2876, 1973, 17892, 6832, 420, 5555, 8599, 10292, 17433, 18790, 17969, 10712, 5745, 5324, 5179, 15651, 14254, 10433, 6574, 13814, 11680, 13583, 16070, 16941, 10072, 10218, 7980, 11256, 14218, 9420, 18594, 3023, 15120, 1534, 12096, 9905, 8240, 12859, 17911, 11324, 16694, 364, 6182, 1920, 4972, 4547, 5875, 3282, 7948, 15752, 876, 2468, 1049, 5371, 13602, 3898, 13171, 6335, 3161, 15075, 8664, 6414, 9121, 10682, 4709, 6752]",1,2,30,302,0.7749150479281838, -"[1903, 12442, 17538, 10761, 4411, 10403, 11450, 4070, 15153, 8968, 1835, 16648, 10858, 16631, 11582, 1309, 481, 15577, 6504, 17716, 13009, 10816, 17767, 9033, 4079, 17499, 5048, 18898, 17026, 12304, 16242, 5375, 15139, 12664, 4112, 18145, 4829, 17911, 1893, 5899, 12564, 3577, 4002, 18221, 18765, 8080, 3249, 17542, 10632, 5547, 12812, 14973, 14234, 47, 5569, 17724, 9726, 14857, 6336, 6969, 7349, 18824, 14067, 12824, 5319, 12967, 11995, 62, 12552, 16037, 17025, 17946, 3702, 5441, 5736, 2137, 8488, 8502, 13746, 12019, 14420, 6344, 
9070, 13479, 4758, 4007, 2134, 10577, 3190, 15087, 14415, 1164, 16982, 4262, 7740, 7445, 6611, 14624, 8753, 5704, 13547, 6084, 16355, 9629, 445, 10870, 17943, 8987, 8015, 15223, 2472, 17772, 12105, 7110, 384, 11400, 1808, 12797, 11809, 13925, 10952, 3215, 6566, 2544, 17837, 2343, 14052, 2803, 15804, 8621, 8221, 3468, 10820, 13499, 5046, 6125, 9298, 10312, 12559, 4587, 18743, 7778, 8954, 4893, 9644, 12267, 4710, 3908, 9774, 3111, 7, 17458, 7711, 4053, 19321, 2211, 6243, 14834, 17019, 10796, 7311, 548, 4645, 13859, 8271, 7864, 18897, 15626, 4242, 8263, 1410, 8556, 10457, 15922, 16904, 7315, 10055, 6833, 15074, 16613, 899, 13010, 1612, 19692, 7114, 7254, 999, 14647, 3919, 17281, 18026, 8734, 12816, 5800, 11386, 5672, 7400, 6220, 11190, 8763, 10427, 105, 12592, 4301, 12637, 15188, 16741, 4693, 7391, 10219, 2782, 9211, 9181, 18966, 306, 13132, 643, 8381, 8755, 8060, 8593, 1606, 4233, 10770, 17012, 10627, 245, 3306, 2959, 10091, 769, 2613, 14397, 9114, 8096, 4477, 12416, 4750, 5861, 14553, 17505, 12171, 2031, 10575, 766, 18712, 7200, 2093, 8822, 19151, 2947, 9643, 19218, 3747, 3950, 4421, 768, 4344, 2729, 7935, 9011, 19264, 14551, 11948, 7295, 12699, 14330, 4834, 19294, 16901, 14629, 5898, 15061, 3239, 15429, 10521, 1390, 9416, 4875, 15501, 8526, 17439, 4078, 4617, 1179, 13665, 2655, 8390, 15239, 6291, 7904, 8811, 16857, 7029, 8516, 2209, 7149, 17600, 4578, 15385]",1,2,30,356,0.7983466044530101, -"[11312, 1570, 294, 3886, 9041, 2862, 3351, 515, 6485, 973, 16741, 201, 2784, 9268, 2672, 8696, 13364, 9865, 483, 7060, 7585, 15891, 8540, 19032, 12457, 15193, 18862, 474, 13356, 129, 12882, 18226, 10949, 12983, 11108, 17863, 19068, 6236, 4206, 8093, 16538, 1917, 19129, 18532, 6952, 11024, 5888, 9416, 18782, 17176, 15954, 6483, 9406, 8221, 9723, 11447, 9943, 966, 14881, 9744, 1597, 11062, 1008, 10821, 6721, 6125, 7546, 16305, 12143, 15639, 16904, 17316, 12559, 17406, 897, 2194, 17600, 19044, 3781, 7481, 18548, 12593, 14647, 2534, 12483, 12427, 15718, 18687, 9070, 11400, 11249, 
9451, 8271, 14643, 17530, 14290, 1614, 8328, 4477, 14613, 10556, 7780, 16789, 17732, 8632, 16770, 18165, 6031, 2382, 1324, 19156, 16631, 847, 7828, 15074, 13325, 12731, 15712, 819, 13859, 5647, 3330, 11620, 12150, 2579, 14237, 8318, 11264, 9893, 9455, 18224, 6869, 878, 7776, 10055, 9722, 8566, 14629, 15329, 8780, 8509, 8096, 19079, 1982, 10476, 616, 9035, 17035, 6667, 11906, 1095, 19066, 15542, 1191, 8893, 2919, 6068, 9548, 11907, 18311, 17402, 2612, 11386, 16089, 8331, 7568, 17267, 137, 6282, 16317, 10657, 18337, 12389, 14407, 18755, 16392, 15766, 8456, 8390, 14047, 1646, 5477, 13178, 15384, 13173, 4242, 18904, 12376, 1938, 13242, 4750, 11544, 17242, 16074, 5872, 6243, 6123, 7935, 8813, 11989, 4112, 15453, 17779, 9390, 7297, 9859, 13056, 9758, 18858, 5048, 7390, 10587, 4274, 6159, 15787, 4961, 14505, 17368, 2992, 1832, 16458, 11731, 8366, 12142, 1531, 11368, 18743, 9257, 5500, 19460, 15554, 11130, 2803, 13079, 14834, 18192, 15455, 7069, 9382, 986, 8037, 4891, 3390, 629, 16427, 645, 8381, 14664, 10521, 14453, 12321, 16318, 16260, 16574, 6220, 10245, 7596, 18671, 4946, 152, 19205, 17130, 11109, 14974, 4587, 17686, 11795, 4846, 16967, 14878, 14397, 6501, 9098, 7865, 5045, 2835, 19393, 2670, 2587, 17070, 17470, 16698, 6422, 4053, 12263, 10304, 15269, 2514, 8689, 15552, 6651, 3275, 16020, 16495, 12875, 2199, 9148, 7254, 4834, 7374]",1,2,30,404,0.679160116, -"[12884, 10575, 2862, 11450, 14284, 19273, 17105, 8094, 14512, 2806, 15377, 901, 6493, 7485, 11894, 12416, 1325, 7518, 15884, 15189, 9026, 18791, 1683, 10539, 1063, 16770, 8696, 18632, 15356, 18911, 7056, 5318, 6450, 5190, 11024, 4811, 17192, 7797, 9451, 13612, 17248, 14356, 134, 15373, 11152, 3385, 11449, 18442, 11604, 2887, 16604, 15952, 3634, 7829, 18446, 7888, 1416, 8512, 6369, 3227, 7553, 11816, 13297, 12075, 9835, 5431, 15265, 18957, 11372, 9771, 8576, 10699, 10568, 12431, 19308, 12036, 12706, 17961, 14265, 14147, 10532, 10420, 18942, 17537, 2724, 16121, 9774, 3153, 5926, 3517, 989, 17739, 5719, 11733, 2425, 
6242, 10700, 11506, 3794, 18015]",1,3,10,302,0.6916873763757164, -"[9549, 3517, 8094, 15884, 16869, 6961, 10684, 10535, 2862, 3663, 10646, 7056, 18957, 12036, 14479, 11141, 18791, 13668, 11524, 11496, 15569, 15377, 8360, 12787, 16102, 12741, 16604, 3162, 18871, 3377, 17415, 11894, 18769, 3634, 7992, 16190, 9774, 17083, 4585, 10686, 240, 3856, 88, 18201, 1416, 5719, 332, 10433, 17266, 8271, 1593, 6143, 17214, 6893, 14622, 11604, 13346, 11369, 2848, 8043, 4844, 6438, 5943, 18632, 10750, 13313, 8696, 11059, 18746, 8528, 4609, 10503, 2610, 8675, 7518, 17934, 4850, 9612, 10727, 7776, 10770, 2414, 17537, 1838, 15883, 782, 7608, 2593, 2899, 17248, 5681, 11963, 18200, 4959, 3385, 10443, 8241, 3986, 13740, 9950]",1,3,10,356,0.6653141958715829, -"[5470, 12235, 5540, 4929, 2435, 4923, 12649, 15971, 1416, 2841, 11472, 7303, 10863, 2056, 77, 5157, 14843, 15627, 16229, 19053, 34, 10857, 16816, 15095, 6352, 1973, 3266, 17723, 1615, 12259, 2150, 5678, 10223, 9451, 7930, 9831, 10816, 2568, 15572, 8094, 15031, 9775, 9004, 15420, 2543, 13761, 7992, 12612, 3526, 6415, 14216, 3665, 7295, 7979, 2338, 10617, 3911, 3245, 8583, 16040, 11678, 14943, 15405, 194, 5716, 3634, 8447, 6051, 6949, 3244, 13561, 10575, 2846, 10415, 16358, 7861, 2182, 18956, 4682, 10420, 18266, 12075, 4820, 518, 6151, 2516, 2666, 1430, 7750, 13165, 35, 1752, 2787, 17794, 5375, 101, 17199, 19462, 8920, 16027, 14679, 8363, 9708, 17971, 18018, 19502, 10287, 16569, 11141, 14381, 2013, 13024, 3860, 12148, 13668, 512, 14017, 17293, 13839, 18411, 8783, 13250, 12305, 12365, 206, 18350, 18085, 2509, 8130, 10623, 19060, 13820, 9812, 15853, 12632, 14366, 17265, 262, 17879, 1091, 5873, 4214, 5152, 7129, 1614, 10059, 18839, 19010, 5901, 15331, 7832, 5430, 17952, 16332, 15201, 8143, 8241, 18440, 807, 13145, 14827, 9729, 4525, 17911, 47, 11024, 13475, 17912, 6285, 11762, 18087, 11351, 12984, 4670, 5252, 2862, 3838, 11233, 18806, 13830, 15408, 14816, 16250, 11510, 10015, 2087, 8959, 11363, 13405, 9229, 2239, 15236, 8576, 5067, 
12617, 806, 14215, 6522, 11912, 17590]",1,3,20,404,0.6970634477861744, -"[13466, 17539, 12340, 6733, 15503, 16125, 12201, 12643, 6883, 6660, 1659, 16775, 4222, 10233, 2615, 1833, 17054, 19003, 15125, 3093, 18317, 18325, 2515, 16918, 5306, 10301, 16748, 15189, 4846, 16940, 13491, 11370, 8584, 14376, 14317, 13180, 4227, 40, 15375, 3082, 11544, 7180, 15639, 1753, 12002, 8376, 6095, 2914, 16624, 16457, 611, 18099, 7130, 2623, 12924, 18853, 15857, 12199, 6872, 15571, 3782, 17387, 1377, 4791, 15128, 10474, 18476, 5605, 3705, 17470, 10226, 6269, 17771, 16035, 9608, 10264, 5619, 1086, 15579, 18053, 9473, 18911, 18211, 10141, 17187, 16770, 6818, 1843, 12887, 12714, 13887, 9444, 2725, 1930, 9471, 5058, 14141, 10065, 10670, 11450]",1,4,10,459,0.7774002130141502, -"[645, 18768, 4200, 17811, 14647, 5872, 18736, 3781, 12882, 11294, 2006, 7293, 116, 1683, 8037, 8986, 2352, 4356, 13369, 17810, 8509, 15952, 8372, 5752, 18816, 6197, 11814, 12431, 1892, 14754, 5375, 12553, 3713, 2888, 6631, 11390, 18773, 284, 17415, 8840, 11024, 13859, 13995, 3324, 5918, 15385, 1242, 18224, 5029, 12027, 2943, 7, 12768, 18687, 17513, 2024, 7813, 11120, 9346, 4149, 4112, 16186, 4522, 17070, 4984, 18827, 9965, 6488, 5955, 1265, 7854, 6169, 18836, 14997, 6220, 4722, 8093, 12731, 12839, 8470, 1051, 3683, 5095, 8609, 2229, 2733, 8715, 8813, 11449, 5329, 8566, 4701, 1832, 15806, 18942, 1338, 15207, 8184, 18221, 8483, 1343, 17520, 15679, 10183, 16908, 11386, 6068, 11489, 15682, 4477, 3218, 6358, 1068, 18945, 9634, 5639, 19230, 10006, 3502, 15456, 2080, 7091, 918, 17354, 10476, 1889, 17645, 2623, 4899, 17387, 14484, 18192, 19074, 5080, 2488, 6335, 14515, 5324, 1274, 4161, 3390, 19002, 9320, 5934, 6334, 1481, 17307, 481, 16214, 11400, 351, 18190, 15074, 7584, 13668, 15826, 8894, 2871, 2110, 5048, 1439, 1945, 9040, 5641, 2946, 616, 16098, 1809, 15577, 15344, 1241, 14407, 15589, 10595, 17426, 14031, 4007, 18797, 1385, 3289, 4925, 2383, 2579, 19090, 6715, 11262, 10037, 2734, 2028, 8851, 11317, 19156, 3944, 15916, 
6368, 10684, 11450, 9814, 4274, 8133]",1,4,20,356,0.6505046406654156, -"[432, 16268, 10812, 1265, 12628, 3284, 18774, 14147, 19285, 1264, 12946, 9794, 8672, 12614, 3422, 3577, 3146, 14082, 18959, 15920, 186, 592, 14593, 9000, 14751, 4058, 9040, 6715, 3963, 11372, 18696, 17911, 10520, 5567, 9032, 4051, 17164, 9390, 14484, 619, 3041, 6358, 17366, 16783, 11178, 10008, 11894, 9934, 88, 17226, 5020, 7293, 7765, 467, 19129, 523, 14656, 18793, 8221, 15597, 8626, 6968, 12836, 11412, 14335, 11262, 10842, 1584, 4450, 2549, 11215, 4073, 11390, 11450, 8676, 13617, 10595, 17388, 759, 12256, 17709, 18662, 9742, 11923, 8715, 9534, 9814, 17809, 10537, 17761, 16305, 16696, 13103, 5666, 16983, 2520, 15008, 777, 15644, 4356, 18825, 19082, 13400, 14556, 392, 2148, 7097, 16210, 4984, 4150, 15663, 616, 18190, 2670, 2735, 11896, 8153, 12570, 18729, 18618, 6015, 3729, 1161, 4607, 14876, 16541, 1056, 185, 18728, 6266, 7088, 18035, 9001, 6330, 4993, 8169, 5408, 17859, 15864, 4122, 14148, 1312, 18176, 198, 3876, 16514, 1694, 11174, 10179, 5012, 14702, 6706, 18475, 15860, 18994, 15973, 12680, 11870, 6035, 11271, 7956, 9888, 15590, 3036, 1035, 938, 14273, 7392, 2041, 3493, 5823, 1832, 16203, 19452, 1765, 2573, 18957, 18020, 7369, 2049, 11840, 18501, 7436, 15542, 4373, 926, 16566, 12589, 6628, 5441, 8080, 3720, 5497, 15226, 17611, 13649, 3046, 8351, 10187, 16218]",1,4,20,459,0.6799208804584876, -"[8893, 18337, 6243, 17863, 1752, 3743, 14290, 7295, 1597, 8755, 897, 11368, 201, 7553, 17542, 11024, 18743, 2821, 15787, 16179, 19129, 13475, 8241, 4378, 10601, 16631, 18226, 9148, 645, 10184, 8576, 10385, 15385, 9035, 523, 12166, 1439, 12150, 16880, 7089, 2759, 10905, 18840, 12480, 4206, 13356, 7992, 15448, 9451, 4752, 2835, 969, 3919, 9864, 754, 15013, 515, 15718, 18461, 7924, 18782, 8221, 3145, 19004, 18717, 16045, 11906, 5872, 12256, 6469, 18736, 15193, 18697, 5500, 2338, 12882, 19068, 1920, 5324, 17859, 15954, 18645, 15057, 18615, 4150, 7979, 474, 5497, 18726, 7585, 15306, 12789, 2658, 9070, 
10521, 10500, 8540, 11989, 5647, 11700, 953, 17206, 12983, 4425, 7861, 15384, 8331, 2862, 12593, 10824, 14920, 5045, 14265, 1144, 15891, 2803, 9459, 1646, 14647, 12457, 11523, 18945, 5048, 18276, 6485, 2670, 2672, 6128, 10577, 7388, 2295, 11141, 11822, 5859, 17930, 14834, 11138, 15501, 17244, 9744, 13242, 979, 15265, 15430, 639, 19214, 6220, 10949, 15152, 129, 13433, 13914, 9505, 17939, 7780, 3671, 6493, 3510, 6810, 15254, 9304, 2766, 2706, 1305, 9041, 3649, 18478, 17745, 7311, 15207, 7135, 12649, 14110, 4961, 16606, 7481, 10066, 13259, 127, 5336, 17660, 3377, 809, 10952, 11002, 3092, 17266, 17399, 16789, 3516, 10718, 2129, 9612, 8264, 15046, 16177, 7600, 12559, 5442, 15476, 11080, 4803, 3781, 19450, 6869, 10079, 12955, 14456, 7315, 13223, 3889, 8425, 3147, 6483, 16433, 292, 17251, 16465, 1660, 14426, 9063, 10857, 19193, 3564, 9262, 8078, 7826, 10336, 15031, 12509, 17290, 2599, 14699, 18431, 4935, 15388, 9816, 18956, 4773, 2806, 7683, 1112, 8271, 17340, 18425, 8040, 7486, 9336, 9549, 12395, 5898, 16604, 19019, 11524, 11705, 11447, 17920, 5557, 16114, 12995, 6706, 1915, 14843, 5755, 15883, 12741, 2472, 16207, 3143, 4272, 2419, 6705, 10142, 4595, 14884, 16121, 12036, 7719, 815, 13895, 17878, 16120, 19455, 9945, 3262, 10503, 481, 1087, 2305, 10782, 12883, 4867, 909, 5707, 13937, 4016, 10950, 8805, 10864, 5858]",1,4,30,356,0.6622204189278288, -"[1582, 4538, 13955, 18455, 2281, 16033, 10015, 18754, 1194, 13013, 2080, 16989, 8579, 8945, 8715, 2024, 8851, 7056, 6488, 12229, 16378, 17387, 12256, 17549, 7780, 1794, 17603, 16416, 6261, 16868, 16880, 14353, 7981, 19478, 293, 9390, 5856, 18773, 4238, 14987, 1640, 2361, 5661, 12142, 14515, 11141, 733, 1063, 6412, 5324, 9723, 18816, 9424, 9648, 481, 2170, 14110, 14484, 16982, 9769, 6448, 2049, 616, 8221, 7745, 9752, 15498, 14525, 11894, 11145, 5470, 11536, 11678, 3146, 11390, 2134, 9864, 15891, 14607, 1690, 6000, 3674, 18819, 7901, 10301, 16652, 4868, 15663, 1954, 8763, 5507, 7992, 4553, 7089, 7388, 8470, 10915, 8862, 6949, 
16579, 9634, 17859, 5029, 7939, 12628, 12623, 16214, 5930, 16650, 903, 13525, 14556, 11262, 9004, 15682, 1144, 5402, 3137, 17415, 5749, 4450, 14608, 15884, 15225, 6201, 18736, 16305, 17911, 12789, 2623, 16380, 3577, 5430, 2304, 15916, 16965, 1767, 1513, 546, 11980, 11989, 11524, 14711, 10065, 11735, 12741, 351, 3721, 18035, 10595, 5943, 12479, 12440, 4564, 17710, 15207, 14431, 13001, 2142, 15778, 14491, 1246, 15841, 6736, 11200, 18837, 5656, 13779, 5673, 2862, 962, 17955, 8093, 3082, 12304, 8483, 918, 16413, 17568, 5336, 19002, 8133, 17034, 11944, 5618, 7666, 12060, 14745, 1242, 1817, 14147, 3677, 5730, 5497, 17409, 4581, 18009, 1439, 5500, 17894, 15589, 8364, 10254, 2543, 2813, 2978, 8608, 6934, 3683, 4356, 16780, 10181, 17003, 7377, 16727, 15200, 9707, 19089, 13466, 16098, 4411, 13846, 15945, 8717, 11554, 11822, 17194, 17122, 15542, 14893, 3257, 46, 14069, 3307, 3090, 10350, 15008, 9246, 4505, 19194, 8688, 11610, 10243, 62, 9328, 5639, 16250, 1265, 16229, 2594, 1872, 2214, 6690, 15615, 9449, 15920, 5019, 13369, 11049, 11840, 15960, 14754, 14530, 8017, 9802, 6631, 17538, 10568, 11269, 574, 1683, 754, 10646, 1301, 18717, 13528, 1416, 18791, 19005, 11958, 2352, 11450, 13532, 2428, 18349, 7180, 5147, 9477, 14751, 13953, 13996, 11413, 17178, 15448, 891, 18968, 8795, 6273, 11024, 6180]",1,4,30,459,0.7885580970735914, -"[4098, 5913, 14515, 2214, 3190, 1107, 15356, 1754, 17582, 912, 19154, 18767, 6245, 14284, 14785, 1850, 2182, 12649, 12612, 4520, 6065, 17722, 11742, 14622, 10433, 15179, 8090, 15572, 9181, 650, 6718, 17354, 13369, 555, 15545, 14388, 1620, 1792, 18072, 3438, 6736, 18453, 5152, 15500, 6242, 14389, 11474, 5442, 11024, 18425, 1854, 18381, 12019, 6493, 11816, 13944, 7553, 17328, 15991, 12995, 4912, 4700, 2425, 16703, 8864, 1872, 2862, 10796, 16578, 11524, 13285, 12075, 17911, 4016, 17194, 17334, 13651, 14101, 15884, 8372, 9634, 18404, 18442, 2700, 1582, 6169, 14110, 16177, 11485, 8696, 1811, 15265, 1669, 15522, 1506, 7312, 3570, 4682, 16186, 10037, 17439, 
9965, 9227, 15747, 2364, 8727, 9573, 8576, 7311, 7797, 7057, 6488, 11225, 18209, 9451, 3942, 13297, 1946, 11052, 9875, 620, 13336, 17739, 15221, 5318, 9779, 12193, 8221, 8905, 4556, 9218, 15006, 12780, 2138, 17363, 874, 14356, 4794, 7619, 10223, 2505, 7485, 1568, 3663, 3385, 14030, 1416, 5202, 13306, 18455, 14920, 4896, 2779, 14265, 2762, 9681, 2899, 11733, 2150, 15277, 1341, 6643, 6837, 2, 7565, 2813, 10420, 10539, 5513, 7326, 13174, 13152, 19010, 7883, 5431, 10636, 18697, 10782, 12374, 14879, 2422, 5385, 10670, 16604, 8608, 15189, 10149, 14754, 13440, 5564, 240, 3634, 8795, 1325, 3886, 14581, 14732, 16812, 7056, 5023, 13963, 15325, 369, 371, 13423, 17437, 12107, 17934, 9229, 7973, 9663, 15857, 8857, 17160, 9589, 18514, 9463, 16013, 4906, 4748, 15773, 4773, 18467, 484, 11729, 14519, 10684, 16347, 15776, 10575, 10389, 5870, 3137, 17266, 15448, 8970, 13668, 5492, 10623, 4042, 8510, 10015, 19634, 15853, 16250, 9707, 569, 1428, 14586, 18911, 8363, 17501, 16294, 528, 8158, 10944, 18776, 6448, 15405, 14130, 12741, 15292, 18064, 14085, 18956, 4935, 7061, 7752, 16190, 2806, 8749, 12706, 3576, 6051, 16093, 8335, 18921, 12289, 3517, 18769, 8058, 8262, 5374, 8766, 11152, 4810, 5116, 19212, 4540, 5375, 2938, 6970, 10333, 5521, 5190, 18632, 11834, 2887, 8532, 7448]",1,6,30,356,0.7067505198559618, -"[3105, 11483, 6729, 7273, 7712, 12573, 17731, 3524, 3267, 15734, 14737, 616, 8384, 14165, 14995, 15079, 11457, 1842, 15704, 2969, 3215, 10769, 687, 10981, 15592, 7379, 611, 8968, 3713, 16387, 18264, 9365, 14862, 60, 7780, 12431, 17299, 2692, 14341, 9229, 8094, 1302, 786, 9471, 16697, 7176, 3767, 3209, 13502, 9885, 11013, 1835, 4149, 13529, 16648, 11009, 14651, 12164, 7079, 8772, 16435, 284, 2571, 4810, 11390, 4853, 17961, 369, 4893, 985, 11582, 1119, 1695, 15207, 14640, 6599, 12523, 3325, 8608, 8372, 5966, 16184, 4118, 4647, 7272, 1872, 8262, 2116, 15851, 15692, 17753, 3658, 15221, 16641, 17782, 6051, 5497, 5813, 620, 484, 11450, 1521, 1903, 19344, 97, 10799, 17510, 16535, 3966, 
7830, 3710, 11449, 3944, 3815, 5277, 6389, 18942, 3517, 12664, 8894, 19098, 1406, 8277, 16083, 6407, 13596, 13436, 18771, 10420, 9551, 15591, 1635, 1237, 818, 13440, 2019, 12314, 4701, 2137, 11101, 17399, 12932, 782, 13083, 18639, 2927, 9754, 4970, 1046, 5431, 12457, 2461, 6657, 12333, 19218, 10716, 9784, 13729, 7393, 4241, 5237, 14283, 4849, 9439, 7865, 5831, 19354, 19168, 2273, 17196, 14566, 14387, 7740, 14142, 5989, 435, 1205, 18445, 2605, 1941, 10700, 5747, 17373, 16547, 11082, 15127, 3126, 1172, 8073, 2610, 17389, 2495, 10050, 7351, 4750, 9732, 5816, 10467, 18786, 13651, 17114, 13725, 9765, 2789, 8839, 16121, 8080, 15936, 12247, 9146, 6512, 14519, 1616, 3228, 17616, 18873, 2629, 18388, 2689, 9417, 4216, 535, 15278, 4467, 8549, 9965, 14510, 18491, 15072, 16774, 12445, 17743, 10770, 19163, 4358, 16901, 16348, 9108, 10491, 4264, 9315, 13685, 11034, 17439, 1934, 7, 7858, 13479, 17366, 6611, 2646, 15506, 14124, 801, 15223, 10403, 7006, 10858, 5414, 14061, 8773, 12036, 16368, 3570, 17662, 8504, 6203, 2655, 6825, 4531, 1769, 11059, 16431, 10960, 9102, 16832, 1587, 10325, 13020, 13326, 1889, 2871, 17793, 12589, 14177, 7670, 3037, 263, 909, 17552, 4229, 7838, 8199, 10000, 7934, 8097, 2401, 4418, 5492, 19264, 9105, 9914, 4492, 17772, 15629, 11054, 4070, 922, 8987, 17574, 8698, 5898, 10761, 1313, 14036, 14551, 10742, 6293, 4084, 10686, 11496, 15498, 6624, 17361, 3439, 15093, 4808, 17280, 8209, 15762, 7927, 5441, 9974, 5112, 979, 11773, 17967, 10297, 6912, 5397, 11995, 3854, 9045, 8488, 10684, 14108, 4981, 2817, 11586, 8813, 3314, 8284, 4166, 6102, 13632, 11464, 1353, 15567, 15153, 2523, 15905, 6711, 6566, 5059, 7039, 1840, 3827, 1453, 17505, 10762, 3950, 5574, 2388, 6248, 8322, 3819, 2984, 7833, 7807, 19412, 16016, 5228, 4692, 3545, 7757, 10161, 9548, 3377, 19019, 14851, 10463, 12447, 13997, 9629, 7058, 7396, 5298, 16964, 6525, 9673]",1,6,40,404,0.6730232794035603, -"[17474, 13703, 16477, 11304, 2936, 10163, 3336, 16029, 2986, 1477, 4328, 9762, 11217, 4723, 18400, 2092, 
6054, 13438, 2367, 4063, 8262, 11131, 10752, 17086, 2420, 13981, 15535, 11491, 7110, 16587, 1582, 2949, 18624, 4889, 2205, 529, 18050, 10788, 8625, 7894, 2456, 47, 7308, 13234, 9531, 5448, 2659, 15544, 6509, 6424]",0,3,5,197,0.22249835167621848, -"[7156, 19670, 3052, 2338, 1378, 17899, 2654, 12549, 13940, 4198, 3327, 1752, 18743, 12256, 13805, 13675, 16284, 14446, 18645, 5125, 5467, 10264, 11503, 2914, 427, 9193, 13180, 1377, 8900, 14515, 11651, 17932, 8840, 14131, 13074, 14647, 4237, 12649, 5324, 11269, 6092, 18812, 4578, 18782, 8561, 4213, 1728, 8648, 12643, 5872, 14727, 1938, 9370, 3524, 19002, 3278, 13941, 2674, 8077, 16632, 3813, 6243, 11332, 10350, 13625, 15458, 17415, 0, 2344, 884, 11504, 8102, 13804, 6145, 11595, 5730, 7876, 7180, 735, 12697, 14123, 6732, 15007, 9241, 17206, 5045, 13787, 16608, 13191, 10559, 18315, 17409, 2759, 14556, 7303, 8165, 13756, 15820, 8770, 12655, 1416, 18757, 1777, 9256, 14756, 11656, 19155, 10233, 8862, 6949, 11526, 12837, 7388, 3940, 1497, 7973, 16918, 13975, 2964, 14721, 11822, 9113, 18719, 10441, 6645, 16014, 17622, 10521, 1271, 9388, 5972, 6544, 2120, 12150, 4273, 18126, 18773, 10585, 17542, 6105, 168, 9191, 996, 5807, 12875, 16250, 5520, 21, 14147, 13337]",1,3,15,985,0.747172491, -"[16330, 2331, 9802, 2577, 35, 17474, 9814, 16866, 7398, 1683, 12440, 16624, 7099, 15682, 11169, 18050, 18887, 2896, 1300, 17034, 5490, 8392, 17634, 17756, 15952, 3267, 14101, 2028, 11450, 18942, 18893, 2563, 4000, 18816, 11449, 16925, 1315, 6298, 6631, 18768, 17303, 18221, 18073, 7780, 14754, 18512, 15456, 10183, 12327, 5048, 5095, 13369, 11641, 16214, 18373, 12019, 4045, 918, 1265, 14515, 5930, 12256, 2019, 4925, 7084, 19002, 9322, 3683, 11390, 18224, 8730, 10854, 17387, 19171, 755, 4356, 3141, 2281, 18754, 5051, 8652, 16198, 15826, 2337, 5250, 5639, 17415, 11262, 1448, 16921, 7080, 14997, 2994, 7305, 17665, 10862, 2352, 1242, 1437, 7243, 7293, 18827, 13444, 72, 2229, 18446, 14031, 3923, 19441, 18773, 3146, 14484, 4930, 340, 616, 165, 377, 2110, 
11012, 18068, 16461, 10861, 16186, 1163, 141, 6334, 5804, 3263, 3719, 13678, 14815, 18097, 17861, 15446, 8406, 11120, 4299, 10150, 460, 11747, 8470, 5375, 7415, 12780, 901, 9371, 7434, 5890, 12499, 4064, 18230, 3136, 2463, 3603, 8787, 11229, 12046, 9946, 8393, 7030]",1,3,16,197,0.6536998529188011, -"[13439, 6962, 17056, 3218, 8918, 10006, 5345, 18276, 12480, 6391, 2459, 15462, 2031, 439, 18603, 12287, 14699, 1765, 12973, 18751, 16373, 4070, 18892, 4007, 7436, 9390, 11938, 5517, 6268, 8037, 2754, 10513, 17295, 19129, 925, 2862, 6417, 10662, 6276, 4292, 13697, 9322, 9794, 10705, 605, 16925, 10824, 18636, 9753, 2997, 11120, 15325, 15933, 11293, 9815, 1767, 16687, 9548, 7981, 1567, 2966, 17653, 15456, 16313, 18761, 9051, 13613, 6259, 14288, 7614, 6210, 8017, 3399, 18659, 17530, 14302, 5329, 13484, 3869, 8374, 14101, 18398, 1338, 7888, 12130, 15913, 5336, 13996, 4556, 432, 17215, 10757, 5199, 18410, 7326, 17418, 19049, 16652, 6381, 12045, 18696, 16806, 2255, 11463, 9467, 12256, 12613, 8687, 1555, 10067, 120, 10385, 74, 18078, 12818, 10254, 19104, 16413, 16593, 19143, 5859, 1761, 13643, 3457, 15060, 12249, 10966, 759, 8470, 1090, 16777, 19226, 4505, 5429, 14110, 2573, 105, 17194, 2523, 147, 14750, 13775, 5507, 2049, 6794, 8457, 19032, 8087, 17224, 2943, 13295, 6345, 17211, 9628, 2821, 11244, 13540, 6704, 13661, 4961]",1,3,16,1978,0.7106557792767663, -"[12247, 2134, 4631, 6377, 15961, 15571, 6657, 16167, 17439, 9758, 13441, 8338, 16319, 2667, 14432, 12617, 1377, 11249, 12416, 6247, 6812, 1889, 1621, 6778, 13442, 16378, 12517, 1395, 1324, 5888, 5443, 1301, 4577, 12569, 2291, 18729, 426, 8894, 11752, 14443, 8325, 10087, 16460, 3640, 7092, 7252, 6960, 13504, 19185, 1315, 1551, 7596, 1086, 14770, 5446, 13233, 9533, 5352, 10966, 7850, 130, 6095, 15629, 15376, 974, 17484, 3390, 15385, 431, 5077, 8118, 11380, 17426, 16697, 407, 2235, 5089, 1135, 955, 2734, 898, 17772, 12850, 8571, 12827, 3570, 196, 1694, 8529, 4836, 645, 164, 7110, 17242, 605, 17277, 13859, 47, 10124, 9175, 
1269, 16822, 1852, 13402, 6500, 3677, 15314, 18739, 3346, 3683, 13900, 4750, 15124, 7454, 15570, 15929, 19265, 2049, 850, 543, 5541, 12200, 18887, 2579, 592, 15590, 11336, 17603, 19264, 12443, 7601, 1329, 13746, 19504, 18824, 5641, 6817, 1239, 18039, 1149, 19615, 8798, 7819, 15160, 1295, 694, 13617, 16535, 17713, 18963, 16308, 17260, 17762, 17366, 14699, 10611, 11802, 6036, 14387, 8022]",1,3,16,2957,0.6620682659633819, -"[13925, 2532, 18906, 18740, 6936, 9402, 8687, 6452, 12278, 14997, 14057, 11380, 12662, 18234, 16186, 6334, 1374, 16031, 903, 2406, 2623, 6789, 9574, 15390, 9568, 2110, 15036, 18957, 1142, 6855, 4861, 12427, 13675, 3634, 2674, 14721, 17025, 6659, 12327, 8424, 9389, 12659, 16637, 15982, 17297, 9752, 3691, 3141, 18745, 12240, 16729, 15561, 3772, 9965, 11372, 8932, 13235, 10204, 4298, 18636, 3799, 13940, 10174, 5839, 8685, 8445, 3761, 18341, 205, 4540, 5492, 6488, 14879, 14832, 3165, 339, 12618, 18701, 14618, 18742, 11264, 3410, 7610, 1857, 14008, 12312, 5550, 15826, 17215, 10754, 7584, 14277, 8508, 10269, 7844, 16740, 10753, 17771, 2154, 9014, 15612, 15225, 7020, 11458, 147, 3831, 6628, 2981, 9309, 13285, 17849, 8995, 8895, 13746, 4101, 12667, 12568, 10746, 2493, 8264, 1614, 9712, 17106, 2336, 8288, 19303, 12044, 13820, 262, 11245, 3124, 12665, 18748, 9092, 2255, 11049, 4923, 11922, 1972, 16260, 4085, 14912, 15087, 13731, 1056, 3609, 14123, 11016, 18312, 7534, 8783, 2755, 10603, 1246, 7465, 6417, 15074, 16925, 13597, 6569, 13750, 483, 1717, 13057, 898, 17510, 11408, 410, 16214, 13712, 5954, 500, 10770, 12647, 10523, 18791, 8299, 11814, 2886, 10748, 18357, 5225, 7765, 4237, 6884, 19531, 5974, 8970, 9794, 13705, 13028, 6828, 8225, 15121, 462, 7230, 8509, 9415, 9983, 14322, 6290, 6434, 3800, 3162, 3715, 4927, 1553, 6242, 11848, 5751, 14780, 15044, 8457, 14388, 1244, 11881, 8106, 1536, 8772, 8862, 9681, 7825, 2495, 15840, 11944, 18517, 7659, 13564, 10718, 11958, 1560, 12915, 8910, 7301, 16593, 11907, 6079, 12134, 15008, 2463, 17600, 14777, 4326, 13486, 
62, 5564, 19040, 15329, 16101, 3923, 14305, 8715, 7774, 3453, 7538, 13748, 14742, 9523, 9362, 18766, 9827, 1264, 8696, 3517, 9744, 4553, 16853, 11358, 8987, 7080, 18782, 17645, 2798, 13203, 6335, 10101, 16826, 8356, 16028, 510, 8767, 8653, 11428, 17470, 3015, 1388, 6039, 4838, 1190, 4450]",1,3,29,2957,0.7311457118222854, -"[12126, 18904, 10179, 14593, 9657, 11331, 7445, 8450, 9073, 17073, 17366, 11372, 16007, 2882, 4149, 663, 17356, 9375, 7230, 7091, 15973, 7625, 3757, 4415, 1753, 16679, 8800, 8746, 16097, 18994, 14105, 2038, 18161, 18910, 2505, 2421, 7573, 9893, 10353, 9908, 8791, 16796, 2082, 14810, 3384, 12644, 9630, 16875, 3665, 18774, 12875, 1257, 16824, 1761, 3632, 16057, 12582, 18043, 3002, 1678, 5375, 15960, 12120, 10910, 18839, 19082, 10513, 7889, 4450, 5549, 18920, 5005, 15214, 7092, 8453, 3840, 5199, 8097, 8967, 4297, 3352, 3250, 12741, 14001, 4073, 4505, 17164, 8608, 16866, 3331, 14750, 10937, 4332, 1683, 12182, 5249, 14353, 18959, 679, 16380, 11584, 6048, 13525, 13937, 12971, 11480, 11052, 3677, 18956, 8972, 5720, 2612, 14186, 1584, 14453, 12480, 2650, 9484, 5029, 2140, 11988, 18696, 7, 10595, 15864, 8454, 6180, 12583, 179, 16060, 5402, 2888, 12943, 3225, 13604, 1837, 18713, 4340, 7465, 5060, 6808, 19074, 3923, 13498, 605, 14513, 9808, 10757, 9775, 14912, 3577, 1260, 10020, 9390, 871, 16689, 13548, 1304, 16360, 13614, 646, 10291, 17002, 3688, 10862, 9881, 9407, 5538, 6273, 7054, 4837, 17194, 7572, 11320, 1102, 17401, 3535, 15721, 5639, 3687, 7116, 16514, 1635, 8537, 2862, 18230, 8424, 9471, 3468, 10687, 1363, 14808, 1887, 16469, 19159, 765, 14103, 2395, 17911, 18718, 16591, 14679, 1465, 5771, 18632, 9328, 2371, 7302, 6727, 14147, 5003, 12880, 17971, 7854, 11814, 19615, 1551, 4038, 2382, 4632, 12632, 47, 6604, 5464, 9970, 15225, 9876, 8455, 16069, 6526, 11467, 5495, 3402, 9814, 11170, 10283, 17496, 16724, 552, 1919, 5019, 15866, 14797, 4503, 2355, 3851, 6895, 2670, 13623, 11458, 4487, 13982, 12831, 645, 6608, 14101, 16216, 8288, 16412, 5596, 9406, 8210, 
185, 14821, 1481, 1539, 4631, 6833, 5102, 8986]",1,3,27,1978,0.7380940305320282, -"[2668, 3600, 4261, 14970, 574, 14647, 12812, 17801, 1416, 17399, 13934, 2788, 15138, 6903, 8476, 3136, 1033, 14495, 18159, 11449, 13525, 4826, 4939, 10714, 5046, 15845, 14502, 10885, 16051, 9229, 18354, 1423, 14822, 12126, 6510, 16770, 435, 10699, 1701, 8435, 13859, 8395, 18497, 4415, 11897, 8105, 1068, 13483, 7830, 13987, 1567, 10433, 9137, 9345, 18869, 13623, 6686, 69, 4077, 17819, 12685, 9879, 9911, 17105, 2229, 6274, 14519, 4152, 14269, 3287, 5324, 10691, 4878, 1390, 12913, 15385, 4150, 9617, 5596, 1316, 16541, 9805, 901, 7193, 11068, 5639, 11025, 16560, 4518, 15735, 5800, 9069, 5467, 1825, 6445, 5137, 5336, 484, 1478, 12979, 16576, 12233, 9419, 5941, 8824, 4308, 10087, 13451, 15377, 1683, 4752, 7113, 2414, 4281, 11474, 9115, 4914, 4708, 2019, 11333, 5513, 5497, 5463, 7500, 8147, 16075, 14626, 15137, 11082, 15601, 6310, 17848, 2206, 7823, 7619, 6555, 6690, 1119, 4560, 9366, 616, 10096, 2644, 10933, 18701, 18041, 14841, 16387, 13541, 3746, 2148, 14446, 18105, 8062, 9523, 11598, 14391, 3944, 2463, 14108, 18558, 14554, 18786, 17487, 12943, 12196, 834, 4094, 12455, 1887, 1628, 4156, 3281, 3485, 16468, 1573, 6656, 1546, 6184, 15278, 17782, 30, 9864, 3439, 9581, 15040, 15545, 818, 10193, 9965, 2623, 3052, 14640, 204, 12643, 951, 3501, 17982, 17956, 3888, 1919, 15118, 14061, 15218, 16456, 16597, 8221, 13759, 5946, 9365, 12431, 15268, 9938, 7404, 8293, 11407, 700, 8314, 3370, 19163, 14265, 3923, 14277, 251, 10114, 12125, 6149, 2929, 15945, 7128, 16344, 7854, 10171, 1574, 12502, 989, 5471, 8392, 11045, 15079, 8961, 2232, 11716, 1749, 18187, 10684, 14471, 14496, 10513, 5254, 17815, 465, 14533, 16419, 9846, 10431, 18878, 18322, 2281, 18502, 3713, 5036, 18655, 5244, 4522, 8608, 18095, 918, 15544, 19142, 19212, 14956, 8621, 16144, 19236, 389, 11340, 14797, 11495, 324, 4807, 5579, 18768, 11339, 16563, 9140, 6413, 17274, 2361, 4611, 4058, 5108, 8542, 16505, 17537, 12686, 5859, 1792, 2428, 1954, 
13364, 5661, 3342, 8950, 9322, 1063, 9698, 1205, 16614, 12750, 3517, 18475, 11145, 86, 88, 6267, 4977, 6343, 12415, 7580, 3082, 11360, 11052, 12582, 18906, 14810, 431, 10213, 17143, 12675, 18841, 2343, 8262, 19342, 5673, 4099, 15221, 3286, 19313, 2559, 17054, 5871, 12180, 5667, 14997, 4144, 2932, 452, 4538, 4680, 6763, 17072, 18700, 3190, 18825, 18816, 10960, 9602, 16841, 3256]",1,3,36,1978,0.6522290409291475, -"[4914, 7813, 15806, 6621, 7377, 19139, 6690, 14556, 12152, 9634, 8544, 13692, 668, 922, 15577, 17744, 16879, 9983, 11532, 18973, 9693, 9346, 11752, 6180, 5661, 11707, 7321, 6778, 12256, 6452, 2912, 2024, 18111, 5372, 4837, 18095, 5549, 7536, 12664, 12247, 18502, 16816, 4520, 18797, 15860, 18919, 12443, 12768, 16625, 12772, 5254, 3254, 8093, 14967, 17034, 7110, 14092, 13336, 4222, 2463, 16098, 6511, 8079, 15508, 14207, 10242, 7875, 12304, 7407, 8109, 10121, 13189, 13865, 4332, 1870, 3453, 14484, 17110, 9224, 9289, 14110, 18768, 17599, 16500, 14315, 9296, 13688, 6923, 17513, 2815, 4985, 14751, 2733, 17426, 14640, 16918, 17810, 4895, 10149, 10513, 5618, 1338, 13675, 3116, 3531, 4200, 16051, 6903, 887, 3285, 7, 19268, 4458, 10842, 17524, 5406, 9884, 18753, 18035, 7830, 19002, 11024, 8834, 6298, 5936, 11221, 15826, 10649, 18518, 9328, 11652, 7901, 7219, 2999, 2352, 19346, 14774, 4165, 6972, 12248, 8009, 8900, 4045, 18728, 6793, 19350, 5016, 3890, 1239, 13995, 16348, 7939, 18349, 11012, 5797, 13994, 2373, 18725, 1889, 6284, 12817, 3374, 7973, 11380, 9805, 19349, 7175, 15912, 2559, 18288, 4051, 592, 5926, 13723, 5752, 2528, 2929, 18742, 9470, 13799, 8987, 15852, 15664, 15571, 324, 10243, 8957, 10769, 5639, 11076, 9842, 18733, 1125, 17192, 18576, 12523, 17424, 1926, 9256, 1507, 15165, 4553, 2201, 1964, 5052, 15001, 12542, 16537, 13759, 8012, 13763, 4798, 6197, 307, 10429, 16541, 19005, 13580, 69, 1846, 13369, 1037, 1343, 62, 9891, 6898, 18289, 12178, 19361, 8272, 2997, 17689, 17388, 4581, 1245, 1240, 17869, 11660, 10596, 15857, 18825, 14553, 13466, 10665, 3352, 
2577, 6149, 14510, 407, 4680, 14288, 18960, 7829, 8184, 5180, 8141, 1224, 3944, 8483, 13633, 586, 3858, 12924, 14654, 1334, 2734, 46, 15477, 13646, 12839, 2563, 1482, 5089, 16965, 14227, 12876, 2039, 1540, 4261, 1204, 4161, 11121, 7243, 717, 3554, 10276, 2006, 9249, 15916, 3674, 15523, 974, 9035, 14265, 8851, 13829, 5641, 16054, 8133, 10455, 16637, 18449, 1180, 3501, 3132, 19088, 12105, 1919, 10051, 11052, 4775, 15960, 19017, 2460, 7507, 1363, 11903, 16886, 10919, 5584, 13441, 12027, 1568, 8405, 3984, 3869, 12569, 15824, 6794, 2346, 6598, 4733, 16552, 5763, 5227, 1810, 3241, 9766, 6873, 761, 15679, 1035, 877, 16733, 4765, 7194, 8894, 14699, 16438, 18150, 7398, 16318, 18487, 14650, 3664, 14737, 5515, 5568, 1377, 9731]",1,3,36,2957,0.7279504995688999, -"[15189, 194, 7992, 2359, 16869, 14835, 5311, 9001, 10899, 15571, 7518, 2337, 17295, 4846, 4214, 17198, 1568, 17300, 2722, 1257, 12036, 6663, 12235, 15758, 3461, 18891, 94, 5152, 18675, 10558, 755, 19178, 14618, 18659, 6417, 1371, 5892, 17756, 5749, 16775, 3151, 1448, 13939, 8449, 5579, 4811, 10110, 7869, 7979, 8992]",1,5,5,2957,0.6677486433027336, -"[806, 5324, 574, 11450, 18411, 1813, 1068, 1204, 16518, 912, 15663, 3577, 12643, 12036, 754, 14843, 6207, 10617, 9809, 620, 18276, 7553, 17054, 10123, 17530, 5144, 10665, 3756, 15639, 13325, 16777, 3649, 11472, 1430, 6733, 918, 12962, 15798, 8783, 14465, 2862, 14502, 8707, 13761, 4505, 1242, 6766, 2150, 8910, 1341, 16538, 17687, 99, 3162, 8018, 3541, 1733, 13475, 13999, 17130, 11044, 8460, 3734, 10446, 8302, 16772, 12103, 6815, 1544, 12075, 3140, 8447, 7258, 15857, 2344, 13272, 1484, 5618, 9438, 1144, 10010, 17739, 6952, 16521, 0, 17613, 17277, 4938, 12256, 17399, 10646, 2477, 11651, 2138, 2295, 11024, 15099, 17470, 14130, 4540, 1416, 101, 47, 6319, 16953, 6833, 6125, 19032, 16851, 16985, 6092, 8262, 2680, 12160, 10509, 1331, 2199, 7428, 13297, 14066, 18157, 9920, 12893, 3517, 1265, 16250, 8780, 9328, 6239, 11733, 826, 15024, 15743, 546, 7054, 2887, 11152, 12624, 10539, 
18573, 11729, 8748, 10938, 2759, 12972, 15031, 3745, 4064, 3886, 4846]",1,5,15,197,0.7435208195973019, -"[11456, 6501, 14008, 13312, 15765, 9574, 7092, 3754, 12955, 4817, 13089, 10754, 1881, 9806, 6884, 7293, 15965, 17275, 16955, 9193, 18357, 1635, 9646, 514, 2322, 16378, 2154, 13550, 12831, 13634, 16073, 11480, 625, 10746, 10471, 1809, 6869, 4261, 4961, 17194, 17537, 10372, 9249, 13677, 439, 16464, 12100, 7232, 12007, 10757, 19063, 12240, 14227, 2049, 17328, 11540, 15384, 14838, 9113, 17034, 19542, 8754, 5550, 11378, 17993, 7560, 2414, 15822, 6171, 16131, 973, 2130, 19689, 5843, 11358, 19074, 7287, 16826, 3968, 1088, 8317, 15945, 17162, 10359, 15727, 569, 8856, 85, 13996, 2441, 1885, 8727, 14970, 4611, 14652, 1612, 18501, 5784, 15841, 1412, 10287, 1954, 11912, 13325, 395, 3746, 18498, 263, 8715, 6000, 15250, 9882, 500, 6415, 5327, 2526, 6664, 14157, 16098, 2644, 11369, 12870, 10595, 3104, 16360, 17335, 13285, 8626, 9302, 10110, 19028, 17366, 1144, 11814, 5048, 14490, 8056, 7246, 7194, 7549, 12979, 13336, 350, 10083, 15448, 8017, 1582, 3683, 576, 6298]",1,5,15,2957,0.6664807019323427, -"[8271, 2112, 7080, 9041, 10118, 15180, 4608, 8623, 6488, 17732, 14620, 9247, 3188, 4238, 18337, 8893, 15975, 11239, 9956, 105, 13628, 15442, 18769, 11368, 16198, 9573, 13176, 15891, 5045, 7818, 12219, 10782, 9218, 18945, 13118, 6167, 3658, 4477, 14722, 3427, 12150, 3137, 6184, 4206, 11932, 8950, 9998, 4143, 2794, 616, 2356, 5261, 10184, 13928, 7752, 2891, 3965, 17739, 18461, 6568, 5048, 13924, 8510, 4911, 4411, 15556, 11217, 8980, 17937, 897, 2264, 543, 6869, 9148, 14911, 9456, 3377, 5429, 12825, 17722, 9960, 3302, 16820, 13503, 12617, 12876, 10594, 7809, 14515, 710, 7838, 17496, 16579, 12593, 12871, 13600, 18373, 13466, 1868, 15253, 12513, 5029, 17681, 13440, 8250, 16852, 15325, 19096, 223, 1163, 1315, 9579, 10977, 7149, 18487, 3535, 2565, 11733, 14618, 14075, 10276, 9520, 4848, 10123, 14834, 4553, 14997, 3082, 14456, 17250, 10211, 12340, 10045, 19003, 11894, 15664, 4296, 4663, 
8393, 14147, 5477, 16800, 10385, 18620, 5599, 910, 8770, 4366, 1683, 15544, 3111, 16136, 1576, 10433, 4834, 17551, 12483, 12799, 19002, 18211]",1,5,16,2957,0.7486940203884972, -"[11179, 18867, 766, 7711, 4281, 17533, 16664, 4421, 18824, 18404, 17687, 8822, 8862, 18898, 10087, 16703, 11809, 3207, 1425, 14624, 8753, 8005, 16613, 19151, 6336, 15940, 10258, 8624, 3719, 4932, 17281, 17892, 14391, 14277, 9626, 18897, 10728, 15434, 7130, 19294, 1467, 15455, 6936, 4893, 16177, 1842, 2207, 4164, 18630, 1179, 12592, 5158, 14315, 15966, 769, 12747, 12778, 11024, 10615, 7480, 12647, 1628, 15461, 4514, 2031, 6725, 13233, 8302, 18210, 2160, 14553, 7267, 12741, 9643, 13371, 11296, 850, 4906, 4261, 105, 9471, 2912, 6411, 12133, 18558, 6702, 13318, 16620, 159, 2344, 11142, 4624, 4402, 8060, 15824, 12776, 8834, 12564, 4976, 4252, 4262, 17499, 4427, 1514, 4520, 16965, 18497, 8890, 14973, 2880, 9181, 9118, 4828, 193, 16172, 7862, 6092, 14317, 15498, 6105, 9681, 11285, 17304, 15971, 14218, 16772, 13272, 10433, 9738, 15918, 17982, 11944, 16211, 8404, 5797, 8664, 7458, 12991, 7454, 10932, 4292, 14330, 18381, 16196, 17869, 11651, 10629, 2762, 9114, 15087, 18763, 8885, 5180, 4847, 19132, 2615, 8823, 10441, 9088, 10207, 611, 696, 5974, 12699, 4458, 14768, 11580, 10343, 46, 10662, 12019, 19091, 15826, 18430, 16717, 1398, 5913, 2898, 13882, 10854, 10216, 7554, 11454, 4833, 6510, 17659, 7311, 12877, 13863, 11450, 16654, 2983, 10282, 12763, 3191, 9652, 11949, 12876, 4865, 13746, 18884, 6733, 8372, 14970, 18926, 10213, 18623, 13263, 4411, 13305, 1687, 10065, 13555, 17518, 10781, 3249, 16460, 5052, 17966, 1972, 3168, 2858, 8280, 18809, 16295, 9414, 12726, 4904, 13766, 7151, 18957, 1118, 6778, 2933, 4602, 11372, 1720, 357, 6790, 1410, 11139, 542, 5430, 4942, 10720, 12416, 3374, 13189, 8602, 7768, 16152, 18928, 10088, 3135, 4048, 2625, 2700, 6043, 999, 8788, 12171, 13143, 8208, 7966, 14827, 12160, 11449, 1606, 7866, 12371, 11403, 21, 10120, 16477, 10474, 17760, 14019, 15386, 130, 8510, 4700, 18467, 
1272, 5157, 1321, 1482, 13483, 15242, 13441, 17902]",1,5,29,985,0.6968098595120962, -"[424, 11468, 14008, 13730, 6419, 16747, 18843, 13795, 7774, 5029, 14556, 18673, 15713, 468, 15916, 12422, 7103, 3716, 3601, 11009, 18967, 11084, 18856, 11526, 12734, 15479, 11866, 8426, 10198, 5500, 13529, 2278, 48, 12545, 12654, 17222, 563, 14988, 11595, 15989, 16284, 4581, 17493, 14110, 2098, 10475, 15406, 13940, 11442, 12027, 17219, 18472, 7852, 8445, 12164, 2982, 18294, 14356, 12502, 7154, 6541, 18600, 9802, 5934, 4854, 15019, 16641, 16787, 6598, 14284, 656, 11394, 9409, 7095, 9910, 9193, 18889, 16387, 10165, 18442, 6586, 1865, 2584, 8668, 7080, 18992, 4537, 13891, 18058, 7444, 2522, 4911, 7130, 1565, 16087, 749, 6018, 18113, 6099, 17299, 14699, 11469, 15756, 6440, 1172, 15390, 13632, 2882, 13525, 1463, 17816, 9130, 14107, 2116, 6048, 16597, 7923, 3376, 15562, 12944, 17341, 18494, 17255, 18078, 4927, 6262, 5809, 3930, 13682, 18124, 8919, 5018, 4142, 19140, 15993, 13708, 1043, 11597, 11764, 13828, 8352, 9040, 477, 11139, 8290, 5429, 4849, 18719, 7994, 16054, 2533, 11847, 9881, 18741, 18742, 7780, 19684, 18464, 1424, 18870, 2873, 9529, 15008, 12439, 8987, 14770, 9471, 15049, 17815, 7756, 13046, 6838, 7190, 13160, 283, 761, 13569, 18625, 12664, 12474, 10885, 948, 17001, 11957, 6069, 9491, 17755, 17819, 4144, 5843, 15320, 553, 13549, 10776, 15769, 7830, 6098, 14003, 13729, 17519, 13912, 7708, 11275, 8886, 16138, 5002, 15044, 787, 9885, 8038, 8125, 11206, 4546, 15811, 14756, 1271, 17874, 14670, 6628, 357, 5284, 12298, 9589, 19079, 7074, 12092, 687, 14637, 4522, 13774, 5784, 3573, 4638, 18823, 17820, 743, 6599, 16697, 144, 15823, 12052, 13189, 18000, 10723, 17872, 2571, 5968, 2110, 13364, 15864, 6437, 15470, 4095, 18150, 9649, 14341, 3241, 887, 2401, 14427, 10648, 16821, 13326, 8685, 7113, 2927, 2020, 2052, 16706, 18812]",1,5,27,2957,0.6661256783486331, -"[1239, 6344, 18701, 7191, 14654, 11264, 5336, 16552, 8144, 19129, 8916, 9568, 1954, 1612, 14897, 16869, 14502, 4152, 12377, 6125, 
18968, 17687, 1378, 3267, 3677, 8356, 5871, 18782, 16214, 6833, 17634, 17215, 324, 8445, 2037, 3104, 17429, 4564, 9325, 10568, 6073, 8778, 18224, 2852, 19299, 15742, 7716, 9634, 2383, 15916, 9864, 11881, 6201, 19646, 3287, 11497, 4541, 5497, 11672, 1057, 2526, 10309, 6180, 17485, 19097, 17054, 10121, 8715, 14607, 17015, 2854, 19490, 18442, 11859, 14420, 11467, 11332, 6174, 10457, 18769, 8799, 15920, 5467, 19486, 4298, 18819, 19404, 11761, 12972, 2428, 14272, 6375, 15377, 4559, 4553, 15096, 4534, 5421, 14542, 17192, 16579, 17554, 4989, 5095, 15067, 13471, 7780, 12340, 7382, 16378, 1204, 3331, 3286, 8687, 12479, 6092, 6614, 11111, 3146, 17034, 7326, 1889, 1428, 3153, 15360, 17129, 9270, 5734, 9346, 11776, 18035, 8483, 6488, 15826, 9687, 10742, 8435, 6039, 16770, 2623, 15188, 5859, 19002, 460, 15498, 14750, 13947, 13064, 2078, 7305, 8470, 5730, 11892, 15082, 17318, 16958, 4829, 3429, 5507, 8851, 1308, 14298, 12046, 16527, 18809, 18736, 15769, 6205, 15975, 16925, 11944, 8526, 4752, 13487, 147, 6273, 11142, 9523, 11178, 2226, 7465, 18786, 1544, 19020, 5930, 377, 12552, 18986, 11746, 5600, 3713, 17237, 8192, 0, 266, 12645, 926, 15189, 14076, 2909, 15804, 6969, 11482, 14069, 2343, 14751, 19106, 16402, 8621, 1792, 10215, 17689, 2929, 6164, 13616, 9515, 9769, 12160, 4340, 14445, 11024, 4911, 18783, 5618, 17415, 46, 11417, 14061, 11120, 10495, 16483, 4522, 13606, 4100, 877, 12789, 17339, 11456, 1800, 1708, 14512, 9576, 16772, 15577, 802, 4798, 3690, 10243, 7293, 17298, 17859, 4133, 3502, 6680, 2024, 9393, 11474, 6334, 9669, 10183, 8436, 10497, 3983, 7385, 4505, 284, 18754, 4895, 11449, 14633, 16622, 1641, 1246, 17538, 616, 2241, 15192, 8395, 4170, 2442, 14156, 7113, 7829, 314, 3082, 1640, 13083, 16462, 2080, 1887, 9007, 5804, 11450, 16650, 13859, 2673, 18733, 4449, 10782, 14492, 10513, 708, 7239, 17359, 15945, 7636, 7495, 6977, 18841, 5699, 16637, 6702, 18252, 13779, 18816, 7537, 618, 5329, 12771, 9963, 19045, 3600, 351, 6690, 8221, 16652, 14110, 11827, 12281, 5661, 1309, 
12979, 15239, 4371, 62, 7852, 974, 4099, 12884, 9814, 5752, 15325, 2019, 3136, 11372, 11049, 12327, 16380, 7301, 8750, 6733, 9033, 13846, 12680, 18871, 9846, 5598, 18487, 2229, 4717]",1,5,36,985,0.7131409443627327, -"[4149, 12799, 8459, 9042, 8062, 16214, 7813, 10441, 3839, 2794, 11539, 9040, 4916, 4826, 6770, 6097, 6197, 3309, 10097, 2131, 18816, 10231, 7844, 1573, 1086, 16805, 1931, 5552, 18797, 9864, 14688, 1325, 6914, 6898, 4058, 2184, 14566, 11574, 16770, 18687, 12552, 17487, 5596, 6048, 8050, 10433, 1841, 4732, 1301, 10431, 6798, 8022, 17811, 13171, 4168, 4112, 18576, 15952, 15452, 12967, 15519, 18874, 12121, 11957, 7293, 19544, 13217, 6035, 3180, 7, 5829, 2331, 5324, 2110, 16023, 18891, 8640, 698, 10816, 17399, 13354, 14886, 7481, 13369, 1517, 19263, 642, 17017, 892, 7321, 9197, 16154, 1683, 18464, 16002, 14085, 18768, 18637, 9814, 11780, 1304, 11782, 9092, 8483, 2343, 1072, 6402, 2352, 3026, 10504, 10680, 6184, 11720, 8470, 4222, 6022, 18710, 8390, 1272, 1300, 8335, 13278, 2659, 2725, 17594, 7261, 2612, 5058, 1637, 18776, 9485, 15118, 11955, 2200, 17415, 6662, 17937, 7953, 5422, 16751, 18821, 3419, 16500, 17645, 15199, 18317, 15639, 6160, 3201, 13235, 4514, 8392, 1966, 3281, 14618, 16656, 14317, 1053, 17634, 4998, 7087, 962, 898, 12019, 8509, 15853, 6201, 1515, 7080, 332, 377, 17028, 9665, 18233, 14644, 6148, 15278, 454, 4483, 19010, 18502, 6844, 440, 12628, 12955, 16943, 896, 14431, 14871, 4238, 515, 2302, 7915, 2579, 5095, 2028, 15235, 12659, 11345, 7326, 4961, 2936, 6725, 3005, 15777, 7254, 13925, 7588, 592, 5785, 15804, 18566, 6837, 7921, 10423, 18072, 5477, 3454, 12875, 2623, 19206, 9416, 633, 10456, 13606, 1474, 8862, 1145, 10038, 17402, 9456, 12804, 16645, 17194, 1036, 3671, 7670, 4556, 11496, 15124, 1679, 6228, 3410, 6255, 4261, 8983, 14141, 7854, 2552, 5386, 18655, 15513, 8664, 16940, 9579, 4356, 2185, 2, 11467, 15254, 901, 14336, 13325, 14212, 12002, 2061, 18620, 4283, 14284, 8331, 5210, 18701, 3093, 3122, 5500, 15891, 15081, 19129, 14535, 15498, 
1343, 18355, 5704, 16150, 17852, 2993, 19177, 2628, 13439, 11349, 7301, 1390, 13863, 8462, 2414, 19090, 301, 4200, 18945, 16430, 9482, 17520, 6352, 8447, 9347, 6952, 16179, 12483, 14666, 7534, 11886, 7303, 11049, 9586, 17496, 16332, 18682, 15036, 2041, 2680, 7981, 12827, 6335, 10464, 9041, 18396, 7073, 1568, 11738, 18827, 18461, 17938, 4161, 3126, 16626, 7872, 4511, 3339, 14622, 10425, 6242, 6334, 15464, 8685, 3743, 3895, 8584, 460, 766, 10952, 14881, 12327, 17835, 14110, 15615, 11672, 17974, 11491, 16794, 3146]",1,5,36,2957,0.7654308464776589, -"[18736, 15769, 16051, 996, 901, 16383, 6019, 12789, 11449, 11390, 6211, 12706, 15711, 4911, 15189, 10196, 4811, 8969, 17082, 5500, 12205, 18743, 2983, 926, 156, 19174, 12457, 8364, 4450, 4045, 10805, 13846, 17629, 4581, 5375, 4427, 9592, 1475, 9775, 1087, 17034, 11674, 4120, 3244, 1821, 5639, 10350, 3453, 12431, 4482, 357, 12142, 12219, 9808, 4479, 10300, 8762, 8093, 6508, 18035, 5859, 19171, 9798, 6276, 5449, 7366, 2338, 1905, 7358, 9565, 11840, 3721, 14484, 3753, 8561, 5665, 14301, 1272, 5336, 19185, 3082, 3201, 16017, 4895, 6779, 3885, 15118, 15943, 8509, 125, 3177, 7518, 9751, 11989, 16560, 16582, 6540, 16342, 19101, 18847, 15929, 5497, 8392, 4214, 5507, 18580, 518, 2331, 7463, 1640, 5763, 15031, 6298, 4918, 6247, 6209, 9864, 5555, 4133, 18754, 11641, 14147, 14556, 12193, 18487, 15841, 13996, 8730, 7704, 10796, 13400, 13442, 16305, 3565, 1678, 616, 3531, 2352, 1828, 2594, 11407, 17017, 3663, 13354, 16871, 4261, 4016, 14420, 17724, 14857]",1,7,15,985,0.7559466450271339, -"[3754, 3831, 17266, 6798, 4791, 15031, 8089, 407, 14980, 10461, 2660, 14821, 4222, 18942, 13261, 14194, 1239, 14057, 6710, 7797, 12459, 4822, 10422, 16033, 7225, 13374, 2510, 11674, 9579, 5605, 7130, 6778, 18624, 8926, 17146, 9178, 1901, 6903, 6568, 16967, 17859, 18869, 5150, 2653, 3518, 16513, 9576, 12002, 2004, 15448, 6659, 2981, 7504, 869, 15455, 6183, 6628, 18736, 18957, 18242, 12747, 4907, 8788, 18888, 6735, 5596, 14484, 11858, 3405, 15857, 15698, 
18789, 16729, 9471, 12924, 10373, 5670, 5288, 11834, 3400, 19125, 4650, 1833, 13756, 16843, 9402, 2211, 8623, 11321, 7847, 5090, 7774, 13143, 973, 2112, 5058, 9614, 11580, 1690, 18100, 4908, 16882, 15405, 8002, 8221, 1576, 11642, 17455, 18825, 19155, 8128, 14758, 10744, 17197, 12281, 4478, 13441, 16250, 13876, 11752, 18023, 15982, 15531, 766, 14525, 658, 12734, 9921, 5268, 15919, 19181, 3052, 543, 6867, 13256, 6296, 3575, 16444, 1142, 2337, 3039, 77, 4193, 18230, 6599, 8626, 17191, 12926, 3670, 18317]",1,7,15,1978,0.7633514226302176, -"[4222, 9473, 1843, 15189, 11142, 15664, 3273, 18942, 13597, 8497, 130, 15961, 8857, 12332, 12558, 9440, 12340, 18673, 4546, 8629, 16918, 9490, 16869, 13304, 10180, 6510, 10185, 17869, 2880, 6077, 10690, 4086, 5178, 8012, 16839, 4393, 7296, 18211, 12160, 18967, 11468, 14008, 3848, 656, 19437, 18033, 7273, 9332, 17684, 17933, 9046, 14108, 10280, 2753, 13466, 8135, 856, 3929, 16778, 1574, 1565, 4336, 15571, 10919, 5224, 1086, 9083, 15266, 15377, 10301, 18367, 4694, 13876, 5964, 3858, 338, 4821, 15857, 13717, 3757, 579, 10663, 18894, 11188, 16672, 3073, 17932, 14044, 735, 19163, 15819, 16501, 7226, 17573, 6689, 17298, 8426, 3673, 10377, 3052, 15467, 17124, 1395, 1558, 8291, 12738, 12974, 6243, 4513, 7444, 15557, 10265, 17524, 18814, 11580, 1868, 3018, 4400, 110, 410, 1062, 17493, 2154, 3695, 13270, 6306, 5565, 16211, 16772, 15919, 5307, 7666, 9275, 1774, 7243, 3540, 18558, 696, 506, 15823, 13620, 8024, 12673, 18347, 13577, 10225, 6947, 5715, 6419, 13460]",1,7,15,2957,0.7634021402850332, -"[17420, 6715, 8204, 302, 2987, 10846, 9723, 10638, 14886, 14603, 377, 1144, 2783, 10385, 13307, 9782, 3600, 8822, 1388, 7091, 6224, 11271, 7584, 12550, 11539, 14535, 5859, 7400, 1286, 9634, 3124, 13972, 17003, 5872, 5882, 16443, 12371, 8131, 11262, 1507, 4422, 14810, 4980, 14212, 6684, 13325, 14525, 642, 19049, 7326, 9794, 13123, 4984, 13630, 15480, 3285, 6628, 7377, 13143, 14796, 10775, 16131, 9516, 1274, 6789, 5019, 5463, 16593, 15986, 18891, 16415, 
12880, 9173, 4099, 3658, 7621, 11869, 10816, 1945, 8924, 3232, 116, 16650, 14801, 4708, 1881, 3461, 3677, 105, 9644, 17025, 15639, 6513, 2430, 19628, 12281, 1568, 17965, 2668, 15542, 6048, 17019, 8753, 3218, 9256, 16483, 19002, 17380, 19102, 18747, 17858, 3187, 11746, 7272, 3284, 5402, 11443, 4752, 609, 7720, 15312, 905, 10268, 6201, 7239, 18793, 3691, 11099, 4653, 5567, 14218, 253, 18351, 18368, 12060, 150, 12876, 5730, 5324, 5048, 15032, 11024, 14896, 19171, 8055, 16645, 2948, 1416, 7108, 5515, 16681, 15841, 19376, 1425, 7971, 6794, 4696, 13994, 11073, 618]",1,7,16,2957,0.7109600852056601, -"[16780, 16330, 5500, 8817, 1425, 11170, 5198, 4849, 14045, 18145, 7475, 3207, 4402, 9864, 18739, 4238, 5946, 5336, 12811, 6467, 1144, 324, 8295, 17518, 17911, 3983, 8664, 10441, 5497, 3244, 8454, 18374, 8435, 17025, 18736, 9424, 14044, 7193, 5021, 7388, 717, 4298, 18733, 17859, 13723, 19097, 17072, 10796, 11839, 14525, 4788, 2148, 11755, 14212, 2184, 12327, 10451, 3365, 11390, 804, 3634, 18702, 11574, 17054, 641, 10264, 17872, 15542, 18897, 951, 12281, 9390, 116, 11977, 18652, 11470, 112, 2039, 18720, 7584, 18728, 4084, 12337, 12440, 17965, 10471, 8862, 17687, 8037, 11478, 5859, 370, 7084, 4932, 5132, 8221, 15639, 11894, 11831, 12326, 5934, 8894, 1046, 3239, 8125, 6412, 1954, 15866, 16416, 12431, 3017, 19129, 11822, 7895, 1909, 7210, 1806, 12576, 16552, 18793, 18774, 1767, 15841, 10433, 5048, 4578, 1304, 8445, 16415, 11641, 9890, 18787, 12789, 17026, 16101, 3082, 8730, 3577, 9819, 481, 3885, 8093, 918, 3043, 16626, 4980, 3984, 9846, 15416, 13630, 15156, 204, 1567, 253, 17246, 1330, 903, 6703, 12660, 14640, 1416, 9723, 357, 16579, 4581, 18816, 4917, 754, 7836, 6789, 5307, 1068, 6972, 7780, 5604, 3084, 3267, 2794, 12628, 15405, 15824, 17982, 14699, 18745, 11178, 16305, 7871, 4262, 964, 3307, 3721, 4752, 16024, 16214, 4436, 2929, 11746, 9523, 7899, 14556, 5355, 13623, 5402, 2414, 1055, 16268, 72, 13859, 4625, 5054, 16965, 15605, 88, 9978, 4252, 6134, 12085, 6073, 12592, 6312, 
13466, 7305, 962, 981, 16904, 12763, 16772, 4421, 12046, 1118, 9256, 5906, 440, 11140, 10621, 3440, 11262, 15916, 13606, 9548, 16703, 3603, 19144, 15498, 1560, 9649, 18625, 16645, 18762, 13703, 2577, 5752, 15663, 5104, 3805, 19005, 14031, 371, 3942, 18986, 872, 15851, 14603, 14586, 10385, 11958, 8972, 3713, 6048, 4895, 11054, 5095, 17756, 16541, 840, 17056, 8115, 11951, 3493, 15561, 3146, 17409, 18747, 17228, 5804, 9814, 18043, 1582, 8250, 6511]",1,7,29,1978,0.7867322615002282, -"[9081, 17542, 969, 19044, 834, 6243, 8529, 1938, 18931, 3380, 16631, 7830, 18645, 16717, 2854, 18768, 10846, 14502, 10406, 4239, 235, 14886, 13882, 9943, 10824, 5471, 4281, 814, 9173, 1056, 17122, 15513, 6443, 7174, 2841, 4511, 18748, 6197, 2563, 1841, 15783, 18464, 17105, 12664, 7326, 16794, 12553, 9921, 8987, 15826, 10601, 16869, 13616, 12192, 2292, 2281, 12160, 14244, 4707, 1582, 18840, 12960, 1761, 3531, 10744, 14632, 3346, 16527, 19447, 1338, 3800, 11762, 8148, 8017, 8823, 7819, 1475, 12238, 12618, 8210, 17954, 14246, 7470, 7636, 17298, 3855, 4908, 3399, 17811, 3331, 2919, 4497, 8372, 17409, 3567, 6368, 5737, 1835, 9227, 5898, 17487, 12726, 3869, 13151, 112, 15851, 13000, 8318, 3207, 1567, 9026, 19114, 16365, 16051, 18587, 12150, 16213, 10420, 3126, 6790, 1893, 13241, 432, 6448, 5508, 15885, 5872, 18819, 7836, 11186, 15207, 12768, 3683, 2406, 2132, 18995, 16355, 19079, 13983, 7956, 15841, 12593, 10595, 10174, 3350, 18900, 612, 1588, 2460, 962, 721, 4914, 15973, 4984, 9326, 16569, 8566, 5752, 10742, 7221, 10216, 3032, 8037, 7080, 15188, 8851, 4893, 1360, 16789, 4924, 5568, 3665, 15627, 7117, 8698, 6795, 6609, 4811, 1493, 8621, 10862, 16692, 84, 14912, 1233, 3281, 7142, 3017, 19028, 12839, 10193, 3481, 180, 16026, 6040, 11052, 11142, 8227, 12967, 18774, 168, 13793, 17780, 18224, 10006, 14375, 14334, 12882, 12580, 4274, 8248, 15277, 925, 17513, 6174, 12256, 13846, 9687, 1137, 13203, 8152, 12678, 18755, 4902, 12060, 5104, 351, 17773, 11288, 759, 4186, 17339, 17625, 5137, 10036, 11674, 
2835, 221, 13705, 16776, 18072, 16154, 12643, 8816, 7191, 17418, 8359, 8803, 8440, 15278, 15189, 14609, 9319, 11016, 1345, 9890, 17810, 5899, 19164, 14442, 9813, 2532, 18751, 7219, 5108, 6488, 18199, 18841, 16471, 4980, 15329, 8117, 9893, 18043, 440, 9712, 13305, 1544, 16121, 13595, 3831, 15506, 18839, 13215, 7793, 3187, 16345, 17639, 19074, 3695]",1,7,29,2957,0.7529035857381955, -"[2735, 14442, 7971, 18787, 13285, 10992, 18563, 13723, 1402, 11816, 4988, 40, 3135, 3642, 4393, 9983, 1316, 782, 12823, 4520, 17717, 19004, 18899, 4556, 18743, 18, 7981, 9670, 10512, 16582, 8472, 7443, 11655, 4932, 3309, 17662, 17724, 7851, 8109, 6833, 1568, 13052, 431, 19342, 3130, 11370, 928, 16543, 15940, 14053, 14379, 9687, 5104, 10165, 2117, 1749, 15045, 16866, 14768, 373, 4294, 10455, 7895, 3617, 8165, 9173, 18767, 10754, 5177, 15520, 15561, 15006, 17439, 15661, 15038, 11460, 2460, 10262, 8364, 1903, 3249, 2038, 5058, 4912, 10210, 2974, 3534, 10545, 1056, 6034, 2753, 12918, 18080, 2549, 5202, 14108, 9548, 1456, 14566, 3232, 15218, 8250, 8763, 833, 7862, 12002, 440, 13616, 1046, 9884, 3239, 12413, 15239, 1720, 3054, 15153, 14452, 6510, 892, 15176, 15087, 12201, 6442, 8770, 4179, 14168, 10915, 4891, 6704, 10431, 19089, 9608, 14991, 19074, 15121, 12392, 8980, 12306, 16125, 7407, 15952, 5596, 11795, 6715, 18823, 7720, 9366, 16896, 4902, 5581, 4131, 8823, 10096, 9911, 12646, 3429, 4363, 1544, 15615, 3876, 13474, 18093, 15122, 41, 5750, 18381, 13400, 3746, 10808, 8584, 10258, 4007, 12238, 5294, 12152, 10629, 9181, 4917, 7272, 14591, 4895, 15997, 10558, 18252, 18199, 13595, 4879, 2561, 1973, 3285, 6393, 6950, 15210, 18739, 10631, 18573, 804, 14900, 14345, 6064, 11536, 7778, 1581, 2559, 4340, 95, 18811, 1205, 10919, 7831, 4421, 18889, 625, 5340, 11082, 5640, 14218, 10818, 7712, 8483, 10149, 13306, 16839, 884, 9634, 5051, 13447, 15201, 12944, 5578, 14149, 3387, 253, 16703, 19217, 297, 18928, 2644, 6497, 14006, 11478, 4443, 8229, 4261, 17328, 16216, 13215, 11674, 1840, 10131, 481, 18404, 
1835, 3846, 18610, 1425, 11615, 1407, 6466, 16250, 11331, 3640, 10215, 16945, 4238, 3168, 18145, 11456, 9669, 17729]",1,7,27,2957,0.7381447481868438, -"[8696, 7891, 8816, 18281, 1678, 5544, 5784, 4158, 14566, 7653, 2044, 14997, 1591, 19026, 1145, 1640, 6167, 10063, 10773, 4837, 18956, 13779, 5464, 16057, 8537, 8509, 13668, 2361, 6812, 16444, 14827, 4221, 5987, 18751, 6706, 592, 3289, 15826, 11215, 4218, 3168, 7482, 6994, 2110, 10037, 10010, 18276, 17071, 11224, 4238, 17253, 15971, 5943, 12189, 5374, 10513, 17198, 10953, 18762, 6048, 17723, 616, 7194, 8746, 5538, 14361, 2255, 18241, 17375, 17809, 3886, 6303, 7531, 16179, 1331, 2369, 3365, 7092, 13999, 16118, 10929, 3946, 18773, 7491, 8907, 3457, 8727, 16538, 1485, 2747, 1484, 2392, 19074, 7283, 5288, 19710, 4505, 2405, 18920, 646, 62, 12445, 5567, 13972, 14912, 10165, 12885, 6902, 2882, 9669, 3683, 12075, 2579, 2197, 47, 14971, 2298, 15853, 10882, 1887, 13623, 16295, 1038, 7116, 1182, 16462, 769, 6874, 2862, 1085, 718, 99, 15663, 19532, 2892, 7, 17025, 4893, 18973, 2557, 10702, 18713, 17603, 6972, 8700, 8362, 4077, 13757, 3688, 5500, 13203, 15342, 15307, 6207, 1363, 13604, 8516, 10977, 5141, 7939, 8280, 13142, 7617, 3469, 11170, 7293, 14036, 752, 4099, 14648, 10401, 6787, 14699, 5102, 17399, 8317, 14886, 5812, 12664, 1837, 15510, 11061, 8644, 4774, 16749, 3187, 11604, 17246, 14866, 3677, 17584, 3141, 18161, 1614, 14663, 2755, 7511, 4042, 12249, 2165, 10081, 11248, 860, 19596, 14593, 3044, 4748, 11375, 9508, 5906, 12126, 439, 7492, 13819, 10757, 11390, 5029, 15529, 3384, 14817, 822, 7302, 15923, 973, 11372, 7861, 3632, 6727, 820, 17215, 7729, 10353, 4518, 12603, 3136, 2992, 12413, 18806, 1046, 16944, 1832, 11450, 754, 9853, 18696, 11311, 13952, 3721, 13317, 289, 7346, 19408, 319, 12680, 14412, 4987, 19698, 16802, 3587, 7189, 6266, 6833, 18911, 9184, 7889, 12356, 14821, 13550, 9769, 9197, 12953, 2262, 6815, 16870, 5730, 3146, 1204, 4956, 13018, 2767, 4817, 12875, 13149, 2951, 12679, 7230, 13294, 542, 8093, 4689, 
17634, 2304, 9040, 3641, 1401, 3577, 16339, 9917, 15333, 15003, 17683, 1416, 16770, 17034, 1425, 7369, 4060, 16777, 10065, 220, 9775, 13250, 287, 2888, 2389, 18009, 16776, 14869, 7091, 18568, 18531, 16412, 5430, 16443, 17960, 4024, 17019, 15292, 1037, 12146, 7780, 13103, 19300, 18086, 6715, 5002, 2192, 619, 3324, 10915, 5720, 8446, 2140, 358, 2561, 11370, 2005, 10526, 12307, 4415, 1265, 7136, 3840, 10842, 5887, 18951, 16989, 1257, 7596, 16629]",1,7,36,1978,0.7700968707206979, -"[10649, 13121, 926, 8454, 5500, 14973, 5839, 1484, 806, 5618, 14021, 16305, 1068, 11453, 5356, 523, 2170, 3017, 3337, 4653, 4840, 4556, 12001, 1568, 3570, 11366, 12632, 14265, 17025, 2460, 9390, 12326, 4150, 11442, 5997, 9568, 6314, 5102, 15003, 18895, 16904, 16544, 1361, 15891, 15189, 5497, 2389, 13879, 5470, 5002, 15916, 12192, 9173, 5614, 7518, 6684, 3207, 18242, 18411, 11716, 9041, 1789, 14483, 15841, 1683, 14987, 9816, 7326, 16582, 18742, 9463, 689, 937, 1462, 7118, 11449, 3137, 15682, 3667, 8192, 16318, 2150, 13086, 16989, 9921, 16778, 2239, 17198, 8727, 5596, 14608, 18898, 6722, 16747, 11408, 15542, 18227, 16869, 10573, 1144, 16770, 4939, 12088, 15513, 9965, 12246, 4546, 12229, 754, 2343, 1325, 10595, 8138, 1840, 12836, 15570, 6690, 6324, 17542, 5324, 6611, 7313, 10350, 4058, 9723, 10204, 10165, 17354, 3082, 4051, 11390, 4964, 14763, 5273, 6861, 13285, 17781, 13174, 13241, 5265, 894, 18957, 18926, 4038, 14754, 12256, 5029, 9328, 6781, 3054, 5564, 54, 17437, 6118, 820, 3983, 18728, 9073, 15024, 1889, 7388, 7475, 15990, 3731, 3165, 9289, 13083, 12680, 14900, 13677, 2529, 19082, 3185, 1103, 19376, 11059, 3631, 2128, 2670, 12324, 7711, 7180, 4932, 12979, 10243, 8748, 2994, 17590, 11678, 12431, 2281, 16652, 4820, 4021, 10088, 3239, 8576, 18440, 12664, 4984, 5871, 9808, 17697, 8435, 10443, 18745, 2134, 13882, 12178, 9040, 9920, 1839, 185, 10544, 15884, 17034, 15207, 7297, 14609, 7716, 13746, 645, 4882, 994, 12514, 7, 7021, 18754, 5872, 15806, 2031, 13383, 8608, 481, 17328, 18748, 0, 3634, 
6273, 14333, 12789, 912, 1551, 12238, 2915, 15278, 17190, 5790, 13305, 18497, 14438, 2020, 14386, 15356, 15945, 13934, 6790, 10699, 8894, 17392, 3126, 8127, 15840, 1585, 7939, 4002, 6543, 372, 1954, 14036, 11816, 810, 18299, 3734, 6815, 5104, 13582, 11054, 19273, 4477, 5458, 3307, 3911, 8567, 963, 11357, 14688, 19343, 14061, 4914, 1972, 17192, 816, 5690, 15522, 17518, 8962, 8153, 14300, 6501, 11369, 8282, 12780, 3757, 15780, 3942, 242, 8923, 17387, 14622, 3104, 5898, 18521, 16355, 13908, 12976, 2613, 5858, 2000, 7636, 13399, 17026, 8750, 2790, 6832, 3281, 6411, 15377, 5963, 11374, 8823, 2148, 18436, 10425, 12150, 5152, 2506, 7829, 3540, 6298, 12312, 14388, 11814, 13551, 2477, 9649, 6239, 4281, 1085, 6036, 5968, 8482, 14070, 159, 14578, 11586, 7015, 2869, 18162, 8762]",1,7,36,2957,0.7732413653192677, -"[18728, 11524, 14453, 18768, 13525, 14305, 15826, 13633, 2406, 12991, 17911, 15025, 8795, 17399, 630, 8701, 7736, 112, 16378, 14008, 8915, 10401, 7185, 12628, 5089, 8544, 15663, 258, 13364, 2049, 16650, 6544, 8342, 12457, 9587, 3901, 1683, 11957, 9873, 1395, 1063, 12152, 16989, 7780, 16625, 9014, 2708, 18736, 4788, 957]",1,9,5,2957,0.693868236, -"[12797, 5098, 16274, 14857, 6819, 19176, 8783, 13325, 18221, 12075, 17538, 620, 18224, 1896, 2039, 13723, 2862, 17139, 10304, 11262, 17392, 16757, 18455, 3743, 4813, 16996, 3440, 8621, 11524, 2644, 13845, 3517, 9256, 14920, 142, 18786, 12304, 4773, 1144, 18956, 8795, 3803, 15975, 10441, 12442, 11733, 13013, 11776, 14915, 9033, 14563, 16451, 10243, 15712, 592, 9821, 1416, 7114, 266, 7118, 12518, 12036, 3942, 18870, 2579, 9351, 3468, 18765, 13471, 17415, 19029, 14608, 8192, 17357, 12706, 6706, 11349, 6169, 8910, 8263, 15325, 17878, 9568, 18966, 8103, 9687, 1379, 11084, 2776, 3405, 14843, 16579, 370, 1813, 15031, 9490, 17307, 14727, 6969, 11905, 14987, 3702, 14760, 7039, 12741, 11017, 7745, 12995, 440, 19002, 6204, 2071, 14792, 16489, 9328, 8221, 3301, 17649, 11708, 17558, 7311, 4301, 11932, 1424, 19621, 2505, 6077, 8426, 15277, 
78, 12092, 7752, 8708, 15546, 12915, 17701, 10383, 3333, 9218, 6961, 13261, 6527, 7774, 746, 11898, 6347, 18630, 19701, 7370, 41]",1,9,15,197,0.7226758634680732, -"[3587, 645, 13668, 1539, 3800, 18911, 963, 9193, 17026, 10845, 15888, 11762, 5943, 5054, 15612, 62, 3136, 3632, 17019, 13203, 19049, 11440, 14410, 2110, 4073, 7073, 5051, 18159, 17056, 12550, 13261, 17399, 18816, 10397, 4077, 14163, 6798, 9980, 17809, 14284, 4837, 7302, 11262, 11084, 2134, 3279, 37, 16378, 16272, 14274, 7901, 6445, 16552, 8293, 16104, 572, 1813, 1868, 15377, 6812, 18590, 17719, 13146, 16579, 1654, 8741, 15597, 18632, 15462, 18967, 4431, 14317, 5812, 13937, 9523, 14279, 7484, 15324, 2992, 1887, 10896, 4914, 2549, 4912, 6488, 4238, 14036, 3524, 14997, 11302, 18762, 15100, 4181, 16777, 14566, 8356, 17780, 18771, 14945, 4870, 17198, 18956, 16229, 5440, 3645, 10537, 13844, 12019, 11687, 9097, 5635, 11215, 3207, 16057, 2202, 17859, 12075, 5328, 88, 17366, 18768, 14320, 5102, 11449, 783, 4932, 5513, 10285, 14654, 16958, 15721, 5661, 7711, 3386, 5431, 18883, 5541, 5467, 16770, 3864, 14879, 4445, 6004, 786, 5348, 13549, 6637, 16086, 12205, 4415]",1,9,15,2957,0.7746107419992899, -"[8730, 3923, 5639, 9846, 4930, 3082, 16250, 19182, 14484, 7776, 4112, 1683, 16443, 962, 18362, 11039, 2836, 8293, 2229, 15841, 62, 9257, 1246, 1889, 13525, 12027, 1794, 6690, 3677, 11269, 15483, 18717, 3286, 5800, 574, 3971, 12019, 9669, 16514, 17538, 9808, 9477, 263, 5029, 2024, 1612, 18957, 11390, 4356, 18793, 5795, 16086, 7495, 15789, 16789, 2870, 11814, 5081, 18754, 14397, 4477, 15766, 616, 16650, 18728, 1954, 17653, 17603, 6820, 9416, 3488, 14147, 18942, 5402, 11944, 12884, 3136, 15003, 16462, 3453, 1054, 546, 5336, 7445, 15384, 986, 9070, 17267, 3137, 733, 12478, 1242, 4750, 4553, 17164, 204, 17415, 5497, 19393, 4961, 13996, 4522, 340, 3195, 13400, 1640, 13251, 4964, 7560, 2428, 10513, 2992, 18816, 10245, 3287, 9493, 3099, 17631, 2821, 10270, 15784, 14857, 11262, 23, 351, 17387, 17242, 822, 2343, 2463, 8316, 18773, 
11894, 14912, 19002, 3307, 6952, 7716, 11346, 14796, 5147, 510, 10948, 4392, 2970, 18224, 3577, 6201, 13751, 16535, 129, 2089, 12882, 9758, 8372, 918, 18817, 10227, 14237, 14647]",1,9,16,985,0.6662271136582645, -"[7481, 14317, 18945, 18874, 6798, 2612, 13325, 12799, 8862, 8390, 9416, 12142, 4238, 14284, 12483, 7080, 2725, 7254, 1344, 14409, 4514, 18701, 406, 19263, 10575, 16150, 10952, 2302, 17034, 13261, 14622, 7981, 8483, 1567, 14997, 3141, 515, 11731, 8696, 7135, 616, 11524, 14515, 13716, 17194, 17415, 12875, 18862, 12628, 1679, 3674, 13606, 4965, 8447, 15833, 7780, 14722, 3306, 5058, 7534, 16775, 4818, 1246, 7924, 9041, 7130, 11740, 18827, 3093, 18956, 14544, 15856, 9346, 3743, 16751, 3126, 1456, 16179, 4445, 5102, 9103, 13928, 10184, 3671, 6181, 2680, 1086, 18765, 8089, 3458, 10617, 14212, 16533, 11372, 12373, 14431, 16462, 3753, 6952, 1597, 3146, 11585, 18682, 12246, 5045, 1901, 10961, 18040, 18233, 17496, 14141, 2211, 8331, 5500, 6362, 7261, 8893, 194, 6352, 1761, 13369, 5618, 3895, 4490, 3609, 2936, 440, 10862, 8640, 13383, 7966, 4864, 15081, 12075, 8271, 17645, 10348, 17291, 17911, 18519, 7872, 19217, 937, 18461, 14481, 11750, 18911, 8335, 4024, 16692, 4026, 17223, 17198, 7797, 10572, 16538, 8221, 5946, 15377, 6509]",1,9,16,1978,0.7627428107724299, -"[7899, 2127, 1221, 15742, 17832, 7518, 4193, 7194, 4058, 14265, 4421, 19490, 7295, 12771, 2532, 5102, 8077, 4534, 11827, 16131, 7861, 1309, 12109, 10616, 4914, 18718, 5212, 4726, 15427, 9574, 9902, 3558, 16802, 16380, 19079, 4714, 1962, 6015, 8455, 16285, 19044, 4541, 9532, 17952, 11288, 8006, 17266, 6706, 8799, 12193, 9794, 1581, 10904, 15510, 12249, 12126, 8877, 17547, 15965, 5483, 9963, 11168, 3531, 552, 1539, 17191, 14821, 11762, 11299, 17584, 2943, 19005, 6133, 4315, 14531, 16771, 18891, 6158, 6631, 4416, 18920, 13760, 7054, 186, 4399, 4077, 11049, 10998, 2322, 18882, 15864, 17485, 13306, 12790, 2659, 16203, 4402, 10801, 5536, 11732, 6231, 2313, 18161, 16079, 13475, 2862, 4826, 13389, 9657, 14005, 7481, 
7568, 13668, 15649, 116, 7137, 9163, 15027, 619, 19492, 8454, 12001, 12775, 16397, 5762, 4038, 8447, 7465, 478, 5549, 284, 12411, 15152, 2972, 14281, 6375, 880, 10036, 12095, 11067, 9711, 2031, 19149, 17461, 18341, 3365, 9935, 14654, 9372, 16870, 10372, 7586, 5579, 5906, 5652, 17951, 19174, 5907, 11863, 1425]",1,9,16,2957,0.6976213419891464, -"[5921, 6764, 3567, 4152, 16958, 18733, 4411, 18767, 3249, 6039, 12561, 15305, 9167, 7407, 2050, 11177, 15167, 12262, 6992, 8476, 15155, 10516, 17187, 15377, 16078, 17443, 12673, 294, 2098, 6149, 1375, 804, 14654, 2983, 16213, 6650, 11052, 3190, 11717, 3168, 10572, 3956, 17012, 963, 16211, 10412, 15189, 15940, 6680, 14234, 5174, 7277, 7180, 11464, 8089, 3831, 13925, 14801, 12502, 17701, 1043, 99, 13008, 3543, 18808, 12793, 1139, 2052, 6144, 19198, 16342, 17506, 8109, 865, 9418, 16527, 5968, 6141, 2422, 15577, 8104, 16513, 7311, 11142, 6901, 8062, 5568, 18816, 10265, 13726, 768, 630, 1692, 10279, 7862, 12019, 3805, 5604, 18768, 11894, 4912, 7778, 9989, 15989, 18841, 13863, 12643, 17025, 16144, 14786, 18701, 18957, 8498, 5596, 7579, 12425, 12924, 1475, 6838, 8348, 14371, 18906, 2708, 3496, 18753, 9007, 948, 17080, 8090, 17824, 16637, 999, 10204, 16703, 6142, 15571, 8823, 1410, 15562, 1025, 18600, 3451, 10397, 4141, 12416, 4514, 16625, 6555, 1478, 4828, 11536, 11499, 15141, 7445, 2571, 4849, 1290, 16891, 7323, 1887, 9397, 12462, 15325, 13997, 10717, 11110, 15103, 8544, 19577, 10005, 5054, 4181, 14077, 8263, 13595, 1753, 13840, 8728, 18186, 2947, 19565, 1574, 3713, 12083, 2441, 8024, 16389, 13241, 12710, 4251, 12979, 18430, 5964, 3017, 7399, 15735, 6702, 417, 13263, 9203, 9587, 16088, 10796, 2669, 6569, 17661, 17019, 14051, 13157, 13934, 19442, 10976, 8998, 15919, 6656, 611, 18715, 4246, 6725, 15591, 15882, 15813, 14069, 6934, 1316, 4963, 14652, 5440, 11938, 10308, 6411, 2232, 64, 11396, 17285, 3014, 4647, 17159, 18019, 8608, 8290, 5104, 18245, 17819, 16384, 3582, 1118, 5891, 15122, 5569, 1570, 11288, 3279, 13162, 10770, 6904, 
16541, 150, 17744, 14627, 11409, 1213, 12280, 12298, 1602, 9568, 9081, 3531, 9173, 18221, 7304, 10749, 13512, 13257, 5610, 19236, 16519, 6903, 8105, 2213, 435, 12152, 4821, 16295, 13558, 18080, 13828, 2217, 5494, 6184]",1,9,29,1978,0.6620175483085662, -"[5866, 10575, 6949, 2759, 17917, 3162, 14271, 18050, 6215, 2509, 18766, 10389, 7992, 12649, 7269, 9456, 5716, 10857, 3374, 4923, 9186, 10847, 11810, 12075, 12148, 6833, 4938, 10340, 531, 1813, 15546, 5374, 7295, 2390, 2337, 2862, 13261, 13325, 714, 16727, 8335, 206, 8783, 18911, 10386, 15853, 2148, 14632, 8610, 2841, 11450, 4154, 18956, 16136, 12769, 7827, 18018, 5513, 2954, 5523, 7861, 1690, 13896, 17297, 1069, 9729, 10055, 11610, 4682, 11312, 5772, 11024, 2, 896, 15240, 8920, 8864, 18297, 4996, 5540, 2425, 14827, 9965, 17909, 3566, 19194, 14671, 5890, 18893, 1430, 11188, 913, 13037, 9685, 5944, 5445, 18859, 9004, 9482, 16031, 2422, 12893, 6, 18467, 9264, 18350, 11637, 8632, 8576, 16538, 17265, 1614, 13167, 6120, 7979, 1675, 268, 7521, 15095, 11746, 6085, 1752, 9865, 5747, 7776, 8192, 5700, 15627, 15663, 13761, 14499, 151, 3649, 17861, 966, 13820, 5461, 18736, 15325, 11707, 14175, 4929, 6302, 10214, 3537, 3427, 7399, 17945, 12443, 18230, 3505, 3399, 5023, 6236, 720, 11275, 3227, 15236, 10373, 17603, 1144, 13475, 3090, 6718, 16728, 15200, 7553, 11246, 10854, 900, 2516, 3087, 5870, 5673, 4657, 3118, 10185, 4934, 1671, 4533, 10471, 18649, 18735, 4299, 13110, 15408, 17794, 9084, 10388, 15405, 1821, 8498, 7930, 9797, 16034, 11123, 1855, 18224, 141, 5470, 1045, 7151, 8425, 13405, 12099, 5189, 3930, 9762, 11893, 15031, 3173, 6661, 18512, 18924, 8696, 3351, 1611, 9989, 14999, 15684, 10861, 11785, 14781, 3951, 18843, 10443, 2305, 9775, 2170, 18574, 8780, 8707, 13020, 16921, 7796, 3918, 3342, 9752, 5993, 7, 14101, 14265, 5859, 7296, 4073, 9229, 5239, 323, 18221, 14519, 5440, 3102, 12442, 8947, 4846, 4540, 2284, 16224, 4840, 10601, 18839, 5051, 9555, 10204, 4214, 5488, 755, 10419, 18756, 8393]",1,9,27,985,0.792564792, 
-"[18870, 7733, 14123, 11084, 13844, 9409, 11275, 7215, 18843, 3279, 10279, 17839, 8998, 2798, 787, 12926, 17195, 14754, 4393, 9186, 2386, 1253, 2361, 10600, 10896, 12772, 18719, 14609, 6077, 7317, 15893, 5348, 3799, 18121, 16330, 3692, 11936, 236, 15468, 8104, 11902, 11813, 4219, 8054, 5467, 4702, 2425, 9797, 18315, 17701, 18741, 18760, 12132, 11823, 19053, 2862, 17198, 13278, 11674, 17437, 14756, 17365, 12413, 15070, 9014, 1785, 17406, 12071, 3207, 13472, 13020, 13820, 1205, 14107, 9256, 8919, 13963, 1842, 8861, 11407, 2191, 13597, 9161, 136, 12647, 7452, 18294, 15045, 13475, 12573, 3333, 11665, 12320, 15841, 8384, 9241, 17440, 16225, 6411, 6511, 2195, 3229, 18347, 10005, 13809, 528, 19044, 11055, 6569, 9389, 8234, 7691, 802, 2117, 19394, 902, 14810, 18073, 2059, 10776, 11039, 7756, 17911, 15356, 11907, 15114, 7316, 13491, 9589, 7370, 13071, 10388, 3244, 11448, 15544, 3326, 401, 996, 11297, 9956, 4952, 5587, 1945, 2019, 583, 3331, 15065, 11468, 13405, 11144, 3716, 4445, 12098, 17019, 11898, 11992, 8920, 13142, 10769, 9695, 5427, 963, 3930, 7103, 18011, 17469, 12430, 5968, 11816, 1544, 17599, 18656, 4932, 9595, 14745, 19221, 2533, 12019, 5665, 518, 12235, 1271, 15307, 6129, 10629, 14727, 296, 15151, 12349, 18221, 5928, 9809, 10818, 9299, 9166, 616, 1643, 1318, 4893, 1973, 3505, 3124, 8697, 3306, 14305, 18809, 1906, 577, 13885, 973, 10253, 3577, 7864, 3715, 7084, 2128, 18290, 1310, 9277, 3052, 1886, 7629, 11533, 19096, 17474, 8972, 13371, 12738, 2456, 466, 4655, 9083, 10881, 5342, 8393, 4765, 6692, 6862, 5340, 4387, 10866, 14910, 2482, 4759, 13924, 17771, 18748, 8024, 7301, 5799, 4597, 18967, 8569, 2645, 16129, 5716, 16106, 2182, 7139, 16869, 5307, 2819, 9839, 10601, 12348, 5663, 5602, 5316, 79, 2338]",1,9,27,1978,0.7974336866663285, -"[11831, 6048, 4914, 6787, 3289, 1640, 4774, 8893, 2549, 16650, 18966, 18762, 5102, 17056, 11358, 3632, 4632, 4238, 1484, 37, 7230, 572, 11170, 2838, 1102, 6032, 2110, 8318, 4503, 18659, 817, 19444, 15971, 11306, 11061, 2579, 15003, 
4505, 16179, 10219, 3043, 5907, 10904, 14821, 14566, 3136, 4477, 17762, 14420, 16057, 18632, 10513, 12166, 7091, 646, 5003, 6303, 13721, 18773, 15721, 14945, 7223, 1363, 3494, 16802, 759, 13779, 7377, 8265, 15663, 10842, 13668, 14827, 7388, 2882, 18920, 7939, 15472, 18572, 5002, 12831, 15826, 18501, 19178, 16735, 4522, 17584, 12256, 6452, 12613, 15841, 19596, 14515, 2389, 2255, 5987, 7302, 3141, 16550, 4937, 16443, 17880, 13373, 12230, 10953, 18957, 13066, 11604, 7780, 18281, 15923, 4752, 11912, 18743, 12142, 1002, 15808, 4414, 7560, 14953, 18723, 6381, 8696, 616, 13859, 14036, 8093, 12971, 17198, 11292, 16538, 6391, 12480, 13484, 17603, 17415, 18774, 18554, 2721, 5513, 2689, 10537, 13317, 11746, 7116, 18891, 14253, 13604, 9867, 9718, 18696, 15803, 16514, 16048, 7901, 18190, 4691, 17821, 18816, 11450, 17781, 4837, 19408, 10165, 8436, 5507, 3468, 645, 16982, 10081, 12457, 5453, 9560, 7097, 18689, 7504, 8547, 2134, 5538, 16906, 15510, 4961, 642, 8470, 7847, 10812, 3065, 2172, 511, 19351, 8644, 5549, 9753, 14647, 8941, 17192, 7052, 1182, 12884, 15050, 6000, 14453, 19582, 16770, 9846, 12236, 8259, 10699, 8048, 7, 14103, 15858, 9882, 19698, 14410, 7137, 13836, 19049, 14797, 13018, 2044, 12307, 7667, 14076, 18956, 14741, 8483, 15891, 1485, 17680, 3677, 5784, 16687, 16944, 15384, 8221, 7456, 3603, 5996, 13683, 18035, 9808, 9634, 7620, 11944, 8509, 4051, 17019, 17034, 62, 4766, 7094, 7073, 13103, 17858, 11989, 5823, 15462, 13623, 15188, 439, 9390, 14816, 4631, 126, 4077, 14648, 18483, 10998, 14274, 951, 17727, 4385, 11262, 11894, 13498, 13202, 3084, 2668, 791, 12680, 3800, 14521, 11390, 14147, 10930, 10954, 11840, 3457, 10794, 14860, 2526, 14460, 3610, 9328, 17066, 2573, 6164, 11950, 11687, 8951, 10254, 11067, 10773, 7092, 9769, 17308, 7484, 8510, 3587, 18830, 16305, 8712, 7584, 12625, 360, 8275, 7625, 8816, 5497, 18568, 17025, 552, 18736, 12943, 18959, 10371, 15911, 8446, 17660, 6869, 15361, 1519, 9548, 9040, 14549, 15649, 17702, 6423, 2058, 7136, 1978, 4415, 6004, 118, 
8227, 3002, 10270, 17334, 4964, 7929, 2623, 19026, 14320, 18349, 8727, 7089, 7331, 5661, 12117]",1,9,36,985,0.705736167, -"[18736, 12564, 17165, 14371, 6767, 14742, 164, 17383, 16780, 6812, 13402, 9652, 7185, 11399, 8998, 928, 10769, 18122, 18827, 105, 16355, 19531, 15087, 1756, 4811, 4912, 5132, 630, 12015, 17101, 15513, 210, 3239, 1205, 4246, 13540, 7116, 3136, 7669, 9, 18762, 15278, 5224, 6000, 1398, 351, 6953, 11652, 4133, 357, 7875, 8851, 14147, 17510, 11977, 10488, 14278, 18725, 1063, 10136, 13989, 13787, 12463, 17519, 18728, 15025, 4238, 452, 18768, 4895, 1091, 1533, 14770, 5095, 10466, 13633, 12628, 17219, 4052, 18659, 13483, 7452, 15664, 17196, 14385, 435, 7058, 2110, 17886, 4436, 14077, 7829, 5605, 8289, 9819, 8263, 1568, 804, 10638, 12055, 16426, 16964, 1410, 12711, 8822, 9648, 8762, 7456, 3683, 1478, 3690, 17301, 11482, 17072, 1493, 2193, 9315, 6690, 18747, 15096, 17848, 7091, 15479, 9192, 11403, 4733, 1467, 7981, 19002, 9921, 16989, 16733, 12811, 1766, 4559, 7712, 2211, 7063, 17771, 18767, 14092, 11262, 16684, 13662, 14194, 15498, 7939, 19300, 15882, 16822, 840, 3350, 14234, 8221, 17724, 17639, 6284, 11842, 10513, 17019, 5243, 1544, 4932, 16650, 14626, 4149, 4332, 19151, 2983, 12884, 12340, 18093, 9620, 3733, 16409, 15239, 18931, 16357, 4976, 17026, 15933, 4914, 1797, 4985, 1301, 8093, 12678, 481, 18009, 12125, 9376, 3461, 7108, 3439, 1573, 1753, 1717, 3453, 18760, 3517, 13233, 4184, 6883, 962, 9145, 5568, 15341, 1469, 2695, 4165, 1640, 18310, 11461, 10742, 17081, 7503, 1794, 11170, 17295, 16697, 6481, 11595, 1628, 13143, 17628, 7496, 16552, 11881, 14912, 3848, 16999, 6511, 332, 12002, 18799, 185, 6794, 4281, 8012, 18072, 8916, 2559, 6704, 12445, 964, 16918, 3852, 14502, 16770, 15912, 9240, 17910, 4732, 6346, 17762, 15682, 14886, 14123, 10932, 8980, 2049, 9766, 15840, 15458, 14207, 2140, 18816, 5328, 5946, 5089, 17034, 5708, 15505, 1363, 16101, 10770, 18898, 7534, 14603, 6553, 3146, 11580, 7778, 12924, 11651, 8842, 15605, 18423, 15768, 4980, 6411, 
19349, 16384, 3664, 8834, 14900, 18812, 4722, 7720, 4222, 18824, 7092, 5569, 918, 4694, 10762, 3757, 16958, 668, 3289, 314, 16582, 1683, 15305, 5096, 150, 12979, 18763, 99, 1754, 6934, 18773, 19477, 17025, 14510, 1606, 10308, 16239, 18873, 4826, 13795, 533, 8034, 16805, 9581, 621, 10259, 1635, 3493, 4450, 15952, 8715, 6407, 7815, 10441, 14654, 17662, 5429, 2708, 8317, 901, 8415, 15503, 3772, 1515, 5375, 11454, 14149, 407, 6201]",1,9,36,1978,0.6602931480448344, -"[17634, 2255, 5029, 13203, 3136, 9497, 9808, 7939, 12972, 9048, 17726, 5440, 2623, 9718, 9184, 9625, 18762, 1145, 18380, 616, 5541, 17025, 12374, 6949, 531, 3800, 8576, 13940, 16384, 15971, 7116, 16048, 4051, 6411, 16748, 18957, 1045, 8509, 3289, 1425, 18341, 9256, 2110, 7117, 4477, 18357, 13256, 13550, 4221, 18234, 1678, 16073, 5784, 12786, 514, 2971, 15008, 1656, 474, 19402, 19536, 3384, 8893, 16692, 8816, 18945, 4524, 5843, 17019, 18572, 10717, 8318, 7136, 8244, 13859, 13604, 10767, 7533, 2582, 1821, 6569, 1242, 2588, 7692, 12137, 5577, 12235, 1238, 4489, 8986, 7130, 7937, 7097, 6356, 16881, 2010, 6872, 14827, 4284, 8976, 8028, 3207, 4774, 2626, 5339, 14088, 5906, 5318, 3683, 2680, 13844, 4923, 14388, 16689, 9192, 6715, 7992, 1046, 1830, 11816, 4591, 2798, 15045, 4343, 15329, 13405, 10580, 7774, 12457, 2390, 15627, 3063, 5549, 554, 4149, 10746, 7842, 12256, 1003, 10644, 2158, 7743, 6358, 17584, 11449, 62, 5214, 9830, 14893, 15220, 17063, 6210, 9806, 18816, 7484, 5470, 13337, 5051, 6120, 7443, 5614, 5703, 10635, 4214, 2338, 18956, 3227, 18970, 7348, 8612, 12902, 12513, 9692, 18281, 10510, 16462, 18317, 822, 9115, 1794, 11567, 1584, 5464, 15788, 18696, 17625, 14886, 10215, 16729, 5241, 1143, 13206, 17540, 13053, 1303, 9040, 7929, 10859, 12583, 18723, 10083, 21, 12000, 11372, 4455, 10842, 8800, 9886, 1085, 8879, 14816, 13947, 18028, 8482, 15984, 14819, 19549, 8640, 13887, 12107, 546, 5708, 2841, 12307, 6749, 15853, 11458, 15954, 5823, 15427, 14648, 13020, 12885, 2751, 8342, 2486, 10599, 2561, 9148, 449, 
9197, 15044, 14198, 7486, 16676, 3187, 2182, 2044, 9229, 17308, 11264, 11107, 13031, 3280, 16113, 15084, 19193, 11311, 11002, 17295, 12995, 14521, 17738, 8843, 3139, 572, 6319, 4188, 1257, 18742, 12186, 4593, 205, 10633, 1174, 18973, 7596, 12060, 9965, 13570, 10699, 15225, 2650, 7709, 12081, 645, 2154, 11453, 7113, 8998, 7232, 16732, 3795, 5351, 13576, 5854, 2134, 12965, 3126, 8862, 7545, 5491, 9775, 10659, 18773, 7780, 11456, 17297, 13942, 10252, 13876, 2981, 1037, 14419, 9809, 18366, 16695, 11894, 12708, 17762, 12486, 2111, 528, 187, 11738, 3523, 11106, 8130, 11262, 17003, 1068, 7267, 5384, 15884, 13422, 896, 18584, 552, 4323, 15597, 6572, 12879, 3831, 17940, 14977, 14030, 19002, 14317, 10256, 8682, 17725, 892, 3960, 14334, 7653, 7154, 10344, 19320, 14537, 18752]",1,9,36,2957,0.805294923, -"[11450, 8372, 5174, 11894, 4932, 12160, 924, 2019, 6039, 18145, 17471, 11450, 8372, 5174, 11894, 4932, 12160, 924, 2019, 6039, 18145, 17471, 11450, 8372, 5174, 11894, 4932, 12160, 924, 2019, 6039, 18145, 17471, 11450, 8372, 5174, 11894, 4932, 12160, 924, 2019, 6039, 18145, 17471, 11450, 8372, 5174, 11894, 4932, 12160, 924, 2019, 6039, 18145, 17471, 11450, 8372, 5174, 11894, 4932, 12160, 924, 2019, 6039, 18145, 17471, 11450, 8372, 5174, 11894, 4932, 12160, 924, 2019, 6039, 18145, 17471, 11450, 8372, 5174, 11894, 4932, 12160, 924, 2019, 6039, 18145, 17471, 11450, 8372, 5174, 11894, 4932, 12160, 924, 2019, 6039, 18145, 17471, 11450, 8372, 5174, 11894, 4932, 12160, 924, 2019, 6039, 18145, 17471]",1,,11,1978,0.5683927575188923,4 -"[10754, 9471, 10373, 13364, 1205, 1423, 17493, 14821, 17933, 8384, 18898, 10754, 9471, 10373, 13364, 1205, 1423, 17493, 14821, 17933, 8384, 18898, 10754, 9471, 10373, 13364, 1205, 1423, 17493, 14821, 17933, 8384, 18898, 10754, 9471, 10373, 13364, 1205, 1423, 17493, 14821, 17933, 8384, 18898, 10754, 9471, 10373, 13364, 1205, 1423, 17493, 14821, 17933, 8384, 18898, 10754, 9471, 10373, 13364, 1205, 1423, 17493, 14821, 17933, 8384, 18898, 10754, 9471, 10373, 
13364, 1205, 1423, 17493, 14821, 17933, 8384, 18898, 10754, 9471, 10373, 13364, 1205, 1423, 17493, 14821, 17933, 8384, 18898, 10754, 9471, 10373, 13364, 1205, 1423, 17493, 14821, 17933, 8384, 18898, 10754, 9471, 10373, 13364, 1205, 1423, 17493, 14821, 17933, 8384, 18898]",1,,11,2978,0.5608358269513618,4 -"[5336, 11024, 2338, 5610, 7929, 10446, 11944, 2543, 8288, 10098, 6125, 16989, 7097, 8299, 16624, 5614, 2862, 5336, 11024, 2338, 5610, 7929, 10446, 11944, 2543, 8288, 10098, 6125, 16989, 7097, 8299, 16624, 5614, 2862, 5336, 11024, 2338, 5610, 7929, 10446, 11944, 2543, 8288, 10098, 6125, 16989, 7097, 8299, 16624, 5614, 2862, 5336, 11024, 2338, 5610, 7929, 10446, 11944, 2543, 8288, 10098, 6125, 16989, 7097, 8299, 16624, 5614, 2862, 5336, 11024, 2338, 5610, 7929, 10446, 11944, 2543, 8288, 10098, 6125, 16989, 7097, 8299, 16624, 5614, 2862, 5336, 11024, 2338, 5610, 7929, 10446, 11944, 2543, 8288, 10098, 6125, 16989, 7097, 8299, 16624, 5614, 2862, 5336, 11024, 2338, 5610, 7929, 10446, 11944, 2543, 8288, 10098, 6125, 16989, 7097, 8299, 16624, 5614, 2862, 5336, 11024, 2338, 5610, 7929, 10446, 11944, 2543, 8288, 10098, 6125, 16989, 7097, 8299, 16624, 5614, 2862, 5336, 11024, 2338, 5610, 7929, 10446, 11944, 2543, 8288, 10098, 6125, 16989, 7097, 8299, 16624, 5614, 2862, 5336, 11024, 2338, 5610, 7929, 10446, 11944, 2543, 8288, 10098, 6125, 16989, 7097, 8299, 16624, 5614, 2862]",1,,17,1978,0.6126185525181316,4 -"[5336, 7110, 7861, 11024, 10446, 47, 9322, 16384, 19053, 1315, 10435, 17194, 11099, 4291, 6833, 2862, 4938, 7517, 2199, 6949, 3190, 1108, 5336, 7110, 7861, 11024, 10446, 47, 9322, 16384, 19053, 1315, 10435, 17194, 11099, 4291, 6833, 2862, 4938, 7517, 2199, 6949, 3190, 1108, 5336, 7110, 7861, 11024, 10446, 47, 9322, 16384, 19053, 1315, 10435, 17194, 11099, 4291, 6833, 2862, 4938, 7517, 2199, 6949, 3190, 1108, 5336, 7110, 7861, 11024, 10446, 47, 9322, 16384, 19053, 1315, 10435, 17194, 11099, 4291, 6833, 2862, 4938, 7517, 2199, 6949, 3190, 1108, 5336, 7110, 7861, 11024, 
10446, 47, 9322, 16384, 19053, 1315, 10435, 17194, 11099, 4291, 6833, 2862, 4938, 7517, 2199, 6949, 3190, 1108, 5336, 7110, 7861, 11024, 10446, 47, 9322, 16384, 19053, 1315, 10435, 17194, 11099, 4291, 6833, 2862, 4938, 7517, 2199, 6949, 3190, 1108, 5336, 7110, 7861, 11024, 10446, 47, 9322, 16384, 19053, 1315, 10435, 17194, 11099, 4291, 6833, 2862, 4938, 7517, 2199, 6949, 3190, 1108, 5336, 7110, 7861, 11024, 10446, 47, 9322, 16384, 19053, 1315, 10435, 17194, 11099, 4291, 6833, 2862, 4938, 7517, 2199, 6949, 3190, 1108, 5336, 7110, 7861, 11024, 10446, 47, 9322, 16384, 19053, 1315, 10435, 17194, 11099, 4291, 6833, 2862, 4938, 7517, 2199, 6949, 3190, 1108, 5336, 7110, 7861, 11024, 10446, 47, 9322, 16384, 19053, 1315, 10435, 17194, 11099, 4291, 6833, 2862, 4938, 7517, 2199, 6949, 3190, 1108]",1,,22,1978,0.5595171679261551,4 -"[11450, 616, 10842, 12256, 11894, 11024, 11449, 9634, 8509, 150, 18214, 5325, 6715, 7780, 15311, 5872, 19048, 1489, 8687, 3702, 1841, 17366, 11450, 616, 10842, 12256, 11894, 11024, 11449, 9634, 8509, 150, 18214, 5325, 6715, 7780, 15311, 5872, 19048, 1489, 8687, 3702, 1841, 17366, 11450, 616, 10842, 12256, 11894, 11024, 11449, 9634, 8509, 150, 18214, 5325, 6715, 7780, 15311, 5872, 19048, 1489, 8687, 3702, 1841, 17366, 11450, 616, 10842, 12256, 11894, 11024, 11449, 9634, 8509, 150, 18214, 5325, 6715, 7780, 15311, 5872, 19048, 1489, 8687, 3702, 1841, 17366, 11450, 616, 10842, 12256, 11894, 11024, 11449, 9634, 8509, 150, 18214, 5325, 6715, 7780, 15311, 5872, 19048, 1489, 8687, 3702, 1841, 17366, 11450, 616, 10842, 12256, 11894, 11024, 11449, 9634, 8509, 150, 18214, 5325, 6715, 7780, 15311, 5872, 19048, 1489, 8687, 3702, 1841, 17366, 11450, 616, 10842, 12256, 11894, 11024, 11449, 9634, 8509, 150, 18214, 5325, 6715, 7780, 15311, 5872, 19048, 1489, 8687, 3702, 1841, 17366, 11450, 616, 10842, 12256, 11894, 11024, 11449, 9634, 8509, 150, 18214, 5325, 6715, 7780, 15311, 5872, 19048, 1489, 8687, 3702, 1841, 17366, 11450, 616, 10842, 12256, 11894, 11024, 11449, 
9634, 8509, 150, 18214, 5325, 6715, 7780, 15311, 5872, 19048, 1489, 8687, 3702, 1841, 17366, 11450, 616, 10842, 12256, 11894, 11024, 11449, 9634, 8509, 150, 18214, 5325, 6715, 7780, 15311, 5872, 19048, 1489, 8687, 3702, 1841, 17366]",1,,22,2978,0.5493229193082112,4 -"[5324, 5336, 11024, 10842, 17194, 12839, 11003, 8221, 8482, 7929, 15816, 12278, 17495, 5461, 10104, 8299, 1142, 15224, 18839, 4987, 12679, 13034, 17066, 9523, 5494, 46, 5324, 5336, 11024, 10842, 17194, 12839, 11003, 8221, 8482, 7929, 15816, 12278, 17495, 5461, 10104, 8299, 1142, 15224, 18839, 4987, 12679, 13034, 17066, 9523, 5494, 46, 5324, 5336, 11024, 10842, 17194, 12839, 11003, 8221, 8482, 7929, 15816, 12278, 17495, 5461, 10104, 8299, 1142, 15224, 18839, 4987, 12679, 13034, 17066, 9523, 5494, 46, 5324, 5336, 11024, 10842, 17194, 12839, 11003, 8221, 8482, 7929, 15816, 12278, 17495, 5461, 10104, 8299, 1142, 15224, 18839, 4987, 12679, 13034, 17066, 9523, 5494, 46, 5324, 5336, 11024, 10842, 17194, 12839, 11003, 8221, 8482, 7929, 15816, 12278, 17495, 5461, 10104, 8299, 1142, 15224, 18839, 4987, 12679, 13034, 17066, 9523, 5494, 46, 5324, 5336, 11024, 10842, 17194, 12839, 11003, 8221, 8482, 7929, 15816, 12278, 17495, 5461, 10104, 8299, 1142, 15224, 18839, 4987, 12679, 13034, 17066, 9523, 5494, 46, 5324, 5336, 11024, 10842, 17194, 12839, 11003, 8221, 8482, 7929, 15816, 12278, 17495, 5461, 10104, 8299, 1142, 15224, 18839, 4987, 12679, 13034, 17066, 9523, 5494, 46, 5324, 5336, 11024, 10842, 17194, 12839, 11003, 8221, 8482, 7929, 15816, 12278, 17495, 5461, 10104, 8299, 1142, 15224, 18839, 4987, 12679, 13034, 17066, 9523, 5494, 46, 5324, 5336, 11024, 10842, 17194, 12839, 11003, 8221, 8482, 7929, 15816, 12278, 17495, 5461, 10104, 8299, 1142, 15224, 18839, 4987, 12679, 13034, 17066, 9523, 5494, 46, 5324, 5336, 11024, 10842, 17194, 12839, 11003, 8221, 8482, 7929, 15816, 12278, 17495, 5461, 10104, 8299, 1142, 15224, 18839, 4987, 12679, 13034, 17066, 9523, 5494, 46]",1,,26,1978,0.6277324136531927,4 -"[2523, 9471, 
17301, 1842, 1205, 10373, 1521, 979, 1119, 611, 4061, 5904, 2571, 8598, 1497, 17819, 2306, 8998, 18849, 5174, 2692, 18664, 14284, 6504, 19625, 7227, 787, 3286, 14849, 19517, 4912, 16284, 8982, 19001, 4256, 14798, 15819, 9708, 2523, 9471, 17301, 1842, 1205, 10373, 1521, 979, 1119, 611, 4061, 5904, 2571, 8598, 1497, 17819, 2306, 8998, 18849, 5174, 2692, 18664, 14284, 6504, 19625, 7227, 787, 3286, 14849, 19517, 4912, 16284, 8982, 19001, 4256, 14798, 15819, 9708, 2523, 9471, 17301, 1842, 1205, 10373, 1521, 979, 1119, 611, 4061, 5904, 2571, 8598, 1497, 17819, 2306, 8998, 18849, 5174, 2692, 18664, 14284, 6504, 19625, 7227, 787, 3286, 14849, 19517, 4912, 16284, 8982, 19001, 4256, 14798, 15819, 9708, 2523, 9471, 17301, 1842, 1205, 10373, 1521, 979, 1119, 611, 4061, 5904, 2571, 8598, 1497, 17819, 2306, 8998, 18849, 5174, 2692, 18664, 14284, 6504, 19625, 7227, 787, 3286, 14849, 19517, 4912, 16284, 8982, 19001, 4256, 14798, 15819, 9708, 2523, 9471, 17301, 1842, 1205, 10373, 1521, 979, 1119, 611, 4061, 5904, 2571, 8598, 1497, 17819, 2306, 8998, 18849, 5174, 2692, 18664, 14284, 6504, 19625, 7227, 787, 3286, 14849, 19517, 4912, 16284, 8982, 19001, 4256, 14798, 15819, 9708, 2523, 9471, 17301, 1842, 1205, 10373, 1521, 979, 1119, 611, 4061, 5904, 2571, 8598, 1497, 17819, 2306, 8998, 18849, 5174, 2692, 18664, 14284, 6504, 19625, 7227, 787, 3286, 14849, 19517, 4912, 16284, 8982, 19001, 4256, 14798, 15819, 9708, 2523, 9471, 17301, 1842, 1205, 10373, 1521, 979, 1119, 611, 4061, 5904, 2571, 8598, 1497, 17819, 2306, 8998, 18849, 5174, 2692, 18664, 14284, 6504, 19625, 7227, 787, 3286, 14849, 19517, 4912, 16284, 8982, 19001, 4256, 14798, 15819, 9708, 2523, 9471, 17301, 1842, 1205, 10373, 1521, 979, 1119, 611, 4061, 5904, 2571, 8598, 1497, 17819, 2306, 8998, 18849, 5174, 2692, 18664, 14284, 6504, 19625, 7227, 787, 3286, 14849, 19517, 4912, 16284, 8982, 19001, 4256, 14798, 15819, 9708, 2523, 9471, 17301, 1842, 1205, 10373, 1521, 979, 1119, 611, 4061, 5904, 2571, 8598, 1497, 17819, 2306, 
8998, 18849, 5174, 2692, 18664, 14284, 6504, 19625, 7227, 787, 3286, 14849, 19517, 4912, 16284, 8982, 19001, 4256, 14798, 15819, 9708, 2523, 9471, 17301, 1842, 1205, 10373, 1521, 979, 1119, 611, 4061, 5904, 2571, 8598, 1497, 17819, 2306, 8998, 18849, 5174, 2692, 18664, 14284, 6504, 19625, 7227, 787, 3286, 14849, 19517, 4912, 16284, 8982, 19001, 4256, 14798, 15819, 9708]",1,,38,1978,0.599685551,4 -"[15079, 1842, 4411, 2708, 18080, 10373, 1205, 2523, 611, 12160, 2571, 15079, 1842, 4411, 2708, 18080, 10373, 1205, 2523, 611, 12160, 2571, 15079, 1842, 4411, 2708, 18080, 10373, 1205, 2523, 611, 12160, 2571, 15079, 1842, 4411, 2708, 18080, 10373, 1205, 2523, 611, 12160, 2571, 15079, 1842, 4411, 2708, 18080, 10373, 1205, 2523, 611, 12160, 2571, 15079, 1842, 4411, 2708, 18080, 10373, 1205, 2523, 611, 12160, 2571, 15079, 1842, 4411, 2708, 18080, 10373, 1205, 2523, 611, 12160, 2571, 15079, 1842, 4411, 2708, 18080, 10373, 1205, 2523, 611, 12160, 2571, 15079, 1842, 4411, 2708, 18080, 10373, 1205, 2523, 611, 12160, 2571, 15079, 1842, 4411, 2708, 18080, 10373, 1205, 2523, 611, 12160, 2571]",1,,11,2978,0.6498453111528123,6 -"[17301, 10373, 10441, 2361, 1474, 9181, 12160, 17399, 7445, 12974, 15201, 8862, 17550, 5440, 10516, 11674, 17819, 16560, 16901, 14821, 10842, 16347, 17301, 10373, 10441, 2361, 1474, 9181, 12160, 17399, 7445, 12974, 15201, 8862, 17550, 5440, 10516, 11674, 17819, 16560, 16901, 14821, 10842, 16347, 17301, 10373, 10441, 2361, 1474, 9181, 12160, 17399, 7445, 12974, 15201, 8862, 17550, 5440, 10516, 11674, 17819, 16560, 16901, 14821, 10842, 16347, 17301, 10373, 10441, 2361, 1474, 9181, 12160, 17399, 7445, 12974, 15201, 8862, 17550, 5440, 10516, 11674, 17819, 16560, 16901, 14821, 10842, 16347, 17301, 10373, 10441, 2361, 1474, 9181, 12160, 17399, 7445, 12974, 15201, 8862, 17550, 5440, 10516, 11674, 17819, 16560, 16901, 14821, 10842, 16347, 17301, 10373, 10441, 2361, 1474, 9181, 12160, 17399, 7445, 12974, 15201, 8862, 17550, 5440, 10516, 11674, 17819, 16560, 16901, 
14821, 10842, 16347, 17301, 10373, 10441, 2361, 1474, 9181, 12160, 17399, 7445, 12974, 15201, 8862, 17550, 5440, 10516, 11674, 17819, 16560, 16901, 14821, 10842, 16347, 17301, 10373, 10441, 2361, 1474, 9181, 12160, 17399, 7445, 12974, 15201, 8862, 17550, 5440, 10516, 11674, 17819, 16560, 16901, 14821, 10842, 16347, 17301, 10373, 10441, 2361, 1474, 9181, 12160, 17399, 7445, 12974, 15201, 8862, 17550, 5440, 10516, 11674, 17819, 16560, 16901, 14821, 10842, 16347, 17301, 10373, 10441, 2361, 1474, 9181, 12160, 17399, 7445, 12974, 15201, 8862, 17550, 5440, 10516, 11674, 17819, 16560, 16901, 14821, 10842, 16347]",1,,22,2978,0.699954354,6 -"[11450, 12160, 11894, 1843, 4149, 10842, 16541, 5324, 2994, 2623, 15664, 4152, 1833, 5325, 15589, 3286, 7973, 12019, 16775, 12643, 46, 19048, 8823, 12002, 11186, 2041, 11450, 12160, 11894, 1843, 4149, 10842, 16541, 5324, 2994, 2623, 15664, 4152, 1833, 5325, 15589, 3286, 7973, 12019, 16775, 12643, 46, 19048, 8823, 12002, 11186, 2041, 11450, 12160, 11894, 1843, 4149, 10842, 16541, 5324, 2994, 2623, 15664, 4152, 1833, 5325, 15589, 3286, 7973, 12019, 16775, 12643, 46, 19048, 8823, 12002, 11186, 2041, 11450, 12160, 11894, 1843, 4149, 10842, 16541, 5324, 2994, 2623, 15664, 4152, 1833, 5325, 15589, 3286, 7973, 12019, 16775, 12643, 46, 19048, 8823, 12002, 11186, 2041, 11450, 12160, 11894, 1843, 4149, 10842, 16541, 5324, 2994, 2623, 15664, 4152, 1833, 5325, 15589, 3286, 7973, 12019, 16775, 12643, 46, 19048, 8823, 12002, 11186, 2041, 11450, 12160, 11894, 1843, 4149, 10842, 16541, 5324, 2994, 2623, 15664, 4152, 1833, 5325, 15589, 3286, 7973, 12019, 16775, 12643, 46, 19048, 8823, 12002, 11186, 2041, 11450, 12160, 11894, 1843, 4149, 10842, 16541, 5324, 2994, 2623, 15664, 4152, 1833, 5325, 15589, 3286, 7973, 12019, 16775, 12643, 46, 19048, 8823, 12002, 11186, 2041, 11450, 12160, 11894, 1843, 4149, 10842, 16541, 5324, 2994, 2623, 15664, 4152, 1833, 5325, 15589, 3286, 7973, 12019, 16775, 12643, 46, 19048, 8823, 12002, 11186, 2041, 11450, 12160, 11894, 
1843, 4149, 10842, 16541, 5324, 2994, 2623, 15664, 4152, 1833, 5325, 15589, 3286, 7973, 12019, 16775, 12643, 46, 19048, 8823, 12002, 11186, 2041, 11450, 12160, 11894, 1843, 4149, 10842, 16541, 5324, 2994, 2623, 15664, 4152, 1833, 5325, 15589, 3286, 7973, 12019, 16775, 12643, 46, 19048, 8823, 12002, 11186, 2041]",1,,26,2978,0.6502003347365218,6 -"[15079, 10087, 1581, 15520, 12125, 7379, 10769, 8629, 1558, 1423, 17101, 4421, 12711, 1205, 11482, 10886, 18095, 17819, 12573, 2691, 10785, 18826, 9254, 1852, 12720, 16822, 6101, 10488, 8209, 16976, 3822, 3501, 8886, 4247, 16181, 1796, 1478, 13746, 15079, 10087, 1581, 15520, 12125, 7379, 10769, 8629, 1558, 1423, 17101, 4421, 12711, 1205, 11482, 10886, 18095, 17819, 12573, 2691, 10785, 18826, 9254, 1852, 12720, 16822, 6101, 10488, 8209, 16976, 3822, 3501, 8886, 4247, 16181, 1796, 1478, 13746, 15079, 10087, 1581, 15520, 12125, 7379, 10769, 8629, 1558, 1423, 17101, 4421, 12711, 1205, 11482, 10886, 18095, 17819, 12573, 2691, 10785, 18826, 9254, 1852, 12720, 16822, 6101, 10488, 8209, 16976, 3822, 3501, 8886, 4247, 16181, 1796, 1478, 13746, 15079, 10087, 1581, 15520, 12125, 7379, 10769, 8629, 1558, 1423, 17101, 4421, 12711, 1205, 11482, 10886, 18095, 17819, 12573, 2691, 10785, 18826, 9254, 1852, 12720, 16822, 6101, 10488, 8209, 16976, 3822, 3501, 8886, 4247, 16181, 1796, 1478, 13746, 15079, 10087, 1581, 15520, 12125, 7379, 10769, 8629, 1558, 1423, 17101, 4421, 12711, 1205, 11482, 10886, 18095, 17819, 12573, 2691, 10785, 18826, 9254, 1852, 12720, 16822, 6101, 10488, 8209, 16976, 3822, 3501, 8886, 4247, 16181, 1796, 1478, 13746, 15079, 10087, 1581, 15520, 12125, 7379, 10769, 8629, 1558, 1423, 17101, 4421, 12711, 1205, 11482, 10886, 18095, 17819, 12573, 2691, 10785, 18826, 9254, 1852, 12720, 16822, 6101, 10488, 8209, 16976, 3822, 3501, 8886, 4247, 16181, 1796, 1478, 13746, 15079, 10087, 1581, 15520, 12125, 7379, 10769, 8629, 1558, 1423, 17101, 4421, 12711, 1205, 11482, 10886, 18095, 17819, 12573, 2691, 10785, 18826, 9254, 1852, 
12720, 16822, 6101, 10488, 8209, 16976, 3822, 3501, 8886, 4247, 16181, 1796, 1478, 13746, 15079, 10087, 1581, 15520, 12125, 7379, 10769, 8629, 1558, 1423, 17101, 4421, 12711, 1205, 11482, 10886, 18095, 17819, 12573, 2691, 10785, 18826, 9254, 1852, 12720, 16822, 6101, 10488, 8209, 16976, 3822, 3501, 8886, 4247, 16181, 1796, 1478, 13746, 15079, 10087, 1581, 15520, 12125, 7379, 10769, 8629, 1558, 1423, 17101, 4421, 12711, 1205, 11482, 10886, 18095, 17819, 12573, 2691, 10785, 18826, 9254, 1852, 12720, 16822, 6101, 10488, 8209, 16976, 3822, 3501, 8886, 4247, 16181, 1796, 1478, 13746, 15079, 10087, 1581, 15520, 12125, 7379, 10769, 8629, 1558, 1423, 17101, 4421, 12711, 1205, 11482, 10886, 18095, 17819, 12573, 2691, 10785, 18826, 9254, 1852, 12720, 16822, 6101, 10488, 8209, 16976, 3822, 3501, 8886, 4247, 16181, 1796, 1478, 13746]",1,,38,985,0.5949688086422884,6 -"[5336, 11024, 19156, 12256, 1142, 7584, 3649, 10684, 7776, 483, 16483, 11059, 2690, 116, 12612, 3251, 12088, 5336, 11024, 19156, 12256, 1142, 7584, 3649, 10684, 7776, 483, 16483, 11059, 2690, 116, 12612, 3251, 12088, 5336, 11024, 19156, 12256, 1142, 7584, 3649, 10684, 7776, 483, 16483, 11059, 2690, 116, 12612, 3251, 12088, 5336, 11024, 19156, 12256, 1142, 7584, 3649, 10684, 7776, 483, 16483, 11059, 2690, 116, 12612, 3251, 12088, 5336, 11024, 19156, 12256, 1142, 7584, 3649, 10684, 7776, 483, 16483, 11059, 2690, 116, 12612, 3251, 12088, 5336, 11024, 19156, 12256, 1142, 7584, 3649, 10684, 7776, 483, 16483, 11059, 2690, 116, 12612, 3251, 12088, 5336, 11024, 19156, 12256, 1142, 7584, 3649, 10684, 7776, 483, 16483, 11059, 2690, 116, 12612, 3251, 12088, 5336, 11024, 19156, 12256, 1142, 7584, 3649, 10684, 7776, 483, 16483, 11059, 2690, 116, 12612, 3251, 12088, 5336, 11024, 19156, 12256, 1142, 7584, 3649, 10684, 7776, 483, 16483, 11059, 2690, 116, 12612, 3251, 12088, 5336, 11024, 19156, 12256, 1142, 7584, 3649, 10684, 7776, 483, 16483, 11059, 2690, 116, 12612, 3251, 12088]",1,,17,985,0.7097935791449004,8 -"[16579, 597, 
10857, 13981, 10575, 10854, 14731, 18791, 6167, 9766, 16624, 11304, 11044, 11024, 5470, 46, 7517, 16579, 597, 10857, 13981, 10575, 10854, 14731, 18791, 6167, 9766, 16624, 11304, 11044, 11024, 5470, 46, 7517, 16579, 597, 10857, 13981, 10575, 10854, 14731, 18791, 6167, 9766, 16624, 11304, 11044, 11024, 5470, 46, 7517, 16579, 597, 10857, 13981, 10575, 10854, 14731, 18791, 6167, 9766, 16624, 11304, 11044, 11024, 5470, 46, 7517, 16579, 597, 10857, 13981, 10575, 10854, 14731, 18791, 6167, 9766, 16624, 11304, 11044, 11024, 5470, 46, 7517, 16579, 597, 10857, 13981, 10575, 10854, 14731, 18791, 6167, 9766, 16624, 11304, 11044, 11024, 5470, 46, 7517, 16579, 597, 10857, 13981, 10575, 10854, 14731, 18791, 6167, 9766, 16624, 11304, 11044, 11024, 5470, 46, 7517, 16579, 597, 10857, 13981, 10575, 10854, 14731, 18791, 6167, 9766, 16624, 11304, 11044, 11024, 5470, 46, 7517, 16579, 597, 10857, 13981, 10575, 10854, 14731, 18791, 6167, 9766, 16624, 11304, 11044, 11024, 5470, 46, 7517, 16579, 597, 10857, 13981, 10575, 10854, 14731, 18791, 6167, 9766, 16624, 11304, 11044, 11024, 5470, 46, 7517]",1,,17,1978,0.5675812750418421,8 -"[10373, 10754, 9471, 2361, 10842, 8862, 5108, 15788, 616, 7296, 17819, 973, 1142, 15302, 10516, 7774, 12760, 10373, 10754, 9471, 2361, 10842, 8862, 5108, 15788, 616, 7296, 17819, 973, 1142, 15302, 10516, 7774, 12760, 10373, 10754, 9471, 2361, 10842, 8862, 5108, 15788, 616, 7296, 17819, 973, 1142, 15302, 10516, 7774, 12760, 10373, 10754, 9471, 2361, 10842, 8862, 5108, 15788, 616, 7296, 17819, 973, 1142, 15302, 10516, 7774, 12760, 10373, 10754, 9471, 2361, 10842, 8862, 5108, 15788, 616, 7296, 17819, 973, 1142, 15302, 10516, 7774, 12760, 10373, 10754, 9471, 2361, 10842, 8862, 5108, 15788, 616, 7296, 17819, 973, 1142, 15302, 10516, 7774, 12760, 10373, 10754, 9471, 2361, 10842, 8862, 5108, 15788, 616, 7296, 17819, 973, 1142, 15302, 10516, 7774, 12760, 10373, 10754, 9471, 2361, 10842, 8862, 5108, 15788, 616, 7296, 17819, 973, 1142, 15302, 10516, 7774, 12760, 10373, 
10754, 9471, 2361, 10842, 8862, 5108, 15788, 616, 7296, 17819, 973, 1142, 15302, 10516, 7774, 12760, 10373, 10754, 9471, 2361, 10842, 8862, 5108, 15788, 616, 7296, 17819, 973, 1142, 15302, 10516, 7774, 12760]",1,,17,2978,0.6846883400111579,8 -"[5336, 11024, 47, 896, 14731, 17912, 820, 11099, 1310, 17723, 5427, 14819, 14857, 12679, 19145, 14448, 3251, 12695, 18839, 13043, 16454, 4215, 5336, 11024, 47, 896, 14731, 17912, 820, 11099, 1310, 17723, 5427, 14819, 14857, 12679, 19145, 14448, 3251, 12695, 18839, 13043, 16454, 4215, 5336, 11024, 47, 896, 14731, 17912, 820, 11099, 1310, 17723, 5427, 14819, 14857, 12679, 19145, 14448, 3251, 12695, 18839, 13043, 16454, 4215, 5336, 11024, 47, 896, 14731, 17912, 820, 11099, 1310, 17723, 5427, 14819, 14857, 12679, 19145, 14448, 3251, 12695, 18839, 13043, 16454, 4215, 5336, 11024, 47, 896, 14731, 17912, 820, 11099, 1310, 17723, 5427, 14819, 14857, 12679, 19145, 14448, 3251, 12695, 18839, 13043, 16454, 4215, 5336, 11024, 47, 896, 14731, 17912, 820, 11099, 1310, 17723, 5427, 14819, 14857, 12679, 19145, 14448, 3251, 12695, 18839, 13043, 16454, 4215, 5336, 11024, 47, 896, 14731, 17912, 820, 11099, 1310, 17723, 5427, 14819, 14857, 12679, 19145, 14448, 3251, 12695, 18839, 13043, 16454, 4215, 5336, 11024, 47, 896, 14731, 17912, 820, 11099, 1310, 17723, 5427, 14819, 14857, 12679, 19145, 14448, 3251, 12695, 18839, 13043, 16454, 4215, 5336, 11024, 47, 896, 14731, 17912, 820, 11099, 1310, 17723, 5427, 14819, 14857, 12679, 19145, 14448, 3251, 12695, 18839, 13043, 16454, 4215, 5336, 11024, 47, 896, 14731, 17912, 820, 11099, 1310, 17723, 5427, 14819, 14857, 12679, 19145, 14448, 3251, 12695, 18839, 13043, 16454, 4215]",1,,22,985,0.556423391,8 -"[10754, 16754, 5470, 8299, 5430, 10857, 5440, 10373, 5612, 16121, 77, 10646, 18956, 1700, 12974, 15971, 1169, 12148, 11369, 5157, 17488, 7992, 10754, 16754, 5470, 8299, 5430, 10857, 5440, 10373, 5612, 16121, 77, 10646, 18956, 1700, 12974, 15971, 1169, 12148, 11369, 5157, 17488, 7992, 10754, 16754, 5470, 
8299, 5430, 10857, 5440, 10373, 5612, 16121, 77, 10646, 18956, 1700, 12974, 15971, 1169, 12148, 11369, 5157, 17488, 7992, 10754, 16754, 5470, 8299, 5430, 10857, 5440, 10373, 5612, 16121, 77, 10646, 18956, 1700, 12974, 15971, 1169, 12148, 11369, 5157, 17488, 7992, 10754, 16754, 5470, 8299, 5430, 10857, 5440, 10373, 5612, 16121, 77, 10646, 18956, 1700, 12974, 15971, 1169, 12148, 11369, 5157, 17488, 7992, 10754, 16754, 5470, 8299, 5430, 10857, 5440, 10373, 5612, 16121, 77, 10646, 18956, 1700, 12974, 15971, 1169, 12148, 11369, 5157, 17488, 7992, 10754, 16754, 5470, 8299, 5430, 10857, 5440, 10373, 5612, 16121, 77, 10646, 18956, 1700, 12974, 15971, 1169, 12148, 11369, 5157, 17488, 7992, 10754, 16754, 5470, 8299, 5430, 10857, 5440, 10373, 5612, 16121, 77, 10646, 18956, 1700, 12974, 15971, 1169, 12148, 11369, 5157, 17488, 7992, 10754, 16754, 5470, 8299, 5430, 10857, 5440, 10373, 5612, 16121, 77, 10646, 18956, 1700, 12974, 15971, 1169, 12148, 11369, 5157, 17488, 7992, 10754, 16754, 5470, 8299, 5430, 10857, 5440, 10373, 5612, 16121, 77, 10646, 18956, 1700, 12974, 15971, 1169, 12148, 11369, 5157, 17488, 7992]",1,,22,1978,0.7113658264441852,8 -"[4340, 4893, 11024, 8299, 12304, 3190, 10769, 6125, 18080, 105, 18769, 766, 11907, 12592, 8526, 9150, 12036, 3126, 1309, 8125, 18743, 10932, 4340, 4893, 11024, 8299, 12304, 3190, 10769, 6125, 18080, 105, 18769, 766, 11907, 12592, 8526, 9150, 12036, 3126, 1309, 8125, 18743, 10932, 4340, 4893, 11024, 8299, 12304, 3190, 10769, 6125, 18080, 105, 18769, 766, 11907, 12592, 8526, 9150, 12036, 3126, 1309, 8125, 18743, 10932, 4340, 4893, 11024, 8299, 12304, 3190, 10769, 6125, 18080, 105, 18769, 766, 11907, 12592, 8526, 9150, 12036, 3126, 1309, 8125, 18743, 10932, 4340, 4893, 11024, 8299, 12304, 3190, 10769, 6125, 18080, 105, 18769, 766, 11907, 12592, 8526, 9150, 12036, 3126, 1309, 8125, 18743, 10932, 4340, 4893, 11024, 8299, 12304, 3190, 10769, 6125, 18080, 105, 18769, 766, 11907, 12592, 8526, 9150, 12036, 3126, 1309, 8125, 18743, 10932, 4340, 
4893, 11024, 8299, 12304, 3190, 10769, 6125, 18080, 105, 18769, 766, 11907, 12592, 8526, 9150, 12036, 3126, 1309, 8125, 18743, 10932, 4340, 4893, 11024, 8299, 12304, 3190, 10769, 6125, 18080, 105, 18769, 766, 11907, 12592, 8526, 9150, 12036, 3126, 1309, 8125, 18743, 10932, 4340, 4893, 11024, 8299, 12304, 3190, 10769, 6125, 18080, 105, 18769, 766, 11907, 12592, 8526, 9150, 12036, 3126, 1309, 8125, 18743, 10932, 4340, 4893, 11024, 8299, 12304, 3190, 10769, 6125, 18080, 105, 18769, 766, 11907, 12592, 8526, 9150, 12036, 3126, 1309, 8125, 18743, 10932]",1,,22,2978,0.6753562915250799,8 -"[918, 14731, 4064, 16624, 14893, 13981, 10435, 6412, 14044, 9547, 46, 7517, 5497, 16029, 14908, 13703, 7894, 4230, 7434, 7356, 10163, 18788, 13310, 13405, 7695, 3571, 918, 14731, 4064, 16624, 14893, 13981, 10435, 6412, 14044, 9547, 46, 7517, 5497, 16029, 14908, 13703, 7894, 4230, 7434, 7356, 10163, 18788, 13310, 13405, 7695, 3571, 918, 14731, 4064, 16624, 14893, 13981, 10435, 6412, 14044, 9547, 46, 7517, 5497, 16029, 14908, 13703, 7894, 4230, 7434, 7356, 10163, 18788, 13310, 13405, 7695, 3571, 918, 14731, 4064, 16624, 14893, 13981, 10435, 6412, 14044, 9547, 46, 7517, 5497, 16029, 14908, 13703, 7894, 4230, 7434, 7356, 10163, 18788, 13310, 13405, 7695, 3571, 918, 14731, 4064, 16624, 14893, 13981, 10435, 6412, 14044, 9547, 46, 7517, 5497, 16029, 14908, 13703, 7894, 4230, 7434, 7356, 10163, 18788, 13310, 13405, 7695, 3571, 918, 14731, 4064, 16624, 14893, 13981, 10435, 6412, 14044, 9547, 46, 7517, 5497, 16029, 14908, 13703, 7894, 4230, 7434, 7356, 10163, 18788, 13310, 13405, 7695, 3571, 918, 14731, 4064, 16624, 14893, 13981, 10435, 6412, 14044, 9547, 46, 7517, 5497, 16029, 14908, 13703, 7894, 4230, 7434, 7356, 10163, 18788, 13310, 13405, 7695, 3571, 918, 14731, 4064, 16624, 14893, 13981, 10435, 6412, 14044, 9547, 46, 7517, 5497, 16029, 14908, 13703, 7894, 4230, 7434, 7356, 10163, 18788, 13310, 13405, 7695, 3571, 918, 14731, 4064, 16624, 14893, 13981, 10435, 6412, 14044, 9547, 46, 7517, 5497, 
16029, 14908, 13703, 7894, 4230, 7434, 7356, 10163, 18788, 13310, 13405, 7695, 3571, 918, 14731, 4064, 16624, 14893, 13981, 10435, 6412, 14044, 9547, 46, 7517, 5497, 16029, 14908, 13703, 7894, 4230, 7434, 7356, 10163, 18788, 13310, 13405, 7695, 3571]",1,,26,985,0.5134655373535528,8 -"[11450, 11024, 10842, 2304, 19335, 7929, 4837, 14731, 4774, 17356, 16990, 3744, 13550, 3683, 12679, 10966, 4413, 8050, 11170, 2771, 13297, 3768, 5567, 15003, 2440, 8132, 11450, 11024, 10842, 2304, 19335, 7929, 4837, 14731, 4774, 17356, 16990, 3744, 13550, 3683, 12679, 10966, 4413, 8050, 11170, 2771, 13297, 3768, 5567, 15003, 2440, 8132, 11450, 11024, 10842, 2304, 19335, 7929, 4837, 14731, 4774, 17356, 16990, 3744, 13550, 3683, 12679, 10966, 4413, 8050, 11170, 2771, 13297, 3768, 5567, 15003, 2440, 8132, 11450, 11024, 10842, 2304, 19335, 7929, 4837, 14731, 4774, 17356, 16990, 3744, 13550, 3683, 12679, 10966, 4413, 8050, 11170, 2771, 13297, 3768, 5567, 15003, 2440, 8132, 11450, 11024, 10842, 2304, 19335, 7929, 4837, 14731, 4774, 17356, 16990, 3744, 13550, 3683, 12679, 10966, 4413, 8050, 11170, 2771, 13297, 3768, 5567, 15003, 2440, 8132, 11450, 11024, 10842, 2304, 19335, 7929, 4837, 14731, 4774, 17356, 16990, 3744, 13550, 3683, 12679, 10966, 4413, 8050, 11170, 2771, 13297, 3768, 5567, 15003, 2440, 8132, 11450, 11024, 10842, 2304, 19335, 7929, 4837, 14731, 4774, 17356, 16990, 3744, 13550, 3683, 12679, 10966, 4413, 8050, 11170, 2771, 13297, 3768, 5567, 15003, 2440, 8132, 11450, 11024, 10842, 2304, 19335, 7929, 4837, 14731, 4774, 17356, 16990, 3744, 13550, 3683, 12679, 10966, 4413, 8050, 11170, 2771, 13297, 3768, 5567, 15003, 2440, 8132, 11450, 11024, 10842, 2304, 19335, 7929, 4837, 14731, 4774, 17356, 16990, 3744, 13550, 3683, 12679, 10966, 4413, 8050, 11170, 2771, 13297, 3768, 5567, 15003, 2440, 8132, 11450, 11024, 10842, 2304, 19335, 7929, 4837, 14731, 4774, 17356, 16990, 3744, 13550, 3683, 12679, 10966, 4413, 8050, 11170, 2771, 13297, 3768, 5567, 15003, 2440, 
8132]",1,,26,1978,0.5488664604148704,8 -"[10754, 10373, 10276, 5440, 19048, 4513, 16754, 14317, 12561, 4194, 597, 14821, 8998, 15735, 2692, 293, 10842, 16826, 1847, 12017, 17301, 1895, 17819, 528, 14092, 12974, 10754, 10373, 10276, 5440, 19048, 4513, 16754, 14317, 12561, 4194, 597, 14821, 8998, 15735, 2692, 293, 10842, 16826, 1847, 12017, 17301, 1895, 17819, 528, 14092, 12974, 10754, 10373, 10276, 5440, 19048, 4513, 16754, 14317, 12561, 4194, 597, 14821, 8998, 15735, 2692, 293, 10842, 16826, 1847, 12017, 17301, 1895, 17819, 528, 14092, 12974, 10754, 10373, 10276, 5440, 19048, 4513, 16754, 14317, 12561, 4194, 597, 14821, 8998, 15735, 2692, 293, 10842, 16826, 1847, 12017, 17301, 1895, 17819, 528, 14092, 12974, 10754, 10373, 10276, 5440, 19048, 4513, 16754, 14317, 12561, 4194, 597, 14821, 8998, 15735, 2692, 293, 10842, 16826, 1847, 12017, 17301, 1895, 17819, 528, 14092, 12974, 10754, 10373, 10276, 5440, 19048, 4513, 16754, 14317, 12561, 4194, 597, 14821, 8998, 15735, 2692, 293, 10842, 16826, 1847, 12017, 17301, 1895, 17819, 528, 14092, 12974, 10754, 10373, 10276, 5440, 19048, 4513, 16754, 14317, 12561, 4194, 597, 14821, 8998, 15735, 2692, 293, 10842, 16826, 1847, 12017, 17301, 1895, 17819, 528, 14092, 12974, 10754, 10373, 10276, 5440, 19048, 4513, 16754, 14317, 12561, 4194, 597, 14821, 8998, 15735, 2692, 293, 10842, 16826, 1847, 12017, 17301, 1895, 17819, 528, 14092, 12974, 10754, 10373, 10276, 5440, 19048, 4513, 16754, 14317, 12561, 4194, 597, 14821, 8998, 15735, 2692, 293, 10842, 16826, 1847, 12017, 17301, 1895, 17819, 528, 14092, 12974, 10754, 10373, 10276, 5440, 19048, 4513, 16754, 14317, 12561, 4194, 597, 14821, 8998, 15735, 2692, 293, 10842, 16826, 1847, 12017, 17301, 1895, 17819, 528, 14092, 12974]",1,,26,2978,0.5905563726733276,8 -"[19156, 5375, 6243, 616, 2092, 677, 129, 35, 5336, 1761, 897, 10240, 710, 8803, 14647, 15329, 10850, 14045, 14857, 15954, 8406, 13703, 1700, 2577, 2297, 11700, 3781, 7739, 2080, 18956, 1142, 15896, 7213, 9164, 11449, 12676, 4538, 
4274, 19156, 5375, 6243, 616, 2092, 677, 129, 35, 5336, 1761, 897, 10240, 710, 8803, 14647, 15329, 10850, 14045, 14857, 15954, 8406, 13703, 1700, 2577, 2297, 11700, 3781, 7739, 2080, 18956, 1142, 15896, 7213, 9164, 11449, 12676, 4538, 4274, 19156, 5375, 6243, 616, 2092, 677, 129, 35, 5336, 1761, 897, 10240, 710, 8803, 14647, 15329, 10850, 14045, 14857, 15954, 8406, 13703, 1700, 2577, 2297, 11700, 3781, 7739, 2080, 18956, 1142, 15896, 7213, 9164, 11449, 12676, 4538, 4274, 19156, 5375, 6243, 616, 2092, 677, 129, 35, 5336, 1761, 897, 10240, 710, 8803, 14647, 15329, 10850, 14045, 14857, 15954, 8406, 13703, 1700, 2577, 2297, 11700, 3781, 7739, 2080, 18956, 1142, 15896, 7213, 9164, 11449, 12676, 4538, 4274, 19156, 5375, 6243, 616, 2092, 677, 129, 35, 5336, 1761, 897, 10240, 710, 8803, 14647, 15329, 10850, 14045, 14857, 15954, 8406, 13703, 1700, 2577, 2297, 11700, 3781, 7739, 2080, 18956, 1142, 15896, 7213, 9164, 11449, 12676, 4538, 4274, 19156, 5375, 6243, 616, 2092, 677, 129, 35, 5336, 1761, 897, 10240, 710, 8803, 14647, 15329, 10850, 14045, 14857, 15954, 8406, 13703, 1700, 2577, 2297, 11700, 3781, 7739, 2080, 18956, 1142, 15896, 7213, 9164, 11449, 12676, 4538, 4274, 19156, 5375, 6243, 616, 2092, 677, 129, 35, 5336, 1761, 897, 10240, 710, 8803, 14647, 15329, 10850, 14045, 14857, 15954, 8406, 13703, 1700, 2577, 2297, 11700, 3781, 7739, 2080, 18956, 1142, 15896, 7213, 9164, 11449, 12676, 4538, 4274, 19156, 5375, 6243, 616, 2092, 677, 129, 35, 5336, 1761, 897, 10240, 710, 8803, 14647, 15329, 10850, 14045, 14857, 15954, 8406, 13703, 1700, 2577, 2297, 11700, 3781, 7739, 2080, 18956, 1142, 15896, 7213, 9164, 11449, 12676, 4538, 4274, 19156, 5375, 6243, 616, 2092, 677, 129, 35, 5336, 1761, 897, 10240, 710, 8803, 14647, 15329, 10850, 14045, 14857, 15954, 8406, 13703, 1700, 2577, 2297, 11700, 3781, 7739, 2080, 18956, 1142, 15896, 7213, 9164, 11449, 12676, 4538, 4274, 19156, 5375, 6243, 616, 2092, 677, 129, 35, 5336, 1761, 897, 10240, 710, 8803, 14647, 15329, 10850, 14045, 14857, 
15954, 8406, 13703, 1700, 2577, 2297, 11700, 3781, 7739, 2080, 18956, 1142, 15896, 7213, 9164, 11449, 12676, 4538, 4274]",1,,38,985,0.6004463153623777,8 -"[11450, 5325, 616, 7110, 13981, 19053, 9186, 2994, 5375, 10842, 10446, 3517, 14470, 16283, 529, 2337, 16332, 6331, 15325, 10165, 3705, 17474, 6412, 14857, 925, 16260, 7517, 19217, 15065, 18791, 7739, 13442, 13703, 15087, 13405, 4297, 897, 7815, 11450, 5325, 616, 7110, 13981, 19053, 9186, 2994, 5375, 10842, 10446, 3517, 14470, 16283, 529, 2337, 16332, 6331, 15325, 10165, 3705, 17474, 6412, 14857, 925, 16260, 7517, 19217, 15065, 18791, 7739, 13442, 13703, 15087, 13405, 4297, 897, 7815, 11450, 5325, 616, 7110, 13981, 19053, 9186, 2994, 5375, 10842, 10446, 3517, 14470, 16283, 529, 2337, 16332, 6331, 15325, 10165, 3705, 17474, 6412, 14857, 925, 16260, 7517, 19217, 15065, 18791, 7739, 13442, 13703, 15087, 13405, 4297, 897, 7815, 11450, 5325, 616, 7110, 13981, 19053, 9186, 2994, 5375, 10842, 10446, 3517, 14470, 16283, 529, 2337, 16332, 6331, 15325, 10165, 3705, 17474, 6412, 14857, 925, 16260, 7517, 19217, 15065, 18791, 7739, 13442, 13703, 15087, 13405, 4297, 897, 7815, 11450, 5325, 616, 7110, 13981, 19053, 9186, 2994, 5375, 10842, 10446, 3517, 14470, 16283, 529, 2337, 16332, 6331, 15325, 10165, 3705, 17474, 6412, 14857, 925, 16260, 7517, 19217, 15065, 18791, 7739, 13442, 13703, 15087, 13405, 4297, 897, 7815, 11450, 5325, 616, 7110, 13981, 19053, 9186, 2994, 5375, 10842, 10446, 3517, 14470, 16283, 529, 2337, 16332, 6331, 15325, 10165, 3705, 17474, 6412, 14857, 925, 16260, 7517, 19217, 15065, 18791, 7739, 13442, 13703, 15087, 13405, 4297, 897, 7815, 11450, 5325, 616, 7110, 13981, 19053, 9186, 2994, 5375, 10842, 10446, 3517, 14470, 16283, 529, 2337, 16332, 6331, 15325, 10165, 3705, 17474, 6412, 14857, 925, 16260, 7517, 19217, 15065, 18791, 7739, 13442, 13703, 15087, 13405, 4297, 897, 7815, 11450, 5325, 616, 7110, 13981, 19053, 9186, 2994, 5375, 10842, 10446, 3517, 14470, 16283, 529, 2337, 16332, 6331, 15325, 10165, 3705, 
17474, 6412, 14857, 925, 16260, 7517, 19217, 15065, 18791, 7739, 13442, 13703, 15087, 13405, 4297, 897, 7815, 11450, 5325, 616, 7110, 13981, 19053, 9186, 2994, 5375, 10842, 10446, 3517, 14470, 16283, 529, 2337, 16332, 6331, 15325, 10165, 3705, 17474, 6412, 14857, 925, 16260, 7517, 19217, 15065, 18791, 7739, 13442, 13703, 15087, 13405, 4297, 897, 7815, 11450, 5325, 616, 7110, 13981, 19053, 9186, 2994, 5375, 10842, 10446, 3517, 14470, 16283, 529, 2337, 16332, 6331, 15325, 10165, 3705, 17474, 6412, 14857, 925, 16260, 7517, 19217, 15065, 18791, 7739, 13442, 13703, 15087, 13405, 4297, 897, 7815]",1,,38,2978,0.5816807830805903,8 -"[18622, 3931, 7115, 6692, 3930, 14284, 205, 10373, 7123, 10185, 6612, 10573, 9264, 11816, 242, 1865, 2437, 14789, 8998, 10510, 337, 8855, 4759, 1891, 3654, 13620, 2554, 2114, 10540, 6208, 9797, 1194, 17649, 2902, 9830, 8417, 13277, 5595, 1785, 4454, 2361, 11823, 10690, 13364, 10443, 9203, 13576, 15739, 8382, 6626, 2268, 15591, 9199, 6018, 6316, 13912, 10659, 12145, 6572, 973, 13632, 528, 14749, 9047, 5809, 2170, 4823, 1385, 8172, 11456, 16033, 18341, 2804, 1675, 9130, 2278, 6899, 7015, 9491, 15735, 1842, 18831, 9471, 18765, 15100, 6098, 6, 8708, 7399, 7900, 12514, 12191, 3629, 18825, 7994, 2117, 18742, 19087, 6120, 11883]",0,,,,, -"[12024, 13076, 8661, 5673, 13966, 760, 917, 2170, 1565, 2128, 12856, 10931, 228, 15565, 5698, 2707, 9607, 9017, 3270, 12349]",0,,,,, -"[6465, 14812, 10614, 10754, 11456, 15155, 16989, 12968, 338, 17636, 13533, 9453, 3484, 3630, 1116, 1312, 13269, 10270, 5, 3110]",0,,,,, -"[10350, 17034, 17415, 15207, 4051, 11049, 12628, 17387, 5095, 432, 11894, 19005, 5661, 6488, 9769, 5324, 18754, 13369, 7470, 16989, 1889, 62, 8221, 4238, 16318, 16650, 16652, 3141, 15945, 6690, 13779, 14893, 17538, 4564, 14431, 3983, 11024, 16305, 616, 14515, 12019, 2997, 7901, 18816, 14857, 5943, 1068, 1635, 11761, 13623, 16378, 5639, 18773, 6164, 1567, 3104, 7560, 16868, 18819, 3218, 2049, 9648, 351, 15916, 9634, 6180, 7377, 5429, 15663, 1694, 
16579, 1640, 1242, 918, 3453, 18349, 11390, 1794, 8707, 7939, 11814, 14288, 14751, 13525, 15498, 1135, 7080, 2352, 8851, 11672, 8483, 11262, 3137, 3082, 7829, 15615, 1683, 13677, 14754, 8715, 4411, 16770, 16884, 17401, 1246, 11840, 17911, 11944, 5029, 10568, 13846, 16214, 3683, 8608, 8133, 17192, 8916, 912, 10744, 16772, 1954, 8653, 18009, 8526, 4505, 1301, 8717, 3942, 6273, 2383, 10595, 1767, 1204, 6201, 11450, 8405, 10243, 12552, 2037, 3577, 18920, 2080, 12440, 9802, 80, 4578, 12304, 105, 15682, 6631, 3126, 2592, 15325, 3146, 15133, 754, 12884, 16038, 14070, 12479, 16098, 5930, 7780, 11958, 7160, 1309, 2134, 8392, 3677, 962, 4356, 16380, 18252, 12256, 6023, 2142, 11200, 3087, 3307, 14069, 11413, 14370, 15960, 16274, 1439, 4332, 16228, 5477, 1057, 877, 18487, 7981, 5212, 6833, 2019, 6402, 2343, 14353, 6298, 6260, 6255, 13010, 14420, 6210, 2226, 1761, 4021, 6344, 15082, 4553, 8980, 5680, 1063, 15920, 15589, 4787, 9208, 11738, 2387, 18147, 14533, 5618, 4722, 11764, 2024, 17505, 14607, 13058, 14110, 478, 17237, 8269, 46, 3953, 16880, 13503, 3502, 10219, 3658, 14408, 10297, 10542, 3702, 4014, 4007, 19205, 4002, 1191, 6969, 643]",1,14,25,275,0.6597352538418624, -"[10350, 5324, 14751, 6690, 19005, 4564, 3146, 19002, 14431, 7780, 8916, 17387, 16378, 17911, 16989, 8707, 14353, 16650, 3141, 8851, 11049, 3577, 8483, 15189, 11262, 11894, 1242, 18816, 1635, 12440, 3658, 18773, 16214, 15663, 1761, 5943, 754, 17415, 2383, 15207, 7939, 17034, 11814, 10051, 16652, 14607, 1567, 14288, 62, 962, 4007, 18754, 16380, 1640, 14110, 15945, 3218, 13677, 2080, 6180, 2134, 18487, 7377, 1954, 8405, 14893, 16770, 15589, 6631, 1439, 12019, 1301, 5661, 11413, 5930, 16318, 5029, 13623, 14754, 8133, 13525, 3137, 12884, 14420, 9648, 616, 351, 16098, 16579, 10243, 18819, 9040, 17192, 11390, 3942, 12256, 15498, 16880, 15682, 1068, 2997, 15615, 11944, 8608, 1568, 3677, 1694, 11761, 7901, 1794, 912, 3453, 3683, 5618, 15960, 13779, 11840, 19143, 3674, 8221, 16073, 3082, 4553, 9634, 12304, 14515, 18920, 
11450, 16772, 1063, 9769, 3104, 2229, 15916, 2142, 6201, 8653, 11971, 13705, 4332, 4356, 4505, 2532, 1246, 2592, 11449, 13846, 4895, 14149, 18349]",1,7,15,225,0.7295734645230004, -"[10420, 2422, 2806, 4935, 3399, 15031, 5494, 8790, 17007, 10190, 18839, 17266, 5610, 18733, 6129, 10420, 2422, 2806, 4935, 3399, 15031, 5494, 8790, 17007, 10190, 18839, 17266, 5610, 18733, 6129, 10420, 2422, 2806, 4935, 3399, 15031, 5494, 8790, 17007, 10190, 18839, 17266, 5610, 18733, 6129, 10420, 2422, 2806, 4935, 3399, 15031, 5494, 8790, 17007, 10190, 18839, 17266, 5610, 18733, 6129, 10420, 2422, 2806, 4935, 3399, 15031, 5494, 8790, 17007, 10190, 18839, 17266, 5610, 18733, 6129, 10420, 2422, 2806, 4935, 3399, 15031, 5494, 8790, 17007, 10190, 18839, 17266, 5610, 18733, 6129, 10420, 2422, 2806, 4935, 3399, 15031, 5494, 8790, 17007, 10190, 18839, 17266, 5610, 18733, 6129, 10420, 2422, 2806, 4935, 3399, 15031, 5494, 8790, 17007, 10190, 18839, 17266, 5610, 18733, 6129, 10420, 2422, 2806, 4935, 3399, 15031, 5494, 8790, 17007, 10190, 18839, 17266, 5610, 18733, 6129, 10420, 2422, 2806, 4935, 3399, 15031, 5494, 8790, 17007, 10190, 18839, 17266, 5610, 18733, 6129]",1,,15,200,0.6545113353958513,0.6 -"[11907, 15385, 10457, 474, 6243, 18221, 12653, 17600, 18461, 4758, 2587, 14857, 2690, 14613, 15193, 15329, 12483, 5788, 7213, 17917, 14629, 17206, 1191, 17863, 15787, 5899, 11672, 12983, 17316, 515, 11907, 15385, 10457, 474, 6243, 18221, 12653, 17600, 18461, 4758, 2587, 14857, 2690, 14613, 15193, 15329, 12483, 5788, 7213, 17917, 14629, 17206, 1191, 17863, 15787, 5899, 11672, 12983, 17316, 515, 11907, 15385, 10457, 474, 6243, 18221, 12653, 17600, 18461, 4758, 2587, 14857, 2690, 14613, 15193, 15329, 12483, 5788, 7213, 17917, 14629, 17206, 1191, 17863, 15787, 5899, 11672, 12983, 17316, 515, 11907, 15385, 10457, 474, 6243, 18221, 12653, 17600, 18461, 4758, 2587, 14857, 2690, 14613, 15193, 15329, 12483, 5788, 7213, 17917, 14629, 17206, 1191, 17863, 15787, 5899, 11672, 12983, 17316, 515, 11907, 15385, 
10457, 474, 6243, 18221, 12653, 17600, 18461, 4758, 2587, 14857, 2690, 14613, 15193, 15329, 12483, 5788, 7213, 17917, 14629, 17206, 1191, 17863, 15787, 5899, 11672, 12983, 17316, 515, 11907, 15385, 10457, 474, 6243, 18221, 12653, 17600, 18461, 4758, 2587, 14857, 2690, 14613, 15193, 15329, 12483, 5788, 7213, 17917, 14629, 17206, 1191, 17863, 15787, 5899, 11672, 12983, 17316, 515, 11907, 15385, 10457, 474, 6243, 18221, 12653, 17600, 18461, 4758, 2587, 14857, 2690, 14613, 15193, 15329, 12483, 5788, 7213, 17917, 14629, 17206, 1191, 17863, 15787, 5899, 11672, 12983, 17316, 515, 11907, 15385, 10457, 474, 6243, 18221, 12653, 17600, 18461, 4758, 2587, 14857, 2690, 14613, 15193, 15329, 12483, 5788, 7213, 17917, 14629, 17206, 1191, 17863, 15787, 5899, 11672, 12983, 17316, 515, 11907, 15385, 10457, 474, 6243, 18221, 12653, 17600, 18461, 4758, 2587, 14857, 2690, 14613, 15193, 15329, 12483, 5788, 7213, 17917, 14629, 17206, 1191, 17863, 15787, 5899, 11672, 12983, 17316, 515, 11907, 15385, 10457, 474, 6243, 18221, 12653, 17600, 18461, 4758, 2587, 14857, 2690, 14613, 15193, 15329, 12483, 5788, 7213, 17917, 14629, 17206, 1191, 17863, 15787, 5899, 11672, 12983, 17316, 515]",1,,30,300,0.6217984480397627,0.2 -"[11989, 15945, 3683, 11894, 18717, 5324, 19002, 8608, 5500, 7901, 616, 12440, 7780, 2134, 15207, 18754, 7939, 10350, 18736, 17387, 11390, 15891, 16214, 14484, 14431, 13630, 17859, 8851, 2049, 11262, 5336, 6690, 14556, 8916, 5029, 15916, 9648, 12142, 3677, 12789, 3146, 11049, 16378, 4581, 13369, 4564, 7388, 1246, 16305, 5930, 12628, 14353, 5497, 10595, 15542, 5618, 14754, 15663, 15682, 8715, 3577, 6631, 3082, 2428, 18830, 18035, 13846, 9723, 16250, 14515, 2383, 15498, 8470, 16989, 1683, 5730, 1063, 4356, 1144, 754, 9864, 1954, 8653, 11269, 1301, 17192, 17538, 18773, 13996, 11840, 11822, 10818, 1889, 16770, 2352, 4411, 962, 3137, 2024, 16380, 15615, 6298, 2080, 15960, 546, 6180, 14147, 5507, 62, 5943, 8717, 14893, 3307, 6242, 17164, 3674, 4553, 8392, 1416, 8483, 9477, 3983, 13400, 
1242, 1640, 2821, 16579, 16982, 17657, 15841, 7094, 10065, 8221, 2037, 574, 15589, 5639, 17415, 11067, 18819, 18009, 2594, 18743, 15448, 11958, 7829, 18816, 3942, 6488, 14069, 17122, 9390, 17409, 7089, 5402, 6000, 11944, 15225, 351, 1265, 11761, 5430, 6201, 2992, 3104, 2343, 11450, 16443, 14076, 16416, 12304, 10254, 1057, 10568, 2532, 9753, 12019, 15727, 6273, 14857, 4505, 3187, 14751, 14607, 2142, 19402, 4450, 1068, 3453, 17003, 11024, 17034, 11099, 17858, 8405, 15133, 8707, 5147, 918, 2592]",1,16,20,285,0.6899629761119845, -"[12256, 18632, 15663, 14110, 18839, 10824, 3577, 4752, 9041, 15891, 15405, 12646, 953, 12150, 7780, 11762, 15031, 18726, 2599, 10425, 5872, 17244, 12559, 2680, 3781, 9775, 4846, 837, 8094, 4016, 5943, 18645, 14465, 11450, 6952, 16777, 13937, 10046, 9769, 127, 17530, 2010, 3399, 14625, 5492, 16465, 18425, 11708, 18993, 5336, 8749, 125, 9816, 17266, 14622, 11885, 6781, 555, 8271, 5858, 18453, 15448, 754, 6493, 14483, 7, 8701, 8780, 1416, 7061, 5488, 19129, 17290, 2260, 14300, 10549, 9438, 3886, 5557, 14834, 7826, 17259, 8293, 11472, 8669, 13672, 16263, 13242, 10864, 9612, 1811, 1731, 15430, 4176, 3671, 15915, 354, 6837, 2422, 18461, 12364, 622, 11886, 6705, 3517, 1516, 2862, 4674, 14699, 12649, 4378, 90, 9572, 18838, 10535, 3925, 15453, 13948, 4120, 10944, 12509, 18697, 14792, 11746, 5116, 7776, 5455, 18615, 16207, 9751, 1878, 5422, 6508, 17667, 12706, 4918, 6211, 1183, 19032, 3145, 8461, 3663, 7683, 13914, 12607, 16190, 1506, 8158, 15805, 5707, 8282, 3936, 7911, 13761, 16114, 17473, 11193, 9459, 15585, 16515, 13932, 8372, 16880, 1813, 12955, 18756, 15369, 1998, 7600, 10190, 15388, 481, 10311, 15883, 18276, 14843, 15178, 4595, 10420, 909, 5755, 13306, 19450, 16604, 18431, 8425, 15551, 2806, 9549, 372, 3262, 17603, 16433, 5778, 1087, 4935, 10503, 5556, 1564, 12395]",1,20,20,250,0.7556423390982401, -"[12480, 17387, 11989, 14821, 5095, 14556, 15542, 16305, 5324, 14147, 15841, 3721, 14420, 3307, 17415, 10350, 11822, 17034, 11450, 1416, 11840, 6690, 
14515, 19129, 14857, 3453, 7377, 616, 918, 6869, 2428, 16318, 4581, 2951, 16416, 17653, 5730, 9390, 9769, 9723, 12884, 13846, 1242, 14484, 7388, 16770, 10754, 4752, 951, 19193, 17192, 5639, 7168, 5661, 574, 16989, 1068, 8221, 13400, 15891, 7929, 17122, 6201, 2304, 17409, 16250, 11463, 12278, 14525, 9424, 8017, 5402, 4051, 18751, 12766, 2127, 12304, 11269, 12479, 11944, 11814, 4961, 18743, 6180, 12789, 2352, 16982, 17401, 11735, 4356, 733, 15448, 11067, 18723, 9070, 6164, 17538, 11390, 13623, 13996, 9864, 5048, 1265, 12457, 7094, 6032, 8626, 912, 17859, 13683, 2783, 8608, 8712, 17993, 10254, 18736, 7243, 5594, 16689, 1204, 13028, 1794, 5497, 5453, 4505, 7089, 16679, 2821, 16904, 18717, 8470, 7097, 9813, 8093, 1761, 5019, 5500, 4522, 11540, 5336, 9634, 2383, 3092, 5507, 5704, 754, 12256, 926, 17356, 3384, 13630, 4868, 16443, 16288, 3385, 6000, 7091, 3963, 14893, 11099, 11024, 1144, 14076, 10818, 4452, 18970, 8707, 6224, 7776, 18275, 19444, 2573, 7968, 5003, 12093, 4414, 17660, 2755, 7560, 18362, 18840, 15639, 1832, 3187, 10575, 809, 1881, 16097, 11453, 13099, 7138, 11002, 703, 7511, 12060, 8951, 7482, 17003, 12613, 14647, 7293, 2594, 8288, 15727, 1837, 5430, 11531, 4743, 9970, 12593, 7018, 10580, 18389, 5060, 15384, 10824, 7799, 17319, 5907, 17194, 546, 12142, 2931, 17657, 7230, 17603, 6577, 6107, 9753, 4450, 14103, 7596, 10065, 8577, 152, 7739, 15808, 18652, 17164, 11863, 17940, 15225, 441, 1345, 12679, 7573, 15761, 1465, 663, 11988]",1,16,25,275,0.7097935791449004, -"[12628, 5324, 19002, 7901, 5661, 10350, 14353, 1301, 11894, 11049, 8221, 14415, 5029, 4564, 1242, 14420, 47, 14515, 3146, 616, 3137, 7780, 918, 6201, 1063, 4553, 7110, 8608, 3082, 10575, 11390, 1640, 11262, 3683, 12019, 16229, 13846, 8851, 13369, 15975, 7939, 14566, 2080, 1246, 14431, 6690, 8483, 7377, 17940, 6180, 2841, 5095, 3453, 2352, 9648, 754, 2383, 4505, 12337, 11484, 15416, 3306, 5639, 7983, 2024, 2281, 1068, 11761, 12884, 11944, 11762, 2134, 13009, 7477, 5930, 8763, 962, 5212, 8916, 12440, 
6631, 1794, 18976, 8516, 12479, 8286, 62, 2049, 11024, 9769, 8707, 7860, 2997, 17096, 3126, 2142, 12702, 4356, 1582, 912, 6703, 9802, 12304, 13525, 1612, 5790, 10710, 4112, 9508, 5375, 13971, 18656, 11840, 18736, 11210, 3983, 15570, 896, 8133, 5943]",1,16,12,250,0.7107064969315818, -"[12649, 7979, 19032, 17917, 3162, 7992, 11524, 1872, 14843, 15384, 4214, 6952, 15031, 18791, 16250, 15448, 966, 18717, 3886, 15954, 9451, 18736, 15236, 17859, 13325, 4846, 18773, 2784, 11810, 13761, 18411, 7388, 15841, 12741, 18035, 1813, 16982, 8970, 12646, 15385, 6788, 8783, 7891, 14857, 5336, 16305, 7181, 8221, 9775, 7553, 12036, 6690, 10818, 11390, 5497, 16416, 1430, 17387, 17409, 5500, 9864, 6643, 7168, 12789, 5507, 14556, 1309, 4682, 5402, 13630, 9390, 13400, 17122, 5943, 13716, 14515, 7089, 15542, 12304, 5324, 1614, 12075, 17034, 19002, 11814, 11989, 14484, 3108, 1416, 8763, 13996, 6180, 2938, 15457, 7571, 11840, 14147, 7377, 17164, 17538, 9424, 19402, 5639, 4965, 11822, 3351, 17603, 1741, 18830, 2862, 1311, 3453, 8707, 18400, 16443, 6164, 15883, 2304, 17415, 5430, 10350, 9634, 9723, 18754, 11944, 17858, 5095, 5730, 15971, 11785, 4581, 262, 18956, 3649, 8532, 8780, 13846, 1144, 2383, 2297, 18837, 14893, 15891, 10575, 8626, 10065, 12142, 14525, 12060, 5147, 3085, 8910, 6000, 11604, 17812, 2783, 17194, 18819, 733, 16538, 754, 16770, 11024, 10677, 11362, 11269, 17192, 16989, 14701, 11540, 1439, 17653, 6242, 14420, 11735, 18647, 2821, 2428, 11099, 16565]",1,16,18,250,0.7139017091849673, -"[12789, 8221, 16250, 9390, 5324, 5336, 6664, 14515, 9864, 2428, 7089, 9723, 1416, 10270, 16305, 15448, 5497, 11989, 14147, 9808, 2150, 1144, 14556, 11822, 15542, 5500, 1439, 11840, 5507, 14857, 6690, 754, 5730, 4581, 7377, 7388, 13400, 17859, 3453, 14105, 10350, 13846, 17952, 8821, 7258, 16332, 13996, 2304, 1310, 14484, 6180, 11099, 16229, 11269, 14525, 1345, 5402, 9424, 15841, 10588, 1885, 15891, 8914, 11049, 5661, 16914, 574, 1068, 9769, 10824, 10181, 2821, 918, 5461, 9712, 11365, 733, 12695, 
19053, 12479, 6201, 3721, 199, 17672, 194, 11894, 17971, 17645, 8348, 17630, 18411, 3187, 5959, 10254, 1899, 6415, 2854, 16468, 1580, 670, 9065, 5147, 9695, 13086, 2722, 10287, 15916, 1794, 16934, 14703, 10818, 16416, 16443, 14679, 4938, 11540, 7800, 11463, 6907, 5430, 7094, 15858, 1507, 10065, 1242, 903, 13662, 7168, 6000, 10623, 11794, 5639, 12142, 6665, 12128, 16823, 6242, 16101, 5650, 8435, 11024, 11390, 17318, 8938, 1204, 11949, 6352, 11686, 2787, 6231, 5022, 4941, 14654, 14116, 5871, 12766, 5019, 11912, 17495, 1107, 1881, 1130, 14145, 16593, 4450, 2352, 15207, 4099, 3971, 3218, 13519, 11760, 546, 16462, 8476, 12632, 12304, 18146, 6854, 8447, 14943, 14420, 3963, 15254, 4152, 9729, 8017, 7666, 10425, 2783, 15026, 6133, 3756, 9477, 19063, 13741, 16318, 196, 9190, 9634]",1,20,20,285,0.7208500278947102, -"[13839, 7992, 17739, 18415, 1310, 413, 5427, 7861, 529, 3558, 18685, 7979, 12235, 2014, 15572, 194, 13830, 10961, 10038, 11529, 14819, 6780, 406, 18436, 13109, 15977, 1108, 3427, 6706, 8748, 18660, 10348, 4820, 14477, 9675, 9831, 5386, 15884, 7832, 17937, 14271, 4929, 7086, 8907, 13297, 8576, 2722, 7214, 18266, 12871, 323, 5529, 1698, 13939, 17382, 18411, 3756, 8447, 18901, 11484, 2516, 806, 2763, 13475, 5311, 9960, 4670, 6207, 16250, 19053, 17154, 820, 16454, 2897, 13310, 1317, 7258, 6831, 12186, 16332, 6543, 17912, 14943, 7297, 16109, 6148, 13761, 12460, 10617, 10446, 17621, 8262, 15307, 5233, 18806, 16040, 14066, 5407, 559, 6522]",0,5,10,150,0.22249835167621848, -"[13980, 2014, 16040, 7258, 13297, 1174, 13471, 6317, 5407, 1883, 18616, 16029, 5448, 9186, 5668, 13444, 8999, 12559, 18477, 2949, 4938, 12741, 957, 11707, 6782, 14780, 3648, 14066, 16624, 13234, 8234, 8755, 18806, 10388, 7924, 8763, 16332, 18502, 8864, 10223, 17265, 5513, 17646, 11820, 3860, 13716, 2284, 1758, 509, 18411, 16512, 8783, 8760, 18956, 2543, 5540, 8795, 9463, 16521, 16587, 10677, 7610, 1430, 15179, 5491, 10863, 9084, 8920, 4846, 7818, 2666, 16578, 284, 5035, 47, 8696, 3468, 3774, 9004, 
18350, 491, 5336, 2516, 17326, 9875, 11363, 15633, 531, 17794, 9154, 12148, 8748, 11742, 9671, 17953, 8910, 10786, 8137, 7992, 6952, 1614, 12075, 14381, 2146, 18645, 17917, 18126, 8927, 3162, 12073, 9779, 9883, 15408, 1022, 2456, 12549, 2862, 14265, 16836, 17269, 19180, 4929, 5943, 650, 5360, 3671, 13742, 16538, 16193, 11385, 10459, 5564, 14130, 19027, 5692, 323, 12984, 19170, 11472, 15695, 11842, 6326, 5716, 3781, 4404, 12578, 10101, 13944, 6851, 9451, 7776, 11123, 11524, 197, 13475, 16565, 10857, 14781, 2632, 1010, 6929, 15839, 19201, 13668, 1821, 17738, 4597, 8905, 7595, 9904, 16253, 13824, 7553, 18059, 5928, 12663, 17551, 12725, 4314, 296]",0,18,18,285,0.22249835167621848, -"[14353, 15207, 9648, 16989, 16579, 3453, 1954, 16868, 19002, 10051, 5943, 4332, 8851, 14754, 1568, 3146, 17911, 6690, 12628, 16650, 7780, 5029, 6631, 15663, 5324, 7901, 7939, 2383, 4564, 11390, 1301, 8916, 15682, 11049, 62, 3577, 16652, 1246, 8608, 15945, 2343, 962, 15498, 1068, 16214, 478, 5471, 14556, 12019, 2352, 6201, 12304, 1640, 4553, 1063, 3683, 11840, 14618, 17387, 13369, 14751, 14683, 11262, 11413, 616, 6164, 18784, 2134, 16505, 3137, 14893, 2592, 8221, 901, 8707, 9687, 19374, 9769, 7377, 2037, 14515, 15916, 18487, 6298, 7560, 351, 2563, 11894, 16380, 15953, 2229, 11971, 13846, 8392, 2049, 918, 10350, 3104, 13623, 12440, 8405, 13677, 13779, 11450, 18768, 17034, 10568, 5639, 11449, 2997, 4505, 18399, 17401, 6180, 15920, 16378, 1767, 9634, 17105, 13525, 14431, 4895, 14207, 2080, 11944, 19273, 5661, 46, 15960, 16770, 14149, 16098, 17415, 3983, 8133, 17192, 12678, 3888, 17537, 1683, 3942, 15133, 8529, 877, 6276, 754, 11761, 5463, 8483, 3287, 150, 3307, 15615, 12479, 14069, 14147, 8653, 8715, 11958, 1057, 12347, 14607, 4411, 1242, 15189, 8043, 14420, 11200, 16772, 5930, 4356, 14857, 8093, 9802, 912, 17538, 1794, 18773, 2463, 231, 19143, 5579, 4798, 10165, 3082, 13354, 15550, 10243, 4811, 5095, 6927, 7518, 10699, 2142, 9040, 3531, 5610, 17634, 1889, 10595, 12943, 17923, 3153, 7174, 2520, 
3677, 1204, 8717, 16318, 4034, 2024, 16305, 3226, 15589, 11814, 7716, 12884, 4959, 6488, 3674, 16637, 14288, 16880, 14110, 17688, 12416, 903, 15952, 1392, 3495, 5618, 13740, 7829, 2244, 18900, 18201, 17438, 4058, 8034, 12827, 17042, 6450, 974, 4609, 6273, 19308, 7742, 9026, 8528, 3856]",1,20,25,265,0.6635390779530355, -"[14381, 12235, 11141, 2239, 12612, 5470, 7861, 4214, 7992, 13820, 13405, 18766, 8576, 1821, 7979, 4923, 12649, 11637, 1752, 9877]",0,5,2,30,0.22249835167621848, -"[14651, 5198, 10423, 7333, 3168, 15298, 17301, 2927, 5174, 5904, 17815, 8209, 5261, 4912, 19001, 10325, 4333, 11478, 18968, 16780, 14651, 5198, 10423, 7333, 3168, 15298, 17301, 2927, 5174, 5904, 17815, 8209, 5261, 4912, 19001, 10325, 4333, 11478, 18968, 16780, 14651, 5198, 10423, 7333, 3168, 15298, 17301, 2927, 5174, 5904, 17815, 8209, 5261, 4912, 19001, 10325, 4333, 11478, 18968, 16780, 14651, 5198, 10423, 7333, 3168, 15298, 17301, 2927, 5174, 5904, 17815, 8209, 5261, 4912, 19001, 10325, 4333, 11478, 18968, 16780, 14651, 5198, 10423, 7333, 3168, 15298, 17301, 2927, 5174, 5904, 17815, 8209, 5261, 4912, 19001, 10325, 4333, 11478, 18968, 16780, 14651, 5198, 10423, 7333, 3168, 15298, 17301, 2927, 5174, 5904, 17815, 8209, 5261, 4912, 19001, 10325, 4333, 11478, 18968, 16780, 14651, 5198, 10423, 7333, 3168, 15298, 17301, 2927, 5174, 5904, 17815, 8209, 5261, 4912, 19001, 10325, 4333, 11478, 18968, 16780, 14651, 5198, 10423, 7333, 3168, 15298, 17301, 2927, 5174, 5904, 17815, 8209, 5261, 4912, 19001, 10325, 4333, 11478, 18968, 16780, 14651, 5198, 10423, 7333, 3168, 15298, 17301, 2927, 5174, 5904, 17815, 8209, 5261, 4912, 19001, 10325, 4333, 11478, 18968, 16780, 14651, 5198, 10423, 7333, 3168, 15298, 17301, 2927, 5174, 5904, 17815, 8209, 5261, 4912, 19001, 10325, 4333, 11478, 18968, 16780]",1,,20,300,0.6355429324948014,0.2 -"[15484, 86, 2494, 14616, 8010, 13430, 9726, 13334, 10387, 7408, 11736, 1683, 17939, 8392, 16534, 13602, 14618, 14254, 2994, 6728, 4272, 10164, 2599, 7463, 16694, 11698, 18724, 
1027, 18055, 13171, 14292, 10093, 9808, 18777, 11256, 128, 4578, 12712, 11542, 12859, 15604, 1534, 13227, 18790, 15438, 11727, 7195, 19335, 14810, 10281, 4547, 6724, 9258, 3898, 4514, 7677, 2564, 5594, 16426, 13814, 14204, 2468, 6182, 306, 18271, 364, 8599, 1049, 5745, 3243, 13833, 18768, 14011, 1351, 11552, 8240, 13890, 17087, 5134, 10072, 19258, 13126, 15255, 9363, 4888, 13583, 6574, 1527, 15513, 8405, 295, 8525, 5179, 5661, 14686, 10218, 11449, 8863, 3885, 13354, 6977, 16980, 10375, 983, 3683, 12820, 12936, 4709, 6752, 1920, 15615, 13300, 285, 14729, 16244, 1208, 9798, 11450, 3023, 1877, 9420, 17190, 17745, 8972, 1915, 13223, 14609, 14949, 17969, 16941, 15075, 5355, 15013, 15046, 17433, 9121, 12436, 19187, 2865, 3531, 7948, 15752, 420, 9905, 8198, 12019, 12994, 5875, 6340, 276, 11324, 456, 10312, 11680, 2229, 18072, 13527, 13369, 5480, 12224, 9134, 4401, 10682, 14288, 9802, 2419, 18993, 16857, 15952, 1660, 15549, 3983, 15651, 2411, 3282, 6414, 9074, 13677, 15608, 10685, 15426, 3161, 2876, 4149, 4913, 2658, 10031, 19371, 13939, 15972, 19426, 16812, 16794, 18972, 1460, 15120, 12762, 11429, 11994, 357]",1,16,20,275,0.7746614596541056, -"[15722, 7240, 12974, 16528, 10425, 17018, 17933, 13118, 10874, 7398, 9965, 8406, 14388, 3136, 15973, 10204, 11894, 7814, 15278, 17453, 803, 8275, 17243, 10573, 18285, 16198, 6656, 13931, 15825, 35, 7940, 2216, 1315, 2896, 2902, 13924, 18453, 2554, 11181, 2109, 13703, 5262, 9322, 18712, 5197, 16347, 14008, 3876, 14918, 1474, 14810, 18903, 858, 16283, 10373, 12259, 2243, 7128, 12978, 11919, 12353, 6859, 5368, 4538, 10859, 7543, 19002, 834, 15975, 1582, 6490, 8950, 18839, 7123, 13306, 15022, 12514, 11929, 5375, 3577, 7356, 14146, 18893, 337, 6932, 12380, 15778, 2114, 15544, 11848, 12236, 605, 11217, 8623, 2804, 2915, 9327, 654, 3968, 16601, 5616, 5406, 10148, 5244, 18455, 976, 15770, 18068, 12812, 8015, 2708, 7663, 5547, 2337, 16229, 14090, 6697, 10389, 10572, 12491]",1,16,12,275,0.7554901861337932, -"[15788, 11188, 3831, 10690, 18742, 
2361, 9810, 7123, 10754, 17493, 5796, 4546, 2708, 2115, 18622, 4225, 16051, 10257, 1365, 11233, 16033, 8656, 11407, 7852, 4544, 8912, 2128, 13743, 1244, 17629, 16129, 8862, 14821, 3408, 16729, 12762, 18341, 3372, 12489, 17216, 11753, 1565, 1045, 6586, 6122, 4939, 15444, 9089, 14915, 4756, 15913, 13844, 3573, 15852, 10435, 4798, 16637, 5090, 10302, 5665, 2557, 13200, 4411, 11112, 14639, 13301, 4163, 2098, 6511, 6527, 7755, 1012, 12251, 9830, 5085, 17399, 8608, 19421, 16924, 9256, 17193, 7184, 4260, 1891, 18528, 17391, 11674, 357, 4876, 16761, 14061, 15513, 12066, 2079, 15735, 17195, 3544, 14194, 14015, 4009]",1,9,10,200,0.7608155398894355, -"[16521, 10663, 16533, 8770, 10558, 11572, 11580, 15961, 14019, 13441]",0,5,1,10,0.22249835167621848, -"[1678, 9884, 854, 9363, 3374, 7136, 11855, 9798, 17725, 17726, 7426, 8404, 15510, 1996, 13604, 2994, 7929, 15615, 17409, 11994, 11024, 19592, 10281, 3663, 13385, 18580, 8525, 2430, 6977, 11641, 9184, 15302, 16599, 13017, 13202, 8041, 10292, 1877, 14483, 13300, 2284, 2304, 16525, 16962, 8669, 10064, 19710, 14686, 14381, 12868, 16604, 3744, 6781, 12527, 285, 5703, 10685, 7358, 13972, 6727, 18437, 11711, 1527, 14729, 10031, 2564, 7677, 12436, 19426, 14971, 1514, 11658, 12885, 7678, 11103, 8898, 7194, 14300, 16177, 13307, 1351, 1152, 9336, 7135, 6340, 13592, 1837, 18993, 4272, 10575, 6851, 19004, 19124, 639, 15484, 86, 17266, 5134, 14616, 276, 10419, 10312, 18055, 18594, 4514, 11736, 13043, 15255, 16465, 15608, 837, 10387, 11765, 11727, 3161, 5179, 8885, 7463, 12712, 19211, 14365, 4509, 10535, 15046, 13334, 13667, 7696, 14949, 13054, 16554, 799, 12602, 5707, 12175, 517, 6728, 12820, 18747, 15231, 7408, 15254, 983, 1218, 15013, 19258, 8863, 15752, 14204, 10503, 4888, 6301, 8959, 8010, 1915, 18790, 12997, 16534, 8806, 15426, 10833, 18724, 9017, 2766, 7782, 15952, 10133, 19455, 16677, 18777, 10375, 7195, 17939, 14010, 10093, 16, 2658, 10218, 13430, 16169, 19335, 456, 17087, 13227, 11942, 1058, 11993, 1027, 15057, 14775, 10175, 19371, 
18972, 16611, 6724, 13342, 2419, 15969, 4939, 5862, 12359, 13527, 13126, 4032, 10712, 15075, 10072, 5858, 622, 7719, 6810, 1660, 18546, 295, 15673, 17433, 7001, 18271, 9134, 17745, 15972, 11698, 15604, 2494, 10164, 5371, 2411, 8198, 128, 16812, 8412, 9505, 15651, 10433, 15438, 16980, 10969, 19470, 11552, 13171, 7980, 10048, 16244, 15120, 8599, 2599, 11542, 13223, 2876, 6574, 4548, 4913, 6088, 876, 14011, 7948, 12859, 16694, 1920, 9905, 17969, 4102, 9304, 1460, 16606, 5079, 3889, 1049, 9420, 11523, 420, 364, 6752, 5745, 11256, 5875, 9258, 1534, 11324, 6182, 12224]",1,16,28,300,0.7787188720393569, -"[17122, 5500, 15841, 11840, 17387, 13400, 9864, 16982, 14556, 9390, 14525, 12789, 12142, 14857, 9424, 15542, 7377, 15448, 5336, 7388, 16305, 13996, 11989, 16250, 8221, 14484, 5497, 17409, 1416, 6180, 5402, 3453, 7089, 2821, 11822, 1144, 11814, 11269, 17192, 5324, 9723, 4581, 15008, 17034, 10350, 5661, 14515, 15891, 1794, 12978, 15225, 1439, 1068, 9634, 3754, 7168, 1242, 2304, 6000, 1204, 5730, 5507, 4868, 13630, 18949, 5639, 10254, 12060, 918, 14893, 12479, 18734, 10824, 16443, 7094, 10818, 13846, 1265, 14420, 3307, 11099, 14008, 2383, 546, 4450, 12884, 574, 11024, 5550, 18357, 733, 8707, 5430, 15727, 2951, 17615, 2594, 2114, 16989, 10065]",1,5,10,200,0.7230308870517828, -"[17474, 2092, 5868, 677, 16624, 13500, 169, 8927, 7308, 4818, 4020, 4064, 47, 13981, 15035, 2659, 3336, 2986, 9762, 4723]",0,4,2,30,0.22249835167621848, -"[18200, 5674, 5612, 4267, 4850, 7608, 11506, 11232, 18287, 8512, 1817, 4628, 6369, 12274, 16343, 6841, 18517, 15618, 18769, 15971, 10739, 2243, 10434, 15147, 18838, 16872, 6093, 2871, 14479, 6702, 12452, 6851, 4245, 3853, 10718, 4162, 5159, 7228, 19516, 19002]",1,5,4,80,0.6555256884921641, -"[18266, 19291, 18411, 18436, 1539, 17739, 11233, 1317, 2150, 9831, 17063, 18685, 11099, 9657, 806, 13440, 8442, 3427, 6502, 17937]",0,3,2,70,0.22249835167621848, -"[18411, 8138, 10816, 4581, 10348, 16233, 18736, 5491, 34, 12540, 11024, 18436, 8027, 3558, 7750, 
14066, 10047, 5533, 519, 13993, 10810, 7357, 4938, 14574, 3875, 2869, 13013, 15858, 14270, 6543, 14105, 18685, 15880, 5958, 13953, 3756, 2666, 5374, 1539, 3119, 18084, 8410, 17928, 2783, 449, 1585, 4221, 12229, 4663, 5508, 19006, 18087, 4389, 6529, 15537, 14198, 10929, 1750, 16229, 4885, 17911, 14819, 5219, 1783, 11762, 18839, 11099, 4489, 19145, 3185, 7348, 6415, 19633, 12127, 12790, 1085, 2952, 16010, 3665, 18703, 4924, 13206, 5011, 9154, 5505, 300, 10045, 12871, 5288, 14632, 16819, 19032, 1011, 15663, 12765, 10214, 17723, 17405, 9073, 6118, 11450, 4497, 6939, 17952, 4524, 3087, 3849, 18230, 5241, 9075, 6831, 3577, 3427, 5005, 8244, 15627, 19489, 14917, 18536, 14472, 816, 8130, 14246, 16724, 7772, 2841, 9918, 14863, 12513, 6275, 6833, 3399, 12076, 3800, 936, 12923, 3973, 9322, 7062, 14544, 17246, 896, 7796, 2565, 12920, 13824, 10751, 16665, 10509, 7165]",1,3,15,150,0.7594968808642288, -"[1843, 4194, 3858, 6645, 2199, 2725, 9998, 2577, 18821, 6196, 14442, 5467, 1962, 6123, 14756, 0, 11394, 6721, 19460, 13364, 15664, 14276, 8629, 7523, 607, 16384, 16698, 13256, 9310, 18242, 6238, 17779, 10141, 4791, 4626, 869, 973, 17392, 15005, 11752, 5058, 1462, 15790, 15128, 6296, 17035, 1222, 15053, 11168, 18555, 16702, 14777, 15442, 532, 2981, 15552, 543, 1525, 3782, 8140, 3075, 17684, 6331, 15782, 1308, 1303, 9878, 1800, 7636, 1519, 1239, 4956, 847, 10657, 14203, 11629, 13173, 14721, 12348, 17896, 5532, 12590, 6501, 17854, 15698, 10252, 17253, 3049, 1095, 18671, 13544, 3623, 6568, 8775, 17187, 16982, 3773, 186, 3429, 7096, 19129, 18211, 15269, 15683, 2035, 8626, 16260, 189, 5911, 884, 8874, 12668, 10750, 9382, 8288, 7069, 11544, 8006, 15708, 18210, 5853, 16339, 13311, 13359, 13921, 8689, 12234, 9590, 18133, 2670, 15934, 1401, 13050, 1928, 16020, 7709, 7723, 13160, 4733, 9512, 17870, 10070, 4374, 13074, 2250, 7178, 15587, 17727, 10287, 2439, 18040, 4629, 6651, 9715, 13630, 11450, 16681, 15455, 1743, 12521, 15715, 11504, 14376, 14131, 6105, 17191, 12539, 9532, 4559, 2561, 2607, 
15965, 1737, 7361, 18950, 12414, 10765, 6281, 6420, 9210, 15659, 2546, 15727, 10735, 13557, 457, 7154, 7729, 11024, 19518, 6269, 14602, 11003, 10494, 18635, 18951, 19034, 3010, 16295, 12568, 10260, 15843, 3605, 10493, 3275, 6640, 11041, 15542, 14573, 18241, 17838, 4192, 6496, 14516, 11944, 9712, 17595, 5782, 13028, 11370, 17523, 13819, 18583, 12529, 14281, 6787, 4792, 2021, 15043, 7538, 6681, 15755, 8887, 17185, 4689, 7283, 11389, 16824, 7465, 10104, 4783, 14832, 17966, 18888, 2664, 6744, 7918, 4797, 14378, 6353]",1,14,25,285,0.7199371101080286, -"[18645, 10433, 15385, 2835, 12457, 10431, 5477, 16880, 969, 6243, 14838, 7168, 16789, 9808, 11369, 3781, 5048, 4522, 2668, 4150, 1938, 5993, 6869, 8037, 9612, 1068, 14061, 10940, 17399, 5872, 15240, 6690, 18276, 18697, 18173, 16631, 18816, 204, 7560, 9846, 14420, 13779, 1977, 9572, 15615, 2400, 9864, 8093, 11814, 11293, 11019, 754, 12955, 10770, 12059, 9723, 17415, 3671, 7901, 17510, 13906, 5507, 12882, 16777, 5859, 12333, 16446, 2789, 18221, 1832, 19044, 17034, 18837, 1504, 14124, 13635, 2862, 15384, 7052, 15954, 11262, 395, 5500, 951, 11995, 5095, 13859, 574, 12559, 2532, 10037, 19334, 5559, 4051, 13014, 954, 8707, 14549, 1353, 5245, 16318, 13262, 19163, 18736, 645, 18726, 330, 11450, 2500, 14064, 1840, 10516, 481, 11885, 2134, 17978, 10112, 18649, 5336, 17871, 2428, 12088, 322, 10406, 15701, 17552, 5269, 17923, 5831, 11746, 14288, 2662, 10935, 5497, 2361, 5324, 5661, 11024, 12005, 5776, 1416, 12297, 19292, 6658, 10350, 4326, 1794, 13573, 4981, 6402, 5943, 7401, 4480, 6501, 7273, 17993, 14387, 17849, 12153, 7716, 6698, 4752, 483, 2581, 7436, 16641, 17373, 14860, 17832, 5078, 8293, 953, 11449, 12019, 16901, 16483, 4868, 12478, 15524, 13786, 2692, 7393, 13938, 14882, 5496, 19129, 7083, 15024, 12970, 13666, 8502, 15093, 4216, 18965, 8762, 9439, 15231, 2992, 2148, 15891, 12480, 10513, 4038, 17334, 3963, 4505, 17859, 2890, 7833, 19388, 4518, 2613, 19687, 13355, 6186, 14147, 8839, 4730, 13484, 2188, 4812, 3015, 6242, 5397, 
10270, 14515, 8221, 11082, 6912, 13285, 14036, 19241, 10858, 9390, 9943, 4691, 6309, 14945, 4166, 12789, 5345, 14108, 8710, 13514, 4241, 4937, 1568, 18793, 18966, 9156]",1,18,25,265,0.6721610792716944, -"[19005, 18825, 5471, 15821, 13068, 16541, 17687, 15218, 12349, 8621, 14119, 10005, 16527, 787, 13853, 18823, 18007, 284, 18242, 16460, 7352, 9589, 15559, 18745, 8444, 99, 7366, 3816, 7275, 16434, 14896, 5665, 17438, 4537, 10358, 5166, 9031, 10441, 1402, 14827, 5698, 3782, 12160, 12199, 9017, 14781, 10204, 3168, 16729, 6, 1914, 3255, 518, 928, 16065, 9652, 14956, 10771, 17178, 16423, 18895, 18604, 3304, 10600, 2031, 5920, 11536, 4802, 18898, 11783, 1273, 16903, 1581, 12259, 10866, 17408, 11335, 19291, 5421, 6798, 4445, 10229, 1084, 12647, 2100, 3932, 10666, 1721, 17834, 14816, 3567, 9695, 3493, 7553, 10721, 16569, 5993, 15971, 16994, 4103, 13966, 8862, 4833, 1842, 2280, 228, 609, 13723, 7234, 1787, 14216, 88, 10201, 16579, 1091, 5673, 16347, 4616, 11193, 1321, 6506, 7008, 4939, 15201, 10670, 9607, 16548, 11936, 16608, 11842, 4893, 17522, 2707, 16402, 2128, 8208, 4609, 12346, 3270, 16070, 11665, 17760, 4871, 4141, 9386, 7407, 10530, 1544, 11895, 8998, 18440, 16197, 14805, 19323, 11917, 3799, 18644, 7110, 16637, 18121, 14077, 7414, 8343, 10185, 16560, 5578, 3079, 6342, 7879, 1329, 16258, 9770, 4586, 11014, 5157, 15852, 3627, 3179, 10442, 2170]",1,14,18,250,0.7690825176243851, -"[2338, 2390, 12612, 1752, 10340, 17917, 7992, 5470, 5716, 742, 2239, 15095, 2862, 7553, 8576, 7861, 4214, 16981, 2543, 15884, 2182, 13820, 323, 4923, 8864, 6949, 4404, 12075, 6326, 1614, 10847, 1430, 5540, 17646, 2359, 18766, 11815, 2099, 13037, 6826]",0,2,4,40,0.22249835167621848, -"[2373, 3667, 17684, 6296, 12786, 11173, 4559, 9473, 18867, 5316, 12200, 3315, 18121, 16997, 13466, 7896, 6095, 13883, 12340, 13849, 9941, 3268, 8605, 17178, 11139, 18824, 15603, 6883, 7358, 6981, 5306, 16748, 14442, 12592, 11655, 16513, 9226, 17510, 4791, 8873, 431, 12216, 6568, 7561, 3190, 16383, 7366, 1544, 2725, 
12219, 18897, 3757, 8468, 6660, 9977, 3177, 15498, 13256, 1843, 18928, 7711, 8885, 10088, 1469, 3135, 7379, 5827, 19003, 19247, 13305, 1754, 2300, 5665, 15503, 18960, 17273, 16654, 5307, 10872, 18211, 2560, 14924, 4339, 13263, 867, 2591, 12924, 15005, 2262, 15984, 5369, 1129, 6377, 18145, 14193, 4345, 10087, 6733, 19705, 13142, 11790, 17054, 16816, 5826, 14621, 18263, 18745, 12408, 3642, 10658, 15882, 14502, 2090, 11304, 18853, 9294, 10771, 3013, 16004, 5963, 3617, 5982, 10932, 12823, 5166, 15428, 11134, 16775, 6829, 4957, 3252, 17629, 16158, 8621, 1455, 9809, 10933, 1530, 15793, 8638, 3129, 4576, 1378, 411, 18463, 10368, 10301, 17277, 2527, 16051, 15885, 4098, 12793, 18986, 1125, 1659, 3093, 13586, 6092, 14141, 2507, 18122, 6288, 8170, 18595, 7180, 16697, 8584, 4153, 15614, 19017, 6342, 12955, 1833, 17619, 13505, 16125, 15324, 16302, 15458, 7124, 12416, 16960, 8974, 6953, 14536, 1796, 16402, 40, 8617, 1710, 6127, 12469, 12693, 17774, 17656, 13938, 7826, 13272, 5058, 11399, 10665, 5702, 9029, 2344, 4184, 16013, 14591, 5413, 3692, 15183, 12002, 784, 17374, 17090, 12643, 15430, 7404, 2248, 4309, 256, 14485, 9432, 3602, 10358, 8987, 2812, 15477, 234, 417, 2894, 3121, 1262, 8651, 99, 1592, 8084, 4036, 18576, 1086, 7693, 11818, 996, 11981, 4764, 5849, 17221, 16888, 58, 106, 6942, 328, 15069, 55, 10042, 17591, 9790, 15868, 16493, 19107, 132, 19200, 15610, 4706, 19268, 10232, 3237, 17673, 18982, 5891, 1041, 3890, 4167, 2820, 4883, 12588, 10645, 15564, 2905, 15176]",1,20,28,300,0.6790586803266216, -"[2658, 11855, 6340, 7695, 11658, 15944, 12997, 4367, 3240, 10064, 10712, 1920, 10375, 18993, 11698, 19335, 8525, 8198, 16941, 15118, 13814, 17087, 19425, 1027, 10175, 15013, 16606, 6412, 285, 13667, 2658, 11855, 6340, 7695, 11658, 15944, 12997, 4367, 3240, 10064, 10712, 1920, 10375, 18993, 11698, 19335, 8525, 8198, 16941, 15118, 13814, 17087, 19425, 1027, 10175, 15013, 16606, 6412, 285, 13667, 2658, 11855, 6340, 7695, 11658, 15944, 12997, 4367, 3240, 10064, 10712, 1920, 10375, 
18993, 11698, 19335, 8525, 8198, 16941, 15118, 13814, 17087, 19425, 1027, 10175, 15013, 16606, 6412, 285, 13667, 2658, 11855, 6340, 7695, 11658, 15944, 12997, 4367, 3240, 10064, 10712, 1920, 10375, 18993, 11698, 19335, 8525, 8198, 16941, 15118, 13814, 17087, 19425, 1027, 10175, 15013, 16606, 6412, 285, 13667, 2658, 11855, 6340, 7695, 11658, 15944, 12997, 4367, 3240, 10064, 10712, 1920, 10375, 18993, 11698, 19335, 8525, 8198, 16941, 15118, 13814, 17087, 19425, 1027, 10175, 15013, 16606, 6412, 285, 13667, 2658, 11855, 6340, 7695, 11658, 15944, 12997, 4367, 3240, 10064, 10712, 1920, 10375, 18993, 11698, 19335, 8525, 8198, 16941, 15118, 13814, 17087, 19425, 1027, 10175, 15013, 16606, 6412, 285, 13667, 2658, 11855, 6340, 7695, 11658, 15944, 12997, 4367, 3240, 10064, 10712, 1920, 10375, 18993, 11698, 19335, 8525, 8198, 16941, 15118, 13814, 17087, 19425, 1027, 10175, 15013, 16606, 6412, 285, 13667, 2658, 11855, 6340, 7695, 11658, 15944, 12997, 4367, 3240, 10064, 10712, 1920, 10375, 18993, 11698, 19335, 8525, 8198, 16941, 15118, 13814, 17087, 19425, 1027, 10175, 15013, 16606, 6412, 285, 13667, 2658, 11855, 6340, 7695, 11658, 15944, 12997, 4367, 3240, 10064, 10712, 1920, 10375, 18993, 11698, 19335, 8525, 8198, 16941, 15118, 13814, 17087, 19425, 1027, 10175, 15013, 16606, 6412, 285, 13667, 2658, 11855, 6340, 7695, 11658, 15944, 12997, 4367, 3240, 10064, 10712, 1920, 10375, 18993, 11698, 19335, 8525, 8198, 16941, 15118, 13814, 17087, 19425, 1027, 10175, 15013, 16606, 6412, 285, 13667]",1,,30,300,0.6761170563473144,1.5 -"[2725, 4222, 14019, 7110, 18576, 15183, 1833, 15375, 10558, 8788, 15664, 7815, 5605, 15882, 884, 19246, 5058, 3617, 15820, 14853, 38, 407, 8770, 12469, 2981, 4211, 7303, 1459, 16079, 16557, 2725, 4222, 14019, 7110, 18576, 15183, 1833, 15375, 10558, 8788, 15664, 7815, 5605, 15882, 884, 19246, 5058, 3617, 15820, 14853, 38, 407, 8770, 12469, 2981, 4211, 7303, 1459, 16079, 16557, 2725, 4222, 14019, 7110, 18576, 15183, 1833, 15375, 10558, 8788, 15664, 7815, 5605, 
15882, 884, 19246, 5058, 3617, 15820, 14853, 38, 407, 8770, 12469, 2981, 4211, 7303, 1459, 16079, 16557, 2725, 4222, 14019, 7110, 18576, 15183, 1833, 15375, 10558, 8788, 15664, 7815, 5605, 15882, 884, 19246, 5058, 3617, 15820, 14853, 38, 407, 8770, 12469, 2981, 4211, 7303, 1459, 16079, 16557, 2725, 4222, 14019, 7110, 18576, 15183, 1833, 15375, 10558, 8788, 15664, 7815, 5605, 15882, 884, 19246, 5058, 3617, 15820, 14853, 38, 407, 8770, 12469, 2981, 4211, 7303, 1459, 16079, 16557, 2725, 4222, 14019, 7110, 18576, 15183, 1833, 15375, 10558, 8788, 15664, 7815, 5605, 15882, 884, 19246, 5058, 3617, 15820, 14853, 38, 407, 8770, 12469, 2981, 4211, 7303, 1459, 16079, 16557, 2725, 4222, 14019, 7110, 18576, 15183, 1833, 15375, 10558, 8788, 15664, 7815, 5605, 15882, 884, 19246, 5058, 3617, 15820, 14853, 38, 407, 8770, 12469, 2981, 4211, 7303, 1459, 16079, 16557, 2725, 4222, 14019, 7110, 18576, 15183, 1833, 15375, 10558, 8788, 15664, 7815, 5605, 15882, 884, 19246, 5058, 3617, 15820, 14853, 38, 407, 8770, 12469, 2981, 4211, 7303, 1459, 16079, 16557, 2725, 4222, 14019, 7110, 18576, 15183, 1833, 15375, 10558, 8788, 15664, 7815, 5605, 15882, 884, 19246, 5058, 3617, 15820, 14853, 38, 407, 8770, 12469, 2981, 4211, 7303, 1459, 16079, 16557, 2725, 4222, 14019, 7110, 18576, 15183, 1833, 15375, 10558, 8788, 15664, 7815, 5605, 15882, 884, 19246, 5058, 3617, 15820, 14853, 38, 407, 8770, 12469, 2981, 4211, 7303, 1459, 16079, 16557]",1,,30,300,0.6291525079880307,0.6 -"[2933, 3538, 17349, 3705, 11494, 2894, 58, 2923, 1687, 1281, 1886, 13976, 1821, 2182, 11743, 11937, 3617, 15955, 8740, 1853]",0,3,2,30,0.22249835167621848, -"[3245, 9708, 8009, 4833, 15711, 8583, 6702, 16903, 12876, 3890, 15479, 11460, 13036, 16214, 8281, 18509, 14183, 3240, 5023, 17760, 12737, 16569, 7366, 11136, 18644, 18745, 16548, 5790, 10739, 16292, 1321, 9031, 5578, 19346, 2528, 15420, 3201, 19060, 12306, 19413, 11454, 11407, 4893, 14485, 17479, 12365, 5597, 19112, 14816, 10428, 9581, 16051, 99, 11461, 12253, 16816, 4042, 
1787, 18976, 3244, 16480, 4609, 16608, 18584, 5994, 401, 17438, 16705, 3642, 15458, 15522, 18558, 1872, 18007, 16347, 1972, 14930, 7057, 18440, 12096, 8743, 2623, 19291, 12964, 14915, 1973, 12490, 10431, 9685, 2170, 9670, 15884, 4133, 16634, 8749, 8343, 3124, 12647, 7561, 5678, 14216, 512, 17044, 17339, 15038, 3179, 12618, 13795, 1701, 17293, 16358, 1945, 13934, 9017, 7815, 5180, 3528, 11839, 4251, 4232, 46, 1402, 1615, 13157, 609, 18604, 10530, 6204, 13466, 11842, 7414, 4791, 3932, 14265, 15331, 3816, 17524, 903, 9222, 16314, 4514, 16560, 7229, 928, 1482, 7275, 1084, 5993, 3713, 12305, 12944, 4748, 16490, 2281, 19247, 13263, 3365, 16065, 15201, 4409, 6506, 13171, 13723, 7110, 12489, 15885, 7716, 9637, 13306, 16506, 168, 7638, 284, 14997, 13215, 8913, 16312, 13371, 3981, 12839, 14754, 12617, 12754, 14366, 3876, 16729, 7301, 17550, 18748, 16511, 8932, 1611, 19462, 12346, 3627, 7044, 14770, 8421, 2550, 7553]",1,14,20,250,0.7438251255261956, -"[3287, 4005, 2199, 11620, 2241, 4371, 1243, 4364, 7155, 15070, 5307, 1451, 2673, 14924, 9110, 16748, 13611, 12340, 12606, 11450, 2037, 1008, 6095, 1843, 12785, 734, 15810, 1708, 3757, 12444, 12573, 16697, 14206, 17869, 12832, 5306, 11725, 4860, 13256, 7712, 6407, 9268, 8093, 9494, 8647, 9473, 19061, 8175, 18554, 18728, 16826, 18928, 12851, 2999, 4967, 2309, 301, 1244, 11914, 5565, 13376, 973, 11417, 19003, 7322, 9386, 336, 13332, 2708, 2361, 12529, 14459, 6501, 2542, 2981, 6754, 17948, 17064, 9939, 13872]",1,5,8,80,0.659075924, -"[3705, 5489, 2933, 14707, 2191, 15572, 13920, 2923, 3935, 1687]",0,4,1,30,0.22249835167621848, -"[3713, 7712, 8772, 14737, 2692, 2571, 7273, 1842, 8968, 60, 4893, 687, 11390, 8372, 1119, 15851, 17961, 6389, 12664, 1406, 3215, 15207, 1521, 6657, 11582, 12457, 985, 11450, 6407, 4849, 2116, 2273, 7272, 8894, 3767, 1302, 11449, 786, 19218, 1237, 9754, 8608, 782, 1903, 611, 1835, 12523, 3228, 17114, 16535, 1205, 8277, 9108, 18445, 2461, 16648, 15278, 2605, 16641, 17399, 5831, 14640, 13083, 18873, 10491, 435, 
16121, 97, 12333, 9784, 1695, 11101, 5237, 9439, 9765, 9965, 2019, 19163, 14387, 1046, 10716, 9146, 979, 1769, 4750, 535, 801, 4216, 7393, 8549, 5966, 17196, 14510, 4241, 1616, 2689, 13725, 10770, 2137, 2789, 14566, 2401, 2523, 17366, 8839, 3570, 4467, 10403, 18786, 17373, 8698, 4084, 7740, 14142, 14177, 9105, 16697, 14124, 11082, 11034, 922, 1934, 7670, 4358, 8073, 10742, 17510, 4418, 14061, 18388, 16901, 9315, 8987, 6512, 12445, 2610, 3517, 16431, 10858, 17439, 3037, 13479, 2655, 1941, 11059, 263, 1889, 17772, 11995, 12589, 17574, 8773, 14036, 5492, 17967, 10761, 17662, 2388, 17361, 16547, 1313, 6611, 17552, 7927, 1635, 7865, 15072, 1172, 4981, 8080, 11054, 15506, 8488, 3439, 16368, 15093, 5441, 6711, 19264, 11496, 8813, 11586, 5414, 4070, 17505, 14551, 7838, 8097, 15498, 5898, 3819, 6248, 17280, 3950, 12247, 15223, 4492, 909, 10297, 8199]",1,16,20,250,0.6545113353958513, -"[3858, 6296, 15664, 12202, 10344, 11752, 4733, 9998, 1843, 16295]",0,3,1,60,0.22249835167621848, -"[3992, 6499, 6930, 15468, 19125, 14756, 14128, 18099, 11394, 3782, 17440, 18785, 2873, 4755, 7659, 4585, 18694, 17535, 10141, 13322]",0,3,2,50,0.2819394431201501, -"[4064, 14044, 10850, 10163, 8696, 1437, 13703, 5868, 4296, 15975, 11604, 8207, 16106, 6454, 677, 18624, 11217, 9278, 9322, 12825, 1448, 18956, 14390, 8787, 18512, 3539, 11012, 14999, 834, 13815, 10766, 3433, 8780, 3575, 12380, 8275, 16921, 2659, 8449, 18400, 2936, 9299, 2281, 11769, 2819, 4299, 16866, 13234, 9775, 12292, 1577, 13981, 2337, 7099, 10977, 3077, 6118, 11747, 16461, 7434, 15325, 340, 17474, 5815, 11169, 5375, 616, 165, 16538, 18314, 14632, 463, 4986, 10572, 490, 5693, 7695, 10683, 5462, 5197, 10862, 15446, 755, 2367, 10861, 11024, 4065, 12634, 10150, 5890, 2994, 18683, 3923, 5944, 15776, 13714, 16680, 5953, 141, 19435, 3385, 11229, 6412, 3633, 9946, 17767, 14908, 16137, 11520, 5457, 4597, 17553, 7417, 12074, 18285, 1526, 3665, 2092, 3024, 12499, 2420, 2993, 13118, 620, 5051, 15778, 9021, 10147, 14815, 11349, 15174, 9371, 
115, 10407, 7076, 18915, 19602, 17453, 13924, 5887, 8656, 9579, 6184, 17566, 7356, 4538, 17756, 7571, 11882, 12359, 18218, 1664, 1582, 3303, 16182, 10860, 17063, 3340, 4930, 15253, 19507, 18050, 9218, 15544, 1684, 19464, 17665, 7415, 17303, 19096, 3704, 8159, 3261, 775, 7030, 13374, 3336, 9286, 18893, 18735]",0,14,18,285,0.22249835167621848, -"[4581, 5336, 2428, 1144, 5500, 1439, 2821, 5324, 5497, 5195, 2951, 5661, 5507, 3721, 574, 2361, 19075, 2262, 3517, 14821, 636, 3187, 2477, 8763, 16650, 15307, 2783, 16953, 7373, 12109, 9576, 14805, 16615, 3307, 12774, 15970, 1416, 3453, 16521, 19166, 8862, 14266, 1068, 1881, 754, 16533, 13031, 10575, 19679, 17191, 17122, 12240, 17300, 1345, 15455, 2594, 15531, 16967, 5896, 7623, 19217, 15982, 18023, 11024, 6833, 6319, 47, 16297, 912, 11107, 12242, 16014, 3343, 2577, 2304, 12271, 3754, 16384, 5095, 918, 17613, 2175, 1265, 17198, 10995, 18521, 13335, 7110, 7847, 8262, 12182, 1711, 12926, 28, 19259, 7620, 18040, 2368, 18118, 16985, 3385, 12965, 14977, 4193, 1061, 3126, 12802, 18274, 5102, 4064, 5019, 4450, 1242, 2352, 15841, 6766, 733, 1484, 5147, 15427, 1204, 18440, 3102, 3971, 7504, 7428, 5639, 14682, 3840, 4868, 7743, 3963, 13542, 617, 6207, 13777, 17951, 4542, 1612, 18279, 11122, 5608, 2588, 17350, 16990, 5807, 10801, 5402, 5577, 1882, 10937, 3162, 1221, 5652, 11304, 18366, 5965, 5483, 14815, 12001, 15214, 1794, 16060, 2383, 12989, 546, 14195, 12986, 15424, 10182, 17637, 15984, 9902, 13048, 15904, 5430, 13170, 10767, 15907, 13040, 7624, 6125, 421, 1510, 8756, 2280, 4832, 14611, 10277, 14537, 14809, 7681, 3946, 19367, 9055, 7083, 10589, 5814, 957, 10046]",1,3,20,200,0.697824213, -"[4638, 1895, 2692, 17341, 4599, 4638, 1895, 2692, 17341, 4599, 4638, 1895, 2692, 17341, 4599, 4638, 1895, 2692, 17341, 4599, 4638, 1895, 2692, 17341, 4599, 4638, 1895, 2692, 17341, 4599, 4638, 1895, 2692, 17341, 4599, 4638, 1895, 2692, 17341, 4599, 4638, 1895, 2692, 17341, 4599, 4638, 1895, 2692, 17341, 4599]",1,,5,150,0.6209362479078967,0.6 
-"[4673, 5685, 5525, 8572, 6755, 5792, 6634, 3432, 15169, 2016]",0,3,1,10,0.22249835167621848, -"[4936, 18271, 15615, 1351, 1027, 128, 14616, 11658, 6724, 15585, 12712, 17929, 13334, 11736, 2494, 1087, 2994, 18942, 6728, 2430, 4844, 15118, 15254, 10064, 9017, 4918, 12820, 2564, 14729, 8669, 9751, 4016, 6211, 6781, 18724, 5858, 16534, 10093, 19004, 14300, 19211, 16177, 15438, 10281, 12883, 4149, 6851, 4514, 2848, 7408, 18777, 4120, 11727, 3663, 10164, 13430, 7920, 18790, 6508, 15484, 7195, 16200, 7463, 5557, 15231, 15915, 9592, 2766, 837, 9363, 11765, 15673, 16554, 517, 16465, 15929, 7135, 2260, 16980, 11429, 19455, 7677, 10503, 16604, 622, 14686, 15255, 3161, 13126, 10048, 6977, 11698, 10535, 9336, 13163, 295, 5134, 13168, 15426, 8525, 18055, 12706, 18972, 125, 12359, 16611, 10387, 8010, 11641, 14204, 11994, 12994, 11855, 14483, 285, 15120, 4548, 1877, 19335, 19171]",1,14,12,275,0.7389055130090785, -"[5324, 19032, 3453, 7889, 2673, 11450, 2784, 6952, 10521, 14647, 10065, 4371, 2838, 5888, 3351, 14796, 7776, 4956, 6715, 18332, 10513, 6236, 9865, 17530, 142, 1786, 5477, 8632, 18548, 2670, 10821, 15070, 16339, 11894, 11312, 3886, 11914, 1439, 14417, 16538, 8780, 2255, 4758, 11417, 3671, 16817, 6874, 10297, 8331, 15152, 6693, 12593, 13859, 11184, 16089, 6331, 6452, 11907, 15476, 17242, 2577, 18743, 17965, 1584, 12836, 8037, 13325, 773, 18888, 6711, 5048, 2862, 17600, 5853, 15790, 969, 8081, 779, 8093, 9346, 18162, 15385, 12443, 9784, 7118, 1494, 966, 12256, 462, 6869, 16260, 18519, 16631, 15810, 16967, 10738, 11142, 9163, 19044, 9455, 1934, 18858, 18782, 4946, 6789, 10523, 9148, 15480, 62, 3246, 16425, 1386, 15639, 12875, 19492, 10949, 1840, 3037, 3366, 2210, 17176, 16814, 17542, 245, 14531, 9404, 13005, 8176, 13333, 12935, 10385, 3305, 4961, 15242, 12427, 15804, 6220, 19084, 8308, 17253, 4274, 18904, 13242, 17130, 10304, 1832, 3005, 753, 3031, 7821, 7242, 5408, 8509, 9340, 4846, 16982, 7828, 12946, 2874, 1106, 15784, 1761, 6364, 9794, 4569, 15195, 14505, 15831, 19249, 
18337, 15733, 986, 6224, 7297, 5872, 5705, 16513, 1462, 18645, 11130]",1,14,18,295,0.6954404828320738, -"[5661, 6180, 7377, 1068, 2951, 754, 3385, 1143, 1345, 14448, 5324, 1144, 5430, 574, 546, 6201, 3187, 1416, 7388, 5500, 2821, 2783, 1242, 5507, 6164, 4221, 14663, 8017, 18415, 11024, 9390, 5730, 2352, 6000, 4450, 5497, 4581, 2383, 3963, 2304, 5019, 4938, 6690, 11044, 1881, 9424, 8221, 733, 5639, 5402, 8626, 912, 3971, 7089, 14105, 19355, 5095, 2428, 4868, 1265, 7560, 5336, 3756, 5147, 7094, 1204, 8707, 8093, 3721, 7258, 1794, 13830, 8447, 18660, 3453, 10446, 8138, 8748, 16040, 11262, 3526, 13761, 15663, 3307, 6242, 2594, 8470, 7168, 7759, 8028, 17415, 1439, 918, 14066, 6082, 13475, 2516, 8537, 10617, 18839, 2897, 10063, 1539, 10307, 14679, 17723, 18266, 15572, 15084, 3558, 15444, 18436, 5523, 10441, 7237, 16250, 324, 10123, 19033, 11762, 19291, 14718, 9687, 8621, 3577, 19154, 17063, 11842, 16996, 12762, 12632, 2138, 18685, 5120, 10261, 18411, 6008, 10863, 14893, 6284, 10961, 11810, 1085, 17739, 5386, 4437, 10612, 16165, 14017, 1108, 15833, 6151, 3427, 17540, 6969, 11852, 16229, 8790, 16007, 2150, 14639, 8656, 7576, 13440, 5261, 19312, 5186, 17678, 14915, 8888, 16656, 6148, 9568, 9073, 8192, 2773, 11536, 6207, 14563, 1511, 17937, 13045, 18455, 6169, 2284, 12678, 3517, 12213, 16373, 11233, 2862, 9657, 9370, 6167, 19010, 5880, 370, 1614, 9810, 15157, 15236, 7707, 3702, 3535, 10742, 8879, 10900, 16974, 4555, 2557, 10435, 6993, 15975, 7401, 8350, 11776, 16771, 14437, 13711, 15325, 11099, 17120, 16451, 15942, 8471, 18630, 6902, 13453, 11785, 3301, 14043, 6398, 6831, 16579, 12765, 12871, 333, 6349, 1317, 8875, 2600, 18786, 8442, 18901, 8263, 13471, 3405, 8751, 9831, 806]",1,14,25,300,0.8003753106456357, -"[5943, 16777, 3146, 5500, 18190, 18276, 5930, 6273, 8707, 15826, 2134, 62, 1242, 7780, 11034, 6631, 5618, 5324, 18736, 6180, 6690, 7939, 951, 3683, 8608, 2352, 6201, 3453, 1439, 2689, 10649, 5095, 15639, 10165, 616, 4564, 2383, 962, 8221, 754, 3307, 18086, 3577, 7829, 
6164, 1204, 17399, 9328, 1683, 3942, 1246, 3104, 5639, 4581, 17308, 2024, 15024, 7293, 7377, 5029, 478, 4356, 16735, 18554, 5661, 13952, 11024, 12278, 8133, 8405, 1767, 15721, 14912, 7901, 1057, 6488, 19408, 3674, 5507, 2080, 13317, 6787, 12037, 8715, 10010, 4411, 12256, 15911, 877, 11450, 19596, 2343, 15050, 11894, 12249, 3677, 4051, 12478, 4522, 4238]",1,7,10,200,0.722422275, -"[616, 5029, 11894, 4356, 8851, 17122, 15841, 17859, 1416, 3146, 2134, 14515, 3082, 8221, 16250, 14353, 11262, 1683, 16416, 4553, 11390, 8608, 12440, 4505, 9648, 7939, 7780, 18736, 1063, 18773, 16305, 733, 1242, 12628, 6201, 10595, 12019, 14857, 19002, 17387, 546, 12789, 6000, 3733, 5930, 13996, 2352, 9723, 6039, 14147, 5497, 17034, 17415, 16770, 3137, 11822, 8483, 4581, 9864, 2080, 8017, 1246, 2997, 8470, 18035, 11049, 18729, 18717, 5402, 1767, 15448, 3577, 5336, 5500, 10243, 18816, 1265, 9390, 8916, 7388, 5507, 62, 5730, 2049, 15542, 478, 1301, 1640, 16318, 5324, 351, 9769, 17409, 3677, 6690, 15891, 4564, 9802, 754, 3721, 17538, 12304, 10513, 18768, 962, 18754, 2343, 1439, 8715, 903, 10350, 3307, 18349, 14288, 11989, 8133, 7829, 16982, 13846, 4332, 5943, 10915, 1889, 11958, 11450, 13925, 11979, 5147, 2304, 14525, 13630, 2024, 15953, 46, 9424, 877, 1144, 3674, 1204, 8717, 15920, 5095, 6488, 5025, 2037, 6273, 574, 10254, 18009, 5639, 1057, 17653, 8653, 7901, 13623, 11944, 11412, 1954, 17192, 13369, 918, 2592, 16989, 8405, 1345, 7377, 14069, 16073, 14556, 14110, 13779, 7168, 8115, 12142, 2142, 17401, 17858, 12884, 1881, 7560, 5618, 2383, 18819, 6298, 8566, 3385, 16443, 5661, 7089, 11735, 15225, 11413, 3683, 14420, 10818, 17194, 12060, 11024, 2594, 2951]",1,20,20,300,0.7395141248668662, -"[6377, 2090, 18625, 9567, 12125, 12592, 6813, 6953, 9617, 18670, 14536, 18725, 1544, 10088, 7317, 17217, 17519, 10932, 2373, 13633, 12015, 7379, 10087, 14371, 16625, 11590, 14742, 10769, 4893, 14626, 10308, 17649, 17101, 19029, 4469, 12895, 17205, 14385, 4194, 9809]",1,5,4,90,0.6525840645128569, -"[6586, 
11055, 16729, 8608, 13940, 10163, 2659, 10198, 14175, 7150, 4546, 19421, 9014, 11450, 8855, 3408, 9471, 15444, 9087, 16826, 10397, 6270, 3573, 4544, 18622, 2128, 10435, 9810, 2361, 11407, 13844, 2115, 2079, 10880, 4225, 15913, 7184, 5796, 12489, 10754, 4798, 8912, 2557, 14821, 3922, 13200, 12762, 10441, 16033, 11024, 11233, 5090, 11188, 1891, 10690, 357, 11802, 7990, 18757, 2425, 11228, 2951, 9249, 17933, 17193, 16129, 6684, 17216, 8656, 9766, 7232, 6527, 9083, 15788, 3831, 12817, 1400, 14110, 18341, 18402, 18742, 11674, 14015, 2098, 16051, 9579, 8668, 13576, 11456, 4411, 14092, 6122, 3015, 10443, 15513, 5178, 18728, 2039, 16896, 4756, 15355, 6572, 15735, 4908, 7021, 8998, 7540, 5108, 10257, 7485, 9830, 1565, 17399, 8862, 19020, 3483, 6903, 18320, 7219, 7854, 11219, 10796, 14176, 9256, 7015, 6240, 9623, 668, 7515, 4939, 18357, 1045, 17689, 6680, 16989, 13301, 4323, 3520, 12251, 5085, 1446, 8342, 1365, 14305, 7221, 528, 12650, 12145, 337, 11816, 14639, 10364, 4161, 18949, 17391, 3054, 2771, 14194, 5595, 910, 903, 16761, 5117, 14893, 18289, 1887, 16375, 4876, 17053, 6738, 8495, 10465, 6167, 2268, 7123, 3531, 10510, 973, 5475, 4823]",1,20,18,285,0.7848049906172339, -"[6833, 17403, 755, 5427, 18415, 2338, 429, 10575, 13444, 19053, 47, 8763, 2577, 194, 406, 8665, 3120, 4923, 17290, 9520, 12235, 12786, 542, 301, 12632, 4284, 1821, 16582, 3197, 16384, 17621, 18996, 10980, 9582, 8600, 9665, 19217, 1315, 7776, 8447, 6942, 18199, 15020, 16332, 8561, 17112, 4595, 10888, 15699, 11304]",0,5,5,225,0.22249835167621848, -"[7111, 13820, 13980, 1163, 3648, 5448, 17551, 17326, 755, 2632, 1883, 984, 8652, 2456, 5407, 7360, 13234, 296, 10163, 10857, 136, 1508, 1315, 18050, 14044, 14195, 2577, 8234, 16029, 16943, 17474, 16624, 4310, 18983, 7356, 1752, 7810, 1576, 10850, 1174]",0,5,4,70,0.22249835167621848, -"[755, 6509, 6424, 7481, 9943, 14815, 18073, 4818, 4299, 141, 13703, 19044, 9775, 10521, 16330, 10184, 16904, 17474, 11229, 16921, 16624, 3979, 18945, 8940, 3326, 5872, 14101, 13118, 
9322, 2568, 645, 3336, 9859, 8950, 11386, 11683, 10854, 2835, 16866, 11907, 18645, 14647, 6190, 11731, 4930, 12074, 1437, 5462, 17798, 969, 2092, 10150, 6483, 8037, 4177, 4803, 616, 8271, 13981, 72, 16461, 18338, 8540, 7894, 19129, 16446, 11520, 11144, 2067, 4065, 15446, 710, 4296, 17665, 6412, 18097, 2936, 8308, 11882, 9849, 5693, 2514, 8927, 16789, 7776, 4997, 16425, 2275, 10945, 4824, 5868, 5944, 18224, 5048, 1191, 3575, 19441, 5336, 9286, 9371, 14390, 7543, 2819, 16106, 12653, 14044, 4312, 18230, 5045, 17542, 677, 13714, 15035, 13500, 18331, 4889, 19171, 5051, 5375, 18624, 753, 19260, 18300, 18512, 47, 7415, 7585, 16179, 18221, 3433, 10708, 11447, 169, 4580, 17861, 4, 129, 1448, 17267, 18782, 2540, 8393, 35, 14045, 16631, 1938, 11747, 2994, 10861, 5953, 4328, 16218, 12256, 12499, 3373, 6071, 12483, 14999, 19583, 11217, 12292, 18852, 6460, 15384, 5890, 3108, 6630, 10949, 15776, 10683, 12634, 2263, 6243, 3005, 165, 9278, 9946, 10240, 17244, 13217, 2659, 5496, 789, 4961, 3781, 15954, 17086, 6948, 15043, 7828, 2337, 5993, 4064, 11130, 15476, 3159, 12032, 4285, 463, 4848]",1,18,20,265,0.719734239, -"[7780, 16868, 16650, 16652, 17015, 4297, 12019, 754, 1640, 12960, 16098, 3146, 17911, 15207, 5324, 8221, 4564, 19143, 8916, 8851, 3453, 62, 15663, 8707, 3577, 1063, 16579, 17709, 10051, 14353, 962, 4553, 2049, 12440, 351, 1954, 2997, 16989, 16378, 15615, 13369, 7939, 12628, 11244, 2383, 8392, 18199, 5930, 10629, 11262, 9648, 14618, 15945, 2352, 5943, 2229, 1301, 5471, 3609, 1246, 5029, 11467, 3331, 6201, 10595, 10568, 6690, 8483, 13623, 15189, 8717, 14431, 8608, 18487, 17387, 9802, 3104, 901, 11413, 1683, 7901, 6164, 2854, 16214, 4099, 19002, 11971, 11390, 7377, 253, 11814, 7120, 15239, 616, 13779, 16770, 15851, 5639, 5871, 11241, 8730, 7971, 2592, 17401, 16958, 3653, 46, 8685, 5618, 14110, 11200, 13525, 8653, 7534, 7191, 3683, 918, 15840, 5661, 12431, 11761, 14754, 11881, 12884, 147, 1056, 4152, 6488, 16772, 2024, 2134, 8715, 1204, 11958, 11450, 8356, 18009, 6180, 
17034, 4356, 14515, 8772, 6273, 478, 15682, 3137, 14420, 3531, 7829, 17727, 11049, 14069, 6039, 4860, 1456, 2080, 11449, 3674, 11372, 4298, 18942, 17645, 11178, 16637, 8133, 18233, 6631, 18906, 5095, 12304, 6381, 14607, 4058, 4332, 9649, 14857, 17538, 877, 19048, 3307, 11894, 17634, 13336, 15589, 8405, 14463, 14751, 1057, 1568, 3772, 16028, 10243, 16380, 4340, 2142, 231, 15133, 912, 4895, 18701, 3677, 1242, 14893, 18572, 16622, 18252, 15916, 1767, 13677, 18957, 2037, 9634, 16626, 3142, 15920, 15960, 150, 11944, 15498, 17192, 9769, 17415, 1835, 14207, 12046, 8747, 7210, 14288, 9808, 15642, 1068, 6242, 13925, 2041, 7560, 7537, 8687, 10350, 7231, 1320, 18201, 10727, 4505, 15377, 4411, 13313, 7888, 18454, 16318, 11840]",1,18,25,295,0.7329208297408328, -"[7939, 1640, 351, 7780, 6690, 18736, 16416, 17387, 4356, 9864, 3146, 14556, 1144, 962, 11989, 9723, 2134, 5029, 616, 17415, 12256, 18773, 17859, 14147, 14515, 1416, 18754, 4505, 1246, 13996, 5336, 5324, 5930, 17192, 16989, 5507, 11822, 754, 19002, 18717, 17538, 15542, 2383, 4581, 8470, 3674, 15225, 14857, 12789, 8626, 8093, 16982, 17409, 5943, 13630, 3683, 15448, 3082, 1265, 1683, 17401, 4564, 15891, 5618, 10065, 3453, 8483, 17767, 4332, 4450, 9424, 14484, 11840, 16305, 7094, 15727, 8133, 5661, 13400, 18743, 1761, 16250, 14525, 5500, 14997, 1767, 6631, 7901, 2304, 10350, 7388, 2080, 1068, 13009, 5430, 8221, 5497, 2049, 17034, 17911, 11269, 8707, 5639, 7089, 12479, 1439, 10915, 18035, 5730, 2594, 18837, 14415, 7168, 12884, 11390, 2037, 11735, 1204, 3306, 16770, 2343, 2592, 18656, 1242, 62, 1889, 2783, 16242, 3677, 2024, 11099, 18221, 6000, 918, 12060, 3983, 11450, 11463, 2951, 12142, 14893, 14662, 10818, 11585, 13846, 3942, 1301, 6180, 9346, 9617, 11024, 14420, 733, 8516, 5402, 546, 6291, 3577, 18009, 18819, 16706, 16318, 11814, 5476, 9634, 5147, 2821, 6242, 5095, 3137, 11944, 6298, 2428, 7560, 10575, 2352, 46, 9390, 1794, 17003, 18625, 1063, 6833, 6201, 10744, 1057, 17122, 3104, 5312, 3307, 15324, 12766, 47, 6344, 6164, 
478, 3721, 1954, 17792, 574]",1,14,20,275,0.7682710351473347, -"[8005, 7366, 6686, 4791, 14502, 12160, 4939, 12924, 4911, 3240, 6923, 14019, 4133, 5994, 18558, 18745, 19346, 5619, 14376, 12125, 15857, 6903, 14833, 12643, 14770, 5794, 13263, 13795, 3890, 19588, 3695, 10319, 8121, 10433, 5486, 15479, 8009, 2814, 19196, 6684]",1,2,4,80,0.6526854998224882, -"[806, 12871, 16229, 11099, 8130, 18839, 19033, 12765, 2783, 4524, 9075, 8410, 8537, 7962, 4489, 1539, 10929, 17723, 10063, 9073, 11874, 14663, 10484, 13164, 5958, 5386, 13045, 449, 5219, 3119, 4221, 5214, 11199, 1143, 1085, 16165, 6543, 6891, 17950, 6745]",0,4,4,50,0.22249835167621848, -"[8250, 16378, 17034, 8483, 1246, 4007, 1694, 4578, 1597, 5029, 8037, 16242, 15663, 1635, 12628, 15577, 4581, 14515, 3146, 616, 6125, 9346, 11024, 14997, 6908, 5288, 7080, 17600, 1505, 16780, 5500, 16989, 10635, 11678, 2383, 15682, 13600, 19074, 3141, 18221, 19402, 15880, 534, 4238, 10575, 6833, 10015, 7780, 18224, 5686, 18754, 9683, 17461, 18166, 7318, 1567, 3674, 17724, 962, 14246, 1813, 1767, 8482, 7605, 4497, 2803, 15916, 1761, 9328, 5704, 16652, 9634, 3218, 12440, 18827, 1794, 3658, 7981, 1439, 13203, 14987, 14110, 17414, 8271, 13333, 2071, 16724, 16904, 17398, 13009, 17681, 10734, 5232, 3470, 3721, 5930, 14699, 8715, 17710, 17718, 8980, 18417, 9692, 4038, 8803, 8002, 17455, 17462, 4722, 15546, 5347, 1779, 432, 16661, 12236, 3825, 16854, 3324, 18735, 19333, 14608, 10914, 4662, 17609, 17767, 2471, 3469, 16113, 12256, 2888, 5819, 12375, 9331, 179, 8419, 6284, 15036, 19254, 15533, 2080, 17520, 9065, 18945, 14270, 816, 13842, 871, 16776, 720, 12148, 8133, 15729, 17496, 7497, 19118, 6062, 3785, 19650, 11453, 17435, 18461, 14198, 4218, 11463, 3211, 16303, 7197, 8907, 6636, 1980, 5429, 6824, 19051, 12620, 2024, 7126, 1640, 4924, 1763, 19466]",1,14,18,300,0.6813916924481412, -"[8750, 15236, 12875, 4150, 15542, 9893, 18411, 17387, 18736, 2526, 11262, 11785, 12259, 12680, 5324, 1614, 1522, 3721, 7783, 8461, 7293, 1311, 12860, 10065, 
18791, 10376, 13988, 9451, 7792, 14827, 8262, 5157, 8910, 17960, 11453, 6874, 5430, 6863, 4218, 11914, 12649, 77, 1872, 11857, 9864, 3162, 966, 18983, 15235, 7388, 1513, 8432, 5943, 7889, 10977, 11472, 2389, 16851, 3961, 15480, 3649, 3886, 15106, 17917, 9865, 9549, 8093, 12036, 14418, 2469, 78, 13538, 17034, 8564, 16565, 11246, 3652, 2383, 18061, 5189, 7992, 1430, 19706, 10384, 1439, 2473, 6236, 5887, 9316, 13926, 19451, 19252, 17530, 8632, 11604, 7553, 17130, 16533, 5336, 8763, 9005, 15971, 3936, 986, 2776, 3036, 1312, 11283, 17812, 5538, 17192, 4581, 14701, 6770, 4922, 5144, 6224, 18400, 11932, 11362]",1,14,12,265,0.6536491352639854, -"[903, 5029, 18487, 18572, 5324, 284, 16650, 5943, 8851, 17911, 1246, 14431, 9648, 15207, 15663, 4564, 15682, 7560, 12628, 7939, 11049, 16378, 16868, 15916, 8707, 14751, 8608, 3146, 12618, 1640, 10051, 1954, 3082, 11894, 1568, 912, 3577, 16514, 1242, 16579, 19002, 11390, 8916, 3453, 14556, 18754, 15589, 11262, 46, 2532, 1068, 62, 10568, 8483, 4138, 11840, 11200, 17387, 6690, 8221, 11971, 16380, 2592, 18920, 16770, 15189, 5471, 19005, 8476, 6488, 14515, 6180, 1204, 2383, 14857, 16880, 754, 14288, 17538, 574, 15920, 2854, 15498, 16637, 7901, 6334, 150, 8717, 3137, 2134, 11955, 18819, 4798, 7377, 17054, 8715, 18768, 14207, 2343, 14420, 5639, 962, 11449, 4553, 17499, 14149, 19143, 18773, 14754, 5618, 11944, 5375, 18169, 18816, 18349, 2024, 1889, 10862, 8392, 17295, 10350, 6164, 15960, 4895, 10595, 3983, 3346, 7326, 5579, 11761, 2997, 231, 16989, 7470, 6276, 11413, 12440, 16772, 351, 15945, 4837, 6273, 11450, 2049, 5095, 4077, 12884, 16214, 16652, 14110, 16098, 7829, 14069, 12479, 18009, 17034, 7780, 3683, 3287, 10513, 11814, 4332, 4058, 3677, 14618, 10243, 9814, 14607, 10011, 8133, 6201, 2563, 14320, 572, 17401, 17415, 8034, 616, 13215, 13677]",1,18,18,300,0.682558199, -"[9458, 8390, 2886, 15385, 13009, 10521, 14397, 10744, 2177, 11264, 8037, 11907, 11585, 15329, 5242, 3126, 14647, 1830, 4477, 7780, 5048, 8526, 306, 18945, 10173, 15841, 
11386, 11672, 1711, 17267, 8813, 4242, 18170, 16179, 529, 11974, 141, 8262, 2835, 163, 15174, 15082, 2612, 11400, 3300, 394, 1761, 18450, 4007, 4834, 17767, 16851, 13839, 13085, 9070, 14469, 4190, 6212, 11032, 17530, 16804, 18025, 12304, 6833, 11461, 6125, 3440, 5035, 15501, 2356, 9034, 4053, 15844, 14855, 3577, 16137, 12959, 7121, 13010, 15074, 8441, 2657, 15740, 10118, 17837, 8063, 5800, 18257, 8019, 19265, 15800, 17184, 14544, 1318, 3945, 18373, 7924, 11169, 10945, 3327]",1,9,10,225,0.6789572450169904, -"[16921, 14815, 10388, 7076, 2886, 8864, 10977, 12235, 529, 18477, 1752, 620, 7979, 18942, 9322, 17917, 8138, 13820, 17794, 4064]",0,2,2,600,0.22249835167621848, -"[4733, 11580, 2561, 15961, 7973, 8516, 10282, 3306, 10253, 9029]",0,3,1,400,0.22249835167621848, -"[12649, 7295, 11636, 2862, 14044, 11461, 4404, 11012, 6454, 12148]",0,3,1,1400,0.22858447025409545, -"[10446, 16605, 6522, 14819, 5427, 10435, 10348, 10857, 14066, 9810, 1614, 13297, 9154, 16040, 5241, 13830, 2862, 806, 18415, 15663]",0,2,2,600,0.22249835167621848, -"[3520, 16656, 2846, 8879, 13830, 6148, 1984, 11529, 19053, 5583, 2722, 14398, 5386, 16454, 406, 14541, 101, 5558, 1637, 13131, 17194, 6665, 12632, 11637, 8986, 8952, 10612, 4549, 14066, 2996, 14943, 18776, 18411, 206, 9883, 6826, 4557, 10469, 4311, 2864, 3580, 16093, 5282, 13839, 18415, 4820, 19333, 5211, 13761, 13114, 413, 12460, 12658, 194, 13024, 2262, 9582, 8365, 8665, 1679, 4121, 16981, 17403, 8783, 4923, 4840, 6291, 16040, 10617, 9655, 3558, 5267, 8137, 1430, 2714, 5928, 17621, 5668, 14265, 10547, 4529, 7005, 13037, 8907, 15010, 11447, 17297, 18266, 18731, 5311, 5233, 15146, 7832, 9520, 2214, 1838, 13297, 10059, 11225, 18901, 18048, 17917, 6204, 11141, 5540, 4310, 13980, 14237, 13250, 5448, 19170, 9154, 8526, 5716, 598, 509, 9451, 8037, 10677, 3266, 72, 4864, 7741, 17542, 14027, 12075, 12620, 3445, 4934, 3357, 2862, 14130, 3306, 13668, 11998, 14271, 10865, 12235, 9833, 10635, 13561, 10446, 8241, 4404, 13405, 57, 2670, 677, 4794, 1174, 
6783, 296, 4024, 10223, 17447, 9229, 14085, 14456, 4938, 6739, 9353, 13234, 2492, 7258, 13674, 2133, 4157, 8447, 1883, 9877, 3134, 1004, 17068, 1612, 17184, 8433, 5696, 16649, 18806, 2014, 2059, 18350, 1614, 19355, 17912, 5407, 2150, 18533, 19027, 13444, 720, 12708, 11524, 2099, 5562, 9329, 10340, 11706, 5513, 4218]",0,9,20,600,0.22249835167621848, -"[12259, 15971, 7861, 5157, 11472, 4682, 4929, 10575, 12235, 15095]",0,2,1,300,0.2355327889638383, -"[12612, 2182, 2338, 13668, 9451, 18731, 16538, 6949, 1821, 14781]",0,3,1,150,0.2737739006948319, -"[18766, 6949, 7979, 12649, 8999, 5716, 7887, 7992, 8576, 7295, 8028, 5798, 15095, 4756, 16516, 10388, 12235, 1752, 16981, 12148, 6826, 4404, 18059, 4214, 18350, 2516, 6167, 5360, 10857, 2390, 9383, 9810, 4437, 2338, 5470, 8656, 13820, 15854, 10612, 19194, 13114, 11141, 7861, 5120, 10827, 8471, 2509, 16836, 10004, 9883, 9052, 14823, 3651, 2787, 11233, 1963, 11724, 12927, 13320, 3700, 12762, 19073, 17291, 8081, 12213, 17313, 495, 14030, 9543, 7764, 11637, 5783, 16192, 18202, 14255, 11801, 9215, 6357, 9724, 15890]",0,3,8,100,0.22249835167621848, -"[5716, 2338, 17917, 18436, 8262, 1752, 19291, 2763, 2722, 10388]",0,4,1,150,0.22249835167621848, -"[18415, 4284, 12320, 8423, 14448, 4219, 14042, 2722, 5407, 10617, 806, 6270, 13500, 17256, 10123, 10360, 3786, 10059, 14673, 9547, 12752, 11044, 1108, 5009, 12272, 13453, 6166, 7447, 13384, 6331, 19688, 666, 16055, 10021, 470, 11785, 10306, 7362, 4869, 18470, 15441, 15662, 17693, 13144, 11793, 5186, 14777, 10432, 15833, 1499]",0,3,5,100,0.22249835167621848, -"[11012, 8770, 10389, 4846, 7810, 10223, 869, 2862, 10344, 8241, 4222, 7973, 10388, 15884, 4404, 5866, 14781, 4529, 15855, 4733, 13939, 11141, 18632, 7553, 11044, 2516, 13981, 1315, 10105, 7883, 9998, 2725, 2183, 7360, 3011, 18050, 7979, 755, 18911, 11752, 15095, 406, 13733, 15849, 9434, 12075, 7056, 3791, 715, 14044, 3886, 531, 1201, 1614, 13668, 10558, 9775, 14586, 3162, 7356, 9451, 15198, 1752, 8866, 5470, 7303, 6204, 13475, 
14045, 1310, 9229, 1064, 9029, 6949, 8760, 10850, 10340, 15253, 12148, 18791, 7861, 6154, 11637, 2456, 12649, 13864, 13009, 10474, 5360, 18411, 18415, 2157, 19053, 2509, 4938, 8447, 4923, 11406, 5732, 16193, 16224, 5716, 7295, 1659, 14519, 7992, 2543, 15685, 8810, 4929, 1816, 7930, 3093, 17917, 194, 15431, 18766, 2390, 4129, 10575, 15435, 5540, 12405, 40, 3634, 18986, 10924, 12208, 10635, 13471, 16384, 19010, 12984, 6326, 5152, 10857, 8864, 4820, 1671, 7595, 10471, 9186, 12002, 5564, 11604, 9877, 13855, 2666, 72, 5491, 11225, 8907, 4024, 6242, 4172, 19027, 2248, 206, 9463, 17163, 5901, 4214, 1325, 15307, 16253, 2759, 1802, 677, 16658, 5513, 615, 12756, 11524, 7818, 8903, 9004, 2262, 966, 9786, 4864, 16521, 7832, 17911, 323, 2338, 5311, 2577, 15641, 11328, 18453, 12113, 18350, 6833, 8576, 15572, 14271, 13431, 18048, 16034, 14622, 11787, 7110, 11116, 5427, 657, 5523, 13820, 8530, 8119, 17184, 15125, 7300, 4616, 13310, 14429, 15007, 2177, 1430, 1710, 16525, 10847, 17265, 6085, 8987, 14983, 12235, 2787, 8735, 7879, 8019, 1281, 210, 10955, 15546, 14707, 139, 8262, 5489, 3102, 3538, 15020, 18257, 3935, 2239, 19253, 18450, 7891, 10232, 9084, 13976, 2214, 19427, 11123, 8075, 47, 1152, 7055, 3705, 4552, 6867, 2053, 598, 8270, 6051, 19217, 11998, 16242, 9729, 8920, 12413, 11363, 15408, 12773, 14905, 7670, 14381, 14415, 2302, 16722, 11494, 2182, 17297, 13920, 12612, 7153, 17794, 2933, 2148, 10391, 10565, 11743, 16930, 8740, 18010, 14486, 1886, 2923, 95, 15955, 11174]",0,3,30,350,0.22249835167621848, -"[16330, 2886, 16624, 2577, 11169, 15065, 5400, 5250, 17338, 13678, 14417, 1508, 7810, 6653, 3263, 5375, 3957, 3719, 2059, 6783, 14044, 4000, 17756, 15849, 6125, 677, 17303, 755, 16223, 72, 7815, 14045, 984, 18373, 17220, 529, 4514, 7099, 14654, 8625, 11856, 136, 17092, 7496, 13245, 7111, 8652, 13444, 4128, 5716]",0,7,5,100,0.22249835167621848, -"[15444, 15778, 4404, 11233, 9579, 2557, 755, 2543, 11973, 8480, 9810, 11648, 1315, 15535, 10865, 8652, 7356, 17415, 7979, 16624, 19096, 
3455, 16330, 2717, 16579, 13703, 12204, 18766, 490, 11450, 5437, 9299, 5475, 18290, 2936, 9186, 17904, 10850, 10223, 7855, 6167, 16943, 10847, 13475, 79, 16727, 13875, 7360, 4848, 7515, 13114, 8447, 13405, 10854, 17474, 6412, 14745, 5407, 10441, 13047, 1576, 4923, 9687, 6454, 2338, 4986, 18050, 3965, 14044, 7894, 5117, 47, 8392, 16451, 11024, 16996, 10752, 15338, 9160, 6969, 18791, 18455, 3702, 15345, 6157, 5663, 140, 19210, 14336, 6949, 18630, 18073, 1310, 12650, 597, 3535, 18373, 14893, 4399, 13815]",0,9,10,300,0.22249835167621848, -"[16656, 15035, 6148, 2092, 10435, 14745, 6349, 17189, 17273, 2659]",0,2,1,150,0.22249835167621848, -"[35, 5375, 10854, 5462, 8950, 9322, 2337, 16921, 2700, 18050]",0,2,1,350,0.2226505046406654, -"[2092, 6454, 2936, 9299, 9874, 13703, 4296, 5378, 8950, 6863, 3077, 11973, 9395, 9062, 9064, 898, 9218, 1560, 8112, 3503]",0,2,2,50,0.22249835167621848, -"[1752, 2239, 4214, 4846, 5470, 18766, 7979, 5523, 7992, 17917]",0,3,1,100,0.2920829740832784, -"[12612, 1802, 11363, 7861, 15095, 3651, 2182, 4923, 16981, 17297, 10786, 1821, 18766, 4657, 9004, 8576, 12578, 2390, 10223, 5419]",0,3,2,150,0.22249835167621848, -"[14044, 17474, 1477, 4020, 16820, 4064, 15770, 14045, 5749, 858, 2092, 6454, 169, 11217, 10211, 9322, 9923, 17453, 3185, 13981, 1576, 14571, 8368, 18455, 2544, 5815, 7360, 10766, 8406, 2656, 7405, 11659, 1582, 6267, 15975, 5497, 4108, 2109, 2367, 12812]",0,3,4,100,0.22249835167621848, -"[16332, 17474, 14745, 9186, 2543, 5470, 1752, 10572, 6412, 296, 7258, 4214, 10038, 16579, 8234, 14195, 15535, 14893, 6270, 2092]",0,4,2,250,0.22249835167621848, -"[13405, 9229, 17794, 10863, 16578, 13820, 18791, 10847, 10223, 2182, 4840, 2239, 2543, 14780, 15633, 11637, 957, 9004, 7553, 9186, 17953, 2759, 531, 7610, 9877, 14381, 19194, 14781, 7930, 17917, 16836, 19027, 18411, 1430, 4929, 2862, 2214, 8241, 8843, 12612, 1711, 4535, 8864, 13114, 14404, 2477, 6326, 3468, 9883, 509, 18766, 3860, 8755, 742, 1821, 16981, 16521, 9451, 13475, 18059, 10340, 
6352, 11385, 2456, 529, 8760, 3162, 18616, 3357, 5353, 5540, 15884, 101, 10015, 12663, 8081, 5716, 6949, 11141, 9463]",0,4,8,150,0.26469544048283206, -"[17403, 4024, 5427, 2338, 47, 18791, 8910, 10340, 406, 10961, 5716, 11484, 4214, 4929, 7861, 18415, 15572, 9520, 8137, 19170, 509, 3194, 9582, 3526, 15236, 1310, 9186, 8665, 17917, 10388, 14819, 3120, 15977, 15307, 12075, 17613, 11729, 10038, 2262, 5470]",0,5,4,250,0.22249835167621848, -"[3357, 10388, 2516, 529, 18766, 5564, 17917, 2862, 11637, 3634, 18839, 19027, 5513, 2841, 15627, 6326, 2214, 12632, 5523, 5540, 12075, 11912, 13820, 3577, 16229, 19063, 17952, 9186, 18048, 5360, 14843, 15663, 11363, 8864, 11141, 10340, 7553, 742, 5663, 12612, 17794, 10863, 6125, 17220, 6151, 8241, 12235, 8843, 9451, 2456]",0,7,5,350,0.22249835167621848, \ No newline at end of file diff --git a/pygip/models/defense/base.py b/pygip/models/defense/base.py deleted file mode 100644 index 44ade19f..00000000 --- a/pygip/models/defense/base.py +++ /dev/null @@ -1,73 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Union, Optional - -import torch - -from pygip.datasets import Dataset -from pygip.utils.hardware import get_device - - -class BaseDefense(ABC): - supported_api_types = set() - supported_datasets = set() - - def __init__(self, dataset: Dataset, attack_node_fraction: float, - device: Optional[Union[str, torch.device]] = None): - self.device = torch.device(device) if device else get_device() - print(f"Using device: {self.device}") - - # graph data - self.dataset = dataset - self.graph_dataset = dataset.graph_dataset - self.graph_data = dataset.graph_data - - # meta data - self.num_nodes = dataset.num_nodes - self.num_features = dataset.num_features - self.num_classes = dataset.num_classes - - # params - self.attack_node_fraction = attack_node_fraction - - self._check_dataset_compatibility() - - def _check_dataset_compatibility(self): - cls_name = self.dataset.__class__.__name__ - - if self.supported_api_types and 
self.dataset.api_type not in self.supported_api_types: - raise ValueError( - f"API type '{self.dataset.api_type}' is not supported. Supported: {self.supported_api_types}") - - if self.supported_datasets and cls_name not in self.supported_datasets: - raise ValueError(f"Dataset '{cls_name}' is not supported. Supported: {self.supported_datasets}") - - @abstractmethod - def defend(self): - """ - Execute the defense mechanism. - """ - raise NotImplementedError - - def _load_model(self): - """ - Load pre-trained model. - """ - raise NotImplementedError - - def _train_target_model(self): - """ - This is an optional method. - """ - raise NotImplementedError - - def _train_defense_model(self): - """ - This is an optional method. - """ - raise NotImplementedError - - def _train_surrogate_model(self): - """ - This is an optional method. - """ - raise NotImplementedError diff --git a/pygip/models/nn/__init__.py b/pygip/models/nn/__init__.py deleted file mode 100644 index d40027c9..00000000 --- a/pygip/models/nn/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .backbones import GCN, GraphSAGE, ShadowNet, AttackNet diff --git a/pygip/models/nn/backbones.py b/pygip/models/nn/backbones.py deleted file mode 100644 index 09daf841..00000000 --- a/pygip/models/nn/backbones.py +++ /dev/null @@ -1,123 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from dgl.nn.pytorch import GraphConv, SAGEConv -from torch_geometric.nn import GATConv -from torch_geometric.nn import GCNConv - - -class GCN(nn.Module): - """A simple GCN Network.""" - - def __init__(self, feature_number, label_number): - super(GCN, self).__init__() - self.layers = nn.ModuleList() - self.layers.append(GraphConv(feature_number, 16, activation=F.relu)) - self.layers.append(GraphConv(16, label_number)) - self.dropout = nn.Dropout(p=0.5) - - def forward(self, g, features): - x = self.layers[0](g, features) - x = F.relu(x) - x = self.layers[1](g, x) - return x - - -class GraphSAGE(nn.Module): - """ - A 
GraphSAGE model implemented with PyG's SAGEConv module. - - It consists of two SAGEConv layers: - - The first layer projects features to 'hidden_channels', - - The second layer outputs 'out_channels'. - """ - - def __init__(self, in_channels, hidden_channels, out_channels): - """ - Initializes the GraphSAGE model. - - Parameters - ---------- - in_channels : int - The dimensionality of the input features. - hidden_channels : int - The dimensionality of the hidden layer. - out_channels : int - The dimensionality of the output layer (or the number of classes). - """ - super(GraphSAGE, self).__init__() - self.conv1 = SAGEConv(in_channels, hidden_channels, aggregator_type='mean') - self.conv2 = SAGEConv(hidden_channels, out_channels, aggregator_type='mean') - - def forward(self, blocks, x): - """ - Forward pass. - - Parameters - ---------- - blocks : list of dgl.DGLGraph - A list of subgraphs sampled for multiple layers. - x : torch.Tensor - The node features of shape (num_nodes, in_channels). - - Returns - ------- - torch.Tensor - The model outputs (logits) of shape (num_nodes, out_channels). 
- """ - x = self.conv1(blocks[0], x) - x = F.relu(x) - x = self.conv2(blocks[1], x) - return x - - -class ShadowNet(torch.nn.Module): - """A shadow model GCN.""" - - def __init__(self, feature_number, label_number): - super(ShadowNet, self).__init__() - self.layer1 = GraphConv(feature_number, 16) - self.layer2 = GraphConv(16, label_number) - - def forward(self, g, features): - x = torch.nn.functional.relu(self.layer1(g, features)) - x = self.layer2(g, x) - return x - - -class AttackNet(nn.Module): - """An attack model GCN.""" - - def __init__(self, feature_number, label_number): - super(AttackNet, self).__init__() - self.layers = nn.ModuleList() - self.layers.append(GraphConv(feature_number, 16, activation=F.relu)) - self.layers.append(GraphConv(16, label_number)) - self.dropout = nn.Dropout(p=0.5) - - def forward(self, g, features): - x = F.relu(self.layers[0](g, features)) - x = self.layers[1](g, x) - return x - - - -class GAT(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, heads=8): - super().__init__() - self.conv1 = GATConv(in_channels, hidden_channels, heads=heads) - self.conv2 = GATConv(hidden_channels*heads, out_channels, heads=1) - def forward(self, x, edge_index): - x = F.relu(self.conv1(x, edge_index)) - return self.conv2(x, edge_index) - - -class GCN_PyG(nn.Module): # Rename to avoid clash with existing DGL GCN - def __init__(self, in_channels, hidden_channels, out_channels): - super().__init__() - self.conv1 = GCNConv(in_channels, hidden_channels) - self.conv2 = GCNConv(hidden_channels, out_channels) - - def forward(self, x, edge_index): - x = F.relu(self.conv1(x, edge_index)) - return self.conv2(x, edge_index) diff --git a/pygip/utils/__init__.py b/pygip/utils/__init__.py deleted file mode 100644 index 530365ec..00000000 --- a/pygip/utils/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .metrics import GraphNeuralNetworkMetric - -__all__ = [ - 'GraphNeuralNetworkMetric', -] diff --git a/pygip/utils/dglTopyg.py 
b/pygip/utils/dglTopyg.py deleted file mode 100644 index c159b956..00000000 --- a/pygip/utils/dglTopyg.py +++ /dev/null @@ -1,13 +0,0 @@ -from torch_geometric.utils import from_networkx -import networkx as nx - -def dgl_to_pyg_data(dgl_graph): - nx_graph = dgl_graph.to_networkx(node_attrs=['feat', 'label', 'train_mask', 'val_mask', 'test_mask']) - pyg_data = from_networkx(nx_graph) - pyg_data.x = pyg_data.feat - pyg_data.y = pyg_data.label - pyg_data.train_mask = pyg_data.train_mask - pyg_data.val_mask = pyg_data.val_mask - pyg_data.test_mask = pyg_data.test_mask - return pyg_data - diff --git a/pygip/utils/hardware.py b/pygip/utils/hardware.py deleted file mode 100644 index 685a928b..00000000 --- a/pygip/utils/hardware.py +++ /dev/null @@ -1,15 +0,0 @@ -import os - -import torch - -_DEFAULT_DEVICE_STR = os.getenv("PYGIP_DEVICE") or ("cuda:0" if torch.cuda.is_available() else "cpu") -_default_device = torch.device(_DEFAULT_DEVICE_STR) - - -def get_device(): - return _default_device - - -def set_device(device_str): - global _default_device - _default_device = torch.device(device_str) diff --git a/pygip/utils/metrics.py b/pygip/utils/metrics.py deleted file mode 100644 index 695b11e4..00000000 --- a/pygip/utils/metrics.py +++ /dev/null @@ -1,348 +0,0 @@ -from abc import ABC, abstractmethod -from typing import List, Dict - -import numpy as np -from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score - - -class MetricBase(ABC): - def __init__(self): - self.preds = [] - self.labels = [] - - @abstractmethod - def update(self, *args, **kwargs) -> None: - """Update internal metric state.""" - pass - - @abstractmethod - def compute(self) -> Dict[str, float]: - """Compute and return all metric results.""" - pass - - def reset(self) -> None: - """Reset internal state.""" - self.preds = [] - self.labels = [] - - @staticmethod - def _cat_to_numpy(a: List) -> np.ndarray: - if len(a) == 0: - raise ValueError("Empty tensor list, nothing to compute.") - 
return torch.cat(a).cpu().numpy() - - def compute_default_metrics(self, preds, labels) -> Dict[str, float]: - preds = self._cat_to_numpy(preds) - labels = self._cat_to_numpy(labels) - return { - 'Acc': accuracy_score(labels, preds), - 'F1': f1_score(labels, preds, average='macro'), - 'Precision': precision_score(labels, preds, average='macro'), - 'Recall': recall_score(labels, preds, average='macro'), - } - - def __repr__(self): - results = self.compute() - for name, value in results.items(): - print(f"{name}: {value:.4f}") - - -class AttackMetric(MetricBase): - def __init__(self): - super().__init__() - self.query_label = [] - self.reset() - - def reset(self) -> None: - super().reset() - self.query_label = [] - - def update(self, preds, labels, query_label): - self.preds.append(preds.detach().cpu()) - self.labels.append(labels.detach().cpu()) - self.query_label.append(query_label.detach().cpu()) - - def compute_fidelity(self, preds_label, query_label) -> Dict[str, float]: - preds_label = self._cat_to_numpy(preds_label) - query_label = self._cat_to_numpy(query_label) - return { - 'Fidelity': (preds_label == query_label).astype(float).mean().item() - } - - def compute(self): - defaults = self.compute_default_metrics(self.preds, self.labels) - fidelity = self.compute_fidelity(self.preds, self.query_label) - results = defaults | fidelity - print(f"acc: {results['Acc']:.4f}, fidelity: {results['Fidelity']:.4f}") - return results - - -class DefenseMetric(MetricBase): - def __init__(self): - super().__init__() - self.wm_preds = [] - self.wm_label = [] - self.reset() - - def update(self, preds, labels): - self.preds.append(preds.detach().cpu()) - self.labels.append(labels.detach().cpu()) - - def reset(self) -> None: - super().reset() - self.wm_preds = [] - self.wm_label = [] - - def update_wm(self, wm_preds, wm_label): - self.wm_preds.append(wm_preds.detach().cpu()) - self.wm_label.append(wm_label.detach().cpu()) - - def compute_wm(self): - wm_preds = 
self._cat_to_numpy(self.wm_preds) - wm_label = self._cat_to_numpy(self.wm_label) - return {"WM Acc": accuracy_score(wm_label, wm_preds)} - - def compute(self): - defaults = self.compute_default_metrics(self.preds, self.labels) - wm_acc = self.compute_wm() - results = defaults | wm_acc - print(f"acc: {results['Acc']:.4f}, wm acc: {results['WM Acc']:.4f}") - return results - - -import torch -import numpy as np -import time - - -class AttackCompMetric: - def __init__(self, gpu_count=None): - self.train_target_time = [] - self.query_target_time = [] - self.train_surrogate_time = [] - self.inference_surrogate_time = [] - self.attack_time = [] - - self.start_time = 0 - self.total_time = 0 - - self.gpu_count = gpu_count or (1 if torch.cuda.is_available() else 0) - - if torch.cuda.is_available(): - torch.cuda.reset_peak_memory_stats() - - def start(self): - self.start_time = time.time() - - def end(self): - self.total_time = time.time() - self.start_time - - def update(self, train_target_time=None, query_target_time=None, train_surrogate_time=None, attack_time=None, - inference_surrogate_time=None): - if train_target_time is not None: - self.train_target_time.append(train_target_time) - if query_target_time is not None: - self.query_target_time.append(query_target_time) - if train_surrogate_time is not None: - self.train_surrogate_time.append(train_surrogate_time) - if attack_time is not None: - self.attack_time.append(attack_time) - if inference_surrogate_time is not None: - self.inference_surrogate_time.append(inference_surrogate_time) - - def compute(self): - peak_mem = 0 - if torch.cuda.is_available(): - peak_mem = torch.cuda.max_memory_allocated() / (1024 ** 3) # GB - - gpu_hours = (self.total_time / 3600.0) * self.gpu_count - - print( - f"attack time: {np.mean(self.attack_time):.4f}, inference time: {np.mean(self.inference_surrogate_time):.4f}, gpu mem: {peak_mem:.4f}, gpu hours: {gpu_hours:.4f}") - - return { - 'train_target_time': np.mean(self.train_target_time), - 
'query_target_time': np.mean(self.query_target_time), - 'train_surrogate_time': np.mean(self.train_surrogate_time), - 'attack_time': np.mean(self.attack_time), - 'inference_surrogate_time': np.mean(self.inference_surrogate_time), - 'total_time': self.total_time, - 'peak_gpu_mem(GB)': peak_mem, - 'gpu_hours': gpu_hours - } - - -class DefenseCompMetric: - def __init__(self, gpu_count=None): - self.train_target_time = [] - self.train_defense_time = [] - self.inference_defense_time = [] - self.defense_time = [] - - self.start_time = 0 - self.total_time = 0 - - self.gpu_count = gpu_count or (1 if torch.cuda.is_available() else 0) - - if torch.cuda.is_available(): - torch.cuda.reset_peak_memory_stats() - - def start(self): - self.start_time = time.time() - - def end(self): - self.total_time = time.time() - self.start_time - - def update(self, train_target_time=None, train_defense_time=None, inference_defense_time=None, defense_time=None): - if train_target_time is not None: - self.train_target_time.append(train_target_time) - if train_defense_time is not None: - self.train_defense_time.append(train_defense_time) - if inference_defense_time is not None: - self.inference_defense_time.append(inference_defense_time) - if defense_time is not None: - self.defense_time.append(defense_time) - - def compute(self): - peak_mem = 0 - if torch.cuda.is_available(): - peak_mem = torch.cuda.max_memory_allocated() / (1024 ** 3) # GB - - gpu_hours = (self.total_time / 3600.0) * self.gpu_count - - print( - f"defense time: {np.mean(self.defense_time):.4f}, inference time: {np.mean(self.inference_defense_time):.4f}, gpu mem: {peak_mem:.4f}, gpu hours: {gpu_hours:.4f}") - - return { - 'train_target_time': np.mean(self.train_target_time), - 'train_defense_time': np.mean(self.train_defense_time), - 'inference_defense_time': np.mean(self.inference_defense_time), - 'defense_time': np.mean(self.defense_time), - 'total_time': self.total_time, - 'peak_gpu_mem(GB)': peak_mem, - 'gpu_hours': gpu_hours 
- } - - -class GraphNeuralNetworkMetric: - """ - Graph Neural Network Metric Class. - - This class evaluates two metrics, fidelity and accuracy, for a given - GNN model on a specified graph and features. - """ - - def __init__(self, fidelity=0, accuracy=0, model=None, - graph=None, features=None, mask=None, - labels=None, query_labels=None): - self.model = model if model is not None else None - self.graph = graph if graph is not None else None - self.features = features if features is not None else None - self.mask = mask if mask is not None else None - self.labels = labels if labels is not None else None - self.query_labels = query_labels if query_labels is not None else None - self.accuracy = accuracy - self.fidelity = fidelity - - def evaluate_helper(self, model, graph, features, labels, mask): - """Helper function to evaluate the model's performance.""" - if model is None or graph is None or features is None or labels is None or mask is None: - return None - model.eval() - with torch.no_grad(): - logits = model(graph, features) - logits = logits[mask] - labels = labels[mask] - _, indices = torch.max(logits, dim=1) - correct = torch.sum(indices == labels) - return correct.item() * 1.0 / len(labels) - - def evaluate(self): - """Main function to update fidelity and accuracy scores.""" - self.accuracy = self.evaluate_helper( - self.model, self.graph, self.features, self.labels, self.mask) - self.fidelity = self.evaluate_helper( - self.model, self.graph, self.features, self.query_labels, self.mask) - - def __str__(self): - """Returns a string representation of the metrics.""" - return f"Fidelity: {self.fidelity:.4f}, Accuracy: {self.accuracy:.4f}" - - @staticmethod - def calculate_surrogate_fidelity(target_model, surrogate_model, data, mask=None): - """ - Calculate fidelity between target and surrogate model predictions. 
- - Args: - target_model: Original model - surrogate_model: Extracted surrogate model - data: Input graph data - mask: Optional mask for evaluation on specific nodes - - Returns: - float: Fidelity score (percentage of matching predictions) - """ - target_model.eval() - surrogate_model.eval() - - with torch.no_grad(): - # Get predictions from both models - target_logits = target_model(data) - surrogate_logits = surrogate_model(data) - - # Apply mask if provided - if mask is not None: - target_logits = target_logits[mask] - surrogate_logits = surrogate_logits[mask] - - # Get predicted classes - target_preds = target_logits.argmax(dim=1) - surrogate_preds = surrogate_logits.argmax(dim=1) - - # Calculate fidelity - matches = (target_preds == surrogate_preds).sum().item() - total = len(target_preds) - - return (matches / total) * 100 - - @staticmethod - def evaluate_surrogate_extraction(target_model, surrogate_model, data, - train_mask=None, val_mask=None, test_mask=None): - """ - Comprehensive evaluation of surrogate extraction attack. 
- - Args: - target_model: Original model - surrogate_model: Extracted surrogate model - data: Input graph data - train_mask: Mask for training nodes - val_mask: Mask for validation nodes - test_mask: Mask for test nodes - - Returns: - dict: Dictionary containing fidelity scores for different data splits - """ - results = {} - - # Overall fidelity - results['overall_fidelity'] = GraphNeuralNetworkMetric.calculate_surrogate_fidelity( - target_model, surrogate_model, data - ) - - # Split-specific fidelity if masks are provided - if train_mask is not None: - results['train_fidelity'] = GraphNeuralNetworkMetric.calculate_surrogate_fidelity( - target_model, surrogate_model, data, train_mask - ) - - if val_mask is not None: - results['val_fidelity'] = GraphNeuralNetworkMetric.calculate_surrogate_fidelity( - target_model, surrogate_model, data, val_mask - ) - - if test_mask is not None: - results['test_fidelity'] = GraphNeuralNetworkMetric.calculate_surrogate_fidelity( - target_model, surrogate_model, data, test_mask - ) - - return results diff --git a/pyhazards/__init__.py b/pyhazards/__init__.py new file mode 100644 index 00000000..6515a2d8 --- /dev/null +++ b/pyhazards/__init__.py @@ -0,0 +1,110 @@ +from importlib.metadata import PackageNotFoundError, version + +try: + __version__ = version("pyhazards") +except PackageNotFoundError: + __version__ = "0.0.0" # fallback + +from .datasets import ( + DataBundle, + DataSplit, + Dataset, + FeatureSpec, + LabelSpec, + GraphTemporalDataset, + graph_collate, + available_datasets, + load_dataset, + register_dataset, +) +from .tasks import HazardTask, available_hazard_tasks, get_hazard_task, has_hazard_task +from .configs import ( + BenchmarkConfig, + DatasetRef, + ExperimentConfig, + ModelRef, + ReportConfig, + dump_experiment_config, + load_experiment_config, +) +from .benchmarks import ( + Benchmark, + BenchmarkResult, + BenchmarkRunSummary, + available_benchmarks, + build_benchmark, + get_benchmark, + register_benchmark, + 
run_benchmark, +) +from .models import ( + CNNPatchEncoder, + ClassificationHead, + MLPBackbone, + RegressionHead, + SegmentationHead, + TemporalEncoder, + available_models, + build_model, + register_model, + WildfireMamba, + wildfire_mamba_builder, +) +from .metrics import ClassificationMetrics, MetricBase, RegressionMetrics, SegmentationMetrics +from .reports import BenchmarkReport, export_report_bundle +from .engine import BenchmarkRunner, Trainer +from .interactive_map import RAI_FIRE_URL, open_interactive_map + +__all__ = [ + "__version__", + "DataBundle", + "DataSplit", + "Dataset", + "FeatureSpec", + "LabelSpec", + "GraphTemporalDataset", + "graph_collate", + "available_datasets", + "load_dataset", + "register_dataset", + "HazardTask", + "available_hazard_tasks", + "get_hazard_task", + "has_hazard_task", + "BenchmarkConfig", + "DatasetRef", + "ExperimentConfig", + "ModelRef", + "ReportConfig", + "dump_experiment_config", + "load_experiment_config", + "Benchmark", + "BenchmarkResult", + "BenchmarkRunSummary", + "available_benchmarks", + "build_benchmark", + "get_benchmark", + "register_benchmark", + "run_benchmark", + "CNNPatchEncoder", + "ClassificationHead", + "RegressionHead", + "SegmentationHead", + "MLPBackbone", + "TemporalEncoder", + "available_models", + "build_model", + "register_model", + "WildfireMamba", + "wildfire_mamba_builder", + "BenchmarkReport", + "export_report_bundle", + "BenchmarkRunner", + "Trainer", + "MetricBase", + "ClassificationMetrics", + "RegressionMetrics", + "SegmentationMetrics", + "RAI_FIRE_URL", + "open_interactive_map", +] diff --git a/pyhazards/__main__.py b/pyhazards/__main__.py new file mode 100644 index 00000000..dc6c9065 --- /dev/null +++ b/pyhazards/__main__.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +import argparse +from typing import Sequence + +from .interactive_map import open_interactive_map + + +def build_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + prog="python -m 
pyhazards", + description="PyHazards command line utilities.", + ) + subparsers = parser.add_subparsers(dest="command") + + subparsers.add_parser( + "map", + help="Open the external RAI Fire interactive map.", + description="Open the external RAI Fire interactive map.", + ) + return parser + + +def main(argv: Sequence[str] | None = None) -> int: + parser = build_parser() + args = parser.parse_args(argv) + + if args.command == "map": + url = open_interactive_map(open_browser=True) + print(f"RAI Fire interactive map: {url}") + return 0 + + parser.print_help() + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/pyhazards/appendix_a_catalog.py b/pyhazards/appendix_a_catalog.py new file mode 100644 index 00000000..fa92c3ae --- /dev/null +++ b/pyhazards/appendix_a_catalog.py @@ -0,0 +1,325 @@ +from __future__ import annotations + +from dataclasses import dataclass +from pathlib import Path +from typing import Dict, List, Sequence + +from .model_catalog import ModelCard, group_cards_by_hazard, load_model_cards, public_catalog_cards + + +REPO_ROOT = Path(__file__).resolve().parent.parent +DOCS_SOURCE_DIR = REPO_ROOT / "docs" / "source" +APPENDIX_A_PAGE_PATH = DOCS_SOURCE_DIR / "appendix_a_coverage.rst" + +GENERATED_MARKER = ( + ".. This file is generated by scripts/render_appendix_a_docs.py. Do not edit by hand." 
+) + +STATUS_LABELS = { + "core": "Implemented", + "variant": "Variant only", + "experimental": "Experimental", + "missing": "Missing", +} + + +@dataclass(frozen=True) +class AppendixAEntry: + hazard_family: str + source_name: str + item_type: str + source_url: str + status: str + mapped_models: Sequence[str] = () + notes: str = "" + + +APPENDIX_A_ENTRIES: List[AppendixAEntry] = [ + AppendixAEntry("Earthquake", "PhaseNet", "Baseline", "https://github.com/AI4EPS/PhaseNet", "core", ("phasenet",), "Model adapter is implemented, but the SeisBench / pick-benchmark data path is still missing."), + AppendixAEntry("Earthquake", "EQTransformer", "Baseline", "https://github.com/smousavi05/EQTransformer", "core", ("eqtransformer",), "Model adapter is implemented, but the benchmark stack remains lighter than the PDF target."), + AppendixAEntry("Earthquake", "GPD", "Baseline", "https://github.com/interseismic/generalized-phase-detection", "core", ("gpd",), "Model adapter is implemented behind the shared picking interface."), + AppendixAEntry("Earthquake", "EQNet", "Baseline", "https://github.com/AI4EPS/EQNet", "core", ("eqnet",), "Model adapter is implemented behind the shared picking interface."), + AppendixAEntry("Earthquake", "SeisBench", "Benchmark / Data Ecosystem", "https://github.com/seisbench/seisbench", "core", notes="A synthetic-backed SeisBench-compatible waveform adapter is registered for smoke benchmarking."), + AppendixAEntry("Earthquake", "pick-benchmark", "Benchmark", "https://github.com/seisbench/pick-benchmark", "core", notes="A synthetic-backed pick-benchmark-compatible waveform adapter is registered for smoke benchmarking."), + AppendixAEntry("Earthquake", "pyCSEP", "Benchmark / Reports", "https://github.com/SCECCode/pycsep", "core", notes="The forecasting smoke benchmark exports a pyCSEP-style JSON artifact."), + AppendixAEntry("Earthquake", "AEFA", "Dataset / Forecast Benchmark", "https://github.com/chenyk1990/aefa", "core", notes="A synthetic-backed 
AEFA-style forecasting dataset adapter is registered."), + AppendixAEntry("Wildfire", "wildfire_forecasting", "Baseline", "https://github.com/Orion-AI-Lab/wildfire_forecasting", "core", ("wildfire_forecasting",)), + AppendixAEntry("Wildfire", "WildfireSpreadTS", "Baseline / Benchmark", "https://github.com/SebastianGer/WildfireSpreadTS", "core", ("wildfirespreadts",)), + AppendixAEntry("Wildfire", "ASUFM", "Baseline", "https://github.com/bronteee/fire-asufm", "core", ("asufm",)), + AppendixAEntry("Wildfire", "WRF-SFIRE", "Simulator Adapter", "https://github.com/openwfm/WRF-SFIRE", "core", ("wrf_sfire",), "The current adapter is lightweight and synthetic-backed rather than a full external simulator binding."), + AppendixAEntry("Wildfire", "ForeFire", "Simulator Adapter", "https://github.com/forefireAPI/forefire", "core", ("forefire",), "The current adapter is lightweight and synthetic-backed rather than a full external simulator binding."), + AppendixAEntry("Wildfire", "FireCastNet", "Optional Baseline", "https://github.com/SeasFire/firecastnet", "core", ("firecastnet",)), + AppendixAEntry("Flood", "NeuralHydrology", "Baseline Family", "https://github.com/neuralhydrology/neuralhydrology", "core", ("neuralhydrology_lstm", "neuralhydrology_ealstm"), "The LSTM and EA-LSTM adapters are implemented, but Caravan / WaterBench benchmark backing is still missing."), + AppendixAEntry("Flood", "Caravan", "Dataset", "https://github.com/kratzert/Caravan", "core", notes="A synthetic-backed Caravan adapter is registered for streamflow smoke benchmarking."), + AppendixAEntry("Flood", "WaterBench", "Dataset", "https://github.com/uihilab/WaterBench", "core", notes="A synthetic-backed WaterBench adapter is registered for streamflow smoke benchmarking."), + AppendixAEntry("Flood", "FloodCast", "Baseline", "https://github.com/HydroPML/FloodCast", "core", ("floodcast",), "The model adapter is implemented, but FloodCastBench-backed evaluation is not wired yet."), + AppendixAEntry("Flood", 
"FloodCastBench", "Benchmark", "https://github.com/HydroPML/FloodCastBench", "core", notes="A synthetic-backed FloodCastBench-style inundation adapter is registered."), + AppendixAEntry("Flood", "UrbanFloodCast", "Baseline", "https://github.com/HydroPML/UrbanFloodCast", "core", ("urbanfloodcast",), "The model adapter is implemented on synthetic inundation fixtures today."), + AppendixAEntry("Flood", "HydroBench", "Benchmark / Diagnostics", "https://github.com/EMscience/HydroBench", "core", notes="A synthetic-backed HydroBench adapter is registered for streamflow smoke benchmarking."), + AppendixAEntry("Flood", "google-research/flood-forecasting", "Reference Baseline", "https://github.com/google-research/flood-forecasting", "core", ("google_flood_forecasting",)), + AppendixAEntry("Hurricane / Tropical Cyclone", "Hurricast", "Baseline", "https://github.com/leobix/hurricast", "core", ("hurricast",), "The model adapter is implemented, but the real TCBench / IBTrACS data path is still missing."), + AppendixAEntry("Hurricane / Tropical Cyclone", "tropicalcyclone_MLP", "Baseline", "https://github.com/wenweixu/tropicalcyclone_MLP", "core", ("tropicalcyclone_mlp",), "The model adapter is implemented as a basin-filtered storm baseline."), + AppendixAEntry("Hurricane / Tropical Cyclone", "TCIF-fusion", "Baseline", "https://github.com/wangchong96/TCIF-fusion", "core", ("tcif_fusion",), "The model adapter is implemented behind the shared storm evaluator."), + AppendixAEntry("Hurricane / Tropical Cyclone", "SAF-Net", "Baseline", "https://github.com/xuguangning1218/TI_Prediction", "core", ("saf_net",), "The model adapter is implemented behind the shared storm evaluator."), + AppendixAEntry("Hurricane / Tropical Cyclone", "TropiCycloneNet", "Baseline", "https://github.com/xiaochengfuhuo/TropiCycloneNet", "core", ("tropicyclonenet",), "The model adapter is implemented, but the public benchmark/data track remains synthetic-first."), + AppendixAEntry("Hurricane / Tropical Cyclone", 
"TropiCycloneNet-Dataset", "Dataset", "https://github.com/xiaochengfuhuo/TropiCycloneNet-Dataset", "core", notes="A synthetic-backed TropiCycloneNet-Dataset adapter is registered."), + AppendixAEntry("Hurricane / Tropical Cyclone", "TCBench Alpha", "Benchmark", "https://github.com/msgomez06/TCBench_Alpha", "core", notes="A synthetic-backed TCBench Alpha adapter is registered."), + AppendixAEntry("Hurricane / Tropical Cyclone", "IBTrACS", "Dataset", "https://www.ncei.noaa.gov/products/international-best-track-archive", "core", notes="A synthetic-backed IBTrACS adapter is registered."), + AppendixAEntry("Hurricane / Tropical Cyclone", "GraphCast / GenCast", "Foundation Adapter", "https://github.com/google-deepmind/graphcast", "experimental", ("graphcast_tc",), "The current wrapper is intentionally lightweight and should not be counted as stable core coverage."), + AppendixAEntry("Hurricane / Tropical Cyclone", "Pangu-Weather", "Foundation Adapter", "https://github.com/198808xc/Pangu-Weather", "experimental", ("pangu_tc",), "The current wrapper is intentionally lightweight and should not be counted as stable core coverage."), + AppendixAEntry("Hurricane / Tropical Cyclone", "FourCastNet", "Foundation Adapter", "https://github.com/NVlabs/FourCastNet", "experimental", ("fourcastnet_tc",), "The current wrapper is intentionally lightweight and should not be counted as stable core coverage."), +] + + +def appendix_a_alignment_issues(cards: Sequence[ModelCard]) -> List[str]: + issues: List[str] = [] + mapping = {card.model_name: card for card in cards} + for entry in APPENDIX_A_ENTRIES: + for model_name in entry.mapped_models: + card = mapping.get(model_name) + if card is None: + issues.append( + "Coverage entry '{name}' maps to missing model card '{model_name}'.".format( + name=entry.source_name, + model_name=model_name, + ) + ) + continue + if entry.status == "core" and card.catalog_status != "core": + issues.append( + "Coverage entry '{name}' expects core status, found 
'{status}' on '{model_name}'.".format( + name=entry.source_name, + status=card.catalog_status, + model_name=model_name, + ) + ) + if entry.status == "experimental" and card.catalog_status != "experimental": + issues.append( + "Coverage entry '{name}' expects experimental status, found '{status}' on '{model_name}'.".format( + name=entry.source_name, + status=card.catalog_status, + model_name=model_name, + ) + ) + return issues + + +def _summary_rows() -> Dict[str, Dict[str, int]]: + summary: Dict[str, Dict[str, int]] = {} + for entry in APPENDIX_A_ENTRIES: + bucket = summary.setdefault( + entry.hazard_family, + {"core": 0, "experimental": 0, "missing": 0}, + ) + if entry.status in bucket: + bucket[entry.status] += 1 + return summary + + +def _grouped_non_core_cards(cards: Sequence[ModelCard]) -> Dict[str, List[List[ModelCard]]]: + grouped: Dict[str, List[List[ModelCard]]] = {} + public_cards = [card for card in public_catalog_cards(cards) if card.catalog_status in {"variant", "experimental"}] + for hazard, hazard_cards in group_cards_by_hazard(public_cards).items(): + entries: List[List[ModelCard]] = [] + family_seen = set() + ordered_cards = sorted(hazard_cards, key=lambda item: item.display_name.lower()) + for card in ordered_cards: + if card.family_key: + if card.family_key in family_seen: + continue + family_seen.add(card.family_key) + entries.append([member for member in ordered_cards if member.family_key == card.family_key]) + else: + entries.append([card]) + grouped[hazard] = entries + return grouped + + +def _linked_models(cards: Sequence[ModelCard]) -> str: + if not cards: + return "None" + return ", ".join( + ":doc:`{name} `".format( + name=card.display_name, + slug=card.module_doc_name, + ) + for card in cards + ) + + +def render_appendix_a_page(cards: Sequence[ModelCard] | None = None) -> str: + if cards is None: + cards = load_model_cards() + card_map = {card.model_name: card for card in cards} + summary = _summary_rows() + non_core = 
_grouped_non_core_cards(cards) + + lines: List[str] = [ + GENERATED_MARKER, + "", + "Coverage Audit", + "==============", + "", + "Overview", + "--------", + "", + "This page audits the current PyHazards implementation against the", + "planned methods, benchmarks, and datasets listed in ``pyhazard_plan.pdf``.", + "It separates implemented public entries from variant-only entries,", + "experimental wrappers, and items that are still missing.", + "", + "Status meanings:", + "", + "- ``Implemented``: a public PyHazards adapter exists for the named method or resource.", + "- ``Experimental``: a lightweight wrapper exists, but it should not be counted as stable core coverage.", + "- ``Missing``: no aligned adapter or benchmark integration is present yet.", + "", + "Hazard Summary", + "--------------", + "", + ".. list-table::", + " :widths: 26 18 18 18", + " :header-rows: 1", + " :class: dataset-list", + "", + " * - Hazard Family", + " - Implemented", + " - Experimental", + " - Missing", + ] + + for hazard_family, counts in summary.items(): + lines.extend( + [ + " * - {hazard}".format(hazard=hazard_family), + " - {count}".format(count=counts["core"]), + " - {count}".format(count=counts["experimental"]), + " - {count}".format(count=counts["missing"]), + ] + ) + + lines.extend( + [ + "", + "Method and Resource Matrix", + "--------------------------", + "", + ".. 
list-table::", + " :widths: 22 22 16 14 24 34", + " :header-rows: 1", + " :class: dataset-list", + "", + " * - Hazard Family", + " - Method / Resource", + " - Type", + " - Status", + " - PyHazards Mapping", + " - Notes", + ] + ) + + for entry in APPENDIX_A_ENTRIES: + mapped_cards = [card_map[name] for name in entry.mapped_models if name in card_map] + lines.extend( + [ + " * - {hazard}".format(hazard=entry.hazard_family), + " - `{name} <{url}>`_".format(name=entry.source_name, url=entry.source_url), + " - {item_type}".format(item_type=entry.item_type), + " - ``{status}``".format(status=STATUS_LABELS[entry.status]), + " - {mapping}".format(mapping=_linked_models(mapped_cards)), + " - {notes}".format(notes=entry.notes or " "), + ] + ) + + lines.extend( + [ + "", + "Current Public Non-Core Implementations", + "-" * len("Current Public Non-Core Implementations"), + "", + "These entries remain in the public catalog, but they are not counted as", + "part of the current core method set.", + "", + ".. list-table::", + " :widths: 18 18 28 36", + " :header-rows: 1", + " :class: dataset-list", + "", + " * - Hazard Family", + " - Catalog Status", + " - Public Entry", + " - Why it is non-core", + ] + ) + + for hazard, entries in non_core.items(): + for entry_cards in entries: + first = entry_cards[0] + if len(entry_cards) > 1: + label = first.family_label or first.display_name + public_entry = "{label} ({members})".format( + label=label, + members=", ".join( + ":doc:`{name} `".format( + name=card.display_name, + slug=card.module_doc_name, + ) + for card in entry_cards + ), + ) + reason = "Same-paper family variants are grouped so they do not count as multiple core methods." + elif first.catalog_status == "experimental": + public_entry = ":doc:`{name} `".format( + name=first.display_name, + slug=first.module_doc_name, + ) + reason = "Wrapper-style experimental adapter pending stronger benchmark and dataset support." 
+ else: + public_entry = ":doc:`{name} `".format( + name=first.display_name, + slug=first.module_doc_name, + ) + reason = "Implemented outside the current core method set and kept public as an additional model." + + lines.extend( + [ + " * - {hazard}".format(hazard=hazard), + " - ``{status}``".format(status=first.catalog_status), + " - {entry}".format(entry=public_entry), + " - {reason}".format(reason=reason), + ] + ) + + lines.extend( + [ + "", + "Execution Note", + "--------------", + "", + "Use `.github/ROADMAP_EXECUTION.md `_", + "as the checked-in multi-agent handoff for finishing the remaining roadmap work.", + "", + ] + ) + return "\n".join(lines) + + +def sync_generated_appendix_a_docs(check: bool = False) -> List[Path]: + cards = load_model_cards() + content = render_appendix_a_page(cards) + current = APPENDIX_A_PAGE_PATH.read_text(encoding="utf-8") if APPENDIX_A_PAGE_PATH.exists() else None + if current == content: + return [] + if not check: + APPENDIX_A_PAGE_PATH.parent.mkdir(parents=True, exist_ok=True) + APPENDIX_A_PAGE_PATH.write_text(content, encoding="utf-8") + return [APPENDIX_A_PAGE_PATH] + + +__all__ = [ + "APPENDIX_A_ENTRIES", + "APPENDIX_A_PAGE_PATH", + "appendix_a_alignment_issues", + "render_appendix_a_page", + "sync_generated_appendix_a_docs", +] diff --git a/pyhazards/benchmark_cards/aefa.yaml b/pyhazards/benchmark_cards/aefa.yaml new file mode 100644 index 00000000..3137452f --- /dev/null +++ b/pyhazards/benchmark_cards/aefa.yaml @@ -0,0 +1,26 @@ +slug: aefa +display_name: AEFA +kind: ecosystem +hazard_family: Earthquake +benchmark_key: earthquake +support_status: synthetic-backed +summary: > + AEFA-style forecasting dataset support for the shared earthquake forecasting path. +description: + - > + The AEFA alignment is implemented as a synthetic-backed dense-grid forecasting adapter + used by the WaveCastNet benchmark config. + - > + It keeps the forecasting task and metric shape aligned without claiming a full AEFA data pipeline. 
+source: + title: AEFA + url: https://github.com/chenyk1990/aefa +tasks: + - earthquake.forecasting +metrics: + - mae + - mse +smoke_configs: + - pyhazards/configs/earthquake/wavecastnet_benchmark_smoke.yaml +linked_models: + - wavecastnet diff --git a/pyhazards/benchmark_cards/caravan.yaml b/pyhazards/benchmark_cards/caravan.yaml new file mode 100644 index 00000000..cf2388e5 --- /dev/null +++ b/pyhazards/benchmark_cards/caravan.yaml @@ -0,0 +1,31 @@ +slug: caravan +display_name: Caravan +kind: ecosystem +hazard_family: Flood +benchmark_key: flood +support_status: synthetic-backed +summary: > + Caravan-style streamflow benchmark coverage for the shared flood streamflow evaluator. +description: + - > + The current Caravan alignment is a metadata-backed streamflow adapter layered on top + of the shared synthetic graph-temporal flood dataset. + - > + It currently drives the public smoke runs for NeuralHydrology LSTM and Google Flood Forecasting. +source: + title: Caravan - A global community dataset for large-sample hydrology + url: https://www.nature.com/articles/s41597-023-01975-w + repo_url: https://github.com/kratzert/Caravan +tasks: + - flood.streamflow +metrics: + - mae + - rmse + - nse + - kge +smoke_configs: + - pyhazards/configs/flood/neuralhydrology_lstm_smoke.yaml + - pyhazards/configs/flood/google_flood_forecasting_smoke.yaml +linked_models: + - neuralhydrology_lstm + - google_flood_forecasting diff --git a/pyhazards/benchmark_cards/earthquake_benchmark.yaml b/pyhazards/benchmark_cards/earthquake_benchmark.yaml new file mode 100644 index 00000000..467da8e4 --- /dev/null +++ b/pyhazards/benchmark_cards/earthquake_benchmark.yaml @@ -0,0 +1,40 @@ +slug: earthquake_benchmark +display_name: Earthquake Benchmark +kind: family +hazard_family: Earthquake +benchmark_key: earthquake +support_status: synthetic-backed +summary: > + Shared PyHazards evaluator family for earthquake phase-picking and wavefield-forecasting runs. 
+description: + - > + The earthquake benchmark family groups the picking and forecasting paths under one + registered evaluator and benchmark runner entrypoint. + - > + Current public coverage is synthetic-backed but already exposes the same task and + report shape used across the earthquake smoke configs. +tasks: + - earthquake.picking + - earthquake.forecasting +metrics: + - p_pick_mae + - s_pick_mae + - precision + - recall + - f1 + - mae + - mse +smoke_configs: + - pyhazards/configs/earthquake/phasenet_smoke.yaml + - pyhazards/configs/earthquake/eqtransformer_smoke.yaml + - pyhazards/configs/earthquake/gpd_smoke.yaml + - pyhazards/configs/earthquake/eqnet_smoke.yaml + - pyhazards/configs/earthquake/wavecastnet_benchmark_smoke.yaml +linked_models: + - phasenet + - eqtransformer + - gpd + - eqnet + - wavecastnet +notes: + - "Forecasting runs export a pyCSEP-style report artifact through the shared earthquake benchmark." diff --git a/pyhazards/benchmark_cards/flood_benchmark.yaml b/pyhazards/benchmark_cards/flood_benchmark.yaml new file mode 100644 index 00000000..028de0fa --- /dev/null +++ b/pyhazards/benchmark_cards/flood_benchmark.yaml @@ -0,0 +1,40 @@ +slug: flood_benchmark +display_name: Flood Benchmark +kind: family +hazard_family: Flood +benchmark_key: flood +support_status: synthetic-backed +summary: > + Shared PyHazards evaluator family for streamflow forecasting and inundation prediction. +description: + - > + The flood benchmark family keeps streamflow and inundation scoring under one shared + evaluator contract while preserving hazard-task-specific metrics. + - > + Current public coverage is synthetic-backed, but the same family already drives the + streamflow and inundation smoke configs used across the flood models. 
+tasks: + - flood.streamflow + - flood.inundation +metrics: + - mae + - rmse + - nse + - kge + - pixel_mae + - iou + - f1 +smoke_configs: + - pyhazards/configs/flood/hydrographnet_smoke.yaml + - pyhazards/configs/flood/neuralhydrology_lstm_smoke.yaml + - pyhazards/configs/flood/neuralhydrology_ealstm_smoke.yaml + - pyhazards/configs/flood/google_flood_forecasting_smoke.yaml + - pyhazards/configs/flood/floodcast_smoke.yaml + - pyhazards/configs/flood/urbanfloodcast_smoke.yaml +linked_models: + - hydrographnet + - neuralhydrology_lstm + - neuralhydrology_ealstm + - google_flood_forecasting + - floodcast + - urbanfloodcast diff --git a/pyhazards/benchmark_cards/floodcastbench.yaml b/pyhazards/benchmark_cards/floodcastbench.yaml new file mode 100644 index 00000000..b82e6757 --- /dev/null +++ b/pyhazards/benchmark_cards/floodcastbench.yaml @@ -0,0 +1,29 @@ +slug: floodcastbench +display_name: FloodCastBench +kind: ecosystem +hazard_family: Flood +benchmark_key: flood +support_status: synthetic-backed +summary: > + FloodCastBench-style inundation benchmark coverage for the shared flood inundation evaluator. +description: + - > + The current FloodCastBench alignment is implemented as a synthetic raster inundation + adapter used by the public inundation smoke configs. + - > + It documents the benchmark/data protocol behind the FloodCast and UrbanFloodCast smoke paths. 
+source: + title: FloodCastBench + url: https://github.com/HydroPML/FloodCastBench +tasks: + - flood.inundation +metrics: + - pixel_mae + - iou + - f1 +smoke_configs: + - pyhazards/configs/flood/floodcast_smoke.yaml + - pyhazards/configs/flood/urbanfloodcast_smoke.yaml +linked_models: + - floodcast + - urbanfloodcast diff --git a/pyhazards/benchmark_cards/hydrobench.yaml b/pyhazards/benchmark_cards/hydrobench.yaml new file mode 100644 index 00000000..d9d8de8b --- /dev/null +++ b/pyhazards/benchmark_cards/hydrobench.yaml @@ -0,0 +1,28 @@ +slug: hydrobench +display_name: HydroBench +kind: ecosystem +hazard_family: Flood +benchmark_key: flood +support_status: synthetic-backed +summary: > + HydroBench-style streamflow diagnostics coverage for the shared flood streamflow evaluator. +description: + - > + The current HydroBench alignment uses a metadata-backed streamflow adapter over the + shared synthetic flood streamflow dataset. + - > + It is currently exercised through the HydroGraphNet smoke benchmark path. +source: + title: HydroBench + url: https://github.com/EMscience/HydroBench +tasks: + - flood.streamflow +metrics: + - mae + - rmse + - nse + - kge +smoke_configs: + - pyhazards/configs/flood/hydrographnet_smoke.yaml +linked_models: + - hydrographnet diff --git a/pyhazards/benchmark_cards/ibtracs.yaml b/pyhazards/benchmark_cards/ibtracs.yaml new file mode 100644 index 00000000..0623b545 --- /dev/null +++ b/pyhazards/benchmark_cards/ibtracs.yaml @@ -0,0 +1,33 @@ +slug: ibtracs +display_name: IBTrACS +kind: ecosystem +hazard_family: Tropical Cyclone +benchmark_key: tc +support_status: synthetic-backed +summary: > + IBTrACS-backed storm benchmark coverage for the shared tropical cyclone evaluator. +description: + - > + The current IBTrACS alignment uses a metadata-backed storm-history adapter over the + shared synthetic tropical-cyclone dataset. 
+ - > + It is the benchmark ecosystem currently used by Hurricast and the experimental + weather-model adapter smoke configs. +source: + title: IBTrACS + url: https://www.ncei.noaa.gov/products/international-best-track-archive +tasks: + - tc.track_intensity +metrics: + - track_error + - intensity_mae +smoke_configs: + - pyhazards/configs/tc/hurricast_smoke.yaml + - pyhazards/configs/tc/graphcast_tc_smoke.yaml + - pyhazards/configs/tc/pangu_tc_smoke.yaml + - pyhazards/configs/tc/fourcastnet_tc_smoke.yaml +linked_models: + - hurricast + - graphcast_tc + - pangu_tc + - fourcastnet_tc diff --git a/pyhazards/benchmark_cards/pick_benchmark.yaml b/pyhazards/benchmark_cards/pick_benchmark.yaml new file mode 100644 index 00000000..ff09850a --- /dev/null +++ b/pyhazards/benchmark_cards/pick_benchmark.yaml @@ -0,0 +1,31 @@ +slug: pick_benchmark +display_name: pick-benchmark +kind: ecosystem +hazard_family: Earthquake +benchmark_key: earthquake +support_status: synthetic-backed +summary: > + pick-benchmark-compatible waveform picking support routed through the shared earthquake evaluator. +description: + - > + The current pick-benchmark path reuses the synthetic waveform picking bundle and tags + it as a pick-benchmark-style benchmark adapter. + - > + It supports the earthquake picking smoke path for the transformer and CNN picking baselines. 
+source: + title: pick-benchmark + url: https://github.com/seisbench/pick-benchmark +tasks: + - earthquake.picking +metrics: + - p_pick_mae + - s_pick_mae + - precision + - recall + - f1 +smoke_configs: + - pyhazards/configs/earthquake/eqtransformer_smoke.yaml + - pyhazards/configs/earthquake/gpd_smoke.yaml +linked_models: + - eqtransformer + - gpd diff --git a/pyhazards/benchmark_cards/pycsep.yaml b/pyhazards/benchmark_cards/pycsep.yaml new file mode 100644 index 00000000..7354e89e --- /dev/null +++ b/pyhazards/benchmark_cards/pycsep.yaml @@ -0,0 +1,28 @@ +slug: pycsep +display_name: pyCSEP +kind: ecosystem +hazard_family: Earthquake +benchmark_key: earthquake +support_status: synthetic-backed +summary: > + pyCSEP-style forecasting report export for the earthquake forecasting smoke path. +description: + - > + The current pyCSEP alignment is implemented as a report export contract inside the + shared earthquake benchmark rather than as a standalone benchmark family. + - > + It documents the forecasting artifact shape used by the WaveCastNet smoke config. +source: + title: pyCSEP + url: https://github.com/SCECCode/pycsep +tasks: + - earthquake.forecasting +metrics: + - mae + - mse +smoke_configs: + - pyhazards/configs/earthquake/wavecastnet_benchmark_smoke.yaml +linked_models: + - wavecastnet +notes: + - "Current repo support is report-export alignment, not a separate pyCSEP benchmark runner." diff --git a/pyhazards/benchmark_cards/seisbench.yaml b/pyhazards/benchmark_cards/seisbench.yaml new file mode 100644 index 00000000..69262490 --- /dev/null +++ b/pyhazards/benchmark_cards/seisbench.yaml @@ -0,0 +1,33 @@ +slug: seisbench +display_name: SeisBench +kind: ecosystem +hazard_family: Earthquake +benchmark_key: earthquake +support_status: synthetic-backed +summary: > + SeisBench-shaped waveform picking support for the shared earthquake benchmark family. 
+description: + - > + The current SeisBench path uses a synthetic waveform adapter that preserves the same + picking task shape expected by the shared earthquake benchmark. + - > + It exists today as a benchmark-compatible smoke path rather than a full external + SeisBench ingestion pipeline. +source: + title: SeisBench - A Toolbox for Machine Learning in Seismology + url: https://joss.theoj.org/papers/10.21105/joss.04418 + repo_url: https://github.com/seisbench/seisbench +tasks: + - earthquake.picking +metrics: + - p_pick_mae + - s_pick_mae + - precision + - recall + - f1 +smoke_configs: + - pyhazards/configs/earthquake/phasenet_smoke.yaml + - pyhazards/configs/earthquake/eqnet_smoke.yaml +linked_models: + - phasenet + - eqnet diff --git a/pyhazards/benchmark_cards/tcbench_alpha.yaml b/pyhazards/benchmark_cards/tcbench_alpha.yaml new file mode 100644 index 00000000..5bd16526 --- /dev/null +++ b/pyhazards/benchmark_cards/tcbench_alpha.yaml @@ -0,0 +1,30 @@ +slug: tcbench_alpha +display_name: TCBench Alpha +kind: ecosystem +hazard_family: Tropical Cyclone +benchmark_key: tc +support_status: synthetic-backed +summary: > + TCBench Alpha-style storm benchmark coverage for the shared tropical cyclone evaluator. +description: + - > + The current TCBench Alpha alignment uses a metadata-backed storm-history adapter over + the shared synthetic tropical-cyclone dataset. + - > + It currently drives the tropicalcyclone_MLP, SAF-Net, and TCIF-fusion smoke configs. 
+source: + title: TCBench Alpha + url: https://github.com/msgomez06/TCBench_Alpha +tasks: + - tc.track_intensity +metrics: + - track_error + - intensity_mae +smoke_configs: + - pyhazards/configs/tc/tropicalcyclone_mlp_smoke.yaml + - pyhazards/configs/tc/saf_net_smoke.yaml + - pyhazards/configs/tc/tcif_fusion_smoke.yaml +linked_models: + - tropicalcyclone_mlp + - saf_net + - tcif_fusion diff --git a/pyhazards/benchmark_cards/tropical_cyclone_benchmark.yaml b/pyhazards/benchmark_cards/tropical_cyclone_benchmark.yaml new file mode 100644 index 00000000..225629e0 --- /dev/null +++ b/pyhazards/benchmark_cards/tropical_cyclone_benchmark.yaml @@ -0,0 +1,40 @@ +slug: tropical_cyclone_benchmark +display_name: Tropical Cyclone Benchmark +kind: family +hazard_family: Tropical Cyclone +benchmark_key: tc +support_status: synthetic-backed +summary: > + Shared PyHazards evaluator family for tropical cyclone and hurricane track-intensity forecasting. +description: + - > + The tropical cyclone benchmark family is the single storm evaluator used by the + hurricane-specific and all-basin tropical-cyclone smoke configs. + - > + Current coverage is synthetic-backed, but the same evaluator contract already scores + core storm baselines and experimental weather-model adapters. 
+tasks: + - tc.track_intensity +metrics: + - track_error + - intensity_mae +smoke_configs: + - pyhazards/configs/tc/hurricast_smoke.yaml + - pyhazards/configs/tc/tropicalcyclone_mlp_smoke.yaml + - pyhazards/configs/tc/tropicyclonenet_smoke.yaml + - pyhazards/configs/tc/saf_net_smoke.yaml + - pyhazards/configs/tc/tcif_fusion_smoke.yaml + - pyhazards/configs/tc/graphcast_tc_smoke.yaml + - pyhazards/configs/tc/pangu_tc_smoke.yaml + - pyhazards/configs/tc/fourcastnet_tc_smoke.yaml +linked_models: + - hurricast + - tropicalcyclone_mlp + - tropicyclonenet + - saf_net + - tcif_fusion + - graphcast_tc + - pangu_tc + - fourcastnet_tc +notes: + - "IBTrACS, TCBench Alpha, and TropiCycloneNet-Dataset are surfaced as the public storm benchmark ecosystems." diff --git a/pyhazards/benchmark_cards/tropicyclonenet_dataset.yaml b/pyhazards/benchmark_cards/tropicyclonenet_dataset.yaml new file mode 100644 index 00000000..d0407f05 --- /dev/null +++ b/pyhazards/benchmark_cards/tropicyclonenet_dataset.yaml @@ -0,0 +1,26 @@ +slug: tropicyclonenet_dataset +display_name: TropiCycloneNet-Dataset +kind: ecosystem +hazard_family: Tropical Cyclone +benchmark_key: tc +support_status: synthetic-backed +summary: > + TropiCycloneNet-Dataset-backed storm benchmark coverage for the shared tropical cyclone evaluator. +description: + - > + The current TropiCycloneNet-Dataset alignment uses a metadata-backed storm-history adapter + over the shared synthetic tropical-cyclone dataset. + - > + It exists today to support the public TropiCycloneNet smoke benchmark path. 
+source: + title: TropiCycloneNet-Dataset + url: https://github.com/xiaochengfuhuo/TropiCycloneNet-Dataset +tasks: + - tc.track_intensity +metrics: + - track_error + - intensity_mae +smoke_configs: + - pyhazards/configs/tc/tropicyclonenet_smoke.yaml +linked_models: + - tropicyclonenet diff --git a/pyhazards/benchmark_cards/waterbench.yaml b/pyhazards/benchmark_cards/waterbench.yaml new file mode 100644 index 00000000..fb28e812 --- /dev/null +++ b/pyhazards/benchmark_cards/waterbench.yaml @@ -0,0 +1,29 @@ +slug: waterbench +display_name: WaterBench +kind: ecosystem +hazard_family: Flood +benchmark_key: flood +support_status: synthetic-backed +summary: > + WaterBench-style streamflow benchmark coverage for the shared flood evaluator. +description: + - > + The current WaterBench alignment uses a metadata-only adapter over the shared + synthetic streamflow bundle and preserves the streamflow task contract. + - > + It is currently exercised by the EA-LSTM smoke benchmark path. +source: + title: "WaterBench: A Large-scale Benchmark Dataset for Data-driven Streamflow Forecasting" + url: https://neurips.cc/virtual/2023/80632 + repo_url: https://github.com/uihilab/WaterBench +tasks: + - flood.streamflow +metrics: + - mae + - rmse + - nse + - kge +smoke_configs: + - pyhazards/configs/flood/neuralhydrology_ealstm_smoke.yaml +linked_models: + - neuralhydrology_ealstm diff --git a/pyhazards/benchmark_cards/wildfire_benchmark.yaml b/pyhazards/benchmark_cards/wildfire_benchmark.yaml new file mode 100644 index 00000000..f965b5fa --- /dev/null +++ b/pyhazards/benchmark_cards/wildfire_benchmark.yaml @@ -0,0 +1,49 @@ +slug: wildfire_benchmark +display_name: Wildfire Benchmark +kind: family +hazard_family: Wildfire +benchmark_key: wildfire +support_status: synthetic-backed +summary: > + Shared PyHazards evaluator family for wildfire danger and wildfire spread experiments. 
+description: + - > + The wildfire benchmark family is the single scoring layer for tabular danger tasks, + weekly forecasting tasks, and raster spread tasks. + - > + Current coverage is synthetic-backed, but it already exposes a single hazard-level + evaluator contract across wildfire danger and wildfire spread smoke configs. +tasks: + - wildfire.danger + - wildfire.spread +metrics: + - accuracy + - macro_f1 + - auc + - pr_auc + - mae + - rmse + - iou + - f1 + - burned_area_mae +smoke_configs: + - pyhazards/configs/wildfire/wildfire_danger_smoke.yaml + - pyhazards/configs/wildfire/wildfire_forecasting_smoke.yaml + - pyhazards/configs/wildfire/asufm_smoke.yaml + - pyhazards/configs/wildfire/wildfire_spread_smoke.yaml + - pyhazards/configs/wildfire/wildfirespreadts_smoke.yaml + - pyhazards/configs/wildfire/forefire_smoke.yaml + - pyhazards/configs/wildfire/wrf_sfire_smoke.yaml + - pyhazards/configs/wildfire/firecastnet_smoke.yaml +linked_models: + - wildfire_fpa + - asufm + - wildfire_aspp + - wildfirespreadts + - forefire + - wrf_sfire + - firecastnet +notes: + - "WildfireSpreadTS is the public Appendix-A benchmark ecosystem surfaced on this page." + - "Run artifacts are organized under runs/wildfire_benchmark/{smoke,real,archive}." + - "The canonical experiment-setting schema lives in pyhazards/benchmarks/wildfire_benchmark/experiment_settings.py." diff --git a/pyhazards/benchmark_cards/wildfirespreadts_ecosystem.yaml b/pyhazards/benchmark_cards/wildfirespreadts_ecosystem.yaml new file mode 100644 index 00000000..85771ead --- /dev/null +++ b/pyhazards/benchmark_cards/wildfirespreadts_ecosystem.yaml @@ -0,0 +1,36 @@ +slug: wildfirespreadts_ecosystem +display_name: WildfireSpreadTS +kind: ecosystem +hazard_family: Wildfire +benchmark_key: wildfire +support_status: synthetic-backed +summary: > + Temporal wildfire spread benchmark coverage for the shared wildfire spread evaluator. 
+description: + - > + WildfireSpreadTS is the public wildfire benchmark ecosystem surfaced from Appendix A. + - > + The current repo uses a synthetic temporal spread dataset to exercise the same + spread-task contract for WildfireSpreadTS-style evaluation. +source: + title: "WildfireSpreadTS: A Dataset of Multi-Modal Time Series for Wildfire Spread Prediction" + url: https://openreview.net/forum?id=RgdGkPRQ03 + repo_url: https://github.com/SebastianGer/WildfireSpreadTS +tasks: + - wildfire.spread +metrics: + - iou + - f1 + - burned_area_mae +smoke_configs: + - pyhazards/configs/wildfire/wildfire_spread_smoke.yaml + - pyhazards/configs/wildfire/wildfirespreadts_smoke.yaml + - pyhazards/configs/wildfire/forefire_smoke.yaml + - pyhazards/configs/wildfire/wrf_sfire_smoke.yaml + - pyhazards/configs/wildfire/firecastnet_smoke.yaml +linked_models: + - wildfire_aspp + - wildfirespreadts + - forefire + - wrf_sfire + - firecastnet diff --git a/pyhazards/benchmark_catalog.py b/pyhazards/benchmark_catalog.py new file mode 100644 index 00000000..1e3e4c28 --- /dev/null +++ b/pyhazards/benchmark_catalog.py @@ -0,0 +1,901 @@ +from __future__ import annotations + +from collections import defaultdict +from pathlib import Path +from typing import Dict, Iterable, List, Literal, Optional, Sequence, Set + +import yaml +from pydantic import BaseModel, Field, model_validator + +from .benchmarks import available_benchmarks, build_benchmark +from .configs import load_experiment_config +from .model_catalog import card_by_registry_name, load_model_cards +from .tasks import has_hazard_task + + +REPO_ROOT = Path(__file__).resolve().parent.parent +BENCHMARK_CARDS_DIR = Path(__file__).resolve().parent / "benchmark_cards" +DOCS_SOURCE_DIR = REPO_ROOT / "docs" / "source" +BENCHMARK_DOCS_DIR = DOCS_SOURCE_DIR / "benchmarks" +BENCHMARK_PAGE_PATH = DOCS_SOURCE_DIR / "pyhazards_benchmarks.rst" + +GENERATED_MARKER = ( + ".. This file is generated by scripts/render_benchmark_docs.py. 
Do not edit by hand." +) + +HAZARD_DISPLAY_ORDER = [ + "Wildfire", + "Earthquake", + "Flood", + "Tropical Cyclone", +] + +SUPPORT_STATUS_LABELS = { + "synthetic-backed": "Synthetic-backed", + "real-backed": "Real-backed", + "experimental": "Experimental", +} + +TASK_DISPLAY_LABELS = { + "wildfire.danger": "Danger", + "wildfire.spread": "Spread", + "earthquake.picking": "Phase Picking", + "earthquake.forecasting": "Wavefield Forecasting", + "flood.streamflow": "Streamflow", + "flood.inundation": "Inundation", + "tc.track_intensity": "Track + Intensity", +} + +SUPPORT_STATUS_BADGE_ROLES = { + "synthetic-backed": "info", + "real-backed": "success", + "experimental": "warning", +} + +METRIC_DISPLAY_LABELS = { + "accuracy": "Accuracy", + "macro_f1": "Macro F1", + "auc": "AUC", + "pr_auc": "PR-AUC", + "mae": "MAE", + "rmse": "RMSE", + "mse": "MSE", + "iou": "IoU", + "f1": "F1", + "precision": "Precision", + "recall": "Recall", + "nse": "NSE", + "kge": "KGE", + "p_pick_mae": "P-pick MAE", + "s_pick_mae": "S-pick MAE", + "pixel_mae": "Pixel MAE", + "burned_area_mae": "Burned-area MAE", + "track_error": "Track Error", + "intensity_mae": "Intensity MAE", +} + + +class BenchmarkSource(BaseModel): + title: str + url: str + repo_url: Optional[str] = None + + +class BenchmarkCard(BaseModel): + slug: str + display_name: str + kind: Literal["family", "ecosystem"] + hazard_family: str + benchmark_key: str + support_status: Literal["synthetic-backed", "real-backed", "experimental"] + summary: str + description: List[str] + tasks: List[str] + metrics: List[str] + smoke_configs: List[str] + linked_models: List[str] = Field(default_factory=list) + notes: List[str] = Field(default_factory=list) + source: Optional[BenchmarkSource] = None + + @model_validator(mode="after") + def validate_card(self) -> "BenchmarkCard": + if self.kind == "ecosystem" and self.source is None: + raise ValueError("ecosystem benchmark cards require a source block") + return self + + @property + def 
doc_path(self) -> Path: + return BENCHMARK_DOCS_DIR / f"{self.slug}.rst" + + @property + def doc_target(self) -> str: + return f"benchmarks/{self.slug}" + + +def load_benchmark_cards(cards_dir: Path = BENCHMARK_CARDS_DIR) -> List[BenchmarkCard]: + cards: List[BenchmarkCard] = [] + seen_slugs: Set[str] = set() + for path in sorted(cards_dir.glob("*.y*ml")): + raw = yaml.safe_load(path.read_text(encoding="utf-8")) or {} + card = BenchmarkCard.model_validate(raw) + if path.stem != card.slug: + raise ValueError( + "Benchmark card filename must match slug: " + f"{path.name} vs {card.slug}" + ) + if card.slug in seen_slugs: + raise ValueError(f"Duplicate benchmark card slug detected: {card.slug}") + seen_slugs.add(card.slug) + cards.append(card) + return cards + + +def _ordered_unique(items: Iterable[str]) -> List[str]: + seen = set() + ordered: List[str] = [] + for item in items: + if item not in seen: + ordered.append(item) + seen.add(item) + return ordered + + +def _single_line(text: str) -> str: + return " ".join(text.split()) + + +def _indent_lines(lines: Sequence[str], prefix: str = " ") -> List[str]: + return [prefix + line if line else "" for line in lines] + + +def _badge(role: str, text: str) -> str: + return f":bdg-{role}:`{text}`" + + +def _task_labels(tasks: Sequence[str]) -> List[str]: + return _ordered_unique(TASK_DISPLAY_LABELS.get(task, task) for task in tasks) + + +def _count_phrase(count: int, noun: str) -> str: + suffix = "" if count == 1 else "s" + return f"{count} {noun}{suffix}" + + +def _benchmark_metrics(benchmark_key: str) -> Dict[str, Sequence[str]]: + benchmark = build_benchmark(benchmark_key) + metric_names_by_task = getattr(benchmark, "metric_names_by_task", {}) + return {task: list(metrics) for task, metrics in metric_names_by_task.items()} + + +def _order_key(hazard: str) -> tuple[int, str]: + if hazard in HAZARD_DISPLAY_ORDER: + return (HAZARD_DISPLAY_ORDER.index(hazard), hazard.lower()) + return (len(HAZARD_DISPLAY_ORDER), 
hazard.lower()) + + +def _group_cards(cards: Sequence[BenchmarkCard], kind: str) -> Dict[str, List[BenchmarkCard]]: + grouped: Dict[str, List[BenchmarkCard]] = defaultdict(list) + for card in cards: + if card.kind == kind: + grouped[card.hazard_family].append(card) + return { + hazard: sorted(hazard_cards, key=lambda item: item.display_name.lower()) + for hazard, hazard_cards in sorted(grouped.items(), key=lambda item: _order_key(item[0])) + } + + +def _benchmark_doc_link(card: BenchmarkCard, absolute: bool = False) -> str: + target = f"/{card.doc_target}" if absolute else card.doc_target + return f":doc:`{card.display_name} <{target}>`" + + +def _linked_models(card: BenchmarkCard, absolute: bool = False) -> str: + if not card.linked_models: + return "None." + model_map = card_by_registry_name(load_model_cards()) + links: List[str] = [] + for name in card.linked_models: + model_card = model_map[name] + target = ( + f"/modules/{model_card.module_doc_name}" + if absolute + else f"modules/{model_card.module_doc_name}" + ) + links.append(f":doc:`{model_card.display_name} <{target}>`") + return ", ".join(links) + "." + + +def _linked_configs(card: BenchmarkCard) -> str: + if not card.smoke_configs: + return "None." + return ", ".join(f"``{Path(path).name}``" for path in card.smoke_configs) + "." + + +def _source_sentence(card: BenchmarkCard) -> str: + if card.source is None: + return "" + sentence = f"`{card.source.title} <{card.source.url}>`_" + if card.source.repo_url: + sentence += f" (`repo <{card.source.repo_url}>`__)" + return sentence + "." 
+ + +def _status_label(card: BenchmarkCard) -> str: + return SUPPORT_STATUS_LABELS[card.support_status] + + +def _family_lookup(cards: Sequence[BenchmarkCard]) -> Dict[str, BenchmarkCard]: + return { + card.benchmark_key: card + for card in cards + if card.kind == "family" + } + + +def _ecosystem_links( + cards: Sequence[BenchmarkCard], + benchmark_key: str, + absolute: bool = False, +) -> str: + ecosystems = [ + _benchmark_doc_link(card, absolute=absolute) + for card in cards + if card.kind == "ecosystem" and card.benchmark_key == benchmark_key + ] + if not ecosystems: + return "None." + return ", ".join(ecosystems) + "." + + +def _row_summary(card: BenchmarkCard, family_lookup: Dict[str, BenchmarkCard]) -> str: + task_text = ", ".join(f"``{task}``" for task in card.tasks) + metric_text = ", ".join(f"``{metric}``" for metric in card.metrics) + support_text = _status_label(card) + summary_text = _single_line(card.summary).rstrip(".") + if card.kind == "family": + return ( + f"{summary_text}." + f" Covers {task_text}. Key metrics: {metric_text}. " + f"Support: {support_text}. Smoke configs: {_linked_configs(card)}" + ) + + family = family_lookup[card.benchmark_key] + source = _source_sentence(card) + linked_models = _linked_models(card) + return ( + f"{summary_text}." + f" Runs through {_benchmark_doc_link(family)} with {task_text}. " + f"Key metrics: {metric_text}. Support: {support_text}. 
" + f"Smoke configs: {_linked_configs(card)} Linked models: {linked_models} " + f"{source}".strip() + ) + + +def benchmark_catalog_alignment_issues(cards: Optional[Sequence[BenchmarkCard]] = None) -> List[str]: + if cards is None: + cards = load_benchmark_cards() + + issues: List[str] = [] + registered = set(available_benchmarks()) + model_map = card_by_registry_name(load_model_cards()) + family_lookup = _family_lookup(cards) + + for card in cards: + if card.benchmark_key not in registered: + issues.append( + f"Benchmark card '{card.slug}' points to unregistered benchmark '{card.benchmark_key}'." + ) + continue + + metric_names_by_task = _benchmark_metrics(card.benchmark_key) + benchmark_tasks = list(metric_names_by_task.keys()) + benchmark_metrics = _ordered_unique( + metric for task in benchmark_tasks for metric in metric_names_by_task.get(task, []) + ) + + if card.kind == "family" and set(card.tasks) != set(benchmark_tasks): + issues.append( + f"Family benchmark card '{card.slug}' tasks do not match benchmark '{card.benchmark_key}'." + ) + + for task in card.tasks: + if not has_hazard_task(task): + issues.append(f"Benchmark card '{card.slug}' declares unknown hazard task '{task}'.") + elif task not in metric_names_by_task: + issues.append( + f"Benchmark card '{card.slug}' uses hazard task '{task}' not declared by '{card.benchmark_key}'." + ) + + for metric in card.metrics: + if metric not in benchmark_metrics: + issues.append( + f"Benchmark card '{card.slug}' declares metric '{metric}' not exposed by '{card.benchmark_key}'." + ) + + if card.kind == "ecosystem" and card.benchmark_key not in family_lookup: + issues.append( + f"Ecosystem benchmark card '{card.slug}' points to benchmark '{card.benchmark_key}' without a family card." 
+ ) + + for config_path in card.smoke_configs: + path = REPO_ROOT / config_path + if not path.exists(): + issues.append(f"Smoke config does not exist: {config_path}") + continue + config = load_experiment_config(path) + if config.benchmark.name != card.benchmark_key: + issues.append( + f"Smoke config '{config_path}' targets benchmark '{config.benchmark.name}', " + f"expected '{card.benchmark_key}'." + ) + if config.benchmark.hazard_task not in card.tasks: + issues.append( + f"Smoke config '{config_path}' uses hazard task '{config.benchmark.hazard_task}' " + f"outside benchmark card '{card.slug}'." + ) + + for model_name in card.linked_models: + if model_name not in model_map: + issues.append( + f"Benchmark card '{card.slug}' links unknown model '{model_name}'." + ) + + return issues + + +def _stat_card(title: str, value: str, note: str) -> List[str]: + return [ + ".. grid-item-card:: {title}".format(title=title), + " :class-card: catalog-stat-card", + "", + " .. container:: catalog-stat-value", + "", + " {value}".format(value=value), + "", + " .. 
container:: catalog-stat-note", + "", + " {note}".format(note=note), + "", + ] + + +def _model_count_label(card: BenchmarkCard) -> str: + count = len(card.linked_models) + return "{count} model{suffix}".format(count=count, suffix="" if count == 1 else "s") + + +def _ecosystem_count(cards: Sequence[BenchmarkCard], benchmark_key: str) -> int: + return len( + [ + card + for card in cards + if card.kind == "ecosystem" and card.benchmark_key == benchmark_key + ] + ) + + +def _summary_metrics(metrics: Sequence[str], limit: int = 4) -> str: + labels = [METRIC_DISPLAY_LABELS.get(metric, metric.replace("_", " ").title()) for metric in metrics[:limit]] + if len(metrics) > limit: + labels.append("+{count} more".format(count=len(metrics) - limit)) + return ", ".join(labels) + + +def _summary_tasks(tasks: Sequence[str]) -> str: + return ", ".join(_task_labels(tasks)) + + +def _render_family_card(card: BenchmarkCard, cards: Sequence[BenchmarkCard]) -> List[str]: + badge_line = " ".join( + [ + _badge("primary", card.hazard_family), + *[_badge("secondary", label) for label in _task_labels(card.tasks)], + _badge( + SUPPORT_STATUS_BADGE_ROLES[card.support_status], + _status_label(card), + ), + ] + ) + return [ + ".. grid-item-card:: {title}".format(title=card.display_name), + " :class-card: catalog-entry-card", + "", + " .. container:: catalog-entry-summary", + "", + " {summary}".format(summary=_single_line(card.summary).rstrip(".") + "."), + "", + " .. container:: catalog-chip-row", + "", + " {badges}".format(badges=badge_line), + "", + " .. container:: catalog-meta-row", + "", + " **Tasks:** {tasks}".format(tasks=_summary_tasks(card.tasks)), + "", + " .. container:: catalog-meta-row", + "", + " **Key Metrics:** {metrics}".format(metrics=_summary_metrics(card.metrics)), + "", + " .. 
container:: catalog-meta-row", + "", + " **Coverage:** {configs} | {models} | {ecosystems}".format( + configs=_count_phrase(len(card.smoke_configs), "smoke config"), + models=_model_count_label(card), + ecosystems=_count_phrase(_ecosystem_count(cards, card.benchmark_key), "ecosystem"), + ), + "", + " .. container:: catalog-link-row", + "", + " **View Details:** {details}".format( + details=_benchmark_doc_link(card), + ), + "", + ] + + +def _render_ecosystem_card(card: BenchmarkCard, family_lookup: Dict[str, BenchmarkCard]) -> List[str]: + family = family_lookup[card.benchmark_key] + badge_line = " ".join( + [ + _badge("primary", card.hazard_family), + *[_badge("secondary", label) for label in _task_labels(card.tasks)], + _badge( + SUPPORT_STATUS_BADGE_ROLES[card.support_status], + _status_label(card), + ), + ] + ) + source_links = [f"**Paper:** `{card.source.title} <{card.source.url}>`_"] if card.source else [] + if card.source and card.source.repo_url: + source_links.append(f"**Repo:** `Repository <{card.source.repo_url}>`__") + return [ + ".. grid-item-card:: {title}".format(title=card.display_name), + " :class-card: catalog-entry-card", + "", + " .. container:: catalog-entry-summary", + "", + " {summary}".format(summary=_single_line(card.summary).rstrip(".") + "."), + "", + " .. container:: catalog-chip-row", + "", + " {badges}".format(badges=badge_line), + "", + " .. container:: catalog-meta-row", + "", + " **Benchmark Family:** {family}".format( + family=_benchmark_doc_link(family), + ), + "", + " .. container:: catalog-meta-row", + "", + " **Key Metrics:** {metrics}".format(metrics=_summary_metrics(card.metrics)), + "", + " .. container:: catalog-meta-row", + "", + " **Coverage:** {configs} | {models}".format( + configs=_count_phrase(len(card.smoke_configs), "smoke config"), + models=_model_count_label(card), + ), + "", + " .. container:: catalog-link-row", + "", + " **View Details:** {details}".format(details=_benchmark_doc_link(card)), + "", + " .. 
container:: catalog-link-row", + "", + " {links}".format(links=" | ".join(source_links)), + "", + ] + + +def _render_card_grid(entries: Sequence[List[str]]) -> List[str]: + lines: List[str] = [ + ".. grid:: 1 1 2 2", + " :gutter: 2", + " :class-container: catalog-grid", + "", + ] + for entry in entries: + lines.extend(_indent_lines(entry)) + return lines + + +def render_benchmark_page(cards: Optional[Sequence[BenchmarkCard]] = None) -> str: + if cards is None: + cards = load_benchmark_cards() + family_lookup = _family_lookup(cards) + families = _group_cards(cards, "family") + ecosystems = _group_cards(cards, "ecosystem") + family_cards = [card for card in cards if card.kind == "family"] + ecosystem_cards = [card for card in cards if card.kind == "ecosystem"] + unique_tasks = _ordered_unique(task for card in family_cards for task in card.tasks) + unique_smoke_configs = _ordered_unique( + config for card in family_cards for config in card.smoke_configs + ) + + lines: List[str] = [ + GENERATED_MARKER, + "", + "Benchmarks", + "===================", + "", + "Explore shared benchmark families, aligned external ecosystems, supported", + "tasks, and model compatibility across PyHazards.", + "", + "At a Glance", + "-----------", + "", + ".. 
grid:: 1 2 4 4", + " :gutter: 2", + " :class-container: catalog-grid", + "", + ] + lines.extend( + _indent_lines( + _stat_card( + "Benchmark Families", + str(len(family_cards)), + "Shared evaluator families available through the benchmark runner.", + ) + ) + ) + lines.extend( + _indent_lines( + _stat_card( + "Ecosystem Mappings", + str(len(ecosystem_cards)), + "External benchmark or data ecosystems linked from the public docs.", + ) + ) + ) + lines.extend( + _indent_lines( + _stat_card( + "Supported Task Families", + str(len(unique_tasks)), + "Hazard tasks covered across the family-level benchmark contracts.", + ) + ) + ) + lines.extend( + _indent_lines( + _stat_card( + "Smoke Configurations", + str(len(unique_smoke_configs)), + "Unique smoke configs referenced by the benchmark family cards.", + ) + ) + ) + + lines.extend( + [ + "", + "Benchmark Families", + "------------------", + "", + "These four cards summarize the benchmark families exposed through the", + "shared runner and compress the core tasks, metrics, support level, and", + "coverage counts into a scan-friendly catalog.", + "", + ] + ) + family_grid_cards: List[List[str]] = [] + for hazard in HAZARD_DISPLAY_ORDER: + for card in families.get(hazard, []): + family_grid_cards.append(_render_family_card(card, cards)) + lines.extend(_render_card_grid(family_grid_cards)) + + lines.extend( + [ + "", + "Coverage Matrix", + "---------------", + "", + "Use the matrix below for side-by-side comparison of hazard coverage,", + "family-level tasks, primary metrics, linked-model counts, and support", + "status without opening the detail pages first.", + "", + ".. 
list-table::", + " :widths: 14 22 18 20 14 12", + " :header-rows: 1", + " :class: catalog-matrix", + "", + " * - Hazard", + " - Benchmark Family", + " - Tasks", + " - Primary Metrics", + " - Linked Models", + " - Support Status", + ] + ) + for hazard in HAZARD_DISPLAY_ORDER: + for card in families.get(hazard, []): + lines.extend( + [ + " * - {hazard}".format(hazard=hazard), + " - {family}".format(family=_benchmark_doc_link(card)), + " - {tasks}".format(tasks=_summary_tasks(card.tasks)), + " - {metrics}".format(metrics=_summary_metrics(card.metrics)), + " - {models}".format(models=_model_count_label(card)), + " - {status}".format(status=_status_label(card)), + ] + ) + + lines.extend( + [ + "", + "Benchmark Ecosystems", + "--------------------", + "", + "Browse the aligned benchmark ecosystems by hazard family. Each card", + "links to a detail page with the routed benchmark family, source links,", + "and the models currently mapped to that ecosystem.", + "", + ".. tab-set::", + " :class: catalog-tabs", + "", + ] + ) + + for hazard in HAZARD_DISPLAY_ORDER: + hazard_cards = ecosystems.get(hazard, []) + if not hazard_cards: + continue + tab_lines: List[str] = [ + ".. tab-item:: {hazard}".format(hazard=hazard), + "", + " .. container:: catalog-section-note", + "", + " Ecosystem cards describe the external benchmark or data protocol", + " surfaced on this page and show how it maps back to the shared", + " PyHazards benchmark family.", + "", + ] + tab_lines.extend( + _indent_lines( + _render_card_grid( + [_render_ecosystem_card(card, family_lookup) for card in hazard_cards] + ) + ) + ) + tab_lines.append("") + lines.extend(_indent_lines(tab_lines)) + + lines.extend( + [ + "", + "Programmatic Use", + "----------------", + "", + ".. 
code-block:: python", + "", + " from pyhazards.configs import load_experiment_config", + " from pyhazards.engine import BenchmarkRunner", + "", + ' config = load_experiment_config("pyhazards/configs/earthquake/phasenet_smoke.yaml")', + " summary = BenchmarkRunner().run(config)", + " print(summary.metrics)", + "", + "Use ``python scripts/run_benchmark.py --help`` for the CLI entry point,", + "then pair this page with :doc:`pyhazards_configs` for experiment YAMLs", + "and :doc:`pyhazards_reports` for comparable benchmark exports.", + "", + ".. toctree::", + " :maxdepth: 1", + " :hidden:", + "", + ] + ) + + for card in cards: + lines.append(f" benchmarks/{card.slug}") + + lines.append("") + return "\n".join(lines) + + +def render_benchmark_detail_page( + card: BenchmarkCard, + cards: Sequence[BenchmarkCard], +) -> str: + family_lookup = _family_lookup(cards) + benchmark = build_benchmark(card.benchmark_key) + support_badge = _badge( + SUPPORT_STATUS_BADGE_ROLES[card.support_status], + _status_label(card), + ) + linked_models = _linked_models(card, absolute=True) + + lines: List[str] = [ + GENERATED_MARKER, + "", + card.display_name, + "=" * len(card.display_name), + "", + "Overview", + "--------", + "", + ] + for paragraph in card.description: + lines.append(_single_line(paragraph)) + lines.append("") + + lines.extend( + [ + "At a Glance", + "-----------", + "", + ".. 
grid:: 1 2 4 4", + " :gutter: 2", + " :class-container: catalog-grid", + "", + ] + ) + lines.extend( + _indent_lines( + _stat_card("Kind", card.kind.title(), "Family benchmark or external ecosystem view.") + ) + ) + lines.extend( + _indent_lines( + _stat_card("Hazard Family", card.hazard_family, "Public hazard grouping used on the benchmark index page.") + ) + ) + lines.extend( + _indent_lines( + _stat_card("Support Status", support_badge, "Current maturity of the adapter or evaluator path.") + ) + ) + lines.extend( + _indent_lines( + _stat_card("Linked Models", str(len(card.linked_models)), _model_count_label(card)) + ) + ) + + lines.extend( + [ + "", + "Benchmark Mapping", + "-----------------", + "", + "**Shared benchmark key:** ``{key}``".format(key=card.benchmark_key), + "", + "**Registered class:** ``{name}``".format(name=benchmark.__class__.__name__), + "", + ] + ) + + if card.kind == "ecosystem": + family = family_lookup[card.benchmark_key] + lines.extend( + [ + "Mapped benchmark family", + "~~~~~~~~~~~~~~~~~~~~~~~", + "", + _benchmark_doc_link(family, absolute=True), + "", + ] + ) + source = _source_sentence(card) + if source: + lines.extend( + [ + "Primary Source", + "~~~~~~~~~~~~~~", + "", + source, + "", + ] + ) + else: + lines.extend( + [ + "Mapped benchmark ecosystems", + "~~~~~~~~~~~~~~~~~~~~~~~~~~~", + "", + _ecosystem_links(cards, card.benchmark_key, absolute=True), + "", + ] + ) + + lines.extend( + [ + ".. dropdown:: Supported Tasks", + " :class-container: catalog-dropdown", + "", + ] + ) + for task in card.tasks: + lines.append(" - {task}".format(task=TASK_DISPLAY_LABELS.get(task, task))) + lines.append("") + + lines.extend( + [ + ".. dropdown:: Key Metrics", + " :class-container: catalog-dropdown", + "", + ] + ) + for metric in card.metrics: + lines.append(" - ``{metric}``".format(metric=metric)) + lines.append("") + + lines.extend( + [ + ".. 
dropdown:: Smoke Configs", + " :class-container: catalog-dropdown", + "", + ] + ) + for config_path in card.smoke_configs: + lines.append(" - ``{name}``".format(name=Path(config_path).name)) + lines.append("") + + lines.extend( + [ + ".. dropdown:: Linked Models", + " :class-container: catalog-dropdown", + "", + " {models}".format(models=linked_models), + "", + ] + ) + + if card.notes: + lines.extend([".. dropdown:: Notes", " :class-container: catalog-dropdown", ""]) + for note in card.notes: + lines.append(" - {note}".format(note=_single_line(note))) + lines.append("") + + return "\n".join(lines) + + +def rendered_benchmark_docs(cards: Optional[Sequence[BenchmarkCard]] = None) -> Dict[Path, str]: + if cards is None: + cards = load_benchmark_cards() + targets: Dict[Path, str] = { + BENCHMARK_PAGE_PATH: render_benchmark_page(cards), + } + for card in cards: + targets[card.doc_path] = render_benchmark_detail_page(card, cards) + return targets + + +def sync_generated_benchmark_docs(check: bool = False) -> List[Path]: + cards = load_benchmark_cards() + targets = rendered_benchmark_docs(cards) + changes: List[Path] = [] + + for path, content in targets.items(): + current = path.read_text(encoding="utf-8") if path.exists() else None + if current != content: + changes.append(path) + if not check: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(content, encoding="utf-8") + + managed_paths = set(targets.keys()) + if BENCHMARK_DOCS_DIR.exists(): + for path in BENCHMARK_DOCS_DIR.glob("*.rst"): + if path in managed_paths or not path.exists(): + continue + text = path.read_text(encoding="utf-8") + if GENERATED_MARKER not in text: + continue + changes.append(path) + if not check: + path.unlink() + + return changes + + +__all__ = [ + "BENCHMARK_CARDS_DIR", + "BENCHMARK_PAGE_PATH", + "BenchmarkCard", + "benchmark_catalog_alignment_issues", + "load_benchmark_cards", + "render_benchmark_page", + "rendered_benchmark_docs", + "sync_generated_benchmark_docs", +] diff 
--git a/pyhazards/benchmarks/__init__.py b/pyhazards/benchmarks/__init__.py new file mode 100644 index 00000000..7a9b5fcc --- /dev/null +++ b/pyhazards/benchmarks/__init__.py @@ -0,0 +1,81 @@ +from .base import Benchmark +from .registry import available_benchmarks, build_benchmark, get_benchmark, register_benchmark +from .runner import run_benchmark +from .schemas import BenchmarkResult, BenchmarkRunSummary +from .earthquake import EarthquakeBenchmark +from .wildfire import WildfireBenchmark +from .wildfire_benchmark import ( + AdapterRunOutput, + BenchmarkSection, + CacheBuildSummary, + align_static_fuel_to_cache, + REPRESENTATIVE_MODELS, + EvaluationProtocolSection, + ModelSection, + RunSection, + RunPaths, + WILDFIRE_BENCHMARK_CONFIG_ROOT, + WILDFIRE_RUNS_ROOT, + WildfireExperimentSetting, + WildfireSmokeAdapter, + SyntheticWildfireModelAdapter, + build_cache, + run_real_baselines, + build_default_experiment_setting, + write_experiment_setting, + prepare_run_paths, + build_experiment_setting_from_run_output, + build_model_template, + load_contract, + load_model_catalog, + parse_seed_list, + select_models, + run_smoke_batch, + create_adapter, + resolve_local_model_name, +) +from .flood import FloodBenchmark +from .tc import TropicalCycloneBenchmark + +__all__ = [ + "Benchmark", + "EarthquakeBenchmark", + "FloodBenchmark", + "BenchmarkResult", + "BenchmarkRunSummary", + "TropicalCycloneBenchmark", + "WildfireBenchmark", + "AdapterRunOutput", + "BenchmarkSection", + "CacheBuildSummary", + "align_static_fuel_to_cache", + "REPRESENTATIVE_MODELS", + "EvaluationProtocolSection", + "ModelSection", + "RunSection", + "RunPaths", + "WILDFIRE_BENCHMARK_CONFIG_ROOT", + "WILDFIRE_RUNS_ROOT", + "WildfireExperimentSetting", + "WildfireSmokeAdapter", + "SyntheticWildfireModelAdapter", + "build_cache", + "run_real_baselines", + "build_default_experiment_setting", + "write_experiment_setting", + "prepare_run_paths", + "build_experiment_setting_from_run_output", + 
class Benchmark(ABC):
    """Abstract contract shared by every hazard benchmark evaluator.

    Concrete subclasses implement :meth:`evaluate`; cross-result metric
    averaging and report export live here so all hazard families report
    results through the same path.
    """

    # Registry/display identifier for the benchmark.
    name: str = "benchmark"
    # Default hazard task key (e.g. "earthquake.picking").
    hazard_task: str = ""

    @abstractmethod
    def evaluate(
        self,
        model: nn.Module,
        data: DataBundle,
        config: ExperimentConfig,
    ) -> BenchmarkResult:
        """Run *model* against *data* and return a populated result."""
        raise NotImplementedError

    def aggregate_metrics(self, results: Sequence[BenchmarkResult]) -> Dict[str, float]:
        """Average each metric name across *results*.

        Returns a dict keyed in sorted metric-name order; metrics absent
        from some results are averaged only over the results that carry them.
        """
        sums: Dict[str, float] = {}
        seen: Dict[str, int] = {}
        for item in results:
            for metric_name, metric_value in item.metrics.items():
                sums[metric_name] = sums.get(metric_name, 0.0) + float(metric_value)
                seen[metric_name] = seen.get(metric_name, 0) + 1
        averaged: Dict[str, float] = {}
        for metric_name in sorted(sums):
            # seen[metric_name] is >= 1 whenever sums has the key; the guard
            # mirrors the original defensive check.
            if seen[metric_name] > 0:
                averaged[metric_name] = sums[metric_name] / seen[metric_name]
        return averaged

    def export_report(
        self,
        result: BenchmarkResult,
        output_dir: str,
        formats: Iterable[str],
    ) -> Dict[str, str]:
        """Wrap *result* in a BenchmarkReport and export one file per format.

        Returns a mapping of format name to the written artifact path.
        """
        report = BenchmarkReport(
            benchmark_name=result.benchmark_name,
            hazard_task=result.hazard_task,
            metrics=result.metrics,
            metadata=result.metadata,
            artifacts=result.artifacts,
        )
        return export_report_bundle(report, output_dir=output_dir, formats=list(formats))
class EarthquakeBenchmark(Benchmark):
    """Benchmark for seismic phase picking and earthquake forecasting."""

    name = "earthquake"
    hazard_task = "earthquake.picking"
    # Metric names reported per hazard task (consumed by catalog tooling).
    metric_names_by_task = {
        "earthquake.picking": ["p_pick_mae", "s_pick_mae", "precision", "recall", "f1"],
        "earthquake.forecasting": ["mae", "mse"],
    }

    def evaluate(self, model: nn.Module, data: DataBundle, config: ExperimentConfig) -> BenchmarkResult:
        """Score *model* on the configured eval split.

        For ``earthquake.picking`` columns 0/1 of predictions and targets
        are treated as P and S pick times; a sample counts as detected at a
        tolerance only when *both* picks fall within it.
        """
        eval_split = data.get_split(config.benchmark.eval_split)
        predictions = model(eval_split.inputs)
        labels = eval_split.targets

        if config.benchmark.hazard_task == "earthquake.picking":
            residuals = (predictions - labels).abs()
            tolerances = config.benchmark.params.get("detection_tolerances", [4.0, 8.0, 12.0])
            threshold_curve: Dict[str, float] = {}
            # NOTE(review): headline precision/recall/f1 are taken from the
            # 8.0 tolerance only and remain 0.0 if 8.0 is absent from the
            # configured tolerances — confirm this is intended.
            detection_rate = 0.0
            for tolerance in tolerances:
                within = (residuals <= float(tolerance)).all(dim=1).float()
                rate = float(within.mean().detach().cpu())
                threshold_curve[str(tolerance)] = rate
                if float(tolerance) == 8.0:
                    detection_rate = rate

            metrics = {
                "p_pick_mae": float(residuals[:, 0].mean().detach().cpu()),
                "s_pick_mae": float(residuals[:, 1].mean().detach().cpu()),
                "mean_pick_mae": float(residuals.mean().detach().cpu()),
                "precision": detection_rate,
                "recall": detection_rate,
                "f1": detection_rate,
            }
        else:
            threshold_curve = {}
            metrics = {
                "mae": float(torch.mean(torch.abs(predictions - labels)).detach().cpu()),
                "mse": float(torch.mean((predictions - labels) ** 2).detach().cpu()),
            }

        return BenchmarkResult(
            benchmark_name=self.name,
            hazard_task=config.benchmark.hazard_task,
            metrics=metrics,
            metadata={
                "split": config.benchmark.eval_split,
                "threshold_curve": threshold_curve,
                "dataset_name": data.metadata.get("dataset"),
                "source_dataset": data.metadata.get("source_dataset", data.metadata.get("dataset")),
            },
        )

    def export_report(
        self,
        result: BenchmarkResult,
        output_dir: str,
        formats,
    ) -> Dict[str, str]:
        """Export the standard bundle; forecasting runs also get a pyCSEP-style JSON."""
        exported = super().export_report(result, output_dir=output_dir, formats=formats)
        if result.hazard_task != "earthquake.forecasting":
            return exported
        report_dir = Path(output_dir)
        report_dir.mkdir(parents=True, exist_ok=True)
        pycsep_path = report_dir / "earthquake_pycsep.json"
        payload = {
            "adapter": "pyCSEP-style",
            "benchmark_name": result.benchmark_name,
            "hazard_task": result.hazard_task,
            "metrics": result.metrics,
            "metadata": result.metadata,
        }
        pycsep_path.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8")
        exported["pycsep"] = str(pycsep_path)
        return exported


register_benchmark(EarthquakeBenchmark.name, EarthquakeBenchmark)

__all__ = ["EarthquakeBenchmark"]
split = data.get_split(config.benchmark.eval_split) + if ( + config.benchmark.hazard_task == "flood.streamflow" + and hasattr(split.inputs, "__len__") + and not isinstance(split.inputs, torch.Tensor) + ): + loader = DataLoader(split.inputs, batch_size=4, shuffle=False, collate_fn=graph_collate) + preds_all = [] + target_all = [] + with torch.no_grad(): + for batch, target in loader: + preds_all.append(model(batch)) + target_all.append(target) + preds = torch.cat(preds_all, dim=0) + targets = torch.cat(target_all, dim=0) + else: + preds = model(split.inputs) + targets = split.targets + + if config.benchmark.hazard_task == "flood.inundation": + pred_depth = preds.float() + target_depth = targets.float() + pred_mask = (pred_depth >= 0.5).float() + target_mask = (target_depth > 0).float() + intersection = (pred_mask * target_mask).sum() + union = pred_mask.sum() + target_mask.sum() - intersection + metrics: Dict[str, float] = { + "pixel_mae": float(torch.mean(torch.abs(pred_depth - target_depth)).detach().cpu()), + "iou": float((intersection / union.clamp(min=1.0)).detach().cpu()), + "f1": float( + ( + 2 * intersection + / (pred_mask.sum() + target_mask.sum()).clamp(min=1.0) + ).detach().cpu() + ), + } + else: + mae = torch.mean(torch.abs(preds - targets)) + rmse = torch.sqrt(torch.mean((preds - targets) ** 2)) + target_mean = torch.mean(targets) + denominator = torch.sum((targets - target_mean) ** 2).clamp(min=1e-6) + nse = 1.0 - torch.sum((preds - targets) ** 2) / denominator + pred_std = torch.std(preds).clamp(min=1e-6) + target_std = torch.std(targets).clamp(min=1e-6) + covariance = torch.mean((preds - torch.mean(preds)) * (targets - target_mean)) + correlation = covariance / (pred_std * target_std) + alpha = pred_std / target_std + beta = torch.mean(preds) / target_mean.clamp(min=1e-6) + kge = 1.0 - torch.sqrt((correlation - 1.0) ** 2 + (alpha - 1.0) ** 2 + (beta - 1.0) ** 2) + metrics = { + "mae": float(mae.detach().cpu()), + "rmse": float(rmse.detach().cpu()), + 
# Global name -> class mapping; keys are normalized via _normalize().
_BENCHMARK_REGISTRY: Dict[str, Type[Benchmark]] = {}


def _normalize(name: str) -> str:
    """Canonical registry key for *name*: trimmed and lower-cased."""
    return name.strip().lower()


def register_benchmark(name: str, builder: Type[Benchmark]) -> None:
    """Register *builder* under *name*.

    Raises ValueError if the normalized name is already taken.
    """
    key = _normalize(name)
    if key in _BENCHMARK_REGISTRY:
        raise ValueError("Benchmark '{name}' already registered.".format(name=name))
    _BENCHMARK_REGISTRY[key] = builder


def available_benchmarks():
    """Return the registered benchmark keys in sorted order."""
    return sorted(_BENCHMARK_REGISTRY)


def get_benchmark(name: str):
    """Return the benchmark class registered under *name*, or None."""
    return _BENCHMARK_REGISTRY.get(_normalize(name))


def build_benchmark(name: str) -> Benchmark:
    """Instantiate the benchmark registered under *name*.

    Raises KeyError (listing the known keys) when nothing matches.
    """
    benchmark_cls = get_benchmark(name)
    if benchmark_cls is None:
        raise KeyError(
            "Benchmark '{name}' is not registered. Known: {known}".format(
                name=name,
                known=", ".join(available_benchmarks()),
            )
        )
    return benchmark_cls()


__all__ = [
    "available_benchmarks",
    "build_benchmark",
    "get_benchmark",
    "register_benchmark",
]
+@dataclass +class BenchmarkResult: + benchmark_name: str + hazard_task: str + metrics: Dict[str, float] + predictions: List[Any] = field(default_factory=list) + artifacts: Dict[str, str] = field(default_factory=dict) + metadata: Dict[str, Any] = field(default_factory=dict) + + +@dataclass +class BenchmarkRunSummary: + benchmark_name: str + hazard_task: str + metrics: Dict[str, float] + report_paths: Dict[str, str] = field(default_factory=dict) + metadata: Dict[str, Any] = field(default_factory=dict) diff --git a/pyhazards/benchmarks/tc.py b/pyhazards/benchmarks/tc.py new file mode 100644 index 00000000..f555b88d --- /dev/null +++ b/pyhazards/benchmarks/tc.py @@ -0,0 +1,47 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + +from ..configs import ExperimentConfig +from ..datasets.base import DataBundle +from .base import Benchmark +from .registry import register_benchmark +from .schemas import BenchmarkResult + + +class TropicalCycloneBenchmark(Benchmark): + name = "tc" + hazard_task = "tc.track_intensity" + metric_names_by_task = { + "tc.track_intensity": ["track_error", "intensity_mae"], + } + + def evaluate(self, model: nn.Module, data: DataBundle, config: ExperimentConfig) -> BenchmarkResult: + split = data.get_split(config.benchmark.eval_split) + preds = model(split.inputs) + targets = split.targets + + track_error = torch.norm(preds[..., :2] - targets[..., :2], dim=-1).mean() + intensity_mae = torch.mean(torch.abs(preds[..., 2] - targets[..., 2])) + metrics = { + "track_error": float(track_error.detach().cpu()), + "intensity_mae": float(intensity_mae.detach().cpu()), + } + return BenchmarkResult( + benchmark_name=self.name, + hazard_task=config.benchmark.hazard_task, + metrics=metrics, + metadata={ + "split": config.benchmark.eval_split, + "dataset_name": data.metadata.get("dataset"), + "source_dataset": data.metadata.get("source_dataset", data.metadata.get("dataset")), + "history": data.metadata.get("history"), + "horizon": 
data.feature_spec.extra.get("horizon") if data.feature_spec.extra else None, + }, + ) + + +register_benchmark(TropicalCycloneBenchmark.name, TropicalCycloneBenchmark) + +__all__ = ["TropicalCycloneBenchmark"] diff --git a/pyhazards/benchmarks/wildfire.py b/pyhazards/benchmarks/wildfire.py new file mode 100644 index 00000000..d487918b --- /dev/null +++ b/pyhazards/benchmarks/wildfire.py @@ -0,0 +1,97 @@ +from __future__ import annotations + +from typing import Dict + +import torch +import torch.nn as nn +import torch.nn.functional as F +from sklearn.metrics import accuracy_score, average_precision_score, f1_score, roc_auc_score + +from ..configs import ExperimentConfig +from ..datasets.base import DataBundle +from .base import Benchmark +from .registry import register_benchmark +from .schemas import BenchmarkResult + + +def _spread_metrics(logits: torch.Tensor, targets: torch.Tensor) -> Dict[str, float]: + probs = torch.sigmoid(logits) + preds = (probs >= 0.5).float() + targets = targets.float() + intersection = (preds * targets).sum() + union = preds.sum() + targets.sum() - intersection + iou = float((intersection / union.clamp(min=1.0)).detach().cpu()) + f1 = float((2 * intersection / (preds.sum() + targets.sum()).clamp(min=1.0)).detach().cpu()) + burned_area_mae = float( + torch.mean(torch.abs(preds.flatten(1).sum(dim=1) - targets.flatten(1).sum(dim=1))).detach().cpu() + ) + return {"iou": iou, "f1": f1, "burned_area_mae": burned_area_mae} + + +def _danger_metrics(logits: torch.Tensor, targets: torch.Tensor) -> Dict[str, float]: + if targets.dtype in {torch.int32, torch.int64} or targets.ndim == 1: + preds = logits.argmax(dim=1) + probs = F.softmax(logits, dim=1) + y_true = targets.detach().cpu().numpy() + y_pred = preds.detach().cpu().numpy() + y_score = probs.detach().cpu().numpy() + one_hot = F.one_hot(targets.long(), num_classes=logits.size(1)).detach().cpu().numpy() + try: + auc = float(roc_auc_score(one_hot, y_score, average="macro", multi_class="ovr")) + 
except ValueError: + auc = 0.0 + try: + pr_auc = float(average_precision_score(one_hot, y_score, average="macro")) + except ValueError: + pr_auc = 0.0 + return { + "accuracy": float(accuracy_score(y_true, y_pred)), + "macro_f1": float(f1_score(y_true, y_pred, average="macro")), + "auc": auc, + "pr_auc": pr_auc, + } + + preds = logits.float() + targets = targets.float() + mae = torch.mean(torch.abs(preds - targets)) + rmse = torch.sqrt(torch.mean((preds - targets) ** 2)) + return { + "mae": float(mae.detach().cpu()), + "rmse": float(rmse.detach().cpu()), + } + + +class WildfireBenchmark(Benchmark): + name = "wildfire" + hazard_task = "wildfire.danger" + metric_names_by_task = { + "wildfire.danger": ["accuracy", "macro_f1", "auc", "pr_auc", "mae", "rmse"], + "wildfire.spread": ["iou", "f1", "burned_area_mae"], + } + + def evaluate(self, model: nn.Module, data: DataBundle, config: ExperimentConfig) -> BenchmarkResult: + split = data.get_split(config.benchmark.eval_split) + x = split.inputs + y = split.targets + logits = model(x) + + if config.benchmark.hazard_task == "wildfire.danger": + metrics = _danger_metrics(logits, y) + else: + metrics = _spread_metrics(logits, y) + + return BenchmarkResult( + benchmark_name=self.name, + hazard_task=config.benchmark.hazard_task, + metrics=metrics, + metadata={ + "split": config.benchmark.eval_split, + "dataset_name": data.metadata.get("dataset"), + "source_dataset": data.metadata.get("source_dataset", data.metadata.get("dataset")), + }, + ) + + +register_benchmark(WildfireBenchmark.name, WildfireBenchmark) + +__all__ = ["WildfireBenchmark"] diff --git a/pyhazards/benchmarks/wildfire_benchmark/REAL_DATA_2024_PLAN.md b/pyhazards/benchmarks/wildfire_benchmark/REAL_DATA_2024_PLAN.md new file mode 100644 index 00000000..3b175883 --- /dev/null +++ b/pyhazards/benchmarks/wildfire_benchmark/REAL_DATA_2024_PLAN.md @@ -0,0 +1,286 @@ +# Wildfire Benchmark Real-Data Plan (2024 v1) + +## Goal + +Run the first real-data wildfire benchmark in 
`my-copy` using the 2024 data pack that is already available locally. + +This plan treats 2024 as the first stable benchmark year because: +- the local 2024 label and weather coverage is already present; +- the benchmark contract already uses 2024 splits; +- we can start with a fair, single-year method comparison before moving to 2025 generalization. + +## Benchmark Year + +- Benchmark year: `2024` +- Primary task: `Track-O` +- Task definition: predict grid-level or aggregated wildfire occurrence probability `P(y=1 | x)` + +## Real-Data 2024 Dataset Pack + +### Core inputs + +1. **Fire labels / fire history** +- Path: `/home/runyang/ryang/firms/combine` +- Format: daily CSV files such as `2024-01-10.csv` +- Role: primary occurrence label source and lagged fire-history source + +2. **Dynamic weather / land-surface forcing** +- Path: `/home/runyang/output2024` +- Format: Prithvi-WxC predicted NetCDF files such as `pred_20240101_18.nc` +- Available channels observed in sample files: + - `T2M`, `QV2M`, `TQV`, `U10M`, `V10M`, `GWETROOT`, `TS`, `LAI`, `EFLUX`, `HFLUX`, `SWGNT`, `SWTNT`, `LWGAB`, `LWGEM` +- Role: main dynamic feature source + +3. **Static fuels / vegetation** +- Path: `/home/runyang/ryang/landfire_fbfm40` +- Role: static fuel and vegetation background + +### Recommended v1 optional inputs + +4. **Perimeters** +- Path: `/home/runyang/ryang/WFIGS_Perimeters/history_2024` +- Role: event extent, perimeter-derived context, perimeter proximity features + +5. **Human activity proxy** +- Candidate paths: + - `/home/runyang/ryang/WRC_Housing_Density` + - `/home/runyang/ryang/LandScan_Global_2024` +- Role: ignition proxy and exposure context + +## Minimum Real-Data Feature Packs by Model Family + +### Classical / Trees +Use aggregated tabular features. 
+ +Required: +- FIRMS labels / lagged fire counts +- aggregated weather features from `output2024` +- LANDFIRE static fuel features + +Recommended: +- WFIGS perimeter proximity +- housing / population + +### Deep Learning +Use raster or raster-sequence tensors. + +Required: +- FIRMS rasterized labels +- `output2024` weather tensors +- LANDFIRE static channels + +Recommended: +- fire-history channels from FIRMS +- WFIGS perimeter channels or masks + +### Satellite Remote Sensing +Use raster tensors with wildfire-specific spatial observations. + +Required for v1: +- FIRMS labels / fire-history +- `output2024` +- LANDFIRE + +Recommended for later v2: +- GOES FDCF +- HMS Smoke + +### Physics / Simulators +Required: +- weather +- fuels +- perimeter or ignition initialization + +This group should not block the first real-data benchmark if data conversion takes longer. + +### Foundation Models +- `prithvi_wxc`: prioritize weather sequence tensors from `output2024` +- `prithvi_eo_2_tl`, `prithvi_burnscars`: use raster sequences plus static channels and fire-history context + +### LLM / MLLM +Do not block v1 on raw NetCDF ingestion. +Use summarized products, rendered maps, metadata, and benchmark-derived inputs after the core benchmark is stable. + +## Data Processing Strategy + +### Step 1: Build a canonical benchmark grid and date index +- Use `output2024/pred_20240101_18.nc` as a canonical weather grid reference. +- Create a canonical daily date list from `2024-01-01` through `2024-12-31`. +- Align all dynamic inputs to that grid and daily calendar. + +### Step 2: Build labels +- Read FIRMS daily CSV files from `/home/runyang/ryang/firms/combine`. +- Rasterize or aggregate them onto the benchmark grid. 
+- Create: + - `y_t`: binary occurrence label for day `t` + - optional lagged fire-history channels from prior days + +### Step 3: Build dynamic weather tensors +- Read `output2024/pred_*.nc` +- Daily aggregate if multiple files per day are used +- Select the 14 current channels as the default dynamic pack + +### Step 4: Build static tensors +- Reproject or sample LANDFIRE fuels to the benchmark grid +- Add optional human-activity layers if needed + +### Step 5: Materialize cached benchmark-ready arrays +Recommended cache layout: + +```text +/home/runyang/my-copy/data_cache/wildfire_2024_v1/ + dates.txt + labels/ + 2024-01-01.npy + met/ + 2024-01-01.npy + static/ + fuel.npy + housing.npy + population.npy + metadata/ + grid.json + vars.json +``` + +## Train / Val / Test Protocol + +Use the current benchmark contract split: +- Train: `2024-01-01` to `2024-09-30` +- Val: `2024-10-01` to `2024-10-31` +- Test: `2024-11-01` to `2024-12-31` + +Rules: +- fit all normalization statistics on train only; +- no future covariates relative to the prediction target; +- store fixed split files for reproducibility. + +## Training Recommendations + +### Phase A: real-data dry run +Use one seed first. +- Seed: `42` +- Purpose: verify data loading, training loop, output schema, and metric computation + +### Phase B: final benchmark runs +Use multi-seed reporting. 
+- Seeds: `42, 52, 62, 72, 82` +- Report: `mean ± std` + +### Classical / Trees +- `logistic_regression`: native binary objective +- `random_forest`: `predict_proba` +- `xgboost`: binary objective, several hundred rounds allowed +- `lightgbm`: binary objective, several hundred rounds allowed + +### Deep models +- task: binary occurrence probability +- output: one logit per grid cell / tile / target unit +- loss: `BCEWithLogitsLoss` +- recommended initial schedule: + - `max_epochs = 120` or higher + - early stopping monitor: `val_auprc` + - `patience = 20 to 30` + - `min_delta = 1e-4` +- current smoke settings are not sufficient for convergence claims + +## GPU Policy + +Real-data deep-model training should use GPU, not CPU. + +Record in `experiment_setting.json`: +- device +- gpu id +- gpu name +- total memory if available + +Recommended policy: +- classical models may remain on CPU unless GPU versions are explicitly used +- deep models should default to `cuda:` + +## Output Layout + +All new real-data benchmark artifacts should be written under: + +```text +/home/runyang/my-copy/runs/wildfire_benchmark/real/ +``` + +Recommended run layout: + +```text +runs/wildfire_benchmark/real/track_o_2024_real_v1/ + benchmark_contract_snapshot.json + benchmark_summary.json + experiment_templates.json + / + model_template.json + model_summary.json + seed_42/ + experiment_setting.json + history.csv + loss_curve.png + metrics.json +``` + +## Required Per-Seed Outputs + +For every model and seed: +- `experiment_setting.json` +- `history.csv` +- `loss_curve.png` +- `metrics.json` + +### history.csv should include +At minimum: +- step column (`epoch`, `round`, `iteration`, or `tree_count`) +- `train_loss` +- `val_loss` +- optional learning-rate column when applicable + +### loss_curve.png should show +- train loss vs step +- validation loss vs step +- clear title with model name and train unit + +## Evaluation Protocol + +### Primary metrics +- `AUPRC` + +### Secondary metrics +- 
`AUROC` + +### Reliability metrics +- `Brier` +- `NLL` +- `ECE` + +### Temporal consistency metrics +- `mean_day_to_day_change` +- `normalized_consistency_score` + +### Reporting rules +- report mean and std across seeds for final benchmark numbers +- include train/val loss curves +- log best step +- log converged step + +## Recommended Execution Order + +1. Build cache from FIRMS + `output2024` + LANDFIRE +2. Run `seed=42` dry run on 4 representative models: + - `logistic_regression` + - `xgboost` + - `unet` + - `convlstm` +3. Validate output artifacts and metric computation +4. Expand to the rest of the main benchmark roster +5. Add remote-sensing / foundation / simulator tracks afterwards + +## Immediate Implementation Notes + +- The current `track_o_2024_v1.json` still points to `/home/runyang/ryang/firms_download/combine`. +- The locally verified combined FIRMS label directory is `/home/runyang/ryang/firms/combine`. +- The first real-data contract should use the verified local path. 
"""Public API of the my-copy wildfire benchmark package.

Re-exports the contract/catalog loaders, run layout helpers, smoke and
real runners, cache builders, and adapter classes so callers can import
everything from ``pyhazards.benchmarks.wildfire_benchmark`` directly.
"""

from .experiment_settings import (
    BenchmarkSection,
    EvaluationProtocolSection,
    ModelSection,
    RunSection,
    WildfireExperimentSetting,
    build_default_experiment_setting,
    write_experiment_setting,
)
from .layout import (
    RunPaths,
    WILDFIRE_RUNS_ROOT,
    prepare_run_paths,
)
from .artifacts import (
    AdapterRunOutput,
    build_experiment_setting_from_run_output,
    build_model_template,
)
from .catalog import (
    WILDFIRE_BENCHMARK_CONFIG_ROOT,
    load_contract,
    load_model_catalog,
    parse_seed_list,
    select_models,
)
from .runner import run_smoke_batch
from .cache_builder import (
    CacheBuildSummary,
    align_static_fuel_to_cache,
    build_cache,
)
from .real_runner import (
    REPRESENTATIVE_MODELS,
    run_real_baselines,
)
from .adapters import (
    WildfireSmokeAdapter,
    SyntheticWildfireModelAdapter,
    create_adapter,
    resolve_local_model_name,
)

__all__ = [
    "AdapterRunOutput",
    "BenchmarkSection",
    "CacheBuildSummary",
    "align_static_fuel_to_cache",
    "REPRESENTATIVE_MODELS",
    "EvaluationProtocolSection",
    "ModelSection",
    "RunSection",
    "RunPaths",
    "WILDFIRE_BENCHMARK_CONFIG_ROOT",
    "WILDFIRE_RUNS_ROOT",
    "WildfireExperimentSetting",
    "WildfireSmokeAdapter",
    "SyntheticWildfireModelAdapter",
    "build_cache",
    "run_real_baselines",
    "build_default_experiment_setting",
    "write_experiment_setting",
    "prepare_run_paths",
    "build_experiment_setting_from_run_output",
    "build_model_template",
    "load_contract",
    "load_model_catalog",
    "parse_seed_list",
    "select_models",
    "run_smoke_batch",
    "create_adapter",
    "resolve_local_model_name",
]
from __future__ import annotations

import hashlib
from abc import ABC, abstractmethod
from typing import Any, Dict, List

from ..artifacts import AdapterRunOutput, STEP_LABEL, build_experiment_setting_from_run_output


class WildfireSmokeAdapter(ABC):
    """Minimal adapter contract for my-copy wildfire benchmark smoke runs.

    Concrete adapters implement :meth:`run` for a single seed; the base
    class provides the shared step-budget resolution and the conversion
    of a run output into the ``experiment_setting.json`` payload.
    """

    def __init__(self, model_spec: Dict[str, Any], contract: Dict[str, Any], step_limits: Dict[str, int]):
        # model_spec: one catalog entry (name, train_unit, defaults, ...).
        # contract: the benchmark contract dict (shared_training, ...).
        # step_limits: per-train-unit smoke caps, e.g. {"epoch": ..., "round": ...}.
        self.model_spec = model_spec
        self.contract = contract
        self.step_limits = step_limits

    @property
    def model_name(self) -> str:
        """Catalog name of the model this adapter wraps."""
        return str(self.model_spec["name"])

    @property
    def train_unit(self) -> str:
        """Training step unit; one of the keys of ``STEP_LABEL``."""
        return str(self.model_spec["train_unit"])

    @property
    def step_name(self) -> str:
        """Column name used for the step axis in history rows (e.g. ``tree_count``)."""
        return STEP_LABEL[self.train_unit]

    def resolve_num_steps(self) -> int:
        """Return the smoke-run step budget for this model.

        The catalog default for the model's train unit is clamped to the
        smoke cap in ``step_limits`` and to a small floor so the loss
        curve still has a visible shape.

        Raises:
            ValueError: if ``train_unit`` is not a supported unit.
        """
        defaults = self.model_spec.get("defaults", {})
        if self.train_unit == "epoch":
            return max(5, min(int(defaults.get("max_epochs", self.step_limits["epoch"])), self.step_limits["epoch"]))
        if self.train_unit == "round":
            return max(10, min(int(defaults.get("num_boost_round", self.step_limits["round"])), self.step_limits["round"]))
        if self.train_unit == "iteration":
            return max(10, min(int(defaults.get("max_iter", self.step_limits["iteration"])), self.step_limits["iteration"]))
        if self.train_unit == "tree":
            return max(10, min(int(defaults.get("n_estimators", self.step_limits["tree"])), self.step_limits["tree"]))
        raise ValueError(f"Unsupported train_unit={self.train_unit}")

    @abstractmethod
    def run(self, seed: int) -> AdapterRunOutput:
        """Run one smoke seed and return standardized benchmark artifacts."""

    def build_experiment_setting(self, seed: int, run_output: AdapterRunOutput) -> Dict[str, Any]:
        """Serialize one finished seed into the ``experiment_setting.json`` dict."""
        return build_experiment_setting_from_run_output(
            contract=self.contract,
            model_spec=self.model_spec,
            seed=int(seed),
            run_output=run_output,
        )


def stable_seed_offset(model_name: str) -> int:
    """Deterministic per-model RNG offset derived from the model name.

    Uses the first 8 hex digits of the SHA-256 of the name so the same
    model always produces the same offset across processes and runs.
    """
    digest = hashlib.sha256(model_name.encode("utf-8")).hexdigest()[:8]
    return int(digest, 16)


def moving_average(values: List[float], window: int) -> List[float]:
    """Trailing moving average with a warm-up (shorter) window at the start.

    ``window <= 1`` returns a shallow copy of the input unchanged.
    """
    if window <= 1:
        return values[:]
    out: List[float] = []
    for idx in range(len(values)):
        start = max(0, idx - window + 1)
        chunk = values[start : idx + 1]
        out.append(float(sum(chunk) / len(chunk)))
    return out


def find_converged_step(history: List[Dict[str, float]], train_unit: str, smooth_window: int, patience: int, min_improvement: float) -> int:
    """Return the step at which smoothed ``val_loss`` stopped improving.

    A step counts as stable when the smoothed validation loss improves by
    less than ``min_improvement`` versus the previous step; after
    ``patience`` consecutive stable steps that step is reported. Falls
    back to the final recorded step when convergence is never reached.

    Raises:
        ValueError: if ``history`` is empty. (Previously this crashed
            with a bare ``IndexError`` from ``history[-1]``.)
    """
    if not history:
        raise ValueError("find_converged_step requires a non-empty history")
    step_key = STEP_LABEL[train_unit]
    val_loss = [float(row["val_loss"]) for row in history]
    smoothed = moving_average(val_loss, smooth_window)

    stable = 0
    for idx in range(1, len(smoothed)):
        improvement = smoothed[idx - 1] - smoothed[idx]
        if improvement < min_improvement:
            stable += 1
        else:
            stable = 0
        if stable >= patience:
            return int(history[idx][step_key])
    return int(history[-1][step_key])


def normalized_consistency_score(mean_day_to_day_change: float) -> float:
    """Map a mean day-to-day prediction change into a [0, 1] consistency score."""
    return max(0.0, min(1.0, 1.0 - float(mean_day_to_day_change)))
from __future__ import annotations

import importlib
import math
from pathlib import Path
from typing import Dict, List

import numpy as np

from pyhazards.models import available_models

from ..artifacts import AdapterRunOutput
from .base import (
    WildfireSmokeAdapter,
    find_converged_step,
    normalized_consistency_score,
    stable_seed_offset,
)

# Catalog names that map onto differently named local model modules.
MODEL_NAME_ALIASES = {
    "wrf_sfire_adapter": "wrf_sfire",
    "forefire_adapter": "forefire",
}


def resolve_local_model_name(model_name: str) -> str:
    """Translate a benchmark catalog name into the local module name."""
    return MODEL_NAME_ALIASES.get(model_name, model_name)


def summarize_metrics(best_val_loss: float, seed: int, model_name: str) -> Dict[str, float]:
    """Produce deterministic pseudo-metrics keyed off the best validation loss."""
    rng = np.random.default_rng(seed + stable_seed_offset(model_name) // 17)

    # Lower best val loss -> higher "quality" -> better synthetic scores.
    quality = float(np.clip(1.0 / (1.0 + best_val_loss), 0.0, 1.0))
    auprc = float(np.clip(0.03 + 0.5 * quality + rng.normal(0.0, 0.015), 0.0, 1.0))
    auroc = float(np.clip(0.55 + 0.42 * quality + rng.normal(0.0, 0.01), 0.0, 1.0))
    brier = float(np.clip(0.42 - 0.28 * quality + rng.normal(0.0, 0.01), 0.0, 1.0))
    nll = float(np.clip(1.1 - 0.7 * quality + rng.normal(0.0, 0.03), 0.01, 5.0))
    ece = float(np.clip(0.22 - 0.14 * quality + rng.normal(0.0, 0.008), 0.0, 1.0))
    temporal_delta = float(np.clip(0.25 - 0.12 * quality + rng.normal(0.0, 0.01), 0.0, 1.0))

    return {
        "auprc": auprc,
        "auroc": auroc,
        "brier": brier,
        "nll": nll,
        "ece": ece,
        "mean_day_to_day_change": temporal_delta,
        "normalized_consistency_score": normalized_consistency_score(temporal_delta),
    }


class SyntheticWildfireModelAdapter(WildfireSmokeAdapter):
    """Smoke adapter for migrated my-copy wildfire benchmark models."""

    def _simulate_history(self, seed: int, num_steps: int) -> List[Dict[str, float]]:
        """Generate a deterministic synthetic train/val loss trajectory."""
        step_key = self.step_name
        defaults = self.model_spec.get("defaults", {})
        rng = np.random.default_rng(seed + stable_seed_offset(self.model_name))

        base_lr = float(defaults.get("lr", defaults.get("learning_rate", defaults.get("eta", 1e-3))))
        start_loss = float(rng.uniform(0.8, 1.6))
        floor_loss = float(rng.uniform(0.08, 0.25))
        speed = float(rng.uniform(0.02, 0.08))

        records: List[Dict[str, float]] = []
        for step in range(1, num_steps + 1):
            # Exponential decay toward the floor plus small gaussian jitter.
            decay = floor_loss + (start_loss - floor_loss) * math.exp(-speed * step)
            train_loss = max(0.01, decay + float(rng.normal(0.0, 0.01)))
            # Validation tracks train with a positive generalization gap.
            val_loss = max(0.01, train_loss + float(rng.uniform(0.02, 0.09)) + float(rng.normal(0.0, 0.008)))
            # Cosine-annealed learning rate, recorded purely for plotting realism.
            cosine = 0.5 * (1.0 + math.cos(math.pi * (step - 1) / max(1, num_steps - 1)))
            records.append(
                {
                    step_key: float(step),
                    "train_loss": float(train_loss),
                    "val_loss": float(val_loss),
                    "learning_rate": float(base_lr * cosine),
                }
            )
        return records

    def _resolve_model_metadata(self) -> Dict[str, object]:
        """Best-effort lookup of the local model registration and source path."""
        local_name = resolve_local_model_name(self.model_name)
        registered = local_name in set(available_models())
        try:
            module = importlib.import_module(f"pyhazards.models.{local_name}")
            module_file = getattr(module, "__file__", None)
            source_path = str(Path(module_file).resolve()) if module_file else None
        except Exception:
            # A missing or broken local module is acceptable for a smoke run.
            source_path = None
        return {
            "canonical_model_name": local_name,
            "registered_in_my_copy": registered,
            "model_source": source_path,
        }

    def run(self, seed: int) -> AdapterRunOutput:
        """Execute one synthetic smoke seed and package standardized artifacts."""
        num_steps = self.resolve_num_steps()
        history = self._simulate_history(seed=seed, num_steps=num_steps)

        losses = np.asarray([float(row["val_loss"]) for row in history])
        best_idx = int(np.argmin(losses))
        best_step = int(history[best_idx][self.step_name])

        rule = self.contract["shared_training"]["convergence_rule"]
        converged_step = find_converged_step(
            history=history,
            train_unit=self.train_unit,
            smooth_window=int(rule["smoothing_window"]),
            patience=int(rule["patience"]),
            min_improvement=float(rule["min_improvement"]),
        )

        metrics = summarize_metrics(best_val_loss=float(losses[best_idx]), seed=seed, model_name=self.model_name)
        model_meta = self._resolve_model_metadata()

        return AdapterRunOutput(
            history=history,
            metrics=metrics,
            best_step=best_step,
            converged_step=converged_step,
            train_unit=self.train_unit,
            notes={
                "adapter_kind": "synthetic_my_copy_benchmark",
                "status": "smoke_only",
                "message": "Synthetic smoke run executed inside my-copy wildfire benchmark skeleton.",
                **model_meta,
            },
        )
"tree": "tree_count", +} + + +@dataclass +class AdapterRunOutput: + history: List[Dict[str, float]] + metrics: Dict[str, float] + best_step: int + converged_step: int + train_unit: str + notes: Dict[str, Any] + + +def mean_std(values: List[float]) -> Dict[str, float]: + arr = np.asarray(values, dtype=float) + return {"mean": float(np.mean(arr)), "std": float(np.std(arr, ddof=0))} + + +def write_json(path: Path, payload: Mapping[str, Any]) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(dict(payload), indent=2), encoding="utf-8") + + +def write_history_csv(path: Path, rows: List[Dict[str, float]]) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + if not rows: + path.write_text("", encoding="utf-8") + return + fieldnames = list(rows[0].keys()) + with path.open("w", encoding="utf-8", newline="") as handle: + writer = csv.DictWriter(handle, fieldnames=fieldnames) + writer.writeheader() + writer.writerows(rows) + + +def plot_loss_curve(history: List[Dict[str, float]], train_unit: str, output_png: Path, title: str) -> None: + output_png.parent.mkdir(parents=True, exist_ok=True) + if not history: + return + step_key = STEP_LABEL[train_unit] + x = [int(row[step_key]) for row in history] + y_tr = [float(row["train_loss"]) for row in history] + y_va = [float(row["val_loss"]) for row in history] + + plt.figure(figsize=(8, 5)) + plt.plot(x, y_tr, marker="o", linewidth=1.6, label="train_loss") + plt.plot(x, y_va, marker="s", linewidth=1.4, label="val_loss") + plt.xlabel(step_key) + plt.ylabel("loss") + plt.title(title) + plt.grid(alpha=0.3) + plt.legend() + plt.tight_layout() + plt.savefig(output_png, dpi=150) + plt.close() + + +def build_model_template(contract: Dict[str, Any], model_spec: Dict[str, Any]) -> Dict[str, Any]: + shared = contract["shared_training"] + seed_list = shared.get("seed_list") or shared.get("dry_run_seed_list") or shared.get("final_seed_list") or [42] + return { + "template_version": 
"track_o_model_template_v1", + "benchmark_name": contract["benchmark_name"], + "contract_version": contract["contract_version"], + "task": contract["task"], + "model": { + "name": model_spec["name"], + "display_name": model_spec["display_name"], + "group": model_spec["group"], + "source_tier": model_spec["source_tier"], + "train_unit": model_spec["train_unit"], + "defaults": model_spec.get("defaults", {}), + }, + "reproducibility": { + "seed_list": seed_list, + "must_report_mean_std": contract["shared_training"]["report_requirements"]["report_mean_std_across_seeds"], + }, + "expected_metrics": [ + "auprc", + "auroc", + "brier", + "nll", + "ece", + "mean_day_to_day_change", + "normalized_consistency_score", + ], + "required_fields_for_real_runs": [ + "repo_url", + "repo_commit_or_tag", + "data_version", + "split_version", + "feature_set_version", + "hyperparam_search_budget", + "hardware", + "software_versions", + ], + } + + +def build_experiment_setting_from_run_output( + *, + contract: Dict[str, Any], + model_spec: Dict[str, Any], + seed: int, + run_output: AdapterRunOutput, +) -> Dict[str, Any]: + setting = build_default_experiment_setting( + model_name=str(model_spec["name"]), + display_name=str(model_spec["display_name"]), + group=str(model_spec["group"]), + source_tier=str(model_spec["source_tier"]), + train_unit=str(model_spec["train_unit"]), + defaults=model_spec.get("defaults", {}), + seed=int(seed), + num_steps=len(run_output.history), + best_step=int(run_output.best_step), + converged_step=int(run_output.converged_step), + step_name=STEP_LABEL[str(model_spec["train_unit"])], + mode=str(contract["mode"]), + task=str(contract["task"]), + notes=dict(run_output.notes), + metrics=run_output.metrics, + ) + setting.run.learning_weight = { + "kind": contract["shared_training"]["class_imbalance"]["policy"], + "value": "to_be_computed_from_real_train_split", + "clip_max": contract["shared_training"]["class_imbalance"]["clip_max"], + } + return setting.to_dict() 
from __future__ import annotations

import json
import re
import subprocess
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, Iterable, List, Sequence, Set

import numpy as np
import pandas as pd
import xarray as xr
import yaml

# Weather files look like pred_YYYYMMDD_HH.nc; group(1) is the YYYYMMDD stamp.
_DATE_RE = re.compile(r"pred_(\d{8})_\d{2}\.nc$")


@dataclass
class CacheBuildSummary:
    """Counts and metadata describing one wildfire benchmark cache build."""

    cache_root: Path
    n_label_days: int
    n_met_days: int
    n_shared_days: int
    weather_vars: List[str]
    train_days: int
    val_days: int
    test_days: int

    def to_dict(self) -> Dict[str, Any]:
        """Return a JSON-serializable view of this summary."""
        return {
            "cache_root": str(self.cache_root),
            "n_label_days": int(self.n_label_days),
            "n_met_days": int(self.n_met_days),
            "n_shared_days": int(self.n_shared_days),
            "weather_vars": list(self.weather_vars),
            "train_days": int(self.train_days),
            "val_days": int(self.val_days),
            "test_days": int(self.test_days),
        }


def _read_yaml(path: str | Path) -> Dict[str, Any]:
    """Load a YAML config file into a dict."""
    return yaml.safe_load(Path(path).read_text(encoding="utf-8"))


def _extract_pred_date(path: Path) -> str | None:
    """Return the ISO date encoded in a pred_YYYYMMDD_HH.nc filename, else None."""
    match = _DATE_RE.search(path.name)
    if not match:
        return None
    stamp = match.group(1)
    return f"{stamp[:4]}-{stamp[4:6]}-{stamp[6:8]}"


def _load_grid(sample_nc: Path) -> tuple[np.ndarray, np.ndarray]:
    """Read the canonical (lat, lon) axes from a sample weather NetCDF."""
    ds = xr.open_dataset(sample_nc)
    try:
        lat = np.asarray(ds["lat"].values, dtype=np.float64)
        lon = np.asarray(ds["lon"].values, dtype=np.float64)
    finally:
        ds.close()
    return lat, lon


def _select_weather_vars(ds: xr.Dataset, weather_vars: Sequence[str]) -> xr.Dataset:
    """Subset *ds* to the requested variables, raising if any are absent."""
    missing = [name for name in weather_vars if name not in ds.data_vars]
    if missing:
        raise KeyError(f"Missing weather variables in dataset: {missing}")
    return ds[list(weather_vars)]


def _discover_weather_groups(weather_dir: Path, weather_glob: str) -> Dict[str, List[Path]]:
    """Group weather NetCDF paths by ISO date (multiple files per day allowed)."""
    grouped: Dict[str, List[Path]] = {}
    for path in sorted(weather_dir.glob(weather_glob)):
        date = _extract_pred_date(path)
        if date is None:
            continue
        grouped.setdefault(date, []).append(path)
    return grouped


def _discover_label_paths(firms_dir: Path, year: int) -> Dict[str, Path]:
    """Map ISO date -> FIRMS daily CSV path for the given year."""
    return {path.stem: path for path in sorted(firms_dir.glob(f"{year}-*.csv"))}


def _daily_weather_arrays(
    weather_groups: Dict[str, List[Path]],
    weather_vars: Sequence[str],
    *,
    allowed_dates: Set[str] | None = None,
) -> Dict[str, np.ndarray]:
    """Build per-day (C, H, W) weather tensors, averaging multiple files per day."""
    out: Dict[str, np.ndarray] = {}
    for date in sorted(weather_groups):
        if allowed_dates is not None and date not in allowed_dates:
            continue
        stacks: List[np.ndarray] = []
        for path in weather_groups[date]:
            ds = xr.open_dataset(path)
            try:
                picked = _select_weather_vars(ds, weather_vars)
                arr = np.stack([np.asarray(picked[var].values, dtype=np.float32) for var in weather_vars], axis=0)
                # Squeeze a singleton time/level axis if present: (C, 1, H, W) -> (C, H, W).
                if arr.ndim == 4 and arr.shape[1] == 1:
                    arr = arr[:, 0, :, :]
                stacks.append(arr)
            finally:
                ds.close()
        if stacks:
            out[date] = np.mean(np.stack(stacks, axis=0), axis=0).astype(np.float32)
    return out


def _read_firms_csv(path: Path) -> pd.DataFrame:
    """Read one FIRMS daily detections CSV."""
    return pd.read_csv(path)


def _nearest_index(sorted_values: np.ndarray, values: np.ndarray) -> np.ndarray:
    """Vectorized nearest-neighbor index lookup into a sorted 1-D axis.

    NOTE(review): assumes ``sorted_values`` is sorted ascending (searchsorted
    precondition) — confirm the NetCDF lat axis is not stored descending.
    """
    idx = np.searchsorted(sorted_values, values)
    idx = np.clip(idx, 0, len(sorted_values) - 1)
    left = np.clip(idx - 1, 0, len(sorted_values) - 1)
    choose_left = np.abs(sorted_values[left] - values) <= np.abs(sorted_values[idx] - values)
    return np.where(choose_left, left, idx)


def _firms_to_binary_grid(df: pd.DataFrame, lat: np.ndarray, lon: np.ndarray) -> np.ndarray:
    """Rasterize FIRMS point detections into a binary (lat, lon) occurrence grid.

    Raises:
        KeyError: if the CSV lacks 'latitude'/'longitude' columns.
    """
    label = np.zeros((lat.size, lon.size), dtype=np.float32)
    if df.empty:
        return label

    if "latitude" not in df.columns or "longitude" not in df.columns:
        raise KeyError("FIRMS CSV must include 'latitude' and 'longitude' columns.")

    lat_vals = df["latitude"].to_numpy(dtype=np.float64, copy=False)
    lon_vals = df["longitude"].to_numpy(dtype=np.float64, copy=False)
    valid = np.isfinite(lat_vals) & np.isfinite(lon_vals)
    lat_vals = lat_vals[valid]
    lon_vals = lon_vals[valid]
    if lat_vals.size == 0:
        return label

    lat_idx = _nearest_index(lat, lat_vals)
    lon_idx = _nearest_index(lon, lon_vals)
    label[lat_idx, lon_idx] = 1.0
    return label


def _daily_label_arrays(
    label_paths: Dict[str, Path],
    lat: np.ndarray,
    lon: np.ndarray,
    *,
    allowed_dates: Set[str] | None = None,
) -> Dict[str, np.ndarray]:
    """Build per-day binary label grids from FIRMS daily CSVs."""
    out: Dict[str, np.ndarray] = {}
    for date in sorted(label_paths):
        if allowed_dates is not None and date not in allowed_dates:
            continue
        df = _read_firms_csv(label_paths[date])
        out[date] = _firms_to_binary_grid(df, lat=lat, lon=lon)
    return out


def _write_lines(path: Path, items: Iterable[str]) -> None:
    """Write one item per line, creating parent directories."""
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text("\n".join(items), encoding="utf-8")


def _date_in_range(date: str, start: str, end: str) -> bool:
    """Inclusive ISO-date range test (lexicographic compare is date-safe)."""
    return start <= date <= end


def _write_split_files(cache_root: Path, dates: Sequence[str], split_cfg: Dict[str, Sequence[str]]) -> Dict[str, int]:
    """Materialize train/val/test date files and return per-split counts."""
    split_root = cache_root / "splits"
    counts: Dict[str, int] = {}
    for split_name in ("train", "val", "test"):
        start, end = split_cfg[split_name]
        split_dates = [d for d in dates if _date_in_range(d, str(start), str(end))]
        _write_lines(split_root / f"{split_name}_dates.txt", split_dates)
        counts[split_name] = len(split_dates)
    return counts


def build_cache(config_path: str | Path, *, limit_days: int = 0) -> CacheBuildSummary:
    """Build the benchmark-ready cache (grid, labels, met tensors, splits).

    Args:
        config_path: YAML config with ``cache``, ``data`` and ``splits`` sections.
        limit_days: when > 0, process only the first N shared dates (smoke builds).

    Returns:
        CacheBuildSummary describing what was materialized.
    """
    cfg = _read_yaml(config_path)

    cache_root = Path(cfg["cache"]["root"])
    labels_dir = cache_root / "labels"
    met_dir = cache_root / "met"
    static_dir = cache_root / "static"
    metadata_dir = cache_root / "metadata"
    for path in (labels_dir, met_dir, static_dir, metadata_dir):
        path.mkdir(parents=True, exist_ok=True)

    weather_dir = Path(cfg["data"]["weather_dir"])
    weather_glob = str(cfg["data"].get("weather_glob", "pred_2024*.nc"))
    weather_vars = list(cfg["data"]["weather_vars"])
    sample_nc = weather_dir / str(cfg["data"].get("sample_nc", "pred_20240101_18.nc"))
    firms_dir = Path(cfg["data"]["firms_daily_dir"])
    landfire_tif = Path(cfg["data"]["landfire_tif"])
    year = int(cfg["data"]["year"])

    # Canonical grid comes from one sample weather file.
    lat, lon = _load_grid(sample_nc)
    np.save(metadata_dir / "lat.npy", lat.astype(np.float32))
    np.save(metadata_dir / "lon.npy", lon.astype(np.float32))
    (metadata_dir / "grid.json").write_text(
        json.dumps(
            {
                "sample_nc": str(sample_nc),
                "lat_size": int(lat.size),
                "lon_size": int(lon.size),
                "lat_min": float(lat.min()),
                "lat_max": float(lat.max()),
                "lon_min": float(lon.min()),
                "lon_max": float(lon.max()),
            },
            indent=2,
        ),
        encoding="utf-8",
    )
    (metadata_dir / "vars.json").write_text(json.dumps({"weather_vars": weather_vars}, indent=2), encoding="utf-8")

    weather_groups = _discover_weather_groups(weather_dir, weather_glob)
    label_paths = _discover_label_paths(firms_dir, year)

    # Only dates that have both weather and labels enter the cache.
    candidate_shared_dates = sorted(set(weather_groups) & set(label_paths))
    if limit_days > 0:
        candidate_shared_dates = candidate_shared_dates[:limit_days]
    allowed_dates = set(candidate_shared_dates)

    met_arrays = _daily_weather_arrays(weather_groups, weather_vars, allowed_dates=allowed_dates)
    label_arrays = _daily_label_arrays(label_paths, lat=lat, lon=lon, allowed_dates=allowed_dates)

    shared_dates = sorted(set(met_arrays) & set(label_arrays))

    for date in shared_dates:
        np.save(met_dir / f"{date}.npy", met_arrays[date])
        np.save(labels_dir / f"{date}.npy", label_arrays[date])

    _write_lines(cache_root / "dates.txt", shared_dates)

    # Static fuel layer is only registered here; actual alignment is done by
    # align_static_fuel_to_cache once the warp toolchain is available.
    static_manifest = {
        "fuel_source": str(landfire_tif),
        "status": "source_registered_only",
        "message": "Static fuel reprojection to the benchmark grid is deferred until rasterio/rioxarray are available.",
        "expected_output_path": str(static_dir / "fuel.npy"),
    }
    (static_dir / "fuel_manifest.json").write_text(json.dumps(static_manifest, indent=2), encoding="utf-8")

    split_counts = _write_split_files(cache_root, shared_dates, cfg["splits"])

    summary = CacheBuildSummary(
        cache_root=cache_root,
        n_label_days=len(label_paths),
        n_met_days=len(weather_groups),
        n_shared_days=len(shared_dates),
        weather_vars=weather_vars,
        train_days=split_counts["train"],
        val_days=split_counts["val"],
        test_days=split_counts["test"],
    )
    (cache_root / "cache_summary.json").write_text(json.dumps(summary.to_dict(), indent=2), encoding="utf-8")
    return summary


def align_static_fuel_to_cache(
    cache_root: str | Path,
    *,
    landfire_tif: str | Path | None = None,
    overwrite: bool = False,
) -> Dict[str, Any]:
    """Warp the LANDFIRE fuel raster onto the cached benchmark grid via gdalwarp.

    Args:
        cache_root: root of a previously built cache (needs metadata/lat.npy, lon.npy).
        landfire_tif: explicit source GeoTIFF; falls back to the manifest's recorded source.
        overwrite: re-run the warp even if fuel.npy/fuel_mask.npy already exist.

    Returns:
        The manifest payload describing the aligned outputs.

    Raises:
        ValueError: if no LANDFIRE source path is available.
        FileNotFoundError: if the source raster does not exist.
    """
    import tifffile as tf

    cache_root = Path(cache_root)
    static_dir = cache_root / "static"
    metadata_dir = cache_root / "metadata"
    static_dir.mkdir(parents=True, exist_ok=True)

    lat = np.load(metadata_dir / "lat.npy")
    lon = np.load(metadata_dir / "lon.npy")
    manifest_path = static_dir / "fuel_manifest.json"
    manifest = {}
    if manifest_path.exists():
        manifest = json.loads(manifest_path.read_text(encoding="utf-8"))

    # BUGFIX: the previous guard built Path(... or "") and then tested
    # `not str(source_path)`, which can never fire because str(Path("")) == ".".
    # Validate the raw value *before* constructing the Path.
    raw_source = landfire_tif or manifest.get("fuel_source")
    if not raw_source:
        raise ValueError("LANDFIRE source path is required to align static fuel to the cache grid.")
    source_path = Path(raw_source)
    if not source_path.exists():
        raise FileNotFoundError(f"LANDFIRE source not found: {source_path}")

    fuel_npy_path = static_dir / "fuel.npy"
    fuel_mask_path = static_dir / "fuel_mask.npy"
    aligned_tif_path = static_dir / "fuel_aligned_benchmark_grid.tif"
    # Idempotent fast path: outputs already materialized and overwrite not requested.
    if fuel_npy_path.exists() and fuel_mask_path.exists() and not overwrite:
        payload = json.loads(manifest_path.read_text(encoding="utf-8")) if manifest_path.exists() else {}
        return payload

    width = int(lon.size)
    height = int(lat.size)
    # NOTE(review): the warp targets a nominal global EPSG:4326 extent; confirm
    # this matches the cached grid's actual extent before trusting alignment.
    cmd = [
        "gdalwarp",
        "-overwrite",
        "-multi",
        "-wo",
        "NUM_THREADS=ALL_CPUS",
        "-t_srs",
        "EPSG:4326",
        "-te",
        "-180",
        "-90",
        "180",
        "90",
        "-ts",
        str(width),
        str(height),
        "-r",
        "mode",
        "-srcnodata",
        "32767",
        "-dstnodata",
        "-9999",
        str(source_path),
        str(aligned_tif_path),
    ]
    subprocess.run(cmd, check=True)

    raw = tf.imread(aligned_tif_path)
    if raw.shape != (height, width):
        raise ValueError(f"Aligned fuel raster shape mismatch: expected {(height, width)}, got {raw.shape}")

    # Negative values are nodata/outside-domain; keep a separate validity mask.
    valid_mask = raw >= 0
    fuel = np.where(valid_mask, raw, 0).astype(np.int16)
    fuel_mask = valid_mask.astype(np.uint8)
    np.save(fuel_npy_path, fuel)
    np.save(fuel_mask_path, fuel_mask)

    unique_valid = np.unique(fuel[valid_mask]) if np.any(valid_mask) else np.asarray([], dtype=np.int16)
    payload = {
        "fuel_source": str(source_path),
        "status": "aligned_to_cache_grid",
        "grid_shape": [height, width],
        "warp": {
            "target_srs": "EPSG:4326",
            "target_extent": [-180, -90, 180, 90],
            "target_size": [width, height],
            "resampling": "mode",
            "dst_nodata": -9999,
        },
        "output_files": {
            "fuel": str(fuel_npy_path),
            "fuel_mask": str(fuel_mask_path),
            "aligned_tif": str(aligned_tif_path),
        },
        "valid_cells": int(valid_mask.sum()),
        "valid_fraction": float(valid_mask.mean()),
        "unique_valid_values_count": int(unique_valid.size),
        "unique_valid_values_sample": unique_valid[:32].astype(int).tolist(),
        "notes": [
            "Static fuel values were warped from LANDFIRE CONUS Albers to the benchmark's nominal global lat-lon grid.",
            "Negative values were treated as outside-domain/no-data and written as fuel=0 with fuel_mask=0.",
        ],
    }
    manifest_path.write_text(json.dumps(payload, indent=2), encoding="utf-8")

    # Keep the cache-level summary in sync when it exists.
    summary_path = cache_root / "cache_summary.json"
    if summary_path.exists():
        summary = json.loads(summary_path.read_text(encoding="utf-8"))
        summary["static_fuel"] = {
            "status": "aligned",
            "fuel_file": str(fuel_npy_path),
            "fuel_mask_file": str(fuel_mask_path),
            "valid_cells": int(valid_mask.sum()),
            "valid_fraction": float(valid_mask.mean()),
        }
        summary_path.write_text(json.dumps(summary, indent=2), encoding="utf-8")

    return payload


__all__ = ["CacheBuildSummary", "build_cache", "align_static_fuel_to_cache"]
"aligned", + "fuel_file": str(fuel_npy_path), + "fuel_mask_file": str(fuel_mask_path), + "valid_cells": int(valid_mask.sum()), + "valid_fraction": float(valid_mask.mean()), + } + summary_path.write_text(json.dumps(summary, indent=2), encoding="utf-8") + + return payload + + +__all__ = ["CacheBuildSummary", "build_cache", "align_static_fuel_to_cache"] diff --git a/pyhazards/benchmarks/wildfire_benchmark/catalog.py b/pyhazards/benchmarks/wildfire_benchmark/catalog.py new file mode 100644 index 00000000..d2edd379 --- /dev/null +++ b/pyhazards/benchmarks/wildfire_benchmark/catalog.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +import json +from pathlib import Path +from typing import Any, Dict, List + +WILDFIRE_BENCHMARK_CONFIG_ROOT = Path(__file__).resolve().parents[2] / "configs" / "wildfire_benchmark" + + +def load_json(path: Path) -> Any: + return json.loads(path.read_text(encoding="utf-8")) + + +def load_contract(path: str | Path | None = None) -> Dict[str, Any]: + target = Path(path) if path else WILDFIRE_BENCHMARK_CONFIG_ROOT / "track_o_2024_v1.json" + return load_json(target) + + +def load_model_catalog(kind: str = "main", path: str | Path | None = None) -> List[Dict[str, Any]]: + if path is not None: + return load_json(Path(path)) + filename = "model_catalog_22.json" if kind == "main" else "model_catalog_extensions_v1.json" + return load_json(WILDFIRE_BENCHMARK_CONFIG_ROOT / filename) + + +def parse_seed_list(seed_text: str | List[int] | None) -> List[int]: + if seed_text is None: + return [42] + if isinstance(seed_text, list): + return [int(x) for x in seed_text] or [42] + seeds = [int(s.strip()) for s in str(seed_text).split(",") if s.strip()] + return seeds or [42] + + +def select_models( + all_models: List[Dict[str, Any]], + *, + source_tier: str = "all", + models: str | List[str] | None = None, + limit_models: int = 0, +) -> List[Dict[str, Any]]: + selected = list(all_models) + if source_tier != "all": + selected = [m for m in selected if 
m.get("source_tier") == source_tier] + + if models: + allowed = set(models) if isinstance(models, list) else {x.strip() for x in str(models).split(",") if x.strip()} + selected = [m for m in selected if m["name"] in allowed] + + selected = sorted(selected, key=lambda x: int(x.get("priority", 9999))) + if limit_models > 0: + selected = selected[:limit_models] + return selected diff --git a/pyhazards/benchmarks/wildfire_benchmark/experiment_settings.py b/pyhazards/benchmarks/wildfire_benchmark/experiment_settings.py new file mode 100644 index 00000000..ea76d957 --- /dev/null +++ b/pyhazards/benchmarks/wildfire_benchmark/experiment_settings.py @@ -0,0 +1,134 @@ +from __future__ import annotations + +import json +from dataclasses import asdict, dataclass, field +from pathlib import Path +from typing import Any, Mapping + + +@dataclass +class BenchmarkSection: + name: str = "WildfireBench" + contract_version: str = "track_o_2024_v1" + mode: str = "scaffold_no_data" + task: str = "Track-O" + + +@dataclass +class EvaluationProtocolSection: + discrimination: dict[str, Any] = field( + default_factory=lambda: {"primary": "auprc", "secondary": "auroc"} + ) + reliability: dict[str, Any] = field( + default_factory=lambda: {"metrics": ["brier", "nll", "ece"]} + ) + temporal_consistency: dict[str, Any] = field( + default_factory=lambda: { + "metrics": ["mean_day_to_day_change", "normalized_consistency_score"] + } + ) + + +@dataclass +class ModelSection: + name: str + display_name: str + group: str + source_tier: str + train_unit: str + defaults: dict[str, Any] = field(default_factory=dict) + + +@dataclass +class RunSection: + seed: int = 42 + num_steps: int = 0 + best_step: int = 0 + converged_step: int = 0 + step_name: str = "epoch" + learning_weight: dict[str, Any] = field( + default_factory=lambda: { + "kind": "pos_weight_neg_over_pos", + "value": "to_be_computed_from_real_train_split", + "clip_max": 50.0, + } + ) + + +@dataclass +class WildfireExperimentSetting: + benchmark: 
BenchmarkSection + evaluation_protocol: EvaluationProtocolSection + model: ModelSection + run: RunSection + metrics: dict[str, Any] = field( + default_factory=lambda: { + "auprc": None, + "auroc": None, + "brier": None, + "nll": None, + "ece": None, + "mean_day_to_day_change": None, + "normalized_consistency_score": None, + } + ) + notes: dict[str, Any] = field(default_factory=dict) + + def to_dict(self) -> dict[str, Any]: + return asdict(self) + + def write_json(self, path: str | Path) -> Path: + target = Path(path) + target.parent.mkdir(parents=True, exist_ok=True) + target.write_text(json.dumps(self.to_dict(), indent=2, sort_keys=False), encoding="utf-8") + return target + + +def build_default_experiment_setting( + *, + model_name: str, + display_name: str, + group: str, + source_tier: str, + train_unit: str, + defaults: Mapping[str, Any] | None = None, + seed: int = 42, + num_steps: int = 0, + best_step: int = 0, + converged_step: int = 0, + step_name: str = "epoch", + mode: str = "scaffold_no_data", + task: str = "Track-O", + notes: Mapping[str, Any] | None = None, + metrics: Mapping[str, Any] | None = None, +) -> WildfireExperimentSetting: + setting = WildfireExperimentSetting( + benchmark=BenchmarkSection(mode=mode, task=task), + evaluation_protocol=EvaluationProtocolSection(), + model=ModelSection( + name=model_name, + display_name=display_name, + group=group, + source_tier=source_tier, + train_unit=train_unit, + defaults=dict(defaults or {}), + ), + run=RunSection( + seed=int(seed), + num_steps=int(num_steps), + best_step=int(best_step), + converged_step=int(converged_step), + step_name=step_name, + ), + notes=dict(notes or {}), + ) + if metrics: + setting.metrics.update(dict(metrics)) + return setting + + +def write_experiment_setting( + path: str | Path, + setting: WildfireExperimentSetting, +) -> Path: + return setting.write_json(path) diff --git a/pyhazards/benchmarks/wildfire_benchmark/layout.py b/pyhazards/benchmarks/wildfire_benchmark/layout.py new 
file mode 100644 index 00000000..11e5f568 --- /dev/null +++ b/pyhazards/benchmarks/wildfire_benchmark/layout.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +from dataclasses import dataclass +from pathlib import Path + +WILDFIRE_RUNS_ROOT = Path(__file__).resolve().parents[3] / "runs" / "wildfire_benchmark" + + +@dataclass(frozen=True) +class RunPaths: + track: str + run_name: str + model_name: str + seed: int + run_root: Path + model_root: Path + seed_root: Path + experiment_setting_path: Path + history_csv_path: Path + loss_curve_path: Path + metrics_path: Path + model_summary_path: Path + model_template_path: Path + benchmark_summary_path: Path + benchmark_contract_snapshot_path: Path + + +def prepare_run_paths( + track: str, + run_name: str, + model_name: str, + seed: int, + create: bool = True, +) -> RunPaths: + if track not in {"smoke", "real", "archive"}: + raise ValueError(f"Unsupported wildfire benchmark track: {track!r}") + + run_root = WILDFIRE_RUNS_ROOT / track / run_name + model_root = run_root / model_name + seed_root = model_root / f"seed_{int(seed)}" + + paths = RunPaths( + track=track, + run_name=run_name, + model_name=model_name, + seed=int(seed), + run_root=run_root, + model_root=model_root, + seed_root=seed_root, + experiment_setting_path=seed_root / "experiment_setting.json", + history_csv_path=seed_root / "history.csv", + loss_curve_path=seed_root / "loss_curve.png", + metrics_path=seed_root / "metrics.json", + model_summary_path=model_root / "model_summary.json", + model_template_path=model_root / "model_template.json", + benchmark_summary_path=run_root / "benchmark_summary.json", + benchmark_contract_snapshot_path=run_root / "benchmark_contract_snapshot.json", + ) + + if create: + seed_root.mkdir(parents=True, exist_ok=True) + return paths diff --git a/pyhazards/benchmarks/wildfire_benchmark/real_runner.py b/pyhazards/benchmarks/wildfire_benchmark/real_runner.py new file mode 100644 index 00000000..4733f89f --- /dev/null +++ 
# === pyhazards/benchmarks/wildfire_benchmark/real_runner.py (new file in diff) ===
# NOTE: the original file opens with "from __future__ import annotations".

from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterable, List, Sequence

import numpy as np
import torch
from sklearn.metrics import average_precision_score, brier_score_loss, log_loss, roc_auc_score

from pyhazards.datasets.wildfire import (
    WildfireTrackO2024RasterDataset,
    WildfireTrackO2024TabularDataset,
    WildfireTrackO2024TemporalDataset,
)
from pyhazards.models import build_model
from pyhazards.models.convlstm import ConvLSTMTrackOConfig, train_convlstm_track_o
from pyhazards.models.unet import UNetTrackOConfig, train_unet_track_o
from pyhazards.models.unet import binary_ece, normalized_consistency_score
from pyhazards.utils.hardware import auto_device

from .artifacts import build_model_template, mean_std, plot_loss_curve, write_history_csv, write_json
from .catalog import load_contract, load_model_catalog
from .experiment_settings import build_default_experiment_setting
from .layout import WILDFIRE_RUNS_ROOT, prepare_run_paths


# Representative subset of the full model catalog used for real-data dry runs.
REPRESENTATIVE_MODELS = ("logistic_regression", "random_forest", "xgboost", "lightgbm", "unet", "convlstm")


def _to_numpy(x: torch.Tensor) -> np.ndarray:
    """Detach a tensor and convert it to a host numpy array."""
    return x.detach().cpu().numpy()


def _positive_class_prob(model: torch.nn.Module, x: torch.Tensor) -> np.ndarray:
    """Run a forward pass and return P(class==1) as a flat float32 array.

    Accepts both (N, 2) two-class score outputs and already-flat outputs.
    """
    # Fix: run inference under no_grad so the forward pass does not build an
    # autograd graph (identical outputs, much lower memory on large splits).
    with torch.no_grad():
        pred = model(x)
    arr = _to_numpy(pred)
    if arr.ndim == 2 and arr.shape[1] == 2:
        return arr[:, 1].astype(np.float32)
    return arr.reshape(-1).astype(np.float32)


def _binary_metrics(y_true: np.ndarray, y_prob: np.ndarray) -> Dict[str, float]:
    """Compute discrimination, reliability and consistency metrics.

    Probabilities are clipped away from 0/1 so log-based metrics stay finite.
    Metrics that fail on degenerate inputs (e.g. single-class y_true) are
    reported as NaN instead of aborting the run.
    """
    y_true = np.asarray(y_true, dtype=np.float32).reshape(-1)
    y_prob = np.asarray(y_prob, dtype=np.float32).reshape(-1)
    y_prob = np.clip(y_prob, 1e-7, 1.0 - 1e-7)
    # NOTE(review): this is the mean gap between *sorted* probabilities, not a
    # temporal day-to-day delta — confirm it matches the contract's
    # "mean_day_to_day_change" definition before relying on it.
    mean_change = float(np.mean(np.abs(np.diff(np.sort(y_prob))))) if len(y_prob) > 1 else 0.0

    def _safe(callable_obj):
        # Guard each metric individually; see the docstring.
        try:
            return float(callable_obj())
        except Exception:
            return float("nan")

    return {
        "auprc": _safe(lambda: average_precision_score(y_true, y_prob)),
        "auroc": _safe(lambda: roc_auc_score(y_true, y_prob)),
        "brier": _safe(lambda: brier_score_loss(y_true, y_prob)),
        "nll": _safe(lambda: log_loss(y_true, y_prob, labels=[0, 1])),
        "ece": _safe(lambda: binary_ece(y_true, y_prob, n_bins=15)),
        "mean_day_to_day_change": mean_change,
        "normalized_consistency_score": normalized_consistency_score(mean_change),
    }


def _catalog_lookup(names: Sequence[str]) -> list[dict[str, Any]]:
    """Resolve catalog rows for *names*; raise KeyError listing any missing ones."""
    catalog = load_model_catalog("main")
    allowed = set(names)
    selected = [row for row in catalog if row["name"] in allowed]
    if len(selected) != len(allowed):
        found = {row["name"] for row in selected}
        missing = sorted(allowed - found)
        raise KeyError(f"Missing models in wildfire main catalog: {missing}")
    return selected


def _build_setting(
    *,
    contract: dict[str, Any],
    model_spec: dict[str, Any],
    seed: int,
    num_steps: int,
    best_step: int,
    converged_step: int,
    metrics: dict[str, float],
    notes: dict[str, Any],
    learning_weight: dict[str, Any],
) -> dict[str, Any]:
    """Build the per-seed experiment-setting dict from contract + run results."""
    setting = build_default_experiment_setting(
        model_name=str(model_spec["name"]),
        display_name=str(model_spec["display_name"]),
        group=str(model_spec["group"]),
        source_tier=str(model_spec["source_tier"]),
        train_unit=str(model_spec["train_unit"]),
        defaults=model_spec.get("defaults", {}),
        seed=int(seed),
        num_steps=int(num_steps),
        best_step=int(best_step),
        converged_step=int(converged_step),
        step_name=str(model_spec["train_unit"]),
        mode=str(contract["mode"]),
        task=str(contract["task"]),
        notes=notes,
        metrics=metrics,
    )
    # The builder's defaults do not know the contract or the actual class
    # weighting, so patch both in after construction.
    setting.benchmark.contract_version = str(contract["contract_version"])
    setting.run.learning_weight = learning_weight
    return setting.to_dict()

# (def _run_logistic_regression(*, bundle, seed, model_spec) begins here;
#  its body continues on the next chunk line)
# NOTE: sibling helpers (_positive_class_prob, _to_numpy, _binary_metrics) and
# build_model / log_loss / np come from the imports at the top of this module.
from typing import Any  # re-imported locally so this hunk stands alone


def _run_logistic_regression(
    *,
    bundle,
    seed: int,
    model_spec: dict[str, Any],
) -> dict[str, Any]:
    """Fit the catalog logistic-regression baseline and score all splits."""
    defaults = dict(model_spec.get("defaults", {}))
    model = build_model("logistic_regression", task="classification", **defaults)
    model.fit_bundle(bundle, train_split="train", val_split="val")

    # Clipped positive-class probabilities and flat float32 truths per split.
    probs: dict[str, Any] = {}
    truths: dict[str, Any] = {}
    for split_name in ("train", "val", "test"):
        split = bundle.get_split(split_name)
        probs[split_name] = np.clip(_positive_class_prob(model, split.inputs), 1e-7, 1.0 - 1e-7)
        truths[split_name] = _to_numpy(split.targets).reshape(-1).astype(np.float32)

    train_loss = float(log_loss(truths["train"], probs["train"], labels=[0, 1]))
    val_loss = float(log_loss(truths["val"], probs["val"], labels=[0, 1]))
    # sklearn exposes the solver's iteration count as array-like n_iter_.
    fitted_steps = int(np.max(getattr(model.estimator, "n_iter_", np.asarray([1]))))
    history = [{"iteration": fitted_steps, "train_loss": train_loss, "val_loss": val_loss}]

    return {
        "history": history,
        "val_metrics": _binary_metrics(truths["val"], probs["val"]),
        "test_metrics": _binary_metrics(truths["test"], probs["test"]),
        "best_step": fitted_steps,
        "converged_step": fitted_steps,
        "learning_weight": {"kind": "class_weight", "value": defaults.get("class_weight", "balanced")},
        "notes": {
            "model_source": "pyhazards.models.logistic_regression",
            "device": "cpu",
            "gpu_assignment": None,
        },
    }


def _run_random_forest(
    *,
    bundle,
    seed: int,
    model_spec: dict[str, Any],
) -> dict[str, Any]:
    """Fit the catalog random-forest baseline and score all splits."""
    defaults = dict(model_spec.get("defaults", {}))
    model = build_model("random_forest", task="classification", **defaults)
    model.fit_bundle(bundle, train_split="train", val_split="val")

    probs: dict[str, Any] = {}
    truths: dict[str, Any] = {}
    for split_name in ("train", "val", "test"):
        split = bundle.get_split(split_name)
        probs[split_name] = np.clip(_positive_class_prob(model, split.inputs), 1e-7, 1.0 - 1e-7)
        truths[split_name] = _to_numpy(split.targets).reshape(-1).astype(np.float32)

    train_loss = float(log_loss(truths["train"], probs["train"], labels=[0, 1]))
    val_loss = float(log_loss(truths["val"], probs["val"], labels=[0, 1]))
    # A forest has no training curve; record a single point at the tree count.
    n_estimators = int(getattr(model.estimator, "n_estimators", defaults.get("n_estimators", 500)))
    history = [{"tree": n_estimators, "train_loss": train_loss, "val_loss": val_loss}]

    return {
        "history": history,
        "val_metrics": _binary_metrics(truths["val"], probs["val"]),
        "test_metrics": _binary_metrics(truths["test"], probs["test"]),
        "best_step": n_estimators,
        "converged_step": n_estimators,
        "learning_weight": {"kind": "class_weight", "value": defaults.get("class_weight", "balanced_subsample")},
        "notes": {
            "model_source": "pyhazards.models.random_forest",
            "device": "cpu",
            "gpu_assignment": None,
        },
    }

# (def _run_xgboost(*, bundle, seed, model_spec, num_boost_round) begins here:
#  builds train/val/test DMatrix objects and the binary:logistic params dict;
#  its body continues on the next chunk line)
"logloss", + "max_depth": int(defaults.get("max_depth", 8)), + "eta": float(defaults.get("eta", 0.05)), + "subsample": float(defaults.get("subsample", 0.8)), + "colsample_bytree": float(defaults.get("colsample_bytree", 0.8)), + "seed": int(seed), + } + evals_result: dict[str, Any] = {} + booster = xgb.train( + params=params, + dtrain=dtrain, + num_boost_round=int(num_boost_round), + evals=[(dtrain, "train"), (dval, "val")], + evals_result=evals_result, + verbose_eval=False, + ) + + train_curve = [float(v) for v in evals_result.get("train", {}).get("logloss", [])] + val_curve = [float(v) for v in evals_result.get("val", {}).get("logloss", [])] + history = [ + {"round": idx + 1, "train_loss": tr, "val_loss": va} + for idx, (tr, va) in enumerate(zip(train_curve, val_curve, strict=True)) + ] + best_step = int(np.argmin(val_curve) + 1) if val_curve else 1 + test_prob = np.clip(np.asarray(booster.predict(dtest), dtype=np.float32), 1e-7, 1.0 - 1e-7) + val_prob = np.clip(np.asarray(booster.predict(dval), dtype=np.float32), 1e-7, 1.0 - 1e-7) + + return { + "history": history, + "val_metrics": _binary_metrics(y_val, val_prob), + "test_metrics": _binary_metrics(y_test, test_prob), + "best_step": best_step, + "converged_step": len(history), + "learning_weight": {"kind": "native_binary_objective", "value": "binary:logistic"}, + "notes": { + "model_source": "pyhazards.models.xgboost", + "device": "cpu", + "gpu_assignment": None, + }, + } + + +def _run_lightgbm( + *, + bundle, + seed: int, + model_spec: dict[str, Any], + num_boost_round: int, +) -> dict[str, Any]: + import lightgbm as lgb + + defaults = dict(model_spec.get("defaults", {})) + x_train = _to_numpy(bundle.get_split("train").inputs) + y_train = _to_numpy(bundle.get_split("train").targets).reshape(-1) + x_val = _to_numpy(bundle.get_split("val").inputs) + y_val = _to_numpy(bundle.get_split("val").targets).reshape(-1) + x_test = _to_numpy(bundle.get_split("test").inputs) + y_test = 
_to_numpy(bundle.get_split("test").targets).reshape(-1) + + dtrain = lgb.Dataset(x_train, label=y_train) + dval = lgb.Dataset(x_val, label=y_val, reference=dtrain) + evals_result: dict[str, Any] = {} + train_pos = max(float(y_train.sum()), 1.0) + train_neg = max(float(y_train.size - y_train.sum()), 1.0) + scale_pos_weight = float(defaults.get("scale_pos_weight", min(train_neg / train_pos, 500.0))) + params = { + "objective": "binary", + "metric": "binary_logloss", + "num_leaves": int(defaults.get("num_leaves", 15)), + "learning_rate": float(defaults.get("learning_rate", 0.03)), + "feature_fraction": float(defaults.get("feature_fraction", 0.8)), + "bagging_fraction": float(defaults.get("bagging_fraction", 0.8)), + "bagging_freq": int(defaults.get("bagging_freq", 1)), + "min_data_in_leaf": int(defaults.get("min_data_in_leaf", 200)), + "min_sum_hessian_in_leaf": float(defaults.get("min_sum_hessian_in_leaf", 1e-3)), + "lambda_l2": float(defaults.get("lambda_l2", 1.0)), + "max_depth": int(defaults.get("max_depth", -1)), + "scale_pos_weight": scale_pos_weight, + "seed": int(seed), + "verbose": -1, + "force_col_wise": True, + } + booster = lgb.train( + params=params, + train_set=dtrain, + num_boost_round=int(num_boost_round), + valid_sets=[dtrain, dval], + valid_names=["train", "val"], + callbacks=[ + lgb.log_evaluation(period=0), + lgb.record_evaluation(evals_result), + ], + ) + + train_curve = [float(v) for v in evals_result.get("train", {}).get("binary_logloss", [])] + val_curve = [float(v) for v in evals_result.get("val", {}).get("binary_logloss", [])] + history = [ + {"round": idx + 1, "train_loss": tr, "val_loss": va} + for idx, (tr, va) in enumerate(zip(train_curve, val_curve, strict=True)) + ] + best_step = int(np.argmin(val_curve) + 1) if val_curve else len(history) + val_prob = np.clip(np.asarray(booster.predict(x_val), dtype=np.float32), 1e-7, 1.0 - 1e-7) + test_prob = np.clip(np.asarray(booster.predict(x_test), dtype=np.float32), 1e-7, 1.0 - 1e-7) + + return { 
# --- carried-over fragment: the return dict of _run_lightgbm() (the def is on
# --- the previous chunk lines); keys: history, val_metrics, test_metrics,
# --- best_step, converged_step, learning_weight (scale_pos_weight, derived
# --- from train_neg_over_pos_clipped) and notes (lightgbm / cpu) — unchanged.

from typing import Any  # re-imported locally so this hunk stands alone


def _run_unet(
    *,
    bundle,
    seed: int,
    device: str,
    max_epochs: int,
    patience: int,
) -> dict[str, Any]:
    """Train the U-Net Track-O baseline and evaluate it on the test split."""
    train_split = bundle.get_split("train")
    val_split = bundle.get_split("val")
    test_split = bundle.get_split("test")

    cfg = UNetTrackOConfig(
        in_channels=int(bundle.feature_spec.channels or train_split.inputs.shape[1]),
        batch_size=4,
        max_epochs=int(max_epochs),
        early_stopping_rounds=int(patience),
        seed=int(seed),
        device=device,
    )
    model, history, val_metrics, best_epoch, pos_weight = train_unet_track_o(
        _to_numpy(train_split.inputs),
        _to_numpy(train_split.targets),
        _to_numpy(val_split.inputs),
        _to_numpy(val_split.targets),
        cfg,
    )

    # Fall back to CPU when CUDA was requested but is unavailable (assumes the
    # trainer returned the model on a matching device -- TODO confirm).
    eval_device = torch.device(device if str(device).startswith("cuda") and torch.cuda.is_available() else "cpu")
    # Fix: switch to inference mode so dropout/batch-norm layers do not keep
    # their training behavior while scoring the held-out test split.
    model.eval()
    with torch.no_grad():
        logits = model(test_split.inputs.to(eval_device))
    test_prob = torch.sigmoid(logits).detach().cpu().numpy().reshape(-1)
    test_true = _to_numpy(test_split.targets).reshape(-1)

    return {
        "history": history,
        "val_metrics": val_metrics,
        "test_metrics": _binary_metrics(test_true, test_prob),
        "best_step": int(best_epoch),
        "converged_step": len(history),
        "learning_weight": {"kind": "pos_weight_neg_over_pos", "value": float(pos_weight), "clip_max": float(cfg.pos_weight_clip_max)},
        "notes": {
            "model_source": "pyhazards.models.unet",
            "device": device,
            "gpu_assignment": device if str(device).startswith("cuda") else None,
        },
    }


def _run_convlstm(
    *,
    bundle,
    seed: int,
    device: str,
    max_epochs: int,
    patience: int,
    history_len: int,
) -> dict[str, Any]:
    """Train the ConvLSTM Track-O baseline and evaluate it on the test split."""
    train_split = bundle.get_split("train")
    val_split = bundle.get_split("val")
    test_split = bundle.get_split("test")

    cfg = ConvLSTMTrackOConfig(
        seq_len=int(history_len),
        # Temporal inputs are assumed (N, T, C, H, W), so channels sit at dim 2
        # -- TODO confirm against WildfireTrackO2024TemporalDataset.
        in_channels=int(bundle.feature_spec.channels or train_split.inputs.shape[2]),
        batch_size=2,
        max_epochs=int(max_epochs),
        early_stopping_rounds=int(patience),
        seed=int(seed),
        device=device,
    )
    model, history, val_metrics, best_epoch, pos_weight = train_convlstm_track_o(
        _to_numpy(train_split.inputs),
        _to_numpy(train_split.targets),
        _to_numpy(val_split.inputs),
        _to_numpy(val_split.targets),
        cfg,
    )

    eval_device = torch.device(device if str(device).startswith("cuda") and torch.cuda.is_available() else "cpu")
    # Fix: same inference-mode switch as in _run_unet.
    model.eval()
    with torch.no_grad():
        logits = model(test_split.inputs.to(eval_device))
    test_prob = torch.sigmoid(logits).detach().cpu().numpy().reshape(-1)
    test_true = _to_numpy(test_split.targets).reshape(-1)

    return {
        "history": history,
        "val_metrics": val_metrics,
        "test_metrics": _binary_metrics(test_true, test_prob),
        "best_step": int(best_epoch),
        "converged_step": len(history),
        "learning_weight": {"kind": "pos_weight_neg_over_pos", "value": float(pos_weight), "clip_max": float(cfg.pos_weight_clip_max)},
        "notes": {
            "model_source": "pyhazards.models.convlstm",
            "device": device,
            "gpu_assignment": device if str(device).startswith("cuda") else None,
        },
    }

# (def _write_per_seed_outputs(*, contract, model_spec, seed, run_name, result,
#  notes_extra) begins here: prepares the "real" track RunPaths, writes
#  history.csv and the loss curve; its body continues on the next chunk line)
metrics_payload = { + "val": result["val_metrics"], + "test": result["test_metrics"], + "best_step": int(result["best_step"]), + "converged_step": int(result["converged_step"]), + } + write_json(paths.metrics_path, metrics_payload) + + setting = _build_setting( + contract=contract, + model_spec=model_spec, + seed=int(seed), + num_steps=int(result["converged_step"]), + best_step=int(result["best_step"]), + converged_step=int(result["converged_step"]), + metrics=result["test_metrics"], + notes={**result["notes"], **notes_extra, "val_metrics": result["val_metrics"]}, + learning_weight=result["learning_weight"], + ) + write_json(paths.experiment_setting_path, setting) + return {"paths": paths, "metrics": metrics_payload, "setting": setting} + + +def run_real_baselines( + *, + cache_dir: str | Path = "/home/runyang/my-copy/data_cache/wildfire_2024_v1", + run_name: str = "track_o_2024_real_v1_first4_dryrun", + models: Sequence[str] | None = None, + seed: int = 42, + train_limit_days: int | None = None, + val_limit_days: int | None = None, + test_limit_days: int | None = None, + tabular_downsample: int = 8, + raster_downsample: int = 4, + temporal_downsample: int = 8, + temporal_history: int = 6, + xgboost_rounds: int = 120, + lightgbm_rounds: int = 120, + unet_epochs: int = 12, + convlstm_epochs: int = 12, + deep_patience: int = 4, + device: str | None = None, +) -> Path: + cache_root = Path(cache_dir) + contract = load_contract(Path(__file__).resolve().parents[2] / "configs" / "wildfire_benchmark" / "track_o_2024_real_v1.json") + selected_names = tuple(models or REPRESENTATIVE_MODELS) + model_specs = _catalog_lookup(selected_names) + + run_root = WILDFIRE_RUNS_ROOT / "real" / run_name + run_root.mkdir(parents=True, exist_ok=True) + write_json(run_root / "benchmark_contract_snapshot.json", contract) + + dataset_common = { + "cache_dir": str(cache_root), + "train_limit_days": train_limit_days, + "val_limit_days": val_limit_days, + "test_limit_days": test_limit_days, + } + 
bundles: dict[str, Any] = {} + if any(name in selected_names for name in ("logistic_regression", "random_forest", "xgboost", "lightgbm")): + bundles["tabular"] = WildfireTrackO2024TabularDataset( + downsample_factor=tabular_downsample, + **dataset_common, + ).load() + if "unet" in selected_names: + bundles["raster"] = WildfireTrackO2024RasterDataset( + downsample_factor=raster_downsample, + **dataset_common, + ).load() + if "convlstm" in selected_names: + bundles["temporal"] = WildfireTrackO2024TemporalDataset( + history=temporal_history, + downsample_factor=temporal_downsample, + **dataset_common, + ).load() + + device_text = str(device or auto_device()) + benchmark_rows: List[dict[str, Any]] = [] + templates_index: dict[str, Any] = {} + + for model_spec in model_specs: + name = str(model_spec["name"]) + model_root = run_root / name + model_root.mkdir(parents=True, exist_ok=True) + template = build_model_template(contract, model_spec) + templates_index[name] = template + write_json(model_root / "model_template.json", template) + + if name == "logistic_regression": + result = _run_logistic_regression(bundle=bundles["tabular"], seed=seed, model_spec=model_spec) + dataset_meta = bundles["tabular"].metadata + elif name == "random_forest": + result = _run_random_forest(bundle=bundles["tabular"], seed=seed, model_spec=model_spec) + dataset_meta = bundles["tabular"].metadata + elif name == "xgboost": + result = _run_xgboost(bundle=bundles["tabular"], seed=seed, model_spec=model_spec, num_boost_round=xgboost_rounds) + dataset_meta = bundles["tabular"].metadata + elif name == "lightgbm": + result = _run_lightgbm(bundle=bundles["tabular"], seed=seed, model_spec=model_spec, num_boost_round=lightgbm_rounds) + dataset_meta = bundles["tabular"].metadata + elif name == "unet": + result = _run_unet(bundle=bundles["raster"], seed=seed, device=device_text, max_epochs=unet_epochs, patience=deep_patience) + dataset_meta = bundles["raster"].metadata + elif name == "convlstm": + result 
= _run_convlstm( + bundle=bundles["temporal"], + seed=seed, + device=device_text, + max_epochs=convlstm_epochs, + patience=deep_patience, + history_len=temporal_history, + ) + dataset_meta = bundles["temporal"].metadata + else: + raise ValueError(f"Unsupported representative model: {name}") + + has_static_fuel = bool(dataset_meta.get("has_static_fuel", False)) + payload = _write_per_seed_outputs( + contract=contract, + model_spec=model_spec, + seed=seed, + run_name=run_name, + result=result, + notes_extra={ + "cache_root": str(cache_root), + "dataset_metadata": dataset_meta, + "split_version": "cache_2024_v1", + "feature_set_version": "weather_plus_fuel_v1" if has_static_fuel else "weather_only_v1_static_fuel_pending", + "static_fuel_status": "aligned" if has_static_fuel else "manifest_only", + }, + ) + + metric_stats = {k: mean_std([float(v)]) for k, v in result["test_metrics"].items()} + write_json( + model_root / "model_summary.json", + { + "model": { + "name": name, + "display_name": model_spec["display_name"], + "group": model_spec["group"], + "source_tier": model_spec["source_tier"], + "train_unit": model_spec["train_unit"], + }, + "mode": contract["mode"], + "n_seeds": 1, + "seeds": [int(seed)], + "metrics_mean_std": metric_stats, + "per_seed": [ + { + "seed": int(seed), + "best_step": int(result["best_step"]), + "converged_step": int(result["converged_step"]), + "train_unit": model_spec["train_unit"], + **result["test_metrics"], + } + ], + }, + ) + benchmark_rows.append( + { + "name": name, + "display_name": model_spec["display_name"], + "group": model_spec["group"], + "source_tier": model_spec["source_tier"], + "train_unit": model_spec["train_unit"], + "auprc_mean": metric_stats.get("auprc", {}).get("mean"), + "auprc_std": metric_stats.get("auprc", {}).get("std"), + "auroc_mean": metric_stats.get("auroc", {}).get("mean"), + "auroc_std": metric_stats.get("auroc", {}).get("std"), + "brier_mean": metric_stats.get("brier", {}).get("mean"), + "nll_mean": 
metric_stats.get("nll", {}).get("mean"), + "ece_mean": metric_stats.get("ece", {}).get("mean"), + "normalized_consistency_score_mean": metric_stats.get("normalized_consistency_score", {}).get("mean"), + } + ) + + write_json( + run_root / "benchmark_summary.json", + { + "benchmark": { + "name": contract["benchmark_name"], + "contract_version": contract["contract_version"], + "mode": contract["mode"], + "task": contract["task"], + "generated_at": datetime.now().isoformat(), + "note": "First real-data dry run on the 2024 wildfire cache.", + "cache_root": str(cache_root), + }, + "models_selected": list(selected_names), + "n_models": len(selected_names), + "seeds": [int(seed)], + "rows": benchmark_rows, + }, + ) + write_json( + run_root / "experiment_templates.json", + { + "template_version": "track_o_model_template_v1", + "generated_at": datetime.now().isoformat(), + "models": templates_index, + }, + ) + return run_root + + +__all__ = ["REPRESENTATIVE_MODELS", "run_real_baselines"] diff --git a/pyhazards/benchmarks/wildfire_benchmark/runner.py b/pyhazards/benchmarks/wildfire_benchmark/runner.py new file mode 100644 index 00000000..aaa16869 --- /dev/null +++ b/pyhazards/benchmarks/wildfire_benchmark/runner.py @@ -0,0 +1,158 @@ +from __future__ import annotations + +from datetime import datetime +from pathlib import Path +from typing import Any, Callable, Dict, List + +from .artifacts import ( + build_experiment_setting_from_run_output, + build_model_template, + mean_std, + plot_loss_curve, + write_history_csv, + write_json, +) +from .catalog import load_contract, load_model_catalog, parse_seed_list, select_models +from .layout import WILDFIRE_RUNS_ROOT, prepare_run_paths + + +def run_smoke_batch( + *, + adapter_factory: Callable[[Dict[str, Any], Dict[str, Any], Dict[str, int]], Any], + run_name: str | None = None, + track: str = "smoke", + catalog_kind: str = "main", + catalog_path: str | Path | None = None, + contract_path: str | Path | None = None, + source_tier: str 
= "all", + models: str | List[str] | None = None, + seeds: str | List[int] | None = None, + limit_models: int = 0, + step_limits: Dict[str, int] | None = None, +) -> Path: + contract = load_contract(contract_path) + catalog = load_model_catalog(catalog_kind, catalog_path) + selected_models = select_models(catalog, source_tier=source_tier, models=models, limit_models=limit_models) + if not selected_models: + raise ValueError("No wildfire benchmark models selected.") + + seed_list = parse_seed_list(seeds) + step_limits = step_limits or {"epoch": 60, "round": 300, "iteration": 250, "tree": 300} + run_name = run_name or f"smoke_batch_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + + run_root = WILDFIRE_RUNS_ROOT / track / run_name + run_root.mkdir(parents=True, exist_ok=True) + write_json(run_root / "benchmark_contract_snapshot.json", contract) + + benchmark_rows: List[Dict[str, Any]] = [] + templates_index: Dict[str, Dict[str, Any]] = {} + + for model_spec in selected_models: + model_name = str(model_spec["name"]) + model_root = run_root / model_name + model_root.mkdir(parents=True, exist_ok=True) + + template = build_model_template(contract, model_spec) + templates_index[model_name] = template + write_json(model_root / "model_template.json", template) + + metric_pool: Dict[str, List[float]] = {} + per_seed_rows: List[Dict[str, Any]] = [] + + for seed in seed_list: + paths = prepare_run_paths(track=track, run_name=run_name, model_name=model_name, seed=int(seed), create=True) + adapter = adapter_factory(model_spec, contract, step_limits) + run_output = adapter.run(seed=int(seed)) + + write_history_csv(paths.history_csv_path, run_output.history) + plot_loss_curve(run_output.history, run_output.train_unit, paths.loss_curve_path, f"{model_spec['display_name']} ({run_output.train_unit})") + + if hasattr(adapter, 'build_experiment_setting'): + experiment_setting = adapter.build_experiment_setting(seed=int(seed), run_output=run_output) + else: + experiment_setting = 
build_experiment_setting_from_run_output( + contract=contract, + model_spec=model_spec, + seed=int(seed), + run_output=run_output, + ) + write_json(paths.experiment_setting_path, experiment_setting) + write_json(paths.metrics_path, run_output.metrics) + + for key, value in run_output.metrics.items(): + metric_pool.setdefault(key, []).append(float(value)) + per_seed_rows.append( + { + 'seed': int(seed), + 'best_step': int(run_output.best_step), + 'converged_step': int(run_output.converged_step), + 'train_unit': run_output.train_unit, + **run_output.metrics, + } + ) + + metric_stats = {k: mean_std(v) for k, v in metric_pool.items()} + write_json( + model_root / 'model_summary.json', + { + 'model': { + 'name': model_name, + 'display_name': model_spec['display_name'], + 'group': model_spec['group'], + 'source_tier': model_spec['source_tier'], + 'train_unit': model_spec['train_unit'], + }, + 'mode': contract['mode'], + 'n_seeds': len(seed_list), + 'seeds': seed_list, + 'metrics_mean_std': metric_stats, + 'per_seed': per_seed_rows, + }, + ) + benchmark_rows.append( + { + 'name': model_name, + 'display_name': model_spec['display_name'], + 'group': model_spec['group'], + 'source_tier': model_spec['source_tier'], + 'train_unit': model_spec['train_unit'], + 'auprc_mean': metric_stats.get('auprc', {}).get('mean'), + 'auprc_std': metric_stats.get('auprc', {}).get('std'), + 'auroc_mean': metric_stats.get('auroc', {}).get('mean'), + 'auroc_std': metric_stats.get('auroc', {}).get('std'), + 'brier_mean': metric_stats.get('brier', {}).get('mean'), + 'nll_mean': metric_stats.get('nll', {}).get('mean'), + 'ece_mean': metric_stats.get('ece', {}).get('mean'), + 'normalized_consistency_score_mean': metric_stats.get('normalized_consistency_score', {}).get('mean'), + } + ) + + write_json( + run_root / 'benchmark_summary.json', + { + 'benchmark': { + 'name': contract['benchmark_name'], + 'contract_version': contract['contract_version'], + 'mode': contract['mode'], + 'task': 
contract['task'], + 'generated_at': datetime.now().isoformat(), + 'note': 'Adapter-level smoke run.', + 'contract_path': str(contract_path) if contract_path else 'pyhazards/configs/wildfire_benchmark/track_o_2024_v1.json', + 'catalog_kind': catalog_kind, + }, + 'models_selected': [m['name'] for m in selected_models], + 'n_models': len(selected_models), + 'seeds': seed_list, + 'rows': benchmark_rows, + }, + ) + + templates_payload = { + 'template_version': 'track_o_model_template_v1', + 'generated_at': datetime.now().isoformat(), + 'models': templates_index, + } + write_json(run_root / 'experiment_templates.json', templates_payload) + if catalog_kind == 'main': + write_json(run_root / 'experiment_templates_22.json', templates_payload) + return run_root diff --git a/pyhazards/configs/__init__.py b/pyhazards/configs/__init__.py new file mode 100644 index 00000000..c612e351 --- /dev/null +++ b/pyhazards/configs/__init__.py @@ -0,0 +1,19 @@ +from ._schema import ( + BenchmarkConfig, + DatasetRef, + ExperimentConfig, + ModelRef, + ReportConfig, + dump_experiment_config, + load_experiment_config, +) + +__all__ = [ + "BenchmarkConfig", + "DatasetRef", + "ExperimentConfig", + "ModelRef", + "ReportConfig", + "dump_experiment_config", + "load_experiment_config", +] diff --git a/pyhazards/configs/_schema.py b/pyhazards/configs/_schema.py new file mode 100644 index 00000000..bd3925b9 --- /dev/null +++ b/pyhazards/configs/_schema.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +from dataclasses import asdict, dataclass, field +from pathlib import Path +from typing import Any, Dict, List + +import yaml + +from ..tasks import get_hazard_task + +_REPORT_FORMATS = {"json", "md", "csv"} + + +@dataclass +class DatasetRef: + name: str + params: Dict[str, Any] = field(default_factory=dict) + + +@dataclass +class ModelRef: + name: str + task: str + params: Dict[str, Any] = field(default_factory=dict) + + +@dataclass +class ReportConfig: + output_dir: str = "reports" + formats: 
List[str] = field(default_factory=lambda: ["json"]) + + def __post_init__(self) -> None: + normalized = [fmt.lower() for fmt in self.formats] + unknown = [fmt for fmt in normalized if fmt not in _REPORT_FORMATS] + if unknown: + raise ValueError( + "Unknown report format(s): {unknown}. Known: {known}".format( + unknown=", ".join(sorted(set(unknown))), + known=", ".join(sorted(_REPORT_FORMATS)), + ) + ) + self.formats = normalized + + +@dataclass +class BenchmarkConfig: + name: str + hazard_task: str + metrics: List[str] = field(default_factory=list) + eval_split: str = "test" + params: Dict[str, Any] = field(default_factory=dict) + + def __post_init__(self) -> None: + self.hazard_task = get_hazard_task(self.hazard_task).name + + +@dataclass +class ExperimentConfig: + benchmark: BenchmarkConfig + dataset: DatasetRef + model: ModelRef + report: ReportConfig = field(default_factory=ReportConfig) + seed: int = 0 + metadata: Dict[str, Any] = field(default_factory=dict) + + def to_dict(self) -> Dict[str, Any]: + return asdict(self) + + +def load_experiment_config(path: str | Path) -> ExperimentConfig: + raw = yaml.safe_load(Path(path).read_text(encoding="utf-8")) or {} + return ExperimentConfig( + benchmark=BenchmarkConfig(**raw["benchmark"]), + dataset=DatasetRef(**raw["dataset"]), + model=ModelRef(**raw["model"]), + report=ReportConfig(**raw.get("report", {})), + seed=raw.get("seed", 0), + metadata=raw.get("metadata", {}), + ) + + +def dump_experiment_config(config: ExperimentConfig, path: str | Path) -> None: + payload = config.to_dict() + Path(path).write_text(yaml.safe_dump(payload, sort_keys=False), encoding="utf-8") + + +__all__ = [ + "BenchmarkConfig", + "DatasetRef", + "ExperimentConfig", + "ModelRef", + "ReportConfig", + "dump_experiment_config", + "load_experiment_config", +] diff --git a/pyhazards/configs/earthquake/eqnet_smoke.yaml b/pyhazards/configs/earthquake/eqnet_smoke.yaml new file mode 100644 index 00000000..7c23b438 --- /dev/null +++ 
b/pyhazards/configs/earthquake/eqnet_smoke.yaml @@ -0,0 +1,23 @@ +benchmark: + name: earthquake + hazard_task: earthquake.picking + metrics: + - p_pick_mae + - s_pick_mae + - f1 + eval_split: test +dataset: + name: seisbench_waveforms + params: + micro: true +model: + name: eqnet + task: regression + params: + in_channels: 3 + hidden_dim: 48 +report: + output_dir: reports/earthquake_eqnet + formats: + - json +seed: 7 diff --git a/pyhazards/configs/earthquake/eqtransformer_smoke.yaml b/pyhazards/configs/earthquake/eqtransformer_smoke.yaml new file mode 100644 index 00000000..3e6e5ace --- /dev/null +++ b/pyhazards/configs/earthquake/eqtransformer_smoke.yaml @@ -0,0 +1,23 @@ +benchmark: + name: earthquake + hazard_task: earthquake.picking + metrics: + - p_pick_mae + - s_pick_mae + - f1 + eval_split: test +dataset: + name: pick_benchmark_waveforms + params: + micro: true +model: + name: eqtransformer + task: regression + params: + in_channels: 3 + hidden_dim: 48 +report: + output_dir: reports/earthquake_eqtransformer + formats: + - json +seed: 7 diff --git a/pyhazards/configs/earthquake/gpd_smoke.yaml b/pyhazards/configs/earthquake/gpd_smoke.yaml new file mode 100644 index 00000000..e164ccd0 --- /dev/null +++ b/pyhazards/configs/earthquake/gpd_smoke.yaml @@ -0,0 +1,23 @@ +benchmark: + name: earthquake + hazard_task: earthquake.picking + metrics: + - p_pick_mae + - s_pick_mae + - f1 + eval_split: test +dataset: + name: pick_benchmark_waveforms + params: + micro: true +model: + name: gpd + task: regression + params: + in_channels: 3 + hidden_dim: 32 +report: + output_dir: reports/earthquake_gpd + formats: + - json +seed: 7 diff --git a/pyhazards/configs/earthquake/phasenet_smoke.yaml b/pyhazards/configs/earthquake/phasenet_smoke.yaml new file mode 100644 index 00000000..8f0033fd --- /dev/null +++ b/pyhazards/configs/earthquake/phasenet_smoke.yaml @@ -0,0 +1,22 @@ +benchmark: + name: earthquake + hazard_task: earthquake.picking + metrics: + - p_pick_mae + - s_pick_mae + 
eval_split: test +dataset: + name: seisbench_waveforms + params: + micro: true +model: + name: phasenet + task: regression + params: + in_channels: 3 + hidden_dim: 32 +report: + output_dir: reports/earthquake + formats: + - json +seed: 7 diff --git a/pyhazards/configs/earthquake/wavecastnet_benchmark_smoke.yaml b/pyhazards/configs/earthquake/wavecastnet_benchmark_smoke.yaml new file mode 100644 index 00000000..d451ba59 --- /dev/null +++ b/pyhazards/configs/earthquake/wavecastnet_benchmark_smoke.yaml @@ -0,0 +1,28 @@ +benchmark: + name: earthquake + hazard_task: earthquake.forecasting + metrics: + - mae + - mse + eval_split: test +dataset: + name: aefa_forecast + params: + micro: true +model: + name: wavecastnet + task: regression + params: + in_channels: 3 + height: 12 + width: 10 + temporal_in: 5 + temporal_out: 4 + hidden_dim: 16 + num_layers: 1 + dropout: 0.0 +report: + output_dir: reports/earthquake_forecasting + formats: + - json +seed: 7 diff --git a/pyhazards/configs/flood/floodcast_smoke.yaml b/pyhazards/configs/flood/floodcast_smoke.yaml new file mode 100644 index 00000000..3808baa3 --- /dev/null +++ b/pyhazards/configs/flood/floodcast_smoke.yaml @@ -0,0 +1,24 @@ +benchmark: + name: flood + hazard_task: flood.inundation + metrics: + - pixel_mae + - iou + - f1 + eval_split: test +dataset: + name: floodcastbench_inundation + params: + micro: true +model: + name: floodcast + task: regression + params: + in_channels: 3 + history: 4 + hidden_dim: 32 +report: + output_dir: reports/flood_floodcast + formats: + - json +seed: 7 diff --git a/pyhazards/configs/flood/google_flood_forecasting_smoke.yaml b/pyhazards/configs/flood/google_flood_forecasting_smoke.yaml new file mode 100644 index 00000000..6382f25e --- /dev/null +++ b/pyhazards/configs/flood/google_flood_forecasting_smoke.yaml @@ -0,0 +1,27 @@ +benchmark: + name: flood + hazard_task: flood.streamflow + metrics: + - mae + - rmse + - nse + - kge + eval_split: test +dataset: + name: caravan_streamflow + params: 
+ micro: true +model: + name: google_flood_forecasting + task: regression + params: + input_dim: 2 + hidden_dim: 32 + out_dim: 1 + history: 4 + dropout: 0.0 +report: + output_dir: reports/google_flood_forecasting + formats: + - json +seed: 7 diff --git a/pyhazards/configs/flood/hydrographnet_smoke.yaml b/pyhazards/configs/flood/hydrographnet_smoke.yaml new file mode 100644 index 00000000..7c09e9bd --- /dev/null +++ b/pyhazards/configs/flood/hydrographnet_smoke.yaml @@ -0,0 +1,25 @@ +benchmark: + name: flood + hazard_task: flood.streamflow + metrics: + - mae + - rmse + - nse + - kge + eval_split: test +dataset: + name: hydrobench_streamflow + params: + micro: true +model: + name: hydrographnet + task: regression + params: + node_in_dim: 2 + edge_in_dim: 3 + out_dim: 1 +report: + output_dir: reports/flood + formats: + - json +seed: 7 diff --git a/pyhazards/configs/flood/neuralhydrology_ealstm_smoke.yaml b/pyhazards/configs/flood/neuralhydrology_ealstm_smoke.yaml new file mode 100644 index 00000000..92f4d589 --- /dev/null +++ b/pyhazards/configs/flood/neuralhydrology_ealstm_smoke.yaml @@ -0,0 +1,25 @@ +benchmark: + name: flood + hazard_task: flood.streamflow + metrics: + - mae + - rmse + - nse + - kge + eval_split: test +dataset: + name: waterbench_streamflow + params: + micro: true +model: + name: neuralhydrology_ealstm + task: regression + params: + input_dim: 2 + hidden_dim: 64 + out_dim: 1 +report: + output_dir: reports/flood_neuralhydrology_ealstm + formats: + - json +seed: 7 diff --git a/pyhazards/configs/flood/neuralhydrology_lstm_smoke.yaml b/pyhazards/configs/flood/neuralhydrology_lstm_smoke.yaml new file mode 100644 index 00000000..50085133 --- /dev/null +++ b/pyhazards/configs/flood/neuralhydrology_lstm_smoke.yaml @@ -0,0 +1,25 @@ +benchmark: + name: flood + hazard_task: flood.streamflow + metrics: + - mae + - rmse + - nse + - kge + eval_split: test +dataset: + name: caravan_streamflow + params: + micro: true +model: + name: neuralhydrology_lstm + task: 
regression + params: + input_dim: 2 + hidden_dim: 64 + out_dim: 1 +report: + output_dir: reports/flood_neuralhydrology_lstm + formats: + - json +seed: 7 diff --git a/pyhazards/configs/flood/urbanfloodcast_smoke.yaml b/pyhazards/configs/flood/urbanfloodcast_smoke.yaml new file mode 100644 index 00000000..48bfe4f6 --- /dev/null +++ b/pyhazards/configs/flood/urbanfloodcast_smoke.yaml @@ -0,0 +1,24 @@ +benchmark: + name: flood + hazard_task: flood.inundation + metrics: + - pixel_mae + - iou + - f1 + eval_split: test +dataset: + name: floodcastbench_inundation + params: + micro: true +model: + name: urbanfloodcast + task: regression + params: + in_channels: 3 + history: 4 + base_channels: 32 +report: + output_dir: reports/flood_urbanfloodcast + formats: + - json +seed: 7 diff --git a/pyhazards/configs/tc/fourcastnet_tc_smoke.yaml b/pyhazards/configs/tc/fourcastnet_tc_smoke.yaml new file mode 100644 index 00000000..8f9967cb --- /dev/null +++ b/pyhazards/configs/tc/fourcastnet_tc_smoke.yaml @@ -0,0 +1,25 @@ +benchmark: + name: tc + hazard_task: tc.track_intensity + metrics: + - track_error + - intensity_mae + eval_split: test +dataset: + name: ibtracs_tracks + params: + micro: true +model: + name: fourcastnet_tc + task: regression + params: + input_dim: 8 + history: 6 + hidden_dim: 96 + horizon: 5 + output_dim: 3 +report: + output_dir: reports/fourcastnet_tc + formats: + - json +seed: 7 diff --git a/pyhazards/configs/tc/graphcast_tc_smoke.yaml b/pyhazards/configs/tc/graphcast_tc_smoke.yaml new file mode 100644 index 00000000..764379c4 --- /dev/null +++ b/pyhazards/configs/tc/graphcast_tc_smoke.yaml @@ -0,0 +1,24 @@ +benchmark: + name: tc + hazard_task: tc.track_intensity + metrics: + - track_error + - intensity_mae + eval_split: test +dataset: + name: ibtracs_tracks + params: + micro: true +model: + name: graphcast_tc + task: regression + params: + input_dim: 8 + hidden_dim: 96 + horizon: 5 + output_dim: 3 +report: + output_dir: reports/graphcast_tc + formats: + - json 
+seed: 7 diff --git a/pyhazards/configs/tc/hurricast_smoke.yaml b/pyhazards/configs/tc/hurricast_smoke.yaml new file mode 100644 index 00000000..326b6dec --- /dev/null +++ b/pyhazards/configs/tc/hurricast_smoke.yaml @@ -0,0 +1,25 @@ +benchmark: + name: tc + hazard_task: tc.track_intensity + metrics: + - track_error + - intensity_mae + eval_split: test +dataset: + name: ibtracs_tracks + params: + micro: true +model: + name: hurricast + task: regression + params: + input_dim: 8 + hidden_dim: 64 + num_layers: 2 + horizon: 5 + output_dim: 3 +report: + output_dir: reports/tc + formats: + - json +seed: 7 diff --git a/pyhazards/configs/tc/pangu_tc_smoke.yaml b/pyhazards/configs/tc/pangu_tc_smoke.yaml new file mode 100644 index 00000000..43c1309b --- /dev/null +++ b/pyhazards/configs/tc/pangu_tc_smoke.yaml @@ -0,0 +1,24 @@ +benchmark: + name: tc + hazard_task: tc.track_intensity + metrics: + - track_error + - intensity_mae + eval_split: test +dataset: + name: ibtracs_tracks + params: + micro: true +model: + name: pangu_tc + task: regression + params: + input_dim: 8 + hidden_dim: 96 + horizon: 5 + output_dim: 3 +report: + output_dir: reports/pangu_tc + formats: + - json +seed: 7 diff --git a/pyhazards/configs/tc/saf_net_smoke.yaml b/pyhazards/configs/tc/saf_net_smoke.yaml new file mode 100644 index 00000000..5ccc8cca --- /dev/null +++ b/pyhazards/configs/tc/saf_net_smoke.yaml @@ -0,0 +1,23 @@ +benchmark: + name: tc + hazard_task: tc.track_intensity + metrics: + - track_error + - intensity_mae + eval_split: test +dataset: + name: tcbench_alpha + params: + micro: true +model: + name: saf_net + task: regression + params: + input_dim: 8 + hidden_dim: 64 + horizon: 5 +report: + output_dir: reports/saf_net + formats: + - json +seed: 7 diff --git a/pyhazards/configs/tc/tcif_fusion_smoke.yaml b/pyhazards/configs/tc/tcif_fusion_smoke.yaml new file mode 100644 index 00000000..fd220a20 --- /dev/null +++ b/pyhazards/configs/tc/tcif_fusion_smoke.yaml @@ -0,0 +1,24 @@ +benchmark: + name: 
tc + hazard_task: tc.track_intensity + metrics: + - track_error + - intensity_mae + eval_split: test +dataset: + name: tcbench_alpha + params: + micro: true +model: + name: tcif_fusion + task: regression + params: + input_dim: 8 + hidden_dim: 64 + horizon: 5 + output_dim: 3 +report: + output_dir: reports/tcif_fusion + formats: + - json +seed: 7 diff --git a/pyhazards/configs/tc/tropicalcyclone_mlp_smoke.yaml b/pyhazards/configs/tc/tropicalcyclone_mlp_smoke.yaml new file mode 100644 index 00000000..1f9e3bdc --- /dev/null +++ b/pyhazards/configs/tc/tropicalcyclone_mlp_smoke.yaml @@ -0,0 +1,24 @@ +benchmark: + name: tc + hazard_task: tc.track_intensity + metrics: + - track_error + - intensity_mae + eval_split: test +dataset: + name: tcbench_alpha + params: + micro: true +model: + name: tropicalcyclone_mlp + task: regression + params: + input_dim: 8 + history: 6 + horizon: 5 + output_dim: 3 +report: + output_dir: reports/tc_mlp + formats: + - json +seed: 7 diff --git a/pyhazards/configs/tc/tropicyclonenet_smoke.yaml b/pyhazards/configs/tc/tropicyclonenet_smoke.yaml new file mode 100644 index 00000000..f42d2155 --- /dev/null +++ b/pyhazards/configs/tc/tropicyclonenet_smoke.yaml @@ -0,0 +1,24 @@ +benchmark: + name: tc + hazard_task: tc.track_intensity + metrics: + - track_error + - intensity_mae + eval_split: test +dataset: + name: tropicyclonenet_dataset + params: + micro: true +model: + name: tropicyclonenet + task: regression + params: + input_dim: 8 + hidden_dim: 64 + horizon: 5 + output_dim: 3 +report: + output_dir: reports/tropicyclonenet + formats: + - json +seed: 7 diff --git a/pyhazards/configs/wildfire/asufm_smoke.yaml b/pyhazards/configs/wildfire/asufm_smoke.yaml new file mode 100644 index 00000000..9493c2ff --- /dev/null +++ b/pyhazards/configs/wildfire/asufm_smoke.yaml @@ -0,0 +1,27 @@ +benchmark: + name: wildfire + hazard_task: wildfire.danger + metrics: + - mae + - rmse + eval_split: test +dataset: + name: fpa_fod_weekly + params: + micro: true + 
lookback_weeks: 12 + features: counts+time +model: + name: asufm + task: forecasting + params: + input_dim: 7 + hidden_dim: 32 + output_dim: 5 + lookback: 12 + dropout: 0.0 +report: + output_dir: reports/asufm + formats: + - json +seed: 7 diff --git a/pyhazards/configs/wildfire/firecastnet_smoke.yaml b/pyhazards/configs/wildfire/firecastnet_smoke.yaml new file mode 100644 index 00000000..6e91d22f --- /dev/null +++ b/pyhazards/configs/wildfire/firecastnet_smoke.yaml @@ -0,0 +1,23 @@ +benchmark: + name: wildfire + hazard_task: wildfire.spread + metrics: + - iou + - f1 + - burned_area_mae + eval_split: test +dataset: + name: wildfire_spread_synthetic + params: + micro: true +model: + name: firecastnet + task: segmentation + params: + in_channels: 12 + hidden_dim: 24 +report: + output_dir: reports/firecastnet + formats: + - json +seed: 7 diff --git a/pyhazards/configs/wildfire/forefire_smoke.yaml b/pyhazards/configs/wildfire/forefire_smoke.yaml new file mode 100644 index 00000000..e821e666 --- /dev/null +++ b/pyhazards/configs/wildfire/forefire_smoke.yaml @@ -0,0 +1,23 @@ +benchmark: + name: wildfire + hazard_task: wildfire.spread + metrics: + - iou + - f1 + - burned_area_mae + eval_split: test +dataset: + name: wildfire_spread_synthetic + params: + micro: true +model: + name: forefire + task: segmentation + params: + in_channels: 12 + diffusion_steps: 2 +report: + output_dir: reports/forefire + formats: + - json +seed: 7 diff --git a/pyhazards/configs/wildfire/wildfire_danger_smoke.yaml b/pyhazards/configs/wildfire/wildfire_danger_smoke.yaml new file mode 100644 index 00000000..80ca1424 --- /dev/null +++ b/pyhazards/configs/wildfire/wildfire_danger_smoke.yaml @@ -0,0 +1,23 @@ +benchmark: + name: wildfire + hazard_task: wildfire.danger + metrics: + - accuracy + - macro_f1 + eval_split: test +dataset: + name: fpa_fod_tabular + params: + micro: true + task: cause +model: + name: wildfire_fpa + task: classification + params: + in_dim: 8 + out_dim: 5 +report: + output_dir: 
reports/wildfire_danger + formats: + - json +seed: 7 diff --git a/pyhazards/configs/wildfire/wildfire_forecasting_smoke.yaml b/pyhazards/configs/wildfire/wildfire_forecasting_smoke.yaml new file mode 100644 index 00000000..cf05783d --- /dev/null +++ b/pyhazards/configs/wildfire/wildfire_forecasting_smoke.yaml @@ -0,0 +1,28 @@ +benchmark: + name: wildfire + hazard_task: wildfire.danger + metrics: + - mae + - rmse + eval_split: test +dataset: + name: fpa_fod_weekly + params: + micro: true + lookback_weeks: 12 + features: counts+time +model: + name: wildfire_forecasting + task: forecasting + params: + input_dim: 7 + hidden_dim: 32 + output_dim: 5 + lookback: 12 + num_layers: 2 + dropout: 0.0 +report: + output_dir: reports/wildfire_forecasting + formats: + - json +seed: 7 diff --git a/pyhazards/configs/wildfire/wildfire_spread_smoke.yaml b/pyhazards/configs/wildfire/wildfire_spread_smoke.yaml new file mode 100644 index 00000000..9e4f6437 --- /dev/null +++ b/pyhazards/configs/wildfire/wildfire_spread_smoke.yaml @@ -0,0 +1,21 @@ +benchmark: + name: wildfire + hazard_task: wildfire.spread + metrics: + - iou + - f1 + eval_split: test +dataset: + name: wildfire_spread_synthetic + params: + micro: true +model: + name: wildfire_aspp + task: segmentation + params: + in_channels: 12 +report: + output_dir: reports/wildfire_spread + formats: + - json +seed: 7 diff --git a/pyhazards/configs/wildfire/wildfirespreadts_smoke.yaml b/pyhazards/configs/wildfire/wildfirespreadts_smoke.yaml new file mode 100644 index 00000000..df156b66 --- /dev/null +++ b/pyhazards/configs/wildfire/wildfirespreadts_smoke.yaml @@ -0,0 +1,24 @@ +benchmark: + name: wildfire + hazard_task: wildfire.spread + metrics: + - iou + - f1 + - burned_area_mae + eval_split: test +dataset: + name: wildfire_spread_temporal_synthetic + params: + micro: true +model: + name: wildfirespreadts + task: segmentation + params: + history: 4 + in_channels: 6 + hidden_dim: 24 +report: + output_dir: reports/wildfirespreadts + 
formats: + - json +seed: 7 diff --git a/pyhazards/configs/wildfire/wrf_sfire_smoke.yaml b/pyhazards/configs/wildfire/wrf_sfire_smoke.yaml new file mode 100644 index 00000000..cd33e5c6 --- /dev/null +++ b/pyhazards/configs/wildfire/wrf_sfire_smoke.yaml @@ -0,0 +1,23 @@ +benchmark: + name: wildfire + hazard_task: wildfire.spread + metrics: + - iou + - f1 + - burned_area_mae + eval_split: test +dataset: + name: wildfire_spread_synthetic + params: + micro: true +model: + name: wrf_sfire + task: segmentation + params: + in_channels: 12 + diffusion_steps: 3 +report: + output_dir: reports/wrf_sfire + formats: + - json +seed: 7 diff --git a/pyhazards/configs/wildfire_benchmark/cache_2024_v1.yaml b/pyhazards/configs/wildfire_benchmark/cache_2024_v1.yaml new file mode 100644 index 00000000..9eb76ea6 --- /dev/null +++ b/pyhazards/configs/wildfire_benchmark/cache_2024_v1.yaml @@ -0,0 +1,30 @@ +cache: + root: /home/runyang/my-copy/data_cache/wildfire_2024_v1 + +data: + year: 2024 + weather_dir: /home/runyang/output2024 + weather_glob: pred_2024*.nc + sample_nc: pred_20240101_18.nc + weather_vars: + - T2M + - QV2M + - TQV + - U10M + - V10M + - GWETROOT + - TS + - LAI + - EFLUX + - HFLUX + - SWGNT + - SWTNT + - LWGAB + - LWGEM + firms_daily_dir: /home/runyang/ryang/firms/combine + landfire_tif: /home/runyang/ryang/landfire_fbfm40/LF2024_FBFM13_250_CONUS/Tif/LC24_F13_250.tif + +splits: + train: [2024-01-01, 2024-09-30] + val: [2024-10-01, 2024-10-31] + test: [2024-11-01, 2024-12-31] diff --git a/pyhazards/configs/wildfire_benchmark/model_catalog_22.json b/pyhazards/configs/wildfire_benchmark/model_catalog_22.json new file mode 100644 index 00000000..a1b6122d --- /dev/null +++ b/pyhazards/configs/wildfire_benchmark/model_catalog_22.json @@ -0,0 +1,295 @@ +[ + { + "name": "logistic_regression", + "display_name": "Logistic Regression", + "group": "classical_trees", + "train_unit": "iteration", + "source_tier": "no_official_repo", + "priority": 100, + "defaults": { + "solver": 
"lbfgs", + "max_iter": 500, + "class_weight": "balanced" + } + }, + { + "name": "random_forest", + "display_name": "Random Forest", + "group": "classical_trees", + "train_unit": "tree", + "source_tier": "no_official_repo", + "priority": 101, + "defaults": { + "n_estimators": 500, + "max_depth": null, + "class_weight": "balanced_subsample" + } + }, + { + "name": "xgboost", + "display_name": "XGBoost", + "group": "classical_trees", + "train_unit": "round", + "source_tier": "official_repo", + "priority": 1, + "defaults": { + "max_depth": 8, + "eta": 0.05, + "subsample": 0.8, + "colsample_bytree": 0.8, + "num_boost_round": 800 + } + }, + { + "name": "lightgbm", + "display_name": "LightGBM", + "group": "classical_trees", + "train_unit": "round", + "source_tier": "official_repo", + "priority": 2, + "defaults": { + "num_leaves": 63, + "learning_rate": 0.05, + "feature_fraction": 0.8, + "bagging_fraction": 0.8, + "num_boost_round": 800 + } + }, + { + "name": "unet", + "display_name": "U-Net", + "group": "segmentation_cnns", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 3, + "defaults": { + "optimizer": "AdamW", + "lr": 0.001, + "max_epochs": 120 + } + }, + { + "name": "resnet18_unet", + "display_name": "ResNet-18 U-Net", + "group": "segmentation_cnns", + "train_unit": "epoch", + "source_tier": "no_official_repo", + "priority": 102, + "defaults": { + "backbone": "resnet18", + "optimizer": "AdamW", + "lr": 0.001, + "max_epochs": 120 + } + }, + { + "name": "attention_unet", + "display_name": "Attention U-Net", + "group": "segmentation_cnns", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 4, + "defaults": { + "optimizer": "AdamW", + "lr": 0.001, + "max_epochs": 120 + } + }, + { + "name": "deeplabv3p", + "display_name": "DeepLabv3+", + "group": "segmentation_cnns", + "train_unit": "epoch", + "source_tier": "paper_only", + "priority": 80, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0003, + "max_epochs": 120 + } + }, + { 
+ "name": "convlstm", + "display_name": "ConvLSTM", + "group": "spatiotemporal", + "train_unit": "epoch", + "source_tier": "no_official_repo", + "priority": 103, + "defaults": { + "optimizer": "Adam", + "lr": 0.001, + "max_epochs": 120 + } + }, + { + "name": "mau", + "display_name": "MAU", + "group": "spatiotemporal", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 5, + "defaults": { + "optimizer": "Adam", + "lr": 0.0005, + "max_epochs": 120 + } + }, + { + "name": "predrnn_v2", + "display_name": "PredRNN-v2", + "group": "spatiotemporal", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 6, + "defaults": { + "optimizer": "Adam", + "lr": 0.0005, + "max_epochs": 120 + } + }, + { + "name": "rainformer", + "display_name": "Rainformer", + "group": "spatiotemporal", + "train_unit": "epoch", + "source_tier": "no_official_repo", + "priority": 104, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0002, + "max_epochs": 120 + } + }, + { + "name": "earthformer", + "display_name": "Earthformer", + "group": "spatiotemporal", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 7, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0002, + "max_epochs": 120 + } + }, + { + "name": "swinlstm", + "display_name": "SwinLSTM", + "group": "spatiotemporal", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 8, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0002, + "max_epochs": 120 + } + }, + { + "name": "earthfarseer", + "display_name": "EarthFarseer", + "group": "spatiotemporal", + "train_unit": "epoch", + "source_tier": "paper_only", + "priority": 81, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0002, + "max_epochs": 120 + } + }, + { + "name": "convgru_trajgru", + "display_name": "ConvGRU / TrajGRU", + "group": "spatiotemporal", + "train_unit": "epoch", + "source_tier": "no_official_repo", + "priority": 105, + "defaults": { + "optimizer": "Adam", + "lr": 0.001, + "max_epochs": 120 + } + }, 
+ { + "name": "tcn", + "display_name": "TCN", + "group": "spatiotemporal", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 9, + "defaults": { + "optimizer": "Adam", + "lr": 0.001, + "max_epochs": 120 + } + }, + { + "name": "utae", + "display_name": "UTAE", + "group": "spatiotemporal", + "train_unit": "epoch", + "source_tier": "paper_only", + "priority": 82, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0003, + "max_epochs": 120 + } + }, + { + "name": "segformer", + "display_name": "SegFormer", + "group": "transformers", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 10, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0002, + "max_epochs": 120 + } + }, + { + "name": "swin_unet", + "display_name": "Swin-Unet", + "group": "transformers", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 11, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0002, + "max_epochs": 120 + } + }, + { + "name": "vit_segmenter", + "display_name": "ViT-based Segmenter", + "group": "transformers", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 12, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0002, + "max_epochs": 120 + } + }, + { + "name": "deep_ensemble", + "display_name": "Deep Ensemble", + "group": "uncertainty", + "train_unit": "epoch", + "source_tier": "no_official_repo", + "priority": 106, + "defaults": { + "base_model": "tcn", + "ensemble_size": 5, + "optimizer": "AdamW", + "lr": 0.001, + "max_epochs": 120 + } + } +] diff --git a/pyhazards/configs/wildfire_benchmark/model_catalog_extensions_v1.json b/pyhazards/configs/wildfire_benchmark/model_catalog_extensions_v1.json new file mode 100644 index 00000000..2b51f979 --- /dev/null +++ b/pyhazards/configs/wildfire_benchmark/model_catalog_extensions_v1.json @@ -0,0 +1,284 @@ +[ + { + "name": "cnn_aspp", + "display_name": "CNN-ASPP", + "group": "satellite_remote_sensing", + "train_unit": "epoch", + "source_tier": "paper_only", + 
"priority": 201, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0003, + "max_epochs": 120 + } + }, + { + "name": "asufm", + "display_name": "ASUFM", + "group": "satellite_remote_sensing", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 202, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0002, + "max_epochs": 120 + } + }, + { + "name": "firecastnet", + "display_name": "FireCastNet", + "group": "seasonal_forecasting", + "train_unit": "epoch", + "source_tier": "paper_only", + "priority": 203, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0002, + "max_epochs": 120 + } + }, + { + "name": "firepred", + "display_name": "FirePred", + "group": "satellite_remote_sensing", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 203, + "defaults": { + "history": 5, + "optimizer": "AdamW", + "lr": 0.0002, + "max_epochs": 120 + } + }, + { + "name": "viirs_375m_active_fire", + "display_name": "VIIRS 375 m Active Fire", + "group": "operational_detection", + "train_unit": "epoch", + "source_tier": "official_paper", + "priority": 204, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0002, + "max_epochs": 80 + } + }, + { + "name": "modis_active_fire_c61", + "display_name": "MODIS Active Fire C6.1", + "group": "operational_detection", + "train_unit": "epoch", + "source_tier": "official_paper", + "priority": 205, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0002, + "max_epochs": 80 + } + }, + { + "name": "wrf_sfire_adapter", + "display_name": "WRF-SFIRE Adapter", + "group": "physics_simulators", + "train_unit": "iteration", + "source_tier": "official_repo", + "priority": 204, + "defaults": { + "max_iter": 120 + } + }, + { + "name": "forefire_adapter", + "display_name": "ForeFire Adapter", + "group": "physics_simulators", + "train_unit": "iteration", + "source_tier": "official_repo", + "priority": 205, + "defaults": { + "max_iter": 120 + } + }, + { + "name": "wildfiregpt", + "display_name": "WildfireGPT", + "group": 
"llm_systems", + "train_unit": "iteration", + "source_tier": "official_repo", + "priority": 206, + "defaults": { + "max_iter": 80 + } + }, + { + "name": "gemini_25_pro_wildfire_prompted", + "display_name": "Gemini 2.5 Pro Wildfire Prompted", + "group": "llm_systems", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 206, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0001, + "max_epochs": 60, + "in_channels": 6, + "hidden_dim": 96, + "prompt_dim": 32, + "num_prompt_tokens": 6, + "num_heads": 8, + "dropout": 0.1 + } + }, + { + "name": "llama4_wildfire_prompted", + "display_name": "Llama 4 Wildfire Prompted", + "group": "llm_systems", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 207, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0001, + "max_epochs": 60, + "in_channels": 6, + "hidden_dim": 80, + "prompt_dim": 32, + "num_prompt_tokens": 4, + "num_heads": 8, + "dropout": 0.1 + } + }, + { + "name": "qwen25_vl_wildfire_prompted", + "display_name": "Qwen2.5-VL Wildfire Prompted", + "group": "llm_systems", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 206, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0001, + "max_epochs": 60 + } + }, + { + "name": "internvl3_wildfire_prompted", + "display_name": "InternVL3 Wildfire Prompted", + "group": "llm_systems", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 208, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0001, + "max_epochs": 60, + "in_channels": 6, + "hidden_dim": 96, + "prompt_dim": 32, + "num_prompt_tokens": 5, + "num_heads": 6, + "dropout": 0.1 + } + }, + { + "name": "firemm_ir", + "display_name": "FireMM-IR", + "group": "llm_systems", + "train_unit": "epoch", + "source_tier": "paper_only", + "priority": 207, + "defaults": { + "optimizer": "AdamW", + "lr": 0.0001, + "max_epochs": 60 + } + }, + { + "name": "prithvi_eo_2_tl", + "display_name": "Prithvi-EO-2.0-TL", + "group": "foundation_models", + 
"train_unit": "epoch", + "source_tier": "official_repo", + "priority": 208, + "defaults": { + "backbone": "Prithvi-EO-2.0", + "optimizer": "AdamW", + "lr": 0.0001, + "max_epochs": 80 + } + }, + { + "name": "prithvi_burnscars", + "display_name": "Prithvi BurnScars", + "group": "foundation_models", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 209, + "defaults": { + "backbone": "Prithvi-BurnScars", + "optimizer": "AdamW", + "lr": 0.0001, + "max_epochs": 80 + } + }, + { + "name": "prithvi_wxc", + "display_name": "Prithvi-WxC", + "group": "foundation_models", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 210, + "defaults": { + "backbone": "Prithvi-WxC", + "optimizer": "AdamW", + "lr": 0.0001, + "max_epochs": 80 + } + }, + { + "name": "wildfirespreadts", + "display_name": "WildfireSpreadTS", + "group": "satellite_remote_sensing", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 211, + "defaults": { + "history": 4, + "optimizer": "AdamW", + "lr": 0.0002, + "max_epochs": 120 + } + }, + { + "name": "ts_satfire", + "display_name": "TS-SatFire", + "group": "satellite_remote_sensing", + "train_unit": "epoch", + "source_tier": "official_repo", + "priority": 213, + "defaults": { + "history": 5, + "optimizer": "AdamW", + "lr": 0.0002, + "max_epochs": 120 + } + }, + { + "name": "wildfire_fpa", + "display_name": "DNN-LSTM-AutoEncoder", + "group": "forecasting_systems", + "train_unit": "epoch", + "source_tier": "paper_only", + "priority": 214, + "defaults": { + "depth": 2, + "hidden_dim": 64, + "activation": "relu", + "dropout": 0.1, + "optimizer": "AdamW", + "lr": 0.0002, + "max_epochs": 120 + } + } +] diff --git a/pyhazards/configs/wildfire_benchmark/track_o_2024_real_v1.json b/pyhazards/configs/wildfire_benchmark/track_o_2024_real_v1.json new file mode 100644 index 00000000..71b0b86d --- /dev/null +++ b/pyhazards/configs/wildfire_benchmark/track_o_2024_real_v1.json @@ -0,0 +1,86 @@ +{ + 
"benchmark_name": "WildfireBench", + "contract_version": "track_o_2024_real_v1", + "mode": "real_data_v1", + "task": "Track-O", + "description": "Unified real-data wildfire occurrence benchmark protocol for 2024 using FIRMS labels, Prithvi-WxC weather predictions, and LANDFIRE static fuels.", + "data": { + "year": 2024, + "label_source": "/home/runyang/ryang/firms/combine", + "dynamic_feature_sources": [ + "/home/runyang/output2024" + ], + "static_feature_sources": [ + "/home/runyang/ryang/landfire_fbfm40" + ], + "optional_feature_sources": [ + "/home/runyang/ryang/WFIGS_Perimeters/history_2024", + "/home/runyang/ryang/WRC_Housing_Density", + "/home/runyang/ryang/LandScan_Global_2024" + ], + "index_unit": "county_day", + "split": { + "train": ["2024-01-01", "2024-09-30"], + "val": ["2024-10-01", "2024-10-31"], + "test": ["2024-11-01", "2024-12-31"] + }, + "leakage_control": { + "fit_statistics_on_train_only": true, + "no_future_covariates": true, + "fixed_split_files_required_for_real_runs": true + } + }, + "shared_training": { + "dry_run_seed_list": [42], + "final_seed_list": [42, 52, 62, 72, 82], + "optimizer_default": "AdamW", + "learning_rate_default": 0.001, + "weight_decay_default": 0.0001, + "class_imbalance": { + "policy": "pos_weight_neg_over_pos", + "clip_max": 50.0 + }, + "early_stopping": { + "enabled": true, + "monitor": "val_auprc", + "patience": 20, + "min_delta": 0.0001 + }, + "convergence_rule": { + "monitor": "val_loss", + "smoothing_window": 5, + "patience": 20, + "min_improvement": 0.0001 + }, + "report_requirements": { + "report_mean_std_across_seeds": true, + "must_include_train_curve": true, + "must_include_val_curve": true, + "must_log_best_step": true, + "must_log_converged_step": true, + "must_log_device": true, + "must_log_gpu_assignment": true + } + }, + "metrics": { + "primary": ["auprc"], + "secondary": ["auroc"], + "reliability": ["brier", "nll", "ece"], + "temporal_consistency": ["mean_day_to_day_change", 
"normalized_consistency_score"] + }, + "output_schema": { + "root": "/home/runyang/my-copy/runs/wildfire_benchmark/real/track_o_2024_real_v1", + "per_seed_files": [ + "experiment_setting.json", + "history.csv", + "loss_curve.png", + "metrics.json" + ], + "per_model_files": [ + "model_summary.json" + ], + "benchmark_files": [ + "benchmark_summary.json" + ] + } +} diff --git a/pyhazards/configs/wildfire_benchmark/track_o_2024_v1.json b/pyhazards/configs/wildfire_benchmark/track_o_2024_v1.json new file mode 100644 index 00000000..f20f9bf5 --- /dev/null +++ b/pyhazards/configs/wildfire_benchmark/track_o_2024_v1.json @@ -0,0 +1,82 @@ +{ + "benchmark_name": "WildfireBench", + "contract_version": "track_o_2024_v1", + "mode": "scaffold_no_data", + "task": "Track-O", + "description": "Unified county-day wildfire occurrence benchmark protocol for 2024 only.", + "data": { + "year": 2024, + "label_source": "/home/runyang/ryang/firms_download/combine", + "dynamic_feature_sources": [ + "/home/runyang/output2024" + ], + "static_feature_sources": [ + "/home/runyang/ryang/landfire_fbfm40" + ], + "external_optional_sources": [ + "geo", + "vegetation", + "flood" + ], + "index_unit": "county_day", + "split": { + "train": ["2024-01-01", "2024-09-30"], + "val": ["2024-10-01", "2024-10-31"], + "test": ["2024-11-01", "2024-12-31"] + }, + "leakage_control": { + "fit_statistics_on_train_only": true, + "no_future_covariates": true, + "fixed_split_files_required_for_real_runs": true + } + }, + "shared_training": { + "seed_list": [42, 52, 62, 72, 82], + "optimizer_default": "AdamW", + "learning_rate_default": 0.001, + "weight_decay_default": 0.0001, + "class_imbalance": { + "policy": "pos_weight_neg_over_pos", + "clip_max": 50.0 + }, + "early_stopping": { + "enabled": true, + "monitor": "val_auprc", + "patience": 10, + "min_delta": 0.0005 + }, + "convergence_rule": { + "monitor": "val_loss", + "smoothing_window": 5, + "patience": 5, + "min_improvement": 0.001 + }, + "report_requirements": { + 
"report_mean_std_across_seeds": true, + "must_include_train_curve": true, + "must_include_val_curve": true, + "must_log_best_step": true, + "must_log_converged_step": true + } + }, + "metrics": { + "primary": ["auprc"], + "secondary": ["auroc"], + "reliability": ["brier", "nll", "ece"], + "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"] + }, + "output_schema": { + "per_seed_files": [ + "experiment_setting.json", + "history.csv", + "loss_curve.png", + "metrics.json" + ], + "per_model_files": [ + "model_summary.json" + ], + "benchmark_files": [ + "benchmark_summary.json" + ] + } +} diff --git a/pyhazards/data/era5_subset/data_stream-oper_stepType-accum.nc b/pyhazards/data/era5_subset/data_stream-oper_stepType-accum.nc new file mode 100644 index 00000000..f61e684f Binary files /dev/null and b/pyhazards/data/era5_subset/data_stream-oper_stepType-accum.nc differ diff --git a/pyhazards/data/era5_subset/data_stream-oper_stepType-instant.nc b/pyhazards/data/era5_subset/data_stream-oper_stepType-instant.nc new file mode 100644 index 00000000..d92a0dd0 Binary files /dev/null and b/pyhazards/data/era5_subset/data_stream-oper_stepType-instant.nc differ diff --git a/pyhazards/data/load_hydrograph_data.py b/pyhazards/data/load_hydrograph_data.py new file mode 100644 index 00000000..c5841534 --- /dev/null +++ b/pyhazards/data/load_hydrograph_data.py @@ -0,0 +1,118 @@ +import torch +import xarray as xr +import numpy as np +from pathlib import Path + +from pyhazards.datasets import DataBundle, DataSplit, FeatureSpec, LabelSpec +from pyhazards.datasets.graph import GraphTemporalDataset + + +def knn_adjacency(coords: torch.Tensor, k: int = 4): + """ + Build symmetric k-NN adjacency from mesh coordinates. 
+ + coords: (N, 2) tensor of (lon, lat) + returns: (N, N) adjacency matrix + """ + N = coords.shape[0] + dist = torch.cdist(coords, coords) + + adj = torch.zeros(N, N) + knn = dist.topk(k + 1, largest=False).indices + + for i in range(N): + adj[i, knn[i, 1:]] = 1.0 + adj[knn[i, 1:], i] = 1.0 + + return adj + + +def load_hydrograph_data( + era5_path: str, + max_nodes: int = 50, +): + """ + Load ERA5 NetCDF files, use ERA5 grid as mesh, + build kNN adjacency, and return a DataBundle. + """ + + files = sorted(Path(era5_path).glob("*.nc")) + assert len(files) > 0, "No ERA5 NetCDF files found" + + + # Prefer explicit NetCDF backends so users get a clear error message when + # optional IO dependencies are missing. + open_errors = [] + ds = None + for engine in ("netcdf4", "h5netcdf"): + try: + ds = xr.open_mfdataset( + files, + combine="by_coords", + chunks={}, + engine=engine, + ) + break + except Exception as exc: + open_errors.append((engine, str(exc))) + + if ds is None: + error_lines = [f"- {eng}: {msg}" for eng, msg in open_errors] + raise RuntimeError( + "Failed to open ERA5 NetCDF files. 
Install one of the required backends " + "with `pip install netCDF4 h5netcdf` and retry.\n" + + "\n".join(error_lines) + ) + + + lats = ds["latitude"].values + lons = ds["longitude"].values + + if lats[0] > lats[-1]: + ds = ds.sortby("latitude") + lats = ds["latitude"].values + + # Build mesh coordinates from ERA5 grid + lon_grid, lat_grid = np.meshgrid(lons, lats) + mesh_coords = torch.tensor( + np.stack([lon_grid.ravel(), lat_grid.ravel()], axis=1), + dtype=torch.float, + ) + + mesh_coords = mesh_coords[:max_nodes] + + # ERA5 variables + precip = ds["tp"].values # total precipitation + temp = ds["t2m"].values # 2m temperature + + if precip.ndim == 3: + precip = precip.mean(axis=0) + temp = temp.mean(axis=0) + + node_feats = [] + for lon, lat in mesh_coords.numpy(): + i = np.argmin((lats - lat) ** 2) + j = np.argmin((lons - lon) ** 2) + node_feats.append([precip[i, j], temp[i, j]]) + + X = torch.tensor(node_feats, dtype=torch.float).unsqueeze(0).unsqueeze(0) + + Y = X[:, 0, :, 0:1] # (1, num_nodes) + + adjacency = knn_adjacency(mesh_coords, k=4) + + dataset = GraphTemporalDataset(X, Y, adjacency=adjacency) + + return DataBundle( + splits={ + "train": DataSplit(inputs=dataset, targets=None), + }, + feature_spec=FeatureSpec( + input_dim=2, + description="ERA5 precipitation + temperature on ERA5-derived mesh", + ), + label_spec=LabelSpec( + num_targets=1, + task_type="regression", + ), + ) diff --git a/pyhazards/dataset_cards/aefa_forecast.yaml b/pyhazards/dataset_cards/aefa_forecast.yaml new file mode 100644 index 00000000..66b0fd7e --- /dev/null +++ b/pyhazards/dataset_cards/aefa_forecast.yaml @@ -0,0 +1,56 @@ +slug: aefa_forecast +display_name: AEFA Forecast +hazard_family: Earthquake +source_role: Forecast Benchmark +summary: Synthetic-backed dense-grid forecasting adapter aligned to the AEFA earthquake forecasting workflow. 
+provider: AEFA forecasting ecosystem surfaced through a PyHazards adapter +geometry: Dense-grid wavefield tensors +spatial_resolution: Benchmark-defined dense sensor grid +temporal_resolution: Short history and forecast windows +update_cadence: Generated locally for smoke and benchmark-alignment runs +period_of_record: Synthetic-backed benchmark adapter +coverage: Benchmark-aligned earthquake forecasting samples +formats: PyTorch tensors via the dataset registry +overview: + - AEFA Forecast is the public forecasting adapter used by the earthquake benchmark when exercising dense-grid wavefield forecasting models. + - The current implementation is synthetic-backed, but it preserves the task shape, tensor layout, and reporting surface used by the shared earthquake evaluator. +data_characteristics: + - Multichannel dense-grid history tensors paired with future dense-grid targets. + - Registry-backed benchmark adapter rather than a raw external archive loader. + - Intended for forecasting-path validation and report generation. +typical_use_cases: + - Smoke tests for WaveCastNet-style earthquake forecasting. + - Shared forecasting benchmark runs under the earthquake evaluator. + - Validation of report exports aligned to the forecasting path. +access_links: + - label: AEFA repository + url: https://github.com/chenyk1990/aefa +inspection: null +registry: + name: aefa_forecast + example: | + from pyhazards.datasets import load_dataset + + data = load_dataset( + "aefa_forecast", + micro=True, + temporal_in=5, + temporal_out=4, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + notes: + - micro=True keeps the synthetic-backed forecasting path lightweight for validation. +primary_references: + - citation: AEFA + url: https://github.com/chenyk1990/aefa +pyhazards_usage: + - Use this adapter when you want the public earthquake forecasting benchmark surface rather than the private synthetic dataset name. 
+related_models: + - wavecastnet +related_benchmarks: + - earthquake_benchmark + - aefa +notes: + - This is a benchmark adapter, not a full external AEFA ingestion pipeline. diff --git a/pyhazards/dataset_cards/caravan_streamflow.yaml b/pyhazards/dataset_cards/caravan_streamflow.yaml new file mode 100644 index 00000000..39709664 --- /dev/null +++ b/pyhazards/dataset_cards/caravan_streamflow.yaml @@ -0,0 +1,58 @@ +slug: caravan_streamflow +display_name: Caravan +hazard_family: Flood +source_role: Streamflow Benchmark +summary: Synthetic-backed streamflow benchmark adapter aligned to the Caravan large-sample hydrology ecosystem. +provider: Caravan community dataset surfaced through a PyHazards adapter +geometry: Graph-temporal basin or node sequences +spatial_resolution: Basin or gauge nodes represented as graph elements +temporal_resolution: Rolling history windows for streamflow prediction +update_cadence: Generated locally for smoke and benchmark-alignment runs +period_of_record: Synthetic-backed benchmark adapter +coverage: Benchmark-aligned streamflow forecasting samples +formats: PyTorch graph-temporal dataset objects via the dataset registry +overview: + - Caravan is the public flood streamflow adapter used to align PyHazards with a large-sample hydrology benchmark surface. + - The current implementation is synthetic-backed, but it preserves the streamflow forecasting contract used by the shared flood benchmark. +data_characteristics: + - Graph-temporal sequences with node-level targets for next-step streamflow prediction. + - Registry-backed benchmark adapter instead of a raw Caravan ingestion pipeline. + - Supports the public streamflow smoke path for NeuralHydrology LSTM and Google Flood Forecasting. +typical_use_cases: + - Streamflow smoke tests for benchmark-linked flood models. + - Shared flood benchmark runs with streamflow metrics such as NSE and KGE. + - Regression checks for graph-temporal basin workflows. 
+access_links: + - label: Caravan paper + url: https://www.nature.com/articles/s41597-023-01975-w + - label: Caravan repository + url: https://github.com/kratzert/Caravan +inspection: null +registry: + name: caravan_streamflow + example: | + from pyhazards.datasets import load_dataset + + data = load_dataset( + "caravan_streamflow", + micro=True, + history=4, + nodes=6, + ).load() + + train = data.get_split("train") + print(len(train.inputs), train.inputs[0].x.shape) +primary_references: + - citation: Caravan - A global community dataset for large-sample hydrology + url: https://www.nature.com/articles/s41597-023-01975-w + repo_url: https://github.com/kratzert/Caravan +pyhazards_usage: + - Use this adapter when you want the public Caravan-aligned streamflow surface exposed by the flood benchmark. +related_models: + - neuralhydrology_lstm + - google_flood_forecasting +related_benchmarks: + - flood_benchmark + - caravan +notes: + - This is a synthetic-backed benchmark adapter rather than a full Caravan downloader. diff --git a/pyhazards/dataset_cards/era5.yaml b/pyhazards/dataset_cards/era5.yaml new file mode 100644 index 00000000..46d91ce5 --- /dev/null +++ b/pyhazards/dataset_cards/era5.yaml @@ -0,0 +1,42 @@ +slug: era5 +display_name: ERA5 +hazard_family: Shared Forcing +source_role: Reanalysis +summary: ECMWF's global reanalysis used as a high-resolution meteorological baseline for hazard experiments. +provider: ECMWF / Copernicus Climate Change Service (C3S) +geometry: Regular latitude-longitude grid +spatial_resolution: "~0.25 deg x 0.25 deg" +temporal_resolution: Hourly +update_cadence: Daily ERA5T updates with about 5-day latency, followed by final validated releases after 2-3 months +period_of_record: 1940-present +coverage: Global +formats: GRIB and NetCDF +overview: + - ERA5 is ECMWF's fifth-generation global reanalysis, combining historical observations with a modern data assimilation system to produce temporally consistent atmospheric fields. 
+ - PyHazards uses ERA5 as a shared meteorological baseline for flood, wildfire, and weather-aware graph workflows, including the HydroGraphNet example path. +data_characteristics: + - Global hourly fields on a regular latitude-longitude grid. + - Single-level products with optional pressure-level and model-level variables. + - Common variables include near-surface meteorology, precipitation, radiation, and atmospheric state variables. + - Recent dates may mix validated ERA5 with preliminary ERA5T data. +typical_use_cases: + - Meteorological forcing for flood, wildfire, and extreme-weather prediction models. + - Climate variability analysis and environmental feature engineering. + - Shared reanalysis input for graph and spatiotemporal benchmark pipelines. +access_links: + - label: ERA5 single levels + url: https://cds.climate.copernicus.eu/datasets/reanalysis-era5-single-levels?tab=overview + - label: Copernicus Climate Data Store + url: https://cds.climate.copernicus.eu/ +inspection: + command: python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10 +primary_references: + - citation: Hersbach et al. (2020). The ERA5 global reanalysis. + url: https://rmets.onlinelibrary.wiley.com/doi/10.1002/qj.3803 +pyhazards_usage: + - Use the inspection command for direct file validation, then feed local ERA5 files into HydroGraphNet-style helper loaders when you need graph-temporal training inputs. +notes: + - ERA5 is inspection-first in the public catalog; the downstream HydroGraphNet helper is documented here for convenience but is not a ``load_dataset(...)`` registry entry. 
+related_models: + - hydrographnet +related_benchmarks: [] diff --git a/pyhazards/dataset_cards/firms.yaml b/pyhazards/dataset_cards/firms.yaml new file mode 100644 index 00000000..24d5ef33 --- /dev/null +++ b/pyhazards/dataset_cards/firms.yaml @@ -0,0 +1,40 @@ +slug: firms +display_name: FIRMS +hazard_family: Wildfire +source_role: Active Fire Detections +summary: NASA's near-real-time active fire detections used for operational wildfire monitoring and event labeling. +provider: NASA LANCE / FIRMS +geometry: Event-based point detections +spatial_resolution: "~375 m for VIIRS, ~1 km for MODIS" +temporal_resolution: Event-based detections with multiple updates per day +update_cadence: Fire maps refresh about every 5 minutes and downloadable files refresh about hourly +period_of_record: Near-real-time archive with later standard-science replacements +coverage: Global +formats: CSV, Shapefile, GeoJSON, KML +overview: + - FIRMS distributes active fire and thermal anomaly detections derived from MODIS and VIIRS satellite sensors, with each record corresponding to a time-stamped hotspot observation. + - PyHazards uses FIRMS as a wildfire occurrence signal for operational monitoring and label construction when combined with weather and land-surface context. +data_characteristics: + - Global event-based point detections rather than gridded tensors. + - Latency is typically under 3 hours globally and faster for some U.S. and Canada products. + - Common attributes include location, detection time, fire radiative power, and confidence indicators. + - Near-real-time detections are later replaced by standard or science-quality products. +typical_use_cases: + - Operational wildfire monitoring and early detection. + - Event labeling for wildfire prediction pipelines. + - Spatiotemporal analysis of fire occurrence and activity patterns. 
+access_links: + - label: FIRMS portal + url: https://firms.modaps.eosdis.nasa.gov/ + - label: NASA Earthdata + url: https://earthdata.nasa.gov/ +inspection: + command: python -m pyhazards.datasets.firms.inspection --path /path/to/firms_data --max-items 10 + notes: + - Some archive and bulk-download routes require Earthdata login credentials. +primary_references: + - citation: Schroeder et al. (2014). The New VIIRS 375 m active fire detection data product. + url: https://doi.org/10.1016/j.rse.2013.08.008 +related_models: [] +related_benchmarks: + - wildfire_benchmark diff --git a/pyhazards/dataset_cards/floodcastbench_inundation.yaml b/pyhazards/dataset_cards/floodcastbench_inundation.yaml new file mode 100644 index 00000000..f7fdf47b --- /dev/null +++ b/pyhazards/dataset_cards/floodcastbench_inundation.yaml @@ -0,0 +1,55 @@ +slug: floodcastbench_inundation +display_name: FloodCastBench +hazard_family: Flood +source_role: Inundation Benchmark +summary: Synthetic-backed inundation benchmark adapter aligned to the FloodCastBench evaluation ecosystem. +provider: FloodCastBench ecosystem surfaced through a PyHazards adapter +geometry: Raster inundation sequences +spatial_resolution: Benchmark-defined raster tiles +temporal_resolution: Short history windows with next-horizon inundation targets +update_cadence: Generated locally for smoke and benchmark-alignment runs +period_of_record: Synthetic-backed benchmark adapter +coverage: Benchmark-aligned flood inundation samples +formats: PyTorch tensors via the dataset registry +overview: + - FloodCastBench is the public inundation adapter used by PyHazards for raster flood prediction benchmarks. + - The current implementation is synthetic-backed, but it preserves the raster task and metric surface used by the shared flood evaluator. +data_characteristics: + - Multi-step raster inputs paired with next-horizon inundation targets. + - Registry-backed benchmark adapter rather than a raw external dataset ingestion path. 
+ - Intended for pixel-level evaluation such as IoU and pixel MAE. +typical_use_cases: + - Smoke tests for FloodCast and UrbanFloodCast. + - Shared flood benchmark runs on inundation tasks. + - Regression checks for raster flood prediction outputs. +access_links: + - label: FloodCastBench repository + url: https://github.com/HydroPML/FloodCastBench +inspection: null +registry: + name: floodcastbench_inundation + example: | + from pyhazards.datasets import load_dataset + + data = load_dataset( + "floodcastbench_inundation", + micro=True, + history=4, + channels=3, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) +primary_references: + - citation: FloodCastBench + url: https://github.com/HydroPML/FloodCastBench +pyhazards_usage: + - Use this adapter when you want the public FloodCastBench-aligned inundation surface exposed by the flood benchmark. +related_models: + - floodcast + - urbanfloodcast +related_benchmarks: + - flood_benchmark + - floodcastbench +notes: + - This is a synthetic-backed benchmark adapter rather than a full FloodCastBench ingestion pipeline. diff --git a/pyhazards/dataset_cards/fpa_fod_tabular.yaml b/pyhazards/dataset_cards/fpa_fod_tabular.yaml new file mode 100644 index 00000000..247df10e --- /dev/null +++ b/pyhazards/dataset_cards/fpa_fod_tabular.yaml @@ -0,0 +1,54 @@ +slug: fpa_fod_tabular +display_name: FPA-FOD Tabular +hazard_family: Wildfire +source_role: Incident Tabular +summary: Incident-level FPA-FOD features packaged for wildfire cause and size classification. 
+provider: Fire Program Analysis Fire-Occurrence Database (FPA-FOD) adaptation in PyHazards +geometry: Tabular feature vectors +spatial_resolution: Incident-level records +temporal_resolution: Event-based +update_cadence: User-managed local inputs or deterministic micro mode +period_of_record: Depends on the supplied FPA-FOD source files +coverage: User-provided FPA-FOD coverage +formats: SQLite, DB, CSV, and Parquet inputs +overview: + - FPA-FOD Tabular converts one wildfire incident record into one feature vector for classification tasks such as incident cause prediction and grouped size prediction. + - PyHazards exposes it as a loadable dataset with a deterministic micro mode so the full source database is not required for smoke tests or quick experimentation. +data_characteristics: + - Supports task='cause' and task='size' classification targets. + - Accepts SQLite, DB, CSV, and Parquet sources. + - Micro mode keeps the path deterministic and lightweight for validation. + - Returned splits follow the standard DataBundle contract with tabular inputs and integer targets. +typical_use_cases: + - Wildfire cause classification experiments. + - Grouped fire size classification from incident records. + - Lightweight smoke and regression tests for the wildfire tabular path. +access_links: + - label: PyHazards public dataset catalog + url: /pyhazards_datasets.html +inspection: + command: python -m pyhazards.datasets.fpa_fod_tabular.inspection --task cause --micro +registry: + name: fpa_fod_tabular + example: | + from pyhazards.datasets import load_dataset + + data = load_dataset( + "fpa_fod_tabular", + task="cause", + micro=True, + normalize=True, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + notes: + - region='US' uses all available states, while region='CA' restricts to California incidents. + - cause_mode='paper5' preserves the five consolidated cause groups used by the public wildfire tabular path. 
+primary_references: + - citation: PyHazards FPA-FOD tabular adaptation for the wildfire incident classification path. + url: https://github.com/LabRAI/PyHazards +related_models: + - wildfire_fpa +related_benchmarks: + - wildfire_benchmark diff --git a/pyhazards/dataset_cards/fpa_fod_weekly.yaml b/pyhazards/dataset_cards/fpa_fod_weekly.yaml new file mode 100644 index 00000000..a7599be7 --- /dev/null +++ b/pyhazards/dataset_cards/fpa_fod_weekly.yaml @@ -0,0 +1,56 @@ +slug: fpa_fod_weekly +display_name: FPA-FOD Weekly +hazard_family: Wildfire +source_role: Weekly Forecasting +summary: Weekly FPA-FOD aggregates packaged for next-week wildfire count forecasting by size group. +provider: Fire Program Analysis Fire-Occurrence Database (FPA-FOD) adaptation in PyHazards +geometry: Temporal tabular sequences +spatial_resolution: Weekly aggregate windows +temporal_resolution: Weekly +update_cadence: User-managed local inputs or deterministic micro mode +period_of_record: Depends on the supplied FPA-FOD source files +coverage: User-provided FPA-FOD coverage +formats: SQLite, DB, CSV, and Parquet inputs +overview: + - FPA-FOD Weekly builds rolling lookback windows from weekly wildfire incident counts and predicts next-week counts for grouped size classes. + - PyHazards exposes it as a loadable forecasting dataset with a micro mode so sequence models can be validated without the full source archive. +data_characteristics: + - Predicts next-week counts for grouped size classes A/B/C/D/EFG. + - Supports feature modes with counts only or counts plus seasonal time features. + - Uses chronological splits to preserve the forecasting setting. + - Returned splits follow the DataBundle contract with sequence inputs and floating-point targets. +typical_use_cases: + - Weekly wildfire forecasting experiments. + - Sequence-model smoke tests for wildfire activity prediction. + - Lightweight benchmarking of tabular temporal wildfire baselines. 
+access_links: + - label: PyHazards public dataset catalog + url: /pyhazards_datasets.html +inspection: + command: python -m pyhazards.datasets.fpa_fod_weekly.inspection --micro --lookback-weeks 12 +registry: + name: fpa_fod_weekly + example: | + from pyhazards.datasets import load_dataset + + data = load_dataset( + "fpa_fod_weekly", + micro=True, + features="counts+time", + lookback_weeks=12, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) + notes: + - features='counts' uses only the five weekly count channels. + - features='counts+time' adds sinusoidal week-of-year features for seasonality. +primary_references: + - citation: PyHazards FPA-FOD weekly adaptation for the wildfire forecasting path. + url: https://github.com/LabRAI/PyHazards +related_models: + - wildfire_fpa + - wildfire_forecasting + - asufm +related_benchmarks: + - wildfire_benchmark diff --git a/pyhazards/dataset_cards/goesr.yaml b/pyhazards/dataset_cards/goesr.yaml new file mode 100644 index 00000000..8f6233f1 --- /dev/null +++ b/pyhazards/dataset_cards/goesr.yaml @@ -0,0 +1,37 @@ +slug: goesr +display_name: GOES-R +hazard_family: Shared Forcing +source_role: Geostationary Imagery +summary: Rapid-refresh GOES-R satellite imagery used for smoke, fire, and weather monitoring workflows. +provider: NOAA GOES-R Program +geometry: Raster imagery time series on the ABI fixed grid +spatial_resolution: "~0.5-2 km depending on spectral band" +temporal_resolution: 1-10 minute refresh depending on sector and mode +update_cadence: Continuous ingest as new files become available +period_of_record: Ongoing operational satellite archive +coverage: Western Hemisphere / Americas geostationary view +formats: NetCDF +overview: + - GOES-R provides high-frequency geostationary observations from the Advanced Baseline Imager, enabling continuous monitoring of atmospheric and surface processes across the Americas. 
+ - PyHazards uses it as rapid-refresh imagery for smoke, fire evolution, ignition monitoring, and operational situational awareness workflows. +data_characteristics: + - Raster time series rather than event records. + - Typical Mode 6 scan cadence is 10 minutes for Full Disk, 5 minutes for CONUS, and 1 minute for mesoscale sectors. + - Common products include visible and infrared imagery, brightness temperature, and fire-related thermal context. + - Distribution latency depends on the access route even when observations are near real time. +typical_use_cases: + - Early detection and monitoring of wildfire ignition and growth. + - Smoke and fire evolution analysis at high temporal resolution. + - Real-time situational awareness workflows. +access_links: + - label: GOES-R Program + url: https://www.goes-r.gov/ + - label: NOAA Open Data Dissemination + url: https://www.noaa.gov/information-technology/open-data-dissemination +inspection: + command: python -m pyhazards.datasets.goesr.inspection --path /path/to/goesr_data --max-items 10 +primary_references: + - citation: Schmit et al. (2017). A closer look at the ABI on the GOES-R series. + url: https://doi.org/10.1175/BAMS-D-15-00230.1 +related_models: [] +related_benchmarks: [] diff --git a/pyhazards/dataset_cards/hydrobench_streamflow.yaml b/pyhazards/dataset_cards/hydrobench_streamflow.yaml new file mode 100644 index 00000000..5cddb831 --- /dev/null +++ b/pyhazards/dataset_cards/hydrobench_streamflow.yaml @@ -0,0 +1,54 @@ +slug: hydrobench_streamflow +display_name: HydroBench +hazard_family: Flood +source_role: Streamflow Benchmark +summary: Synthetic-backed streamflow diagnostics adapter aligned to the HydroBench ecosystem. 
+provider: HydroBench ecosystem surfaced through a PyHazards adapter +geometry: Graph-temporal basin or node sequences +spatial_resolution: Basin or gauge nodes represented as graph elements +temporal_resolution: Rolling history windows for streamflow prediction +update_cadence: Generated locally for smoke and benchmark-alignment runs +period_of_record: Synthetic-backed benchmark adapter +coverage: Benchmark-aligned streamflow forecasting samples +formats: PyTorch graph-temporal dataset objects via the dataset registry +overview: + - HydroBench is the public flood adapter used for streamflow diagnostics and HydroGraphNet-aligned benchmark runs. + - The current implementation is synthetic-backed, but it preserves the streamflow task and metric contract exposed by the shared flood benchmark. +data_characteristics: + - Graph-temporal sequences with node-level targets for next-step streamflow prediction. + - Registry-backed benchmark adapter rather than a raw HydroBench dataset ingestion path. + - Intended for HydroGraphNet smoke runs and flood benchmark diagnostics. +typical_use_cases: + - HydroGraphNet smoke tests. + - Shared flood benchmark runs with HydroBench-aligned metrics. + - Diagnostics for graph-based flood forecasting experiments. +access_links: + - label: HydroBench repository + url: https://github.com/EMscience/HydroBench +inspection: null +registry: + name: hydrobench_streamflow + example: | + from pyhazards.datasets import load_dataset + + data = load_dataset( + "hydrobench_streamflow", + micro=True, + history=4, + nodes=6, + ).load() + + train = data.get_split("train") + print(len(train.inputs), train.inputs[0].x.shape) +primary_references: + - citation: HydroBench + url: https://github.com/EMscience/HydroBench +pyhazards_usage: + - Use this adapter when you want the public HydroBench-aligned streamflow surface exposed by the flood benchmark. 
+related_models: + - hydrographnet +related_benchmarks: + - flood_benchmark + - hydrobench +notes: + - This is a synthetic-backed benchmark adapter rather than a full HydroBench downloader. diff --git a/pyhazards/dataset_cards/ibtracs_tracks.yaml b/pyhazards/dataset_cards/ibtracs_tracks.yaml new file mode 100644 index 00000000..6794e9c4 --- /dev/null +++ b/pyhazards/dataset_cards/ibtracs_tracks.yaml @@ -0,0 +1,57 @@ +slug: ibtracs_tracks +display_name: IBTrACS +hazard_family: Tropical Cyclone +source_role: Track Archive +summary: Synthetic-backed storm-track adapter aligned to the IBTrACS tropical cyclone archive. +provider: NOAA NCEI International Best Track Archive for Climate Stewardship surfaced through a PyHazards adapter +geometry: Storm-track history sequences +spatial_resolution: Storm-centered best-track sequences +temporal_resolution: Historical track windows with forecast horizons +update_cadence: Generated locally for smoke and benchmark-alignment runs +period_of_record: Synthetic-backed benchmark adapter +coverage: Benchmark-aligned tropical cyclone track and intensity samples +formats: PyTorch tensors via the dataset registry +overview: + - IBTrACS is the public storm-track adapter used by PyHazards for shared tropical cyclone benchmark runs. + - The current implementation is synthetic-backed, but it preserves the track-intensity forecasting surface used by the shared tropical cyclone evaluator. +data_characteristics: + - Storm-history sequences with future latitude, longitude, and intensity targets. + - Registry-backed benchmark adapter rather than a raw IBTrACS archive loader. + - Supports both basin-specific hurricane models and broader tropical cyclone adapters. +typical_use_cases: + - Hurricast smoke tests. + - Shared tropical cyclone benchmark runs for track and intensity prediction. + - Benchmark-aligned validation for weather-model storm adapters. 
+access_links: + - label: IBTrACS product page + url: https://www.ncei.noaa.gov/products/international-best-track-archive +inspection: null +registry: + name: ibtracs_tracks + example: | + from pyhazards.datasets import load_dataset + + data = load_dataset( + "ibtracs_tracks", + micro=True, + history=6, + horizon=5, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) +primary_references: + - citation: IBTrACS + url: https://www.ncei.noaa.gov/products/international-best-track-archive +pyhazards_usage: + - Use this adapter when you want the public IBTrACS-aligned storm-track surface exposed by the tropical cyclone benchmark. +related_models: + - hurricast + - graphcast_tc + - pangu_tc + - fourcastnet_tc +related_benchmarks: + - tropical_cyclone_benchmark + - ibtracs +notes: + - This is a synthetic-backed benchmark adapter rather than a full IBTrACS ingestion pipeline. diff --git a/pyhazards/dataset_cards/landfire.yaml b/pyhazards/dataset_cards/landfire.yaml new file mode 100644 index 00000000..f7f5481c --- /dev/null +++ b/pyhazards/dataset_cards/landfire.yaml @@ -0,0 +1,38 @@ +slug: landfire +display_name: LANDFIRE +hazard_family: Wildfire +source_role: Fuels and Vegetation +summary: Nationwide fuels, vegetation, and canopy layers used as static wildfire covariates. +provider: U.S. Forest Service LANDFIRE Program +geometry: Gridded raster layers +spatial_resolution: "~30 m" +temporal_resolution: Static or slowly varying versioned releases +update_cadence: Annual versioned update suites +period_of_record: Versioned annual releases +coverage: United States +formats: GeoTIFF and related GIS packages +overview: + - LANDFIRE provides nationwide maps of vegetation, fuels, canopy structure, and fire regime information derived from remote sensing, field observations, and ecological modeling. + - PyHazards uses it as static landscape context for wildfire spread, behavior, and risk-oriented workflows. 
+data_characteristics: + - Raster covariates rather than event records. + - Versioned annual releases intended to stay current to the previous year. + - Common layers include fuel models, vegetation type, canopy metrics, and fire regime products. + - Distributed in projected coordinate systems with product-specific metadata. +typical_use_cases: + - Fuel characterization for wildfire behavior and spread modeling. + - Landscape-scale wildfire risk assessment. + - Static feature layers for machine-learning wildfire models. +access_links: + - label: LANDFIRE data access + url: https://landfire.gov/getdata.php + - label: LANDFIRE program overview + url: https://www.landfire.gov/ +inspection: + command: python -m pyhazards.datasets.landfire.inspection --path /path/to/landfire_data --max-items 10 +primary_references: + - citation: "Rollins (2009). LANDFIRE: A nationally consistent vegetation, wildland fire, and fuel assessment." + url: https://doi.org/10.1071/WF08088 +related_models: [] +related_benchmarks: + - wildfire_benchmark diff --git a/pyhazards/dataset_cards/merra2.yaml b/pyhazards/dataset_cards/merra2.yaml new file mode 100644 index 00000000..e468fb71 --- /dev/null +++ b/pyhazards/dataset_cards/merra2.yaml @@ -0,0 +1,39 @@ +slug: merra2 +display_name: MERRA-2 +hazard_family: Shared Forcing +source_role: Reanalysis +summary: Global atmospheric reanalysis from NASA GMAO used as a shared meteorological backbone for hazard modeling. 
+provider: NASA Global Modeling and Assimilation Office (GMAO) +geometry: Regular latitude-longitude grid +spatial_resolution: "~0.5 deg x 0.625 deg" +temporal_resolution: Hourly +update_cadence: Published monthly with typical 2-3 week latency after month end +period_of_record: 1980-present +coverage: Global +formats: NetCDF4 +overview: + - MERRA-2 is a global atmospheric reanalysis that assimilates satellite and conventional observations into a numerical weather prediction system to produce gridded, time-continuous estimates of the atmospheric state. + - In PyHazards it serves as a shared forcing and covariate source for weather-aware hazard workflows, especially when a project needs a stable long historical archive. +data_characteristics: + - Global coverage on a regular latitude-longitude grid. + - Hourly meteorology with derived 3-hourly, daily, and monthly products. + - Surface fields plus multi-level atmospheric profiles. + - Common variables include near-surface temperature, humidity, wind, precipitation, and surface fluxes. +typical_use_cases: + - Meteorological forcing for wildfire and multi-hazard prediction models. + - Climate diagnostics and long-horizon environmental covariates. + - Shared weather backbone for weather-climate benchmark pipelines. +access_links: + - label: MERRA-2 overview + url: https://gmao.gsfc.nasa.gov/gmao-products/merra-2/ + - label: NASA Earthdata + url: https://earthdata.nasa.gov/ +inspection: + command: python -m pyhazards.datasets.merra2.inspection 20260101 + notes: + - Earthdata credentials are required when raw files are not already available locally. +primary_references: + - citation: Gelaro et al. (2017). The Modern-Era Retrospective Analysis for Research and Applications, Version 2 (MERRA-2). 
+ url: https://journals.ametsoc.org/view/journals/clim/30/14/jcli-d-16-0758.1.xml +related_models: [] +related_benchmarks: [] diff --git a/pyhazards/dataset_cards/mtbs.yaml b/pyhazards/dataset_cards/mtbs.yaml new file mode 100644 index 00000000..dc91eaba --- /dev/null +++ b/pyhazards/dataset_cards/mtbs.yaml @@ -0,0 +1,38 @@ +slug: mtbs +display_name: MTBS +hazard_family: Wildfire +source_role: Burn Severity +summary: U.S. burn severity and fire perimeter products used for post-fire analysis and wildfire evaluation. +provider: U.S. Geological Survey and USDA Forest Service MTBS program +geometry: Per-fire rasters with associated vector perimeters +spatial_resolution: 30 m +temporal_resolution: Fire-event and fire-year products +update_cadence: Continuous mapping with quarterly releases +period_of_record: 1984-near present +coverage: United States +formats: GeoTIFF, Shapefile, File Geodatabase +overview: + - MTBS maps wildfire perimeters and burn severity across the United States using Landsat imagery and standardized spectral change products such as dNBR and RdNBR. + - In PyHazards it acts as a post-fire assessment source for burn extent, severity, and long-term wildfire regime studies. +data_characteristics: + - Event-based raster layers with vector perimeters for individual fires. + - Historical archive from 1984 onward, expanded through quarterly releases. + - Includes burn severity classes and supporting spectral severity products. + - Product availability depends on Landsat imagery timing and production workflow rather than near-real-time ingest. +typical_use_cases: + - Post-fire burn severity and impact assessment. + - Long-term wildfire regime and trend analysis. + - Model evaluation for fire extent and severity prediction. 
+access_links: + - label: MTBS data portal + url: https://burnseverity.cr.usgs.gov/ + - label: USGS MTBS overview + url: https://www.usgs.gov/programs/mtbs +inspection: + command: python -m pyhazards.datasets.mtbs.inspection --path /path/to/mtbs_data --max-items 10 +primary_references: + - citation: Eidenshink et al. (2007). A project for monitoring trends in burn severity. + url: https://doi.org/10.4996/fireecology.0301003 +related_models: [] +related_benchmarks: + - wildfire_benchmark diff --git a/pyhazards/dataset_cards/noaa_flood.yaml b/pyhazards/dataset_cards/noaa_flood.yaml new file mode 100644 index 00000000..a7f67a35 --- /dev/null +++ b/pyhazards/dataset_cards/noaa_flood.yaml @@ -0,0 +1,40 @@ +slug: noaa_flood +display_name: NOAA Flood Events +hazard_family: Flood +source_role: Event Records +summary: Historical NOAA storm-event flood records used as event labels and impact targets for flood studies. +provider: NOAA National Centers for Environmental Information (NCEI) +geometry: Tabular event records with administrative regions and optional point coordinates +spatial_resolution: County or zone level reporting, with points when available +temporal_resolution: Event-based +update_cadence: Updated monthly, typically 75-90 days after the end of a data month +period_of_record: 1950-present +coverage: United States +formats: Web query, bulk CSV, and database extracts +overview: + - NOAA Flood Events are derived from the NOAA Storm Events Database and document the timing, location, and impacts of severe flood-related events across the United States. + - In PyHazards they function as event-level labels or targets for flood occurrence and impact analysis, especially when paired with meteorological drivers. +data_characteristics: + - Event-based tabular records rather than gridded tensors. + - Historical archive appended as new months are processed and validated. + - Typical attributes include event timing, location, narratives, and reported damages. 
+ - Very recent months may be unavailable because of reporting and validation lag. +typical_use_cases: + - Flood occurrence and frequency analysis. + - Impact and damage assessment studies. + - Supervised learning with event records as flood targets. +access_links: + - label: Storm Events Database + url: https://www.ncei.noaa.gov/products/storm-events-database + - label: Storm Events bulk download + url: https://www.ncei.noaa.gov/stormevents/ftp.jsp + - label: NOAA NCEI + url: https://www.ncei.noaa.gov/ +inspection: + command: python -m pyhazards.datasets.noaa_flood.inspection --path /path/to/noaa_flood_data --max-items 10 +primary_references: + - citation: NOAA National Centers for Environmental Information. Storm Events Database Documentation. + url: https://www.ncei.noaa.gov/access/metadata/landing-page/bin/iso?id=gov.noaa.ncdc:C00648 +related_models: [] +related_benchmarks: + - flood_benchmark diff --git a/pyhazards/dataset_cards/pick_benchmark_waveforms.yaml b/pyhazards/dataset_cards/pick_benchmark_waveforms.yaml new file mode 100644 index 00000000..3d945ec2 --- /dev/null +++ b/pyhazards/dataset_cards/pick_benchmark_waveforms.yaml @@ -0,0 +1,55 @@ +slug: pick_benchmark_waveforms +display_name: pick-benchmark +hazard_family: Earthquake +source_role: Waveform Benchmark +summary: Synthetic-backed waveform picking adapter aligned to the pick-benchmark evaluation ecosystem. 
+provider: pick-benchmark ecosystem surfaced through a PyHazards adapter +geometry: Multichannel waveform windows +spatial_resolution: Benchmark-defined waveform channels and sample windows +temporal_resolution: Short waveform windows with phase-pick targets +update_cadence: Generated locally for smoke and benchmark-alignment runs +period_of_record: Synthetic-backed benchmark adapter +coverage: Benchmark-aligned earthquake phase-picking samples +formats: PyTorch tensors via the dataset registry +overview: + - pick-benchmark is the public waveform adapter used by the earthquake benchmark for transformer and CNN picking baselines. + - The current implementation is synthetic-backed, but it preserves the phase-picking task shape, labels, and metrics expected by the shared earthquake evaluator. +data_characteristics: + - Multichannel waveform windows paired with P- and S-arrival sample targets. + - Registry-backed benchmark adapter rather than a raw external waveform ingestion path. + - Intended for phase-picking validation and smoke tests. +typical_use_cases: + - EQTransformer and GPD smoke tests. + - Shared earthquake picking benchmark runs. + - Regression checks for waveform-based picking models. +access_links: + - label: pick-benchmark repository + url: https://github.com/seisbench/pick-benchmark +inspection: null +registry: + name: pick_benchmark_waveforms + example: | + from pyhazards.datasets import load_dataset + + data = load_dataset( + "pick_benchmark_waveforms", + micro=True, + channels=3, + length=256, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) +primary_references: + - citation: pick-benchmark + url: https://github.com/seisbench/pick-benchmark +pyhazards_usage: + - Use this adapter when you want the public pick-benchmark-aligned waveform surface exposed by the earthquake benchmark. 
+related_models: + - eqtransformer + - gpd +related_benchmarks: + - earthquake_benchmark + - pick_benchmark +notes: + - This is a synthetic-backed benchmark adapter rather than a full pick-benchmark downloader. diff --git a/pyhazards/dataset_cards/seisbench_waveforms.yaml b/pyhazards/dataset_cards/seisbench_waveforms.yaml new file mode 100644 index 00000000..6ec0ef47 --- /dev/null +++ b/pyhazards/dataset_cards/seisbench_waveforms.yaml @@ -0,0 +1,58 @@ +slug: seisbench_waveforms +display_name: SeisBench +hazard_family: Earthquake +source_role: Waveform Benchmark +summary: Synthetic-backed waveform picking adapter aligned to the SeisBench ecosystem. +provider: SeisBench ecosystem surfaced through a PyHazards adapter +geometry: Multichannel waveform windows +spatial_resolution: Benchmark-defined waveform channels and sample windows +temporal_resolution: Short waveform windows with phase-pick targets +update_cadence: Generated locally for smoke and benchmark-alignment runs +period_of_record: Synthetic-backed benchmark adapter +coverage: Benchmark-aligned earthquake phase-picking samples +formats: PyTorch tensors via the dataset registry +overview: + - SeisBench is the public waveform adapter used by PyHazards for the earthquake picking path. + - The current implementation is synthetic-backed, but it preserves the picking task shape, labels, and metrics expected by the shared earthquake evaluator. +data_characteristics: + - Multichannel waveform windows paired with P- and S-arrival sample targets. + - Registry-backed benchmark adapter rather than a raw external waveform ingestion path. + - Intended for phase-picking validation and smoke tests. +typical_use_cases: + - PhaseNet and EQNet smoke tests. + - Shared earthquake benchmark runs on picking tasks. + - Regression checks for waveform-based seismic models. 
+access_links: + - label: SeisBench paper + url: https://joss.theoj.org/papers/10.21105/joss.04418 + - label: SeisBench repository + url: https://github.com/seisbench/seisbench +inspection: null +registry: + name: seisbench_waveforms + example: | + from pyhazards.datasets import load_dataset + + data = load_dataset( + "seisbench_waveforms", + micro=True, + channels=3, + length=256, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) +primary_references: + - citation: SeisBench - A Toolbox for Machine Learning in Seismology + url: https://joss.theoj.org/papers/10.21105/joss.04418 + repo_url: https://github.com/seisbench/seisbench +pyhazards_usage: + - Use this adapter when you want the public SeisBench-aligned waveform surface exposed by the earthquake benchmark. +related_models: + - phasenet + - eqnet +related_benchmarks: + - earthquake_benchmark + - seisbench +notes: + - This is a synthetic-backed benchmark adapter rather than a full SeisBench ingestion pipeline. diff --git a/pyhazards/dataset_cards/tcbench_alpha.yaml b/pyhazards/dataset_cards/tcbench_alpha.yaml new file mode 100644 index 00000000..2e7a379f --- /dev/null +++ b/pyhazards/dataset_cards/tcbench_alpha.yaml @@ -0,0 +1,56 @@ +slug: tcbench_alpha +display_name: TCBench Alpha +hazard_family: Tropical Cyclone +source_role: Track Benchmark +summary: Synthetic-backed storm-track benchmark adapter aligned to the TCBench Alpha ecosystem. 
+provider: TCBench Alpha ecosystem surfaced through a PyHazards adapter +geometry: Storm-track history sequences +spatial_resolution: Storm-centered best-track sequences +temporal_resolution: Historical track windows with forecast horizons +update_cadence: Generated locally for smoke and benchmark-alignment runs +period_of_record: Synthetic-backed benchmark adapter +coverage: Benchmark-aligned tropical cyclone track and intensity samples +formats: PyTorch tensors via the dataset registry +overview: + - TCBench Alpha is the public storm adapter used by several tropical cyclone baselines on the shared track-intensity evaluator. + - The current implementation is synthetic-backed, but it preserves the task, metric, and reporting surface used by the shared tropical cyclone benchmark. +data_characteristics: + - Storm-history sequences with future latitude, longitude, and intensity targets. + - Registry-backed benchmark adapter rather than a raw external benchmark ingestion path. + - Intended for benchmark-linked track-intensity forecasting runs. +typical_use_cases: + - Tropical Cyclone MLP, SAF-Net, and TCIF-fusion smoke tests. + - Shared tropical cyclone benchmark runs. + - Regression checks for storm-track baselines. +access_links: + - label: TCBench Alpha repository + url: https://github.com/msgomez06/TCBench_Alpha +inspection: null +registry: + name: tcbench_alpha + example: | + from pyhazards.datasets import load_dataset + + data = load_dataset( + "tcbench_alpha", + micro=True, + history=6, + horizon=5, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) +primary_references: + - citation: TCBench Alpha + url: https://github.com/msgomez06/TCBench_Alpha +pyhazards_usage: + - Use this adapter when you want the public TCBench Alpha-aligned storm surface exposed by the tropical cyclone benchmark. 
+related_models: + - tropicalcyclone_mlp + - saf_net + - tcif_fusion +related_benchmarks: + - tropical_cyclone_benchmark + - tcbench_alpha +notes: + - This is a synthetic-backed benchmark adapter rather than a full TCBench Alpha ingestion pipeline. diff --git a/pyhazards/dataset_cards/tropicyclonenet_dataset.yaml b/pyhazards/dataset_cards/tropicyclonenet_dataset.yaml new file mode 100644 index 00000000..7e07af51 --- /dev/null +++ b/pyhazards/dataset_cards/tropicyclonenet_dataset.yaml @@ -0,0 +1,54 @@ +slug: tropicyclonenet_dataset +display_name: TropiCycloneNet-Dataset +hazard_family: Tropical Cyclone +source_role: Track Benchmark +summary: Synthetic-backed storm-track benchmark adapter aligned to the TropiCycloneNet-Dataset ecosystem. +provider: TropiCycloneNet-Dataset ecosystem surfaced through a PyHazards adapter +geometry: Storm-track history sequences +spatial_resolution: Storm-centered best-track sequences +temporal_resolution: Historical track windows with forecast horizons +update_cadence: Generated locally for smoke and benchmark-alignment runs +period_of_record: Synthetic-backed benchmark adapter +coverage: Benchmark-aligned tropical cyclone track and intensity samples +formats: PyTorch tensors via the dataset registry +overview: + - TropiCycloneNet-Dataset is the public storm adapter used by the TropiCycloneNet model path on the shared track-intensity evaluator. + - The current implementation is synthetic-backed, but it preserves the task, metric, and reporting surface used by the shared tropical cyclone benchmark. +data_characteristics: + - Storm-history sequences with future latitude, longitude, and intensity targets. + - Registry-backed benchmark adapter rather than a raw external dataset ingestion path. + - Intended for benchmark-linked storm forecasting smoke runs. +typical_use_cases: + - TropiCycloneNet smoke tests. + - Shared tropical cyclone benchmark runs. + - Regression checks for track-intensity prediction models. 
+access_links: + - label: TropiCycloneNet-Dataset repository + url: https://github.com/xiaochengfuhuo/TropiCycloneNet-Dataset +inspection: null +registry: + name: tropicyclonenet_dataset + example: | + from pyhazards.datasets import load_dataset + + data = load_dataset( + "tropicyclonenet_dataset", + micro=True, + history=6, + horizon=5, + ).load() + + train = data.get_split("train") + print(train.inputs.shape, train.targets.shape) +primary_references: + - citation: TropiCycloneNet-Dataset + url: https://github.com/xiaochengfuhuo/TropiCycloneNet-Dataset +pyhazards_usage: + - Use this adapter when you want the public TropiCycloneNet-Dataset-aligned storm surface exposed by the tropical cyclone benchmark. +related_models: + - tropicyclonenet +related_benchmarks: + - tropical_cyclone_benchmark + - tropicyclonenet_dataset +notes: + - This is a synthetic-backed benchmark adapter rather than a full TropiCycloneNet-Dataset downloader. diff --git a/pyhazards/dataset_cards/waterbench_streamflow.yaml b/pyhazards/dataset_cards/waterbench_streamflow.yaml new file mode 100644 index 00000000..f22206ce --- /dev/null +++ b/pyhazards/dataset_cards/waterbench_streamflow.yaml @@ -0,0 +1,57 @@ +slug: waterbench_streamflow +display_name: WaterBench +hazard_family: Flood +source_role: Streamflow Benchmark +summary: Synthetic-backed streamflow benchmark adapter aligned to the WaterBench ecosystem. 
+provider: WaterBench ecosystem surfaced through a PyHazards adapter +geometry: Graph-temporal basin or node sequences +spatial_resolution: Basin or gauge nodes represented as graph elements +temporal_resolution: Rolling history windows for streamflow prediction +update_cadence: Generated locally for smoke and benchmark-alignment runs +period_of_record: Synthetic-backed benchmark adapter +coverage: Benchmark-aligned streamflow forecasting samples +formats: PyTorch graph-temporal dataset objects via the dataset registry +overview: + - WaterBench is the public flood streamflow adapter used by the EA-LSTM path on the shared flood benchmark. + - The current implementation is synthetic-backed, but it preserves the streamflow forecasting contract expected by the shared evaluator. +data_characteristics: + - Graph-temporal sequences with node-level targets for next-step streamflow prediction. + - Registry-backed benchmark adapter rather than a raw WaterBench ingestion pipeline. + - Intended for benchmark-linked streamflow smoke runs. +typical_use_cases: + - EA-LSTM smoke tests. + - Shared flood benchmark runs with streamflow metrics. + - Regression checks for basin-scale forecasting experiments. 
+access_links: + - label: WaterBench abstract + url: https://neurips.cc/virtual/2023/80632 + - label: WaterBench repository + url: https://github.com/uihilab/WaterBench +inspection: null +registry: + name: waterbench_streamflow + example: | + from pyhazards.datasets import load_dataset + + data = load_dataset( + "waterbench_streamflow", + micro=True, + history=4, + nodes=6, + ).load() + + train = data.get_split("train") + print(len(train.inputs), train.inputs[0].x.shape) +primary_references: + - citation: "WaterBench: A Large-scale Benchmark Dataset for Data-driven Streamflow Forecasting" + url: https://neurips.cc/virtual/2023/80632 + repo_url: https://github.com/uihilab/WaterBench +pyhazards_usage: + - Use this adapter when you want the public WaterBench-aligned streamflow surface exposed by the flood benchmark. +related_models: + - neuralhydrology_ealstm +related_benchmarks: + - flood_benchmark + - waterbench +notes: + - This is a synthetic-backed benchmark adapter rather than a full WaterBench downloader. diff --git a/pyhazards/dataset_cards/wfigs.yaml b/pyhazards/dataset_cards/wfigs.yaml new file mode 100644 index 00000000..4d490e41 --- /dev/null +++ b/pyhazards/dataset_cards/wfigs.yaml @@ -0,0 +1,38 @@ +slug: wfigs +display_name: WFIGS +hazard_family: Wildfire +source_role: Incident Records +summary: Interagency wildfire incident records used as authoritative wildfire ground truth across the United States. 
+provider: National Interagency Fire Center (NIFC) / interagency WFIGS +geometry: Incident points and perimeters +spatial_resolution: Event-level vector geometries +temporal_resolution: Event-based with live operational updates +update_cadence: Refreshed from IRWIN roughly every 5 minutes, with perimeter changes often appearing within 15 minutes +period_of_record: Historical archive plus ongoing incidents +coverage: United States +formats: ArcGIS REST services, GeoJSON, and Shapefile downloads +overview: + - WFIGS aggregates geospatial information on active and historical wildland fire incidents, representing officially reported incidents rather than satellite-detected hotspots. + - In PyHazards it acts as an authoritative wildfire ground-truth source for validation, labeling, and comparison against remote-sensing detections. +data_characteristics: + - Event-based incident records with point and polygon geometries. + - Operational data that can change as incidents evolve and records are reconciled. + - Common fields include incident identifiers, timing, status, location, and fire size. + - Current and year-to-date layers follow different retention rules. +typical_use_cases: + - Ground-truth labeling of wildfire occurrence. + - Validation of satellite-based fire detection products. + - Analysis of ignition timing and incident geography. +access_links: + - label: NIFC Open Data WFIGS layers + url: https://data-nifc.opendata.arcgis.com/ + - label: National Interagency Fire Center + url: https://www.nifc.gov/ +inspection: + command: python -m pyhazards.datasets.wfigs.inspection --path /path/to/wfigs_data --max-items 10 +primary_references: + - citation: National Interagency Fire Center. Wildland Fire Incident Geospatial Services (WFIGS). 
+ url: https://data-nifc.opendata.arcgis.com/ +related_models: [] +related_benchmarks: + - wildfire_benchmark diff --git a/pyhazards/dataset_catalog.py b/pyhazards/dataset_catalog.py new file mode 100644 index 00000000..73501e4c --- /dev/null +++ b/pyhazards/dataset_catalog.py @@ -0,0 +1,990 @@ +from __future__ import annotations + +from collections import defaultdict +from pathlib import Path +from shlex import split as shlex_split +from typing import Dict, Iterable, List, Optional, Sequence, Set + +import yaml +from pydantic import AliasChoices, BaseModel, Field, model_validator + + +REPO_ROOT = Path(__file__).resolve().parent.parent +DATASET_CARDS_DIR = Path(__file__).resolve().parent / "dataset_cards" +DOCS_SOURCE_DIR = REPO_ROOT / "docs" / "source" +DATASET_DOCS_DIR = DOCS_SOURCE_DIR / "datasets" +DATASET_PAGE_PATH = DOCS_SOURCE_DIR / "pyhazards_datasets.rst" +API_PAGE_PATH = DOCS_SOURCE_DIR / "api" / "pyhazards.datasets.rst" + +GENERATED_MARKER = ( + ".. This file is generated by scripts/render_dataset_docs.py. Do not edit by hand." +) + +HAZARD_DISPLAY_ORDER = [ + "Shared Forcing", + "Wildfire", + "Flood", + "Earthquake", + "Tropical Cyclone", +] + +HAZARD_SECTION_SUMMARIES = { + "Shared Forcing": ( + "Cross-hazard meteorology and imagery sources that support multiple " + "PyHazards workflows, inspections, and forcing pipelines." + ), + "Wildfire": ( + "Wildfire datasets span authoritative incident records, active-fire " + "detections, fuels, burn severity, and forecast-ready benchmark adapters." + ), + "Flood": ( + "Flood datasets combine event records with streamflow and inundation " + "benchmark adapters used by the public flood models." + ), + "Earthquake": ( + "Earthquake datasets cover waveform-picking and forecasting adapters " + "that align the public models with the shared earthquake benchmark." 
+ ), + "Tropical Cyclone": ( + "Storm datasets cover best-track archives and benchmark adapters used " + "by the shared tropical cyclone track-intensity workflow." + ), +} + +STARTER_DATASETS = { + "Shared Forcing": "era5", + "Wildfire": "fpa_fod_weekly", + "Flood": "caravan_streamflow", + "Earthquake": "seisbench_waveforms", + "Tropical Cyclone": "ibtracs_tracks", +} + +SOURCE_ROLE_BADGE_ROLES = { + "Reanalysis": "secondary", + "Geostationary Imagery": "secondary", + "Event Records": "secondary", + "Incident Records": "secondary", + "Active Fire Detections": "secondary", + "Burn Severity": "secondary", + "Fuels and Vegetation": "secondary", + "Incident Tabular": "secondary", + "Weekly Forecasting": "secondary", + "Waveform Benchmark": "secondary", + "Forecast Benchmark": "secondary", + "Streamflow Benchmark": "secondary", + "Inundation Benchmark": "secondary", + "Track Archive": "secondary", + "Track Benchmark": "secondary", +} + +GEOMETRY_BADGE_ROLES = { + "Regular latitude-longitude grid": "info", + "Raster imagery time series on the ABI fixed grid": "info", + "Tabular event records with administrative regions and optional point coordinates": "info", + "Event-based point detections": "info", + "Per-fire rasters with associated vector perimeters": "info", + "Gridded raster layers": "info", + "Incident points and perimeters": "info", + "Tabular feature vectors": "info", + "Temporal tabular sequences": "info", + "Multichannel waveform windows": "info", + "Dense-grid wavefield tensors": "info", + "Graph-temporal basin or node sequences": "info", + "Raster inundation sequences": "info", + "Storm-track history sequences": "info", +} + + +class DatasetLink(BaseModel): + label: str + url: str + + +class DatasetReference(BaseModel): + citation: str + url: str + repo_url: Optional[str] = None + + +class InspectionSpec(BaseModel): + command: str + module: Optional[str] = None + notes: List[str] = Field(default_factory=list) + + @model_validator(mode="after") + def 
derive_module_from_command(self) -> "InspectionSpec": + if self.module: + return self + tokens = shlex_split(self.command) + if len(tokens) >= 3 and tokens[1] == "-m": + self.module = tokens[2] + return self + + +class RegistrySpec(BaseModel): + name: str = Field(validation_alias=AliasChoices("name", "dataset_name")) + example: str + notes: List[str] = Field(default_factory=list) + + +class DatasetCard(BaseModel): + slug: str + display_name: str + hazard_family: str + source_role: str + summary: str + provider: str + geometry: str + coverage: str + update_cadence: str + period_of_record: str + spatial_resolution: Optional[str] = None + temporal_resolution: Optional[str] = None + formats: Optional[str] = None + overview: List[str] = Field(default_factory=list) + data_characteristics: List[str] = Field(default_factory=list) + typical_use_cases: List[str] = Field(default_factory=list) + access_links: List[DatasetLink] = Field(default_factory=list) + inspection: Optional[InspectionSpec] = None + references: List[DatasetReference] = Field( + default_factory=list, + validation_alias=AliasChoices("references", "primary_references"), + ) + pyhazards_usage: List[str] = Field(default_factory=list) + registry: Optional[RegistrySpec] = None + related_models: List[str] = Field(default_factory=list) + related_benchmarks: List[str] = Field(default_factory=list) + notes: List[str] = Field(default_factory=list) + + @model_validator(mode="after") + def validate_card(self) -> "DatasetCard": + if not self.references: + raise ValueError("dataset cards require at least one reference") + if self.hazard_family not in HAZARD_DISPLAY_ORDER: + raise ValueError( + f"dataset card '{self.slug}' uses unsupported hazard_family={self.hazard_family!r}" + ) + return self + + @property + def doc_path(self) -> Path: + return DATASET_DOCS_DIR / f"{self.slug}.rst" + + @property + def doc_target(self) -> str: + return f"datasets/{self.slug}" + + @property + def primary_surface(self) -> str: + if 
self.inspection is not None: + return f"Inspection: ``{self.inspection.command}``" + if self.registry is not None: + return f"Registry: ``load_dataset('{self.registry.name}', ...)``" + return f"Source: {_single_line(self.references[0].citation)}" + + +def load_dataset_cards(cards_dir: Path = DATASET_CARDS_DIR) -> List[DatasetCard]: + cards: List[DatasetCard] = [] + seen_slugs: Set[str] = set() + for path in sorted(cards_dir.glob("*.y*ml")): + raw = yaml.safe_load(path.read_text(encoding="utf-8")) or {} + card = DatasetCard.model_validate(raw) + if path.stem != card.slug: + raise ValueError( + f"Dataset card filename must match slug: {path.name} vs {card.slug}" + ) + if card.slug in seen_slugs: + raise ValueError(f"Duplicate dataset card slug detected: {card.slug}") + seen_slugs.add(card.slug) + cards.append(card) + return cards + + +def _single_line(text: str) -> str: + return " ".join(text.split()) + + +def _indent_block(text: str, prefix: str = " ") -> str: + lines = text.rstrip().splitlines() + return "\n".join(prefix + line if line else prefix.rstrip() for line in lines) + + +def _indent_lines(lines: Sequence[str], prefix: str = " ") -> List[str]: + return [prefix + line if line else "" for line in lines] + + +def _badge(role: str, text: str) -> str: + return f":bdg-{role}:`{text}`" + + +def _ordered_unique(items: Iterable[str]) -> List[str]: + seen: Set[str] = set() + ordered: List[str] = [] + for item in items: + if item in seen: + continue + seen.add(item) + ordered.append(item) + return ordered + + +def _order_key(hazard: str) -> tuple[int, str]: + if hazard in HAZARD_DISPLAY_ORDER: + return (HAZARD_DISPLAY_ORDER.index(hazard), hazard.lower()) + return (len(HAZARD_DISPLAY_ORDER), hazard.lower()) + + +def group_cards_by_hazard(cards: Sequence[DatasetCard]) -> Dict[str, List[DatasetCard]]: + grouped: Dict[str, List[DatasetCard]] = defaultdict(list) + for card in cards: + grouped[card.hazard_family].append(card) + return { + hazard: sorted(hazard_cards, 
key=lambda item: item.display_name.lower()) + for hazard, hazard_cards in sorted( + grouped.items(), + key=lambda item: _order_key(item[0]), + ) + } + + +def _cards_in_display_order(cards: Sequence[DatasetCard]) -> List[DatasetCard]: + grouped = group_cards_by_hazard(cards) + ordered: List[DatasetCard] = [] + for hazard in HAZARD_DISPLAY_ORDER: + ordered.extend(grouped.get(hazard, [])) + return ordered + + +def _dataset_doc_link(card: DatasetCard, absolute: bool = False) -> str: + target = f"/{card.doc_target}" if absolute else card.doc_target + return f":doc:`{card.display_name} <{target}>`" + + +def _benchmark_link(slug: str, display_name: str, absolute: bool = False) -> str: + target = f"/benchmarks/{slug}" if absolute else f"benchmarks/{slug}" + return f":doc:`{display_name} <{target}>`" + + +def _model_link(display_name: str, doc_slug: str, absolute: bool = False) -> str: + target = f"/modules/{doc_slug}" if absolute else f"modules/{doc_slug}" + return f":doc:`{display_name} <{target}>`" + + +def _dataset_reference_sentence(reference: DatasetReference) -> str: + sentence = f"`{reference.citation} <{reference.url}>`_" + if reference.repo_url: + sentence += f" (`repo <{reference.repo_url}>`__)" + return sentence + "." + + +def _primary_reference_link(card: DatasetCard) -> str: + reference = card.references[0] + return f"`{reference.citation} <{reference.url}>`_" + + +def _count_phrase(count: int, noun: str) -> str: + suffix = "" if count == 1 else "s" + return f"{count} {noun}{suffix}" + + +def _stat_card(title: str, value: str, note: str) -> List[str]: + return [ + f".. grid-item-card:: {title}", + " :class-card: catalog-stat-card", + "", + " .. container:: catalog-stat-value", + "", + f" {value}", + "", + " .. 
container:: catalog-stat-note", + "", + f" {note}", + "", + ] + + +def _reference_links(card: DatasetCard) -> str: + return f"**Primary Source:** {_primary_reference_link(card)}" + + +def _inspection_literal(card: DatasetCard) -> str: + if card.inspection is None: + return "" + return f"``{card.inspection.command}``" + + +def _registry_literal(card: DatasetCard) -> str: + if card.registry is None: + return "" + return f"``load_dataset('{card.registry.name}', ...)``" + + +def _related_benchmarks(card: DatasetCard, absolute: bool = False) -> List[str]: + if not card.related_benchmarks: + return [] + from .benchmark_catalog import load_benchmark_cards + + benchmark_map = {item.slug: item for item in load_benchmark_cards()} + links: List[str] = [] + for slug in _ordered_unique(card.related_benchmarks): + benchmark = benchmark_map[slug] + links.append(_benchmark_link(slug, benchmark.display_name, absolute=absolute)) + return links + + +def _related_models(card: DatasetCard, absolute: bool = False) -> List[str]: + if not card.related_models: + return [] + from .model_catalog import card_by_registry_name, load_model_cards + + model_map = card_by_registry_name(load_model_cards()) + links: List[str] = [] + for model_name in _ordered_unique(card.related_models): + model_card = model_map[model_name] + links.append( + _model_link( + model_card.display_name, + model_card.module_doc_name, + absolute=absolute, + ) + ) + return links + + +def _dataset_usage_line(card: DatasetCard) -> List[str]: + if card.inspection is not None: + return [ + " .. container:: catalog-meta-row", + "", + f" **Inspection:** {_inspection_literal(card)}", + "", + ] + if card.registry is not None: + return [ + " .. 
container:: catalog-meta-row", + "", + f" **Registry:** {_registry_literal(card)}", + "", + ] + return [] + + +def _render_dataset_card(card: DatasetCard) -> List[str]: + role_badge = _badge( + SOURCE_ROLE_BADGE_ROLES.get(card.source_role, "secondary"), + card.source_role, + ) + geometry_badge = _badge( + GEOMETRY_BADGE_ROLES.get(card.geometry, "info"), + card.geometry, + ) + related_benchmarks = _related_benchmarks(card) + lines = [ + f".. grid-item-card:: {card.display_name}", + " :class-card: catalog-entry-card", + "", + " .. container:: catalog-entry-summary", + "", + f" {_single_line(card.summary).rstrip('.')}.", + "", + " .. container:: catalog-chip-row", + "", + f" {role_badge} {geometry_badge}", + "", + " .. container:: catalog-meta-row", + "", + f" **Coverage:** {card.coverage}", + "", + " .. container:: catalog-meta-row", + "", + f" **Update Cadence:** {card.update_cadence}", + "", + ] + lines.extend(_dataset_usage_line(card)) + if related_benchmarks: + lines.extend( + [ + " .. container:: catalog-meta-row", + "", + f" **Related Benchmarks:** {', '.join(related_benchmarks)}", + "", + ] + ) + lines.extend( + [ + " .. container:: catalog-link-row", + "", + f" **Details:** {_dataset_doc_link(card)}", + "", + " .. container:: catalog-link-row", + "", + f" {_reference_links(card)}", + "", + ] + ) + return lines + + +def _render_dataset_grid(cards: Sequence[DatasetCard]) -> List[str]: + lines: List[str] = [ + ".. grid:: 1 1 2 2", + " :gutter: 2", + " :class-container: catalog-grid", + "", + ] + for card in cards: + lines.extend(_indent_lines(_render_dataset_card(card))) + return lines + + +def _render_recommended_grid(cards: Sequence[DatasetCard]) -> List[str]: + by_slug = {card.slug: card for card in cards} + lines: List[str] = [ + ".. 
grid:: 1 1 2 4", + " :gutter: 2", + " :class-container: catalog-recommend-grid", + "", + ] + for hazard in HAZARD_DISPLAY_ORDER: + slug = STARTER_DATASETS.get(hazard) + card = by_slug.get(slug) if slug else None + if card is None: + continue + usage_line = card.primary_surface + lines.extend( + _indent_lines( + [ + f".. grid-item-card:: {hazard}", + " :class-card: catalog-detail-card", + "", + f" **Start with:** {_dataset_doc_link(card)}", + "", + f" {_single_line(card.summary).rstrip('.')}.", + "", + f" **Primary Surface:** {usage_line}", + "", + ] + ) + ) + return lines + + +def render_dataset_page(cards: Sequence[DatasetCard]) -> str: + grouped = group_cards_by_hazard(cards) + inspection_count = len([card for card in cards if card.inspection is not None]) + registry_count = len([card for card in cards if card.registry is not None]) + lines: List[str] = [ + GENERATED_MARKER, + "", + "Datasets", + "===================", + "", + "Browse PyHazards datasets across hazard families, compare source roles,", + "inspection paths, and registry surfaces, and navigate to dataset-specific", + "detail pages.", + "", + "At a Glance", + "-----------", + "", + ".. 
grid:: 1 2 4 4", + " :gutter: 2", + " :class-container: catalog-grid", + "", + ] + lines.extend( + _indent_lines( + _stat_card( + "Hazard Groups", + str(len(grouped)), + "Public dataset tabs grouped by the curated hazard-first taxonomy.", + ) + ) + ) + lines.extend( + _indent_lines( + _stat_card( + "Public Datasets", + str(len(cards)), + "Curated datasets surfaced on the public site.", + ) + ) + ) + lines.extend( + _indent_lines( + _stat_card( + "Inspection Entry Points", + str(inspection_count), + "Datasets with an explicit inspection command documented on the site.", + ) + ) + ) + lines.extend( + _indent_lines( + _stat_card( + "Registry-loadable Datasets", + str(registry_count), + "Datasets with a documented public ``load_dataset(...)`` path.", + ) + ) + ) + lines.extend( + [ + "", + "Catalog by Hazard", + "-----------------", + "", + "Use the hazard tabs below to browse the public dataset catalog. Each", + "card keeps the summary short, then links into the detail page, the", + "primary source, and the most relevant inspection or registry surface.", + "", + ".. tab-set::", + " :class: catalog-tabs", + "", + ] + ) + for hazard, hazard_cards in grouped.items(): + tab_lines: List[str] = [ + f".. tab-item:: {hazard}", + "", + " .. container:: catalog-section-note", + "", + f" {HAZARD_SECTION_SUMMARIES[hazard]}", + "", + " .. rubric:: Implemented Datasets", + "", + ] + tab_lines.extend(_indent_lines(_render_dataset_grid(hazard_cards))) + tab_lines.append("") + lines.extend(_indent_lines(tab_lines)) + lines.extend( + [ + "", + "Recommended Entry Points", + "------------------------", + "", + "If you are new to PyHazards, start with one high-signal dataset per", + "hazard group before branching into the full catalog.", + "", + ] + ) + lines.extend(_render_recommended_grid(cards)) + lines.extend( + [ + "", + "Programmatic Use", + "----------------", + "", + ".. 
code-block:: bash", + "", + " python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10", + "", + ".. code-block:: python", + "", + " from pyhazards.datasets import load_dataset", + "", + " data = load_dataset(", + ' "fpa_fod_weekly",', + " micro=True,", + " lookback_weeks=12,", + " features=\"counts+time\",", + " ).load()", + " print(sorted(data.splits.keys()))", + "", + "Use :doc:`api/pyhazards.datasets` for the developer dataset workflow", + "and package-level API lookup. Pair this page with :doc:`pyhazards_models`", + "and :doc:`pyhazards_benchmarks` when you need to trace datasets into", + "model and evaluation coverage.", + "", + ".. toctree::", + " :maxdepth: 1", + " :hidden:", + "", + ] + ) + for card in _cards_in_display_order(cards): + lines.append(f" datasets/{card.slug}") + lines.append("") + return "\n".join(lines) + + +def render_dataset_api_page(cards: Sequence[DatasetCard]) -> str: + grouped = group_cards_by_hazard(cards) + lines: List[str] = [ + GENERATED_MARKER, + "", + "pyhazards.datasets package", + "==========================", + "", + "Catalog Summary", + "---------------", + "", + "This page links the public dataset catalog, the developer dataset", + "workflow, and the package submodules used to register or inspect datasets.", + "", + "For the curated browsing experience, use :doc:`/pyhazards_datasets`.", + "", + ] + for hazard, hazard_cards in grouped.items(): + lines.extend([hazard, "~" * len(hazard), ""]) + links = ", ".join(_dataset_doc_link(card, absolute=True) for card in hazard_cards) + lines.extend([links + ".", ""]) + lines.extend( + [ + "Developer Dataset Workflow", + "--------------------------", + "", + "Use this section when you need the package-level registry and dataset", + "builder interface rather than the public catalog presentation.", + "", + "Inspect an External Dataset Source", + "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", + "", + ".. 
code-block:: bash", + "", + " python -m pyhazards.datasets.era5.inspection --path pyhazards/data/era5_subset --max-vars 10", + "", + "Load a Registered Dataset", + "~~~~~~~~~~~~~~~~~~~~~~~~~", + "", + ".. code-block:: python", + "", + " from pyhazards.datasets import available_datasets, load_dataset", + "", + " print(available_datasets())", + " data = load_dataset(", + ' \"seisbench_waveforms\",', + " micro=True,", + " ).load()", + " print(sorted(data.splits.keys()))", + "", + "Register a Custom Dataset", + "~~~~~~~~~~~~~~~~~~~~~~~~~", + "", + ".. code-block:: python", + "", + " from pyhazards.datasets import (", + " DataBundle,", + " DataSplit,", + " Dataset,", + " FeatureSpec,", + " LabelSpec,", + " register_dataset,", + " )", + "", + " class MyDataset(Dataset):", + " name = \"my_dataset\"", + "", + " def _load(self) -> DataBundle:", + " raise NotImplementedError(\"Return a populated DataBundle here.\")", + "", + " register_dataset(\"my_dataset\", MyDataset)", + "", + "Notes", + "~~~~~", + "", + "- Public dataset docs are generated from cards in ``pyhazards/dataset_cards``.", + "- Run ``python scripts/render_dataset_docs.py`` after editing cards or generated dataset docs.", + "- Use :doc:`/implementation` for the full contributor workflow.", + "", + "Submodules", + "----------", + "", + "pyhazards.datasets.base module", + "------------------------------", + "", + ".. automodule:: pyhazards.datasets.base", + " :members:", + " :undoc-members:", + " :show-inheritance:", + "", + "pyhazards.datasets.registry module", + "-----------------------------------", + "", + ".. automodule:: pyhazards.datasets.registry", + " :members:", + " :undoc-members:", + " :show-inheritance:", + "", + "pyhazards.datasets.transforms package", + "-------------------------------------", + "", + ".. 
automodule:: pyhazards.datasets.transforms", + " :members:", + " :undoc-members:", + " :show-inheritance:", + "", + "pyhazards.datasets.hazards package", + "-----------------------------------", + "", + ".. automodule:: pyhazards.datasets.hazards", + " :members:", + " :undoc-members:", + " :show-inheritance:", + "", + "Module contents", + "---------------", + "", + ".. automodule:: pyhazards.datasets", + " :members:", + " :undoc-members:", + " :show-inheritance:", + "", + ] + ) + return "\n".join(lines) + + +def _fact_rows(card: DatasetCard) -> List[tuple[str, str]]: + rows = [ + ("Provider", card.provider), + ("Hazard Family", card.hazard_family), + ("Source Role", card.source_role), + ("Coverage", card.coverage), + ("Geometry", card.geometry), + ] + if card.spatial_resolution: + rows.append(("Spatial Resolution", card.spatial_resolution)) + if card.temporal_resolution: + rows.append(("Temporal Resolution", card.temporal_resolution)) + rows.extend( + [ + ("Update Cadence", card.update_cadence), + ("Period of Record", card.period_of_record), + ] + ) + if card.formats: + rows.append(("Formats", card.formats)) + if card.inspection is not None: + rows.append(("Inspection CLI", f"``{card.inspection.command}``")) + if card.registry is not None: + rows.append(("Registry Entry", f"``{card.registry.name}``")) + return rows + + +def render_dataset_detail_page(card: DatasetCard) -> str: + related_models = _related_models(card, absolute=True) + related_benchmarks = _related_benchmarks(card, absolute=True) + usage_paragraphs = card.pyhazards_usage or [ + "Use this dataset through the public inspection or registry surface documented below." 
+ ] + lines: List[str] = [ + GENERATED_MARKER, + "", + card.display_name, + "=" * len(card.display_name), + "", + f"{_single_line(card.summary).rstrip('.')}.", + "", + "Overview", + "--------", + "", + ] + for paragraph in card.overview: + lines.append(_single_line(paragraph)) + lines.append("") + lines.extend( + [ + "At a Glance", + "-----------", + "", + ".. list-table::", + " :widths: 28 72", + " :stub-columns: 1", + "", + ] + ) + for label, value in _fact_rows(card): + lines.extend( + [ + f" * - {label}", + f" - {value}", + ] + ) + lines.extend( + [ + "", + "Data Characteristics", + "--------------------", + "", + ] + ) + for item in card.data_characteristics: + lines.append(f"- {_single_line(item)}") + lines.append("") + lines.extend(["Typical Use Cases", "~~~~~~~~~~~~~~~~~", ""]) + for item in card.typical_use_cases: + lines.append(f"- {_single_line(item)}") + lines.append("") + lines.extend(["Access", "------", ""]) + lines.append("Use the links below to access the upstream source or its public documentation.") + lines.append("") + for link in card.access_links: + lines.append(f"- `{link.label} <{link.url}>`_") + lines.append("") + lines.extend(["PyHazards Usage", "---------------", ""]) + for paragraph in usage_paragraphs: + lines.append(_single_line(paragraph)) + lines.append("") + if card.registry is not None: + lines.extend( + [ + "Registry Workflow", + "~~~~~~~~~~~~~~~~~", + "", + f"Primary dataset name: ``{card.registry.name}``", + "", + ".. 
code-block:: python", + "", + _indent_block(card.registry.example), + "", + ] + ) + for note in card.registry.notes: + lines.append(f"- {_single_line(note)}") + if card.registry.notes: + lines.append("") + else: + lines.extend( + [ + "This dataset is currently documented as an external or inspection-first", + "source rather than a public ``load_dataset(...)`` entrypoint.", + "", + ] + ) + if related_benchmarks or related_models: + lines.extend(["Related Coverage", "~~~~~~~~~~~~~~~~", ""]) + if related_benchmarks: + lines.append(f"**Benchmarks:** {', '.join(related_benchmarks)}") + lines.append("") + if related_models: + lines.append(f"**Representative Models:** {', '.join(related_models)}") + lines.append("") + lines.extend(["Inspection Workflow", "-------------------", ""]) + if card.inspection is not None: + lines.extend( + [ + "Use the documented inspection path below to validate local files before training or analysis.", + "", + ".. code-block:: bash", + "", + f" {card.inspection.command}", + "", + ] + ) + for note in card.inspection.notes: + lines.append(f"- {_single_line(note)}") + if card.inspection.notes: + lines.append("") + else: + lines.extend( + [ + "This dataset is currently surfaced as a registry-backed benchmark adapter,", + "so there is no standalone inspection CLI documented for it.", + "", + ] + ) + if card.notes: + lines.extend(["Notes", "-----", ""]) + for note in card.notes: + lines.append(f"- {_single_line(note)}") + lines.append("") + lines.extend(["Reference", "---------", ""]) + for reference in card.references: + lines.append(f"- {_dataset_reference_sentence(reference)}") + lines.append("") + return "\n".join(lines) + + +def rendered_dataset_docs(cards: Sequence[DatasetCard]) -> Dict[Path, str]: + targets: Dict[Path, str] = { + DATASET_PAGE_PATH: render_dataset_page(cards), + API_PAGE_PATH: render_dataset_api_page(cards), + } + for card in cards: + targets[card.doc_path] = render_dataset_detail_page(card) + return targets + + +def 
sync_generated_dataset_docs( + cards: Sequence[DatasetCard], + check: bool = False, +) -> List[Path]: + changes: List[Path] = [] + targets = rendered_dataset_docs(cards) + for path, content in targets.items(): + current = path.read_text(encoding="utf-8") if path.exists() else None + if current != content: + changes.append(path) + if not check: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(content, encoding="utf-8") + + managed_paths = set(targets.keys()) + if DATASET_DOCS_DIR.exists(): + for path in DATASET_DOCS_DIR.glob("*.rst"): + if path in managed_paths or not path.exists(): + continue + text = path.read_text(encoding="utf-8") + if GENERATED_MARKER not in text: + continue + changes.append(path) + if not check: + path.unlink() + + return changes + + +def dataset_catalog_alignment_issues( + cards: Optional[Sequence[DatasetCard]] = None, +) -> List[str]: + if cards is None: + cards = load_dataset_cards() + + from .benchmark_catalog import load_benchmark_cards + from .datasets import available_datasets + from .model_catalog import card_by_registry_name, load_model_cards + + issues: List[str] = [] + registered = set(available_datasets()) + benchmark_map = {card.slug: card for card in load_benchmark_cards()} + model_map = card_by_registry_name(load_model_cards()) + + for card in cards: + if card.inspection is not None and card.inspection.module is not None: + module_path = REPO_ROOT / (card.inspection.module.replace(".", "/") + ".py") + if not module_path.exists(): + issues.append( + f"Dataset card '{card.slug}' points to missing inspection module " + f"'{card.inspection.module}'." + ) + if card.registry and card.registry.name not in registered: + issues.append( + f"Dataset card '{card.slug}' points to unknown dataset registry entry " + f"'{card.registry.name}'." + ) + for slug in card.related_benchmarks: + if slug not in benchmark_map: + issues.append( + f"Dataset card '{card.slug}' links unknown benchmark '{slug}'." 
+ ) + for model_name in card.related_models: + if model_name not in model_map: + issues.append( + f"Dataset card '{card.slug}' links unknown model '{model_name}'." + ) + return issues + + +__all__ = [ + "API_PAGE_PATH", + "DATASET_CARDS_DIR", + "DATASET_PAGE_PATH", + "DatasetCard", + "dataset_catalog_alignment_issues", + "load_dataset_cards", + "render_dataset_api_page", + "render_dataset_detail_page", + "render_dataset_page", + "rendered_dataset_docs", + "sync_generated_dataset_docs", +] diff --git a/pyhazards/datasets/__init__.py b/pyhazards/datasets/__init__.py new file mode 100644 index 00000000..dc6a4086 --- /dev/null +++ b/pyhazards/datasets/__init__.py @@ -0,0 +1,92 @@ +from .base import DataBundle, DataSplit, Dataset, FeatureSpec, LabelSpec +from .earthquake import ( + AEFADataset, + PickBenchmarkWaveformDataset, + SeisBenchWaveformDataset, + SyntheticEarthquakeForecastDataset, + SyntheticEarthquakeWaveformDataset, +) +from .flood import ( + CaravanStreamflowDataset, + FloodCastBenchInundationDataset, + HydroBenchStreamflowDataset, + SyntheticFloodInundationDataset, + SyntheticFloodStreamflowDataset, + WaterBenchStreamflowDataset, +) +from .fpa_fod import FPAFODTabularDataset, FPAFODWeeklyDataset +from .graph import GraphTemporalDataset, graph_collate +from .registry import available_datasets, load_dataset, register_dataset +from .tc import ( + IBTrACSTropicalCycloneDataset, + SyntheticTropicalCycloneDataset, + TCBenchAlphaDataset, + TropiCycloneNetDataset, +) +from .wildfire import ( + SyntheticWildfireSpreadDataset, + SyntheticWildfireSpreadTemporalDataset, + TrackOSplitConfig, + WildfireTrackO2024RasterDataset, + WildfireTrackO2024TabularDataset, + WildfireTrackO2024TemporalDataset, +) + +__all__ = [ + "DataBundle", + "DataSplit", + "Dataset", + "FeatureSpec", + "LabelSpec", + "AEFADataset", + "PickBenchmarkWaveformDataset", + "SeisBenchWaveformDataset", + "SyntheticEarthquakeForecastDataset", + "SyntheticEarthquakeWaveformDataset", + 
"CaravanStreamflowDataset", + "FloodCastBenchInundationDataset", + "HydroBenchStreamflowDataset", + "SyntheticFloodInundationDataset", + "SyntheticFloodStreamflowDataset", + "WaterBenchStreamflowDataset", + "FPAFODTabularDataset", + "FPAFODWeeklyDataset", + "available_datasets", + "load_dataset", + "register_dataset", + "GraphTemporalDataset", + "graph_collate", + "IBTrACSTropicalCycloneDataset", + "SyntheticTropicalCycloneDataset", + "TCBenchAlphaDataset", + "TropiCycloneNetDataset", + "SyntheticWildfireSpreadDataset", + "SyntheticWildfireSpreadTemporalDataset", + "TrackOSplitConfig", + "WildfireTrackO2024RasterDataset", + "WildfireTrackO2024TabularDataset", + "WildfireTrackO2024TemporalDataset", +] + +register_dataset(SyntheticEarthquakeForecastDataset.name, SyntheticEarthquakeForecastDataset) +register_dataset(SyntheticEarthquakeWaveformDataset.name, SyntheticEarthquakeWaveformDataset) +register_dataset(SeisBenchWaveformDataset.name, SeisBenchWaveformDataset) +register_dataset(PickBenchmarkWaveformDataset.name, PickBenchmarkWaveformDataset) +register_dataset(AEFADataset.name, AEFADataset) +register_dataset(SyntheticFloodInundationDataset.name, SyntheticFloodInundationDataset) +register_dataset(SyntheticFloodStreamflowDataset.name, SyntheticFloodStreamflowDataset) +register_dataset(CaravanStreamflowDataset.name, CaravanStreamflowDataset) +register_dataset(WaterBenchStreamflowDataset.name, WaterBenchStreamflowDataset) +register_dataset(HydroBenchStreamflowDataset.name, HydroBenchStreamflowDataset) +register_dataset(FloodCastBenchInundationDataset.name, FloodCastBenchInundationDataset) +register_dataset(FPAFODTabularDataset.name, FPAFODTabularDataset) +register_dataset(FPAFODWeeklyDataset.name, FPAFODWeeklyDataset) +register_dataset(SyntheticTropicalCycloneDataset.name, SyntheticTropicalCycloneDataset) +register_dataset(IBTrACSTropicalCycloneDataset.name, IBTrACSTropicalCycloneDataset) +register_dataset(TCBenchAlphaDataset.name, TCBenchAlphaDataset) 
+register_dataset(TropiCycloneNetDataset.name, TropiCycloneNetDataset) +register_dataset(SyntheticWildfireSpreadDataset.name, SyntheticWildfireSpreadDataset) +register_dataset(SyntheticWildfireSpreadTemporalDataset.name, SyntheticWildfireSpreadTemporalDataset) +register_dataset(WildfireTrackO2024RasterDataset.name, WildfireTrackO2024RasterDataset) +register_dataset(WildfireTrackO2024TabularDataset.name, WildfireTrackO2024TabularDataset) +register_dataset(WildfireTrackO2024TemporalDataset.name, WildfireTrackO2024TemporalDataset) diff --git a/pyhazards/datasets/_generic_inspection.py b/pyhazards/datasets/_generic_inspection.py new file mode 100644 index 00000000..b17460c2 --- /dev/null +++ b/pyhazards/datasets/_generic_inspection.py @@ -0,0 +1,61 @@ +from __future__ import annotations + +import argparse +from pathlib import Path + + +def run_generic_dataset_inspection( + dataset_name: str, + dataset_doc_url: str, + argv: list[str] | None = None, +) -> int: + """ + Lightweight inspection entrypoint for datasets without a dedicated parser yet. + This keeps module paths stable and callable from CLI. + """ + parser = argparse.ArgumentParser( + prog=f"python -m pyhazards.datasets.{dataset_name}.inspection", + description=f"Inspect local {dataset_name} dataset files.", + ) + parser.add_argument( + "--path", + default=None, + help="Path to a local file or directory for this dataset.", + ) + parser.add_argument( + "--max-items", + type=int, + default=10, + help="Maximum number of directory entries to print (if --path is a directory).", + ) + args = parser.parse_args(argv) + + print(f"[INFO] Dataset inspection entrypoint for '{dataset_name}' is callable.") + print(f"[INFO] Reference: {dataset_doc_url}") + + if args.path is None: + print("[INFO] No --path provided. 
Pass --path to validate local files.") + return 0 + + path = Path(args.path).expanduser().resolve() + if not path.exists(): + print(f"[ERROR] Path does not exist: {path}") + return 2 + + if path.is_file(): + print(f"[OK] File exists: {path}") + print(f"[OK] Size (bytes): {path.stat().st_size}") + return 0 + + files = sorted([p for p in path.iterdir() if p.is_file()]) + dirs = sorted([p for p in path.iterdir() if p.is_dir()]) + print(f"[OK] Directory exists: {path}") + print(f"[OK] Files: {len(files)} | Subdirectories: {len(dirs)}") + + if files: + print("[INFO] Sample files:") + for p in files[: args.max_items]: + print(f" - {p.name}") + + return 0 + diff --git a/pyhazards/datasets/base.py b/pyhazards/datasets/base.py new file mode 100644 index 00000000..e99a7a49 --- /dev/null +++ b/pyhazards/datasets/base.py @@ -0,0 +1,84 @@ +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional, Protocol + + +@dataclass +class FeatureSpec: + """Describes input features (shapes, dtypes, normalization).""" + input_dim: Optional[int] = None + channels: Optional[int] = None + description: Optional[str] = None + extra: Dict[str, Any] = field(default_factory=dict) + + +@dataclass +class LabelSpec: + """Describes labels/targets for downstream tasks.""" + num_targets: Optional[int] = None + task_type: str = "regression" # classification|regression|segmentation + description: Optional[str] = None + extra: Dict[str, Any] = field(default_factory=dict) + + +@dataclass +class DataSplit: + """Container for a single split.""" + inputs: Any + targets: Any + metadata: Dict[str, Any] = field(default_factory=dict) + + +@dataclass +class DataBundle: + """ + Bundle of train/val/test splits plus metadata. + Keeps feature/label specs to make model construction easy. 
+ """ + splits: Dict[str, DataSplit] + feature_spec: FeatureSpec + label_spec: LabelSpec + metadata: Dict[str, Any] = field(default_factory=dict) + + def get_split(self, name: str) -> DataSplit: + if name not in self.splits: + raise KeyError(f"Split '{name}' not found. Available: {list(self.splits.keys())}") + return self.splits[name] + + +class Transform(Protocol): + """Callable data transform.""" + + def __call__(self, bundle: DataBundle) -> DataBundle: + ... + + +class Dataset: + """ + Base class for hazard datasets. + Subclasses should load data and return a DataBundle with splits ready for training. + """ + + name: str = "base" + + def __init__(self, cache_dir: Optional[str] = None): + self.cache_dir = cache_dir + + def load(self, split: Optional[str] = None, transforms: Optional[List[Transform]] = None) -> DataBundle: + """ + Return a DataBundle. Optionally return a specific split if provided. + """ + bundle = self._load() + if transforms: + for t in transforms: + bundle = t(bundle) + if split: + return DataBundle( + splits={split: bundle.get_split(split)}, + feature_spec=bundle.feature_spec, + label_spec=bundle.label_spec, + metadata=bundle.metadata, + ) + return bundle + + def _load(self) -> DataBundle: + raise NotImplementedError("Subclasses must implement _load() to return a DataBundle.") diff --git a/pyhazards/datasets/dataloader/README.md b/pyhazards/datasets/dataloader/README.md new file mode 100644 index 00000000..6694ac13 --- /dev/null +++ b/pyhazards/datasets/dataloader/README.md @@ -0,0 +1,50 @@ +# Dataloader V3 + +Minimal API with a structured request object. 
+ +## API +```python +from dataloader_v3 import ( + GeoLoadInput, + load_data, + save_sample_h5, + load_sample_h5, + to_torch_batch, +) +``` + +## `GeoLoadInput` (required format) +- `data_sources: list[str]` + Example: `["FIRMS", "ERA5"]` +- `temporal_window: tuple[str, str]` + Format: `("YYYY-MM-DD", "YYYY-MM-DD")` or `("YYYY-MM-DD HH:MM:SS", "...")` +- `area_of_interest_bbox: tuple[float, float, float, float]` + Order: `(min_lon, min_lat, max_lon, max_lat)` + +## Optional +- `spatial_resolution_deg: float = 0.1` +- `root_dir: str = "/home/yangshuang"` +- `synthetic_time: bool = False` +- `temporal_cadence: str = "D"` (`"D"`, `"H"`, `"15min"`, ...) +- `target_hazards: list[str] | None = None` (if set and no mapping, default code is `1`) +- `label_source: str | None = None` (`"firms" | "noaa" | "mtbs"`, else auto-infer) +- `label_mapping: dict[str, int] | None = None` + +## Example +```python +from dataloader_v3 import GeoLoadInput, load_data, save_sample_h5 + +req = GeoLoadInput( + data_sources=["FIRMS"], + temporal_window=("2023-01-01", "2023-01-02"), + area_of_interest_bbox=(-87.8, 24.0, -79.8, 31.5), + spatial_resolution_deg=0.25, + synthetic_time=True, + temporal_cadence="D", + target_hazards=["wildfire"], +) +sample = load_data(req) +save_sample_h5(sample, "/home/yangshuang/output/sample_v3.h5") +sample2 = load_sample_h5("/home/yangshuang/output/sample_v3.h5") +x_t, y_t, meta = to_torch_batch(sample2) +``` diff --git a/pyhazards/datasets/dataloader/__init__.py b/pyhazards/datasets/dataloader/__init__.py new file mode 100644 index 00000000..5b4bff49 --- /dev/null +++ b/pyhazards/datasets/dataloader/__init__.py @@ -0,0 +1,11 @@ +from dataloader_v3.simple import GeoLoadInput, load_data, load_data_legacy +from dataloader_v3.io import load_sample_h5, save_sample_h5, to_torch_batch + +__all__ = [ + "load_data", + "load_data_legacy", + "GeoLoadInput", + "save_sample_h5", + "load_sample_h5", + "to_torch_batch", +] diff --git 
a/pyhazards/datasets/dataloader/example_with_synthetic.py b/pyhazards/datasets/dataloader/example_with_synthetic.py new file mode 100644 index 00000000..af90066d --- /dev/null +++ b/pyhazards/datasets/dataloader/example_with_synthetic.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 + +from pathlib import Path +import sys + +if __package__ is None or __package__ == "": + sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) + +from dataloader_v3 import GeoLoadInput, load_data +from dataloader.adapters import firms as firms_mod + + +def main() -> None: + # Demo-speed mode: avoid loading very large FIRMS JSON archives. + firms_mod.FIRMSAdapter.CSV_PATTERNS = ["firmsFL14-25/*.csv"] + firms_mod.FIRMSAdapter.JSON_PATTERNS = [] + + request = GeoLoadInput( + root_dir="/home/yangshuang", + data_sources=["FIRMS"], + temporal_window=("2023-01-01", "2023-01-02"), + area_of_interest_bbox=(-87.8, 24.0, -79.8, 31.5), + spatial_resolution_deg=0.25, + synthetic_time=True, + temporal_cadence="D", + target_hazards=["wildfire"], + ) + sample = load_data(request) + print("x shape:", sample.x.shape) + print("y shape:", sample.y.shape) + print("channels:", sample.meta.get("channels")) + print("x synthetic ratio:", float(sample.meta["x_synthetic_mask"].mean())) + print("y synthetic ratio:", float(sample.meta["y_synthetic_mask"].mean())) + + +if __name__ == "__main__": + main() diff --git a/pyhazards/datasets/dataloader/io.py b/pyhazards/datasets/dataloader/io.py new file mode 100644 index 00000000..fc3f9a36 --- /dev/null +++ b/pyhazards/datasets/dataloader/io.py @@ -0,0 +1,104 @@ +from __future__ import annotations + +from pathlib import Path +from typing import Any +import json + +import h5py +import numpy as np + +from dataloader.schema import Sample + + +def _to_serializable(value: Any) -> Any: + if isinstance(value, np.ndarray): + return value.tolist() + if isinstance(value, (np.floating, np.integer)): + return value.item() + if isinstance(value, (list, tuple)): + return 
[_to_serializable(v) for v in value] + if isinstance(value, dict): + return {str(k): _to_serializable(v) for k, v in value.items()} + return value + + +def _decode_h5_value(value: Any) -> Any: + if isinstance(value, (bytes, np.bytes_)): + return value.decode("utf-8") + if isinstance(value, np.ndarray): + if value.dtype.kind in ("S", "U", "O"): + out = [] + for v in value.tolist(): + if isinstance(v, bytes): + out.append(v.decode("utf-8")) + else: + out.append(v) + return out + return value + return value + + +def save_sample_h5(sample: Sample, output_path: str) -> str: + """Save a loaded sample to HDF5. + + Stores: + - /x : feature tensor, shape (T, C, H, W) + - /y : label tensor, shape (T, H, W) + - /meta/ : ndarray metadata when possible + - /meta_json : fallback JSON metadata string + """ + out = Path(output_path).expanduser().resolve() + out.parent.mkdir(parents=True, exist_ok=True) + + with h5py.File(out, "w") as f: + f.create_dataset("x", data=sample.x, compression="gzip") + f.create_dataset("y", data=sample.y, compression="gzip") + + meta_group = f.create_group("meta") + meta_json: dict[str, Any] = {} + for k, v in sample.meta.items(): + if isinstance(v, np.ndarray): + meta_group.create_dataset(str(k), data=v, compression="gzip") + elif isinstance(v, list) and all(isinstance(i, str) for i in v): + dt = h5py.string_dtype(encoding="utf-8") + meta_group.create_dataset(str(k), data=np.array(v, dtype=dt)) + else: + meta_json[str(k)] = _to_serializable(v) + + f.create_dataset("meta_json", data=json.dumps(meta_json, ensure_ascii=True)) + + return str(out) + + +def load_sample_h5(input_path: str) -> Sample: + """Load a Sample object from HDF5 produced by `save_sample_h5`.""" + path = Path(input_path).expanduser().resolve() + with h5py.File(path, "r") as f: + x = f["x"][()] + y = f["y"][()] + meta: dict[str, Any] = {} + + if "meta" in f: + for k in f["meta"].keys(): + meta[k] = _decode_h5_value(f["meta"][k][()]) + + if "meta_json" in f: + raw = f["meta_json"][()] + 
if isinstance(raw, bytes):
+                raw = raw.decode("utf-8")
+            if raw:
+                decoded = json.loads(raw)
+                for k, v in decoded.items():
+                    if k not in meta:
+                        meta[k] = v
+
+    return Sample(x=x, y=y, meta=meta)
+
+
+def to_torch_batch(sample: Sample):
+    """Convert Sample arrays to torch tensors: returns (x, y, meta)."""
+    import torch
+
+    x = torch.from_numpy(sample.x)
+    y = torch.from_numpy(sample.y)
+    return x, y, sample.meta diff --git a/pyhazards/datasets/dataloader/simple.py b/pyhazards/datasets/dataloader/simple.py new file mode 100644 index 00000000..53bcab85 --- /dev/null +++ b/pyhazards/datasets/dataloader/simple.py @@ -0,0 +1,151 @@ +from __future__ import annotations +
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Sequence, Tuple
+
+from .schema import Sample
+from dataloader_v2.simple import load_data as load_data_v2
+
+
+def _normalize_source_name(source: str) -> str:
+    mapping = {
+        "firms": "firms",
+        "firm": "firms",
+        "noaa": "noaa",
+        "mtbs": "mtbs",
+        "mtbs_fod": "mtbs",
+        "fod": "mtbs",
+        "era5": "era5",
+        "merra2": "merra2",
+        "landfire": "landfire",
+    }
+    return mapping.get(str(source).strip().lower(), str(source).strip().lower())
+
+
+def _infer_label_source(data_sources: Sequence[str]) -> Optional[str]:
+    normalized = {_normalize_source_name(d) for d in data_sources}
+    for candidate in ("firms", "noaa", "mtbs"):
+        if candidate in normalized:
+            return candidate
+    return None
+
+
+def _default_binary_mapping(target_hazards: Optional[List[str]]) -> Optional[Dict[str, int]]:
+    if not target_hazards:
+        return None
+    return {str(h).strip().lower(): 1 for h in target_hazards}
+
+
+@dataclass(init=False)
+class GeoLoadInput:
+    r"""The request input of :func:`pyhazards.datasets.dataloader.load_data`.
+
+    Args:
+        data_sources (Sequence[str]): Input datasets to load and fuse.
+        temporal_window (Tuple[str, str]): Query time window as
+            ``(start_date, end_date)``. 
+ area_of_interest_bbox (Tuple[float, float, float, float]): AOI bounds + as ``(min_lon, min_lat, max_lon, max_lat)``. + spatial_resolution_deg (float, optional): Target grid resolution in + degrees. (default: ``0.1``) + root_dir (str, optional): Root folder of local datasets. + (default: ``"/home/yangshuang"``) + synthetic_time (bool, optional): Enable temporal harmonization and + gap-filling. (default: ``False``) + temporal_cadence (str, optional): Target temporal cadence, such as + ``"D"``, ``"H"``, ``"15min"``. (default: ``"D"``) + target_hazards (List[str], optional): Hazards to encode in ``y``. + If provided without ``label_mapping``, defaults to binary encoding + (hazard -> ``1``). (default: ``None``) + label_source (str, optional): Label source override + (``"firms"``, ``"noaa"``, ``"mtbs"``). If omitted, inferred from + ``data_sources``. (default: ``None``) + label_mapping (Dict[str, int], optional): Explicit hazard-to-id mapping. + (default: ``None``) + """ + + data_sources: Sequence[str] + temporal_window: Tuple[str, str] + area_of_interest_bbox: Tuple[float, float, float, float] + spatial_resolution_deg: float + root_dir: str + synthetic_time: bool + temporal_cadence: str + target_hazards: Optional[List[str]] + label_source: Optional[str] + label_mapping: Optional[Dict[str, int]] + + def __init__( + self, + data_sources: Sequence[str], + temporal_window: Tuple[str, str], + area_of_interest_bbox: Tuple[float, float, float, float], + spatial_resolution_deg: float = 0.1, + root_dir: str = "/home/yangshuang", + synthetic_time: bool = False, + temporal_cadence: str = "D", + target_hazards: Optional[List[str]] = None, + label_source: Optional[str] = None, + label_mapping: Optional[Dict[str, int]] = None, + ) -> None: + self.data_sources = data_sources + self.temporal_window = temporal_window + self.area_of_interest_bbox = area_of_interest_bbox + self.spatial_resolution_deg = spatial_resolution_deg + self.root_dir = root_dir + self.synthetic_time = 
synthetic_time + self.temporal_cadence = temporal_cadence + self.target_hazards = target_hazards + self.label_source = label_source + self.label_mapping = label_mapping + + +def load_data(request: GeoLoadInput) -> Sample: + """Load a unified geospatial sample from a structured request object.""" + resolved_label_source = ( + str(request.label_source).strip().lower() + if request.label_source is not None + else _infer_label_source(request.data_sources) + ) + resolved_mapping = request.label_mapping or _default_binary_mapping(request.target_hazards) + + return load_data_v2( + data=request.data_sources, + date_range=request.temporal_window, + bbox=request.area_of_interest_bbox, + resolution=request.spatial_resolution_deg, + root_dir=request.root_dir, + synthetic_time=request.synthetic_time, + target_freq=request.temporal_cadence, + label_source=resolved_label_source, + label_hazards=request.target_hazards, + label_mapping=resolved_mapping, + ) + + +def load_data_legacy( + data: Sequence[str], + date_range: Tuple[str, str], + bbox: Tuple[float, float, float, float], + resolution: float = 0.1, + root_dir: str = "/home/yangshuang", + synthetic_time: bool = False, + target_freq: str = "D", + label_hazards: Optional[List[str]] = None, + label_source: Optional[str] = None, + label_mapping: Optional[Dict[str, int]] = None, +) -> Sample: + """Backward-compatible wrapper for users not yet migrated to GeoLoadInput.""" + req = GeoLoadInput( + data_sources=data, + temporal_window=date_range, + area_of_interest_bbox=bbox, + spatial_resolution_deg=resolution, + root_dir=root_dir, + synthetic_time=synthetic_time, + temporal_cadence=target_freq, + target_hazards=label_hazards, + label_source=label_source, + label_mapping=label_mapping, + ) + return load_data(req) diff --git a/pyhazards/datasets/earthquake/__init__.py b/pyhazards/datasets/earthquake/__init__.py new file mode 100644 index 00000000..183cae99 --- /dev/null +++ b/pyhazards/datasets/earthquake/__init__.py @@ -0,0 +1,218 @@ 
+from __future__ import annotations + +import math + +import torch + +from ..base import DataBundle, DataSplit, Dataset, FeatureSpec, LabelSpec + + +class SyntheticEarthquakeWaveformDataset(Dataset): + """Synthetic waveform dataset for earthquake phase-picking smoke runs.""" + + name = "earthquake_waveforms" + + def __init__( + self, + cache_dir: str | None = None, + samples: int = 96, + channels: int = 3, + length: int = 256, + micro: bool = False, + ): + super().__init__(cache_dir=cache_dir) + self.samples = 24 if micro else int(samples) + self.channels = int(channels) + self.length = int(length) + + def _load(self) -> DataBundle: + timeline = torch.linspace(0.0, 1.0, steps=self.length, dtype=torch.float32) + x = torch.zeros(self.samples, self.channels, self.length, dtype=torch.float32) + y = torch.zeros(self.samples, 2, dtype=torch.float32) + + for idx in range(self.samples): + p_pick = 32 + (idx % 40) + s_pick = min(self.length - 12, p_pick + 24 + (idx % 24)) + + for channel in range(self.channels): + phase = 0.5 * channel + base = torch.sin(2.0 * math.pi * (channel + 1) * timeline + phase) + pulse_p = torch.exp(-0.5 * ((torch.arange(self.length) - p_pick) / 6.0) ** 2) + pulse_s = 0.8 * torch.exp(-0.5 * ((torch.arange(self.length) - s_pick) / 8.0) ** 2) + x[idx, channel] = base + pulse_p + pulse_s + + y[idx, 0] = float(p_pick) + y[idx, 1] = float(s_pick) + + train_end = max(1, int(0.7 * self.samples)) + val_end = max(train_end + 1, int(0.85 * self.samples)) + splits = { + "train": DataSplit(x[:train_end], y[:train_end]), + "val": DataSplit(x[train_end:val_end], y[train_end:val_end]), + "test": DataSplit(x[val_end:], y[val_end:]), + } + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + channels=self.channels, + description="Synthetic multichannel seismic waveforms with Gaussian phase arrivals.", + extra={"length": self.length}, + ), + label_spec=LabelSpec( + num_targets=2, + task_type="regression", + description="P- and S-arrival sample 
indices.", + ), + metadata={ + "dataset": self.name, + "source_dataset": self.name, + "hazard_task": "earthquake.picking", + }, + ) + + +class SyntheticEarthquakeForecastDataset(Dataset): + """Synthetic wavefield dataset for earthquake forecasting smoke runs.""" + + name = "earthquake_forecast_synthetic" + + def __init__( + self, + cache_dir: str | None = None, + samples: int = 40, + channels: int = 3, + temporal_in: int = 5, + temporal_out: int = 4, + height: int = 12, + width: int = 10, + micro: bool = False, + ): + super().__init__(cache_dir=cache_dir) + self.samples = 10 if micro else int(samples) + self.channels = int(channels) + self.temporal_in = int(temporal_in) + self.temporal_out = int(temporal_out) + self.height = int(height) + self.width = int(width) + + def _load(self) -> DataBundle: + grid_y = torch.linspace(-1.0, 1.0, steps=self.height, dtype=torch.float32).view(self.height, 1) + grid_x = torch.linspace(-1.0, 1.0, steps=self.width, dtype=torch.float32).view(1, self.width) + total_steps = self.temporal_in + self.temporal_out + + x = torch.zeros( + self.samples, + self.channels, + self.temporal_in, + self.height, + self.width, + dtype=torch.float32, + ) + y = torch.zeros( + self.samples, + self.channels, + self.temporal_out, + self.height, + self.width, + dtype=torch.float32, + ) + + row_index = torch.arange(self.height, dtype=torch.float32).view(self.height, 1) + col_index = torch.arange(self.width, dtype=torch.float32).view(1, self.width) + + for idx in range(self.samples): + sequence = torch.zeros( + self.channels, + total_steps, + self.height, + self.width, + dtype=torch.float32, + ) + for step in range(total_steps): + center_r = 2.0 + ((idx + step) % max(3, self.height - 2)) + center_c = 1.0 + ((2 * idx + step) % max(2, self.width - 1)) + gaussian = torch.exp( + -0.18 * ((row_index - center_r) ** 2 + (col_index - center_c) ** 2) + ) + for channel in range(self.channels): + phase = 0.5 * channel + 0.2 * step + base = torch.sin( + math.pi * (channel 
+ 1) * grid_y + phase + ) + torch.cos(math.pi * (channel + 1) * grid_x - phase) + sequence[channel, step] = base + (0.6 + 0.1 * channel) * gaussian + + x[idx] = sequence[:, : self.temporal_in] + y[idx] = sequence[:, self.temporal_in :] + + train_end = max(1, int(0.7 * self.samples)) + val_end = max(train_end + 1, int(0.85 * self.samples)) + splits = { + "train": DataSplit(x[:train_end], y[:train_end]), + "val": DataSplit(x[train_end:val_end], y[train_end:val_end]), + "test": DataSplit(x[val_end:], y[val_end:]), + } + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + channels=self.channels, + description="Synthetic dense-grid wavefield history tensors for forecasting benchmarks.", + extra={ + "temporal_in": self.temporal_in, + "temporal_out": self.temporal_out, + "height": self.height, + "width": self.width, + }, + ), + label_spec=LabelSpec( + num_targets=self.channels * self.temporal_out, + task_type="regression", + description="Future dense-grid wavefield frames over the forecast horizon.", + ), + metadata={ + "dataset": self.name, + "source_dataset": self.name, + "hazard_task": "earthquake.forecasting", + }, + ) + + +class SeisBenchWaveformDataset(SyntheticEarthquakeWaveformDataset): + """Synthetic-backed adapter with the SeisBench public dataset surface.""" + + name = "seisbench_waveforms" + + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "SeisBench", "source_dataset": self.name}) + return bundle + + +class PickBenchmarkWaveformDataset(SyntheticEarthquakeWaveformDataset): + """Synthetic-backed adapter with the pick-benchmark public dataset surface.""" + + name = "pick_benchmark_waveforms" + + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "pick-benchmark", "source_dataset": self.name}) + return bundle + + +class AEFADataset(SyntheticEarthquakeForecastDataset): + """Synthetic-backed adapter for AEFA-style earthquake forecasting inputs.""" + + name = 
"aefa_forecast" + + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "AEFA", "source_dataset": self.name}) + return bundle + + +__all__ = [ + "AEFADataset", + "PickBenchmarkWaveformDataset", + "SeisBenchWaveformDataset", + "SyntheticEarthquakeForecastDataset", + "SyntheticEarthquakeWaveformDataset", +] diff --git a/pyhazards/datasets/era5/__init__.py b/pyhazards/datasets/era5/__init__.py new file mode 100644 index 00000000..6a6b23fe --- /dev/null +++ b/pyhazards/datasets/era5/__init__.py @@ -0,0 +1,2 @@ +"""ERA5 dataset utilities.""" + diff --git a/pyhazards/datasets/era5/inspection.py b/pyhazards/datasets/era5/inspection.py new file mode 100644 index 00000000..525b35c0 --- /dev/null +++ b/pyhazards/datasets/era5/inspection.py @@ -0,0 +1,77 @@ +from __future__ import annotations + +import argparse +from pathlib import Path + +import h5py +import xarray as xr + + +def _default_era5_path() -> Path: + return Path(__file__).resolve().parents[2] / "data" / "era5_subset" + + +def build_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + prog="python -m pyhazards.datasets.era5.inspection", + description="Inspect local ERA5 NetCDF files.", + ) + parser.add_argument( + "--path", + default=str(_default_era5_path()), + help="Path to directory containing ERA5 NetCDF files.", + ) + parser.add_argument( + "--max-vars", + type=int, + default=20, + help="Maximum number of variable names to print.", + ) + return parser + + +def main(argv: list[str] | None = None) -> int: + args = build_parser().parse_args(argv) + data_path = Path(args.path).expanduser().resolve() + + files = sorted(data_path.glob("*.nc")) + if not files: + print(f"[ERROR] No ERA5 NetCDF files found in: {data_path}") + return 2 + + print(f"[INFO] ERA5 files found: {len(files)}") + try: + ds = xr.open_mfdataset(files, combine="by_coords", chunks={}) + try: + print("[OK] Dataset opened successfully (xarray).") + print(f"[OK] Dimensions: 
{dict(ds.sizes)}") + vars_list = list(ds.data_vars) + print(f"[OK] Data variables: {len(vars_list)}") + for name in vars_list[: args.max_vars]: + print(f" - {name}") + finally: + ds.close() + return 0 + except Exception as exc: + print(f"[WARN] xarray open failed ({exc}). Falling back to h5py inspection.") + + sample = files[0] + with h5py.File(sample, "r") as h5: + datasets: list[str] = [] + + def collect(name: str, obj) -> None: + if isinstance(obj, h5py.Dataset): + datasets.append(name) + + h5.visititems(collect) + + print(f"[OK] HDF5/NetCDF file opened: {sample.name}") + print(f"[OK] Datasets discovered: {len(datasets)}") + for name in datasets[: args.max_vars]: + print(f" - {name}") + + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/pyhazards/datasets/firms/__init__.py b/pyhazards/datasets/firms/__init__.py new file mode 100644 index 00000000..45d2efc7 --- /dev/null +++ b/pyhazards/datasets/firms/__init__.py @@ -0,0 +1,2 @@ +"""FIRMS dataset utilities.""" + diff --git a/pyhazards/datasets/firms/inspection.py b/pyhazards/datasets/firms/inspection.py new file mode 100644 index 00000000..bbdb101f --- /dev/null +++ b/pyhazards/datasets/firms/inspection.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +from pyhazards.datasets._generic_inspection import run_generic_dataset_inspection + + +def main(argv: list[str] | None = None) -> int: + return run_generic_dataset_inspection( + dataset_name="firms", + dataset_doc_url="https://firms.modaps.eosdis.nasa.gov/", + argv=argv, + ) + + +if __name__ == "__main__": + raise SystemExit(main()) + diff --git a/pyhazards/datasets/flood/__init__.py b/pyhazards/datasets/flood/__init__.py new file mode 100644 index 00000000..8b225c1a --- /dev/null +++ b/pyhazards/datasets/flood/__init__.py @@ -0,0 +1,193 @@ +from __future__ import annotations + +import torch + +from ..base import DataBundle, DataSplit, Dataset, FeatureSpec, LabelSpec +from ..graph import GraphTemporalDataset + + +class 
SyntheticFloodStreamflowDataset(Dataset): + """Synthetic graph-temporal flood dataset for streamflow smoke runs.""" + + name = "flood_streamflow_synthetic" + + def __init__( + self, + cache_dir: str | None = None, + samples: int = 40, + history: int = 4, + nodes: int = 6, + features: int = 2, + micro: bool = False, + ): + super().__init__(cache_dir=cache_dir) + self.samples = 12 if micro else int(samples) + self.history = int(history) + self.nodes = int(nodes) + self.features = int(features) + + def _make_split(self, x: torch.Tensor, y: torch.Tensor, adj: torch.Tensor) -> DataSplit: + dataset = GraphTemporalDataset(x, y, adjacency=adj) + return DataSplit(inputs=dataset, targets=None) + + def _load(self) -> DataBundle: + x = torch.randn(self.samples, self.history, self.nodes, self.features, dtype=torch.float32) + adjacency = torch.eye(self.nodes, dtype=torch.float32) + adjacency += torch.diag(torch.ones(self.nodes - 1), diagonal=1) + adjacency += torch.diag(torch.ones(self.nodes - 1), diagonal=-1) + y = x[:, -1, :, :1] * 0.7 + 0.1 + + train_end = max(1, int(0.7 * self.samples)) + val_end = max(train_end + 1, int(0.85 * self.samples)) + splits = { + "train": self._make_split(x[:train_end], y[:train_end], adjacency), + "val": self._make_split(x[train_end:val_end], y[train_end:val_end], adjacency), + "test": self._make_split(x[val_end:], y[val_end:], adjacency), + } + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + input_dim=self.features, + description="Synthetic node features for streamflow forecasting on a line graph.", + extra={"nodes": self.nodes, "history": self.history}, + ), + label_spec=LabelSpec( + num_targets=1, + task_type="regression", + description="Next-step nodewise streamflow target.", + ), + metadata={ + "dataset": self.name, + "source_dataset": self.name, + "hazard_task": "flood.streamflow", + }, + ) + + +class SyntheticFloodInundationDataset(Dataset): + """Synthetic raster dataset for flood inundation smoke runs.""" + + name = 
"flood_inundation_synthetic" + + def __init__( + self, + cache_dir: str | None = None, + samples: int = 40, + history: int = 4, + channels: int = 3, + height: int = 16, + width: int = 16, + micro: bool = False, + ): + super().__init__(cache_dir=cache_dir) + self.samples = 12 if micro else int(samples) + self.history = int(history) + self.channels = int(channels) + self.height = int(height) + self.width = int(width) + + def _load(self) -> DataBundle: + x = torch.randn( + self.samples, + self.history, + self.channels, + self.height, + self.width, + dtype=torch.float32, + ) + y = torch.zeros(self.samples, 1, self.height, self.width, dtype=torch.float32) + rows = torch.arange(self.height, dtype=torch.float32).view(self.height, 1) + cols = torch.arange(self.width, dtype=torch.float32).view(1, self.width) + + for idx in range(self.samples): + waterline = float(self.height // 3 + (idx % max(2, self.height // 3))) + slope = 0.25 + 0.05 * (idx % 4) + rain_band = rows >= (waterline - slope * cols) + depth = rain_band.float() * (0.4 + 0.1 * (idx % 3)) + y[idx, 0] = depth + x[idx, -1, 0] = x[idx, -1, 0] + depth + x[idx, :, 1] = x[idx, :, 1] + torch.linspace(0.0, 1.0, self.history).view(self.history, 1, 1) + + train_end = max(1, int(0.7 * self.samples)) + val_end = max(train_end + 1, int(0.85 * self.samples)) + splits = { + "train": DataSplit(x[:train_end], y[:train_end]), + "val": DataSplit(x[train_end:val_end], y[train_end:val_end]), + "test": DataSplit(x[val_end:], y[val_end:]), + } + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + channels=self.channels, + description="Synthetic rainfall, terrain, and antecedent-state tensors for inundation forecasting.", + extra={ + "history": self.history, + "height": self.height, + "width": self.width, + }, + ), + label_spec=LabelSpec( + num_targets=1, + task_type="regression", + description="Next-horizon inundation depth raster.", + ), + metadata={ + "dataset": self.name, + "source_dataset": self.name, + "hazard_task": 
"flood.inundation", + }, + ) + + +class CaravanStreamflowDataset(SyntheticFloodStreamflowDataset): + """Synthetic-backed streamflow adapter for Caravan-style smoke runs.""" + + name = "caravan_streamflow" + + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "Caravan", "source_dataset": self.name}) + return bundle + + +class WaterBenchStreamflowDataset(SyntheticFloodStreamflowDataset): + """Synthetic-backed streamflow adapter for WaterBench-style smoke runs.""" + + name = "waterbench_streamflow" + + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "WaterBench", "source_dataset": self.name}) + return bundle + + +class HydroBenchStreamflowDataset(SyntheticFloodStreamflowDataset): + """Synthetic-backed streamflow adapter for HydroBench diagnostics.""" + + name = "hydrobench_streamflow" + + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "HydroBench", "source_dataset": self.name}) + return bundle + + +class FloodCastBenchInundationDataset(SyntheticFloodInundationDataset): + """Synthetic-backed inundation adapter for FloodCastBench-style smoke runs.""" + + name = "floodcastbench_inundation" + + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "FloodCastBench", "source_dataset": self.name}) + return bundle + + +__all__ = [ + "CaravanStreamflowDataset", + "FloodCastBenchInundationDataset", + "HydroBenchStreamflowDataset", + "SyntheticFloodInundationDataset", + "SyntheticFloodStreamflowDataset", + "WaterBenchStreamflowDataset", +] diff --git a/pyhazards/datasets/fpa_fod.py b/pyhazards/datasets/fpa_fod.py new file mode 100644 index 00000000..896e2871 --- /dev/null +++ b/pyhazards/datasets/fpa_fod.py @@ -0,0 +1,654 @@ +from __future__ import annotations + +import argparse +import math +import os +from pathlib import Path +from typing import Any, Dict, List, Literal, Optional, Tuple + +import 
numpy as np +import torch + +from .base import DataBundle, DataSplit, Dataset, FeatureSpec, LabelSpec + +CauseMode = Literal["paper5", "keep_all"] +Region = Literal["US", "CA"] +WeeklyFeatures = Literal["counts", "counts+time"] + +PAPER5_CAUSES = [ + "Debris and open burning", + "Natural", + "Arson/incendiarism", + "Equipment and vehicle use", + "Recreation and ceremony", +] + +CAUSE_SYNONYMS = { + "Debris/open burning": "Debris and open burning", + "Debris and Open Burning": "Debris and open burning", + "Arson": "Arson/incendiarism", + "Equipment/vehicle use": "Equipment and vehicle use", + "Recreation/ceremony": "Recreation and ceremony", +} + +SIZE_GROUPS = ["A", "B", "C", "D", "EFG"] + + +def _require_pandas(): + try: + import pandas as pd + except ImportError as exc: + raise ImportError( + "FPA-FOD dataset support requires pandas. Install pandas or xarray's pandas dependency first." + ) from exc + return pd + + +def _minmax_fit(x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + mins = np.nanmin(x, axis=0) + maxs = np.nanmax(x, axis=0) + maxs = np.where(maxs == mins, mins + 1.0, maxs) + return mins, maxs + + +def _minmax_apply(x: np.ndarray, mins: np.ndarray, maxs: np.ndarray) -> np.ndarray: + return (x - mins) / (maxs - mins) + + +def _stratified_split_indices( + y: np.ndarray, + train_ratio: float, + val_ratio: float, + test_ratio: float, + seed: int, +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + if not math.isclose(train_ratio + val_ratio + test_ratio, 1.0, rel_tol=0.0, abs_tol=1e-6): + raise ValueError("train_ratio + val_ratio + test_ratio must equal 1.0.") + + rng = np.random.default_rng(seed) + train_idx: List[int] = [] + val_idx: List[int] = [] + test_idx: List[int] = [] + + for class_id in np.unique(y): + idx = np.where(y == class_id)[0] + rng.shuffle(idx) + n = len(idx) + n_train = int(round(train_ratio * n)) + n_val = int(round(val_ratio * n)) + n_test = max(0, n - n_train - n_val) + + train_idx.extend(idx[:n_train].tolist()) + 
val_idx.extend(idx[n_train : n_train + n_val].tolist()) + test_idx.extend(idx[n_train + n_val : n_train + n_val + n_test].tolist()) + + train = np.array(train_idx, dtype=np.int64) + val = np.array(val_idx, dtype=np.int64) + test = np.array(test_idx, dtype=np.int64) + rng.shuffle(train) + rng.shuffle(val) + rng.shuffle(test) + return train, val, test + + +def _chronological_split_indices( + n: int, + train_ratio: float, + val_ratio: float, + test_ratio: float, +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + if not math.isclose(train_ratio + val_ratio + test_ratio, 1.0, rel_tol=0.0, abs_tol=1e-6): + raise ValueError("train_ratio + val_ratio + test_ratio must equal 1.0.") + + n_train = int(math.floor(train_ratio * n)) + n_val = int(math.floor(val_ratio * n)) + n_test = n - n_train - n_val + + train = np.arange(0, n_train, dtype=np.int64) + val = np.arange(n_train, n_train + n_val, dtype=np.int64) + test = np.arange(n_train + n_val, n_train + n_val + n_test, dtype=np.int64) + return train, val, test + + +def _load_fpa_fod_table(path: str): + pd = _require_pandas() + + if not os.path.exists(path): + raise FileNotFoundError(f"Data path not found: {path}") + + ext = os.path.splitext(path)[1].lower() + if ext in {".sqlite", ".db"}: + import sqlite3 + + con = sqlite3.connect(path) + try: + return pd.read_sql_query("SELECT * FROM Fires", con) + finally: + con.close() + if ext == ".csv": + return pd.read_csv(path) + if ext == ".parquet": + return pd.read_parquet(path) + raise ValueError(f"Unsupported file extension for FPA-FOD data: {ext}") + + +def _coerce_required_columns(df, required: List[str]): + missing = [column for column in required if column not in df.columns] + if missing: + raise ValueError(f"Missing required columns: {missing}") + return df + + +def _encode_states(states) -> Tuple[np.ndarray, Dict[str, int]]: + values = sorted(states.dropna().astype(str).unique().tolist()) + mapping = {value: index for index, value in enumerate(values)} + encoded = 
states.astype(str).map(mapping).astype("int64").to_numpy() + return encoded, mapping + + +def _normalize_cause_strings(values): + return values.astype(str).str.strip().map(lambda value: CAUSE_SYNONYMS.get(value, value)) + + +def _impute_numeric(df, columns: List[str]): + pd = _require_pandas() + filled = df.copy() + medians: Dict[str, float] = {} + for column in columns: + series = pd.to_numeric(filled[column], errors="coerce") + median = float(series.median()) + if math.isnan(median): + median = 0.0 + medians[column] = median + filled[column] = series.fillna(median) + return filled, medians + + +def _micro_tabular_df(seed: int = 1337, n: int = 200): + pd = _require_pandas() + + rng = np.random.default_rng(seed) + states = np.array(["CA", "TX", "FL", "NY", "WA", "CO"]) + causes = np.array(PAPER5_CAUSES) + + years = rng.integers(2010, 2019, size=n) + discovery_doy = rng.integers(1, 366, size=n) + discovery_time = rng.integers(0, 2400, size=n) + containment_doy = np.clip(discovery_doy + rng.integers(0, 30, size=n), 1, 366) + containment_time = rng.integers(0, 2400, size=n) + + state = rng.choice(states, size=n, replace=True) + latitude = rng.uniform(25.0, 49.0, size=n) + longitude = rng.uniform(-124.0, -67.0, size=n) + california_mask = state == "CA" + latitude[california_mask] = rng.uniform(32.0, 42.0, size=california_mask.sum()) + longitude[california_mask] = rng.uniform(-124.5, -114.0, size=california_mask.sum()) + + cause = rng.choice(causes, size=n, replace=True) + size_class = rng.choice( + ["A", "B", "C", "D", "E", "F", "G"], + size=n, + p=[0.38, 0.42, 0.12, 0.04, 0.02, 0.01, 0.01], + ) + + return pd.DataFrame( + { + "FIRE_YEAR": years, + "STATE": state, + "DISCOVERY_DOY": discovery_doy, + "DISCOVERY_TIME": discovery_time, + "CONT_DOY": containment_doy, + "CONT_TIME": containment_time, + "LATITUDE": latitude, + "LONGITUDE": longitude, + "NWCG_GENERAL_CAUSE": cause, + "FIRE_SIZE_CLASS": size_class, + } + ) + + +def _micro_weekly_counts(seed: int = 1337, weeks: 
class FPAFODTabularDataset(Dataset):
    """Incident-level tabular dataset for wildfire cause or size classification."""

    name = "fpa_fod_tabular"

    def __init__(
        self,
        task: Literal["cause", "size"] = "cause",
        region: Region = "US",
        cause_mode: CauseMode = "paper5",
        data_path: Optional[str] = None,
        micro: bool = False,
        normalize: bool = False,
        train_ratio: float = 0.6,
        val_ratio: float = 0.2,
        test_ratio: float = 0.2,
        seed: int = 1337,
        cache_dir: Optional[str] = None,
    ):
        """Configure the dataset.

        Args:
            task: "cause" classifies NWCG_GENERAL_CAUSE; "size" classifies
                grouped FIRE_SIZE_CLASS (A/B/C/D/EFG).
            region: "US" keeps all rows; "CA" keeps only STATE == "CA".
            cause_mode: "paper5" restricts to PAPER5_CAUSES after synonym
                normalization; otherwise all causes are kept.
            data_path: Path to the FPA-FOD table; required when micro=False.
            micro: Use the deterministic synthetic micro table instead of a file.
            normalize: Apply min/max scaling fit on the train split only.
            train_ratio/val_ratio/test_ratio: Stratified split fractions.
            seed: RNG seed for the micro table and split shuffling.
            cache_dir: Forwarded to the Dataset base class.
        """
        super().__init__(cache_dir=cache_dir)
        self.task = task
        self.region = region
        self.cause_mode = cause_mode
        self.data_path = data_path
        self.micro = micro
        self.normalize = normalize
        self.train_ratio = train_ratio
        self.val_ratio = val_ratio
        self.test_ratio = test_ratio
        self.seed = seed

    def _load(self) -> DataBundle:
        """Load, featurize, label, split, and package the incident table.

        Raises:
            ValueError: missing data_path (non-micro) or unsupported task.
            RuntimeError: cause_mode='paper5' filtered out every row.
        """
        if self.micro:
            df = _micro_tabular_df(seed=self.seed)
            source = "micro_synthetic"
        else:
            if not self.data_path:
                raise ValueError("data_path is required when micro=False")
            # _load_fpa_fod_table / _coerce_required_columns are module helpers
            # defined earlier in this file (not visible in this chunk).
            df = _load_fpa_fod_table(self.data_path)
            source = self.data_path

        required = [
            "FIRE_YEAR",
            "STATE",
            "DISCOVERY_DOY",
            "DISCOVERY_TIME",
            "CONT_DOY",
            "CONT_TIME",
            "LATITUDE",
            "LONGITUDE",
            "NWCG_GENERAL_CAUSE",
            "FIRE_SIZE_CLASS",
        ]
        df = _coerce_required_columns(df, required)

        if self.region == "CA":
            df = df[df["STATE"].astype(str) == "CA"].copy()

        # Median-impute the numeric columns; medians are recorded in metadata.
        df, numeric_impute = _impute_numeric(
            df,
            columns=[
                "FIRE_YEAR",
                "DISCOVERY_DOY",
                "DISCOVERY_TIME",
                "CONT_DOY",
                "CONT_TIME",
                "LATITUDE",
                "LONGITUDE",
            ],
        )

        # STATE is label-encoded and appended as a single numeric feature.
        state_encoded, state_mapping = _encode_states(df["STATE"])
        numeric_features = [
            "FIRE_YEAR",
            "DISCOVERY_DOY",
            "DISCOVERY_TIME",
            "CONT_DOY",
            "CONT_TIME",
            "LATITUDE",
            "LONGITUDE",
        ]
        feature_names = numeric_features + ["STATE_ID"]
        x_numeric = df[numeric_features].to_numpy(dtype=np.float32)
        x = np.concatenate([x_numeric, state_encoded.astype(np.float32).reshape(-1, 1)], axis=1)

        metadata: Dict[str, Any] = {
            "dataset": self.name,
            "source": source,
            "region": self.region,
            "task": self.task,
            "micro": self.micro,
            "seed": self.seed,
            "state_mapping": state_mapping,
            "numeric_impute_medians": numeric_impute,
        }

        if self.task == "cause":
            causes = _normalize_cause_strings(df["NWCG_GENERAL_CAUSE"])
            if self.cause_mode == "paper5":
                # Keep only the five paper causes; rows dropped here must also
                # be dropped from x so features and labels stay aligned.
                mask = causes.isin(PAPER5_CAUSES)
                metadata["dropped_non_paper5_causes"] = int((~mask).sum())
                if int(mask.sum()) == 0:
                    raise RuntimeError("cause_mode='paper5' kept zero rows after cause normalization.")
                causes = causes.loc[mask]
                x = x[mask.to_numpy()]

            # Class ids are assigned in sorted label order for determinism.
            classes = sorted(causes.unique().tolist())
            label_mapping = {label: index for index, label in enumerate(classes)}
            y = causes.map(label_mapping).astype("int64").to_numpy()
            train_idx, val_idx, test_idx = _stratified_split_indices(
                y=y,
                train_ratio=self.train_ratio,
                val_ratio=self.val_ratio,
                test_ratio=self.test_ratio,
                seed=self.seed,
            )
            label_spec = LabelSpec(
                num_targets=len(classes),
                task_type="classification",
                description="NWCG_GENERAL_CAUSE mapped to class ids.",
                extra={"classes": classes, "label_mapping": label_mapping},
            )
            metadata["label_mapping"] = label_mapping
        elif self.task == "size":
            # Collapse the rare E/F/G classes into a single EFG bucket.
            grouped = df["FIRE_SIZE_CLASS"].astype(str).str.strip().replace({"E": "EFG", "F": "EFG", "G": "EFG"})
            mask = grouped.isin(SIZE_GROUPS)
            metadata["dropped_unknown_size_class"] = int((~mask).sum())
            grouped = grouped.loc[mask]
            x = x[mask.to_numpy()]
            label_mapping = {label: index for index, label in enumerate(SIZE_GROUPS)}
            y = grouped.map(label_mapping).astype("int64").to_numpy()
            train_idx, val_idx, test_idx = _stratified_split_indices(
                y=y,
                train_ratio=self.train_ratio,
                val_ratio=self.val_ratio,
                test_ratio=self.test_ratio,
                seed=self.seed,
            )
            label_spec = LabelSpec(
                num_targets=len(SIZE_GROUPS),
                task_type="classification",
                description="FIRE_SIZE_CLASS grouped into A/B/C/D/EFG and mapped to class ids.",
                extra={"classes": SIZE_GROUPS, "label_mapping": label_mapping},
            )
            metadata["label_mapping"] = label_mapping
        else:
            raise ValueError(f"Unsupported tabular task: {self.task}")

        if self.normalize:
            # Min/max statistics are fit on the train split only to avoid
            # leaking val/test information, then applied to all rows.
            mins, maxs = _minmax_fit(x[train_idx])
            x = _minmax_apply(x, mins, maxs).astype(np.float32)
            metadata["normalization"] = {"mins": mins.tolist(), "maxs": maxs.tolist()}
        else:
            metadata["normalization"] = None

        splits = {
            "train": DataSplit(
                inputs=torch.as_tensor(x[train_idx], dtype=torch.float32),
                targets=torch.as_tensor(y[train_idx], dtype=torch.long),
                metadata={"source": source},
            ),
            "val": DataSplit(
                inputs=torch.as_tensor(x[val_idx], dtype=torch.float32),
                targets=torch.as_tensor(y[val_idx], dtype=torch.long),
                metadata={"source": source},
            ),
            "test": DataSplit(
                inputs=torch.as_tensor(x[test_idx], dtype=torch.float32),
                targets=torch.as_tensor(y[test_idx], dtype=torch.long),
                metadata={"source": source},
            ),
        }

        return DataBundle(
            splits=splits,
            feature_spec=FeatureSpec(
                input_dim=int(splits["train"].inputs.shape[1]),
                description="Incident-level FPA-FOD features for classification.",
                extra={"feature_names": feature_names, "dtype": "float32"},
            ),
            label_spec=label_spec,
            metadata=metadata,
        )
class FPAFODWeeklyDataset(Dataset):
    """Weekly count forecasting dataset derived from FPA-FOD incident records."""

    name = "fpa_fod_weekly"

    def __init__(
        self,
        region: Region = "US",
        data_path: Optional[str] = None,
        micro: bool = False,
        lookback_weeks: int = 50,
        features: WeeklyFeatures = "counts",
        train_ratio: float = 0.6,
        val_ratio: float = 0.2,
        test_ratio: float = 0.2,
        seed: int = 1337,
        cache_dir: Optional[str] = None,
    ):
        """Configure the forecasting dataset.

        Args:
            region: "US" keeps all rows; "CA" keeps only STATE == "CA".
            data_path: Path to the FPA-FOD table; required when micro=False.
            micro: Use deterministic synthetic weekly counts instead of a file.
            lookback_weeks: Window length used to predict the next week.
            features: "counts" or "counts+time" (adds week-of-year sin/cos).
            train_ratio/val_ratio/test_ratio: Chronological split fractions.
            seed: RNG seed for the micro table.
            cache_dir: Forwarded to the Dataset base class.
        """
        super().__init__(cache_dir=cache_dir)
        self.region = region
        self.data_path = data_path
        self.micro = micro
        self.lookback_weeks = lookback_weeks
        self.features = features
        self.train_ratio = train_ratio
        self.val_ratio = val_ratio
        self.test_ratio = test_ratio
        self.seed = seed

    def _weekly_table(self):
        """Return (weekly counts DataFrame, source string).

        The frame has one row per W-MON week and one count column per size
        group in SIZE_GROUPS (missing groups are added as zero columns).
        """
        pd = _require_pandas()

        if self.micro:
            return _micro_weekly_counts(seed=self.seed, region=self.region), "micro_synthetic"

        if not self.data_path:
            raise ValueError("data_path is required when micro=False")

        df = _load_fpa_fod_table(self.data_path)
        required = ["FIRE_YEAR", "STATE", "DISCOVERY_DOY", "FIRE_SIZE_CLASS"]
        df = _coerce_required_columns(df, required)

        if self.region == "CA":
            df = df[df["STATE"].astype(str) == "CA"].copy()

        # Reconstruct the discovery date from year + day-of-year, then bin it
        # into Monday-anchored weeks.
        fire_year = pd.to_numeric(df["FIRE_YEAR"], errors="coerce")
        discovery_doy = pd.to_numeric(df["DISCOVERY_DOY"], errors="coerce").fillna(1)
        base = pd.to_datetime(fire_year.astype("Int64").astype(str) + "-01-01", errors="coerce")
        discovery_dt = base + pd.to_timedelta(discovery_doy.astype(int) - 1, unit="D")
        week_start = discovery_dt.dt.to_period("W-MON").dt.start_time
        # E/F/G are collapsed into EFG; anything else becomes NaN and is dropped.
        size_class = df["FIRE_SIZE_CLASS"].astype(str).str.strip().replace({"E": "EFG", "F": "EFG", "G": "EFG"})
        size_class = size_class.where(size_class.isin(SIZE_GROUPS), other=np.nan)

        weekly = (
            df.assign(_week_start=week_start, _size=size_class)
            .dropna(subset=["_week_start", "_size"])
            .groupby(["_week_start", "_size"])
            .size()
            .unstack("_size", fill_value=0)
            .reset_index()
            .rename(columns={"_week_start": "week_start"})
            .sort_values("week_start")
            .reset_index(drop=True)
        )
        for size_group in SIZE_GROUPS:
            if size_group not in weekly.columns:
                weekly[size_group] = 0
        return weekly, self.data_path

    def _load(self) -> DataBundle:
        """Window the weekly table into (lookback, features) -> next-week targets.

        Raises:
            ValueError: not enough weeks for the lookback, or bad feature mode.
        """
        weekly, source = self._weekly_table()
        lookback = int(self.lookback_weeks)
        if len(weekly) <= lookback:
            raise ValueError(f"Not enough weeks ({len(weekly)}) for lookback={lookback}")

        counts = weekly[SIZE_GROUPS].to_numpy(dtype=np.float32)
        if self.features == "counts":
            features = counts
            feature_names = list(SIZE_GROUPS)
        elif self.features == "counts+time":
            # Encode week-of-year as sin/cos on a 52-week cycle.
            week_of_year = weekly["week_start"].dt.isocalendar().week.to_numpy(dtype=np.float32)
            sin = np.sin(2 * np.pi * week_of_year / 52.0).reshape(-1, 1).astype(np.float32)
            cos = np.cos(2 * np.pi * week_of_year / 52.0).reshape(-1, 1).astype(np.float32)
            features = np.concatenate([counts, sin, cos], axis=1)
            feature_names = list(SIZE_GROUPS) + ["woy_sin", "woy_cos"]
        else:
            raise ValueError(f"Unsupported feature mode: {self.features}")

        # One sample per week index >= lookback: the trailing window predicts
        # that week's raw counts.
        x_windows: List[np.ndarray] = []
        y_targets: List[np.ndarray] = []
        sample_weeks: List[str] = []
        for index in range(lookback, len(weekly)):
            x_windows.append(features[index - lookback : index])
            y_targets.append(counts[index])
            sample_weeks.append(str(weekly.loc[index, "week_start"]))

        x = np.stack(x_windows, axis=0).astype(np.float32)
        y = np.stack(y_targets, axis=0).astype(np.float32)
        # Chronological (not shuffled) splits to avoid look-ahead leakage.
        train_idx, val_idx, test_idx = _chronological_split_indices(
            n=int(x.shape[0]),
            train_ratio=self.train_ratio,
            val_ratio=self.val_ratio,
            test_ratio=self.test_ratio,
        )

        splits = {
            "train": DataSplit(
                inputs=torch.as_tensor(x[train_idx], dtype=torch.float32),
                targets=torch.as_tensor(y[train_idx], dtype=torch.float32),
                metadata={"source": source, "region": self.region},
            ),
            "val": DataSplit(
                inputs=torch.as_tensor(x[val_idx], dtype=torch.float32),
                targets=torch.as_tensor(y[val_idx], dtype=torch.float32),
                metadata={"source": source, "region": self.region},
            ),
            "test": DataSplit(
                inputs=torch.as_tensor(x[test_idx], dtype=torch.float32),
                targets=torch.as_tensor(y[test_idx], dtype=torch.float32),
                metadata={"source": source, "region": self.region},
            ),
        }

        return DataBundle(
            splits=splits,
            feature_spec=FeatureSpec(
                input_dim=int(splits["train"].inputs.shape[-1]),
                description="Weekly FPA-FOD feature windows for next-week forecasting.",
                extra={
                    "feature_names": feature_names,
                    "lookback_weeks": lookback,
                    "dtype": "float32",
                    "region": self.region,
                },
            ),
            label_spec=LabelSpec(
                num_targets=len(SIZE_GROUPS),
                task_type="regression",
                description="Next-week counts per size group (A, B, C, D, EFG).",
                extra={"targets": list(SIZE_GROUPS), "dtype": "float32"},
            ),
            metadata={
                "dataset": self.name,
                "source": source,
                "region": self.region,
                "micro": self.micro,
                "seed": self.seed,
                "lookback_weeks": lookback,
                "features_mode": self.features,
                "week_start_for_each_sample": sample_weeks,
            },
        )


def _default_dataset_path() -> Path:
    """Default on-disk location of the FPA-FOD sqlite file."""
    return Path("data/fpa_fod.sqlite")


def build_tabular_inspection_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the tabular-dataset inspection entry point."""
    parser = argparse.ArgumentParser(
        prog="python -m pyhazards.datasets.fpa_fod_tabular.inspection",
        description="Inspect the FPA-FOD tabular dataset and print split/label summary.",
    )
    parser.add_argument("--path", default=str(_default_dataset_path()), help="Path to the FPA-FOD sqlite/csv/parquet file.")
    parser.add_argument("--task", choices=["cause", "size"], default="cause", help="Tabular classification target.")
    parser.add_argument("--region", choices=["US", "CA"], default="US", help="Geographic subset.")
    parser.add_argument("--cause-mode", choices=["paper5", "keep_all"], default="paper5", help="Cause label mapping mode.")
    parser.add_argument("--micro", action="store_true", help="Use deterministic synthetic data instead of a real file.")
    parser.add_argument("--normalize", action="store_true", help="Apply train-fit min/max normalization.")
    return parser
def build_weekly_inspection_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the weekly-forecasting inspection entry point."""
    cli = argparse.ArgumentParser(
        prog="python -m pyhazards.datasets.fpa_fod_weekly.inspection",
        description="Inspect the FPA-FOD weekly forecasting dataset and print split/shape summary.",
    )
    add = cli.add_argument
    add("--path", default=str(_default_dataset_path()), help="Path to the FPA-FOD sqlite/csv/parquet file.")
    add("--region", choices=["US", "CA"], default="US", help="Geographic subset.")
    add("--features", choices=["counts", "counts+time"], default="counts", help="Weekly feature mode.")
    add("--lookback-weeks", type=int, default=50, help="Sequence length used to predict the next week.")
    add("--micro", action="store_true", help="Use deterministic synthetic data instead of a real file.")
    return cli


def inspect_fpa_fod_tabular(argv: list[str] | None = None) -> int:
    """CLI entry point: load the tabular dataset per *argv* and print a summary."""
    options = build_tabular_inspection_parser().parse_args(argv)
    dataset = FPAFODTabularDataset(
        task=options.task,
        region=options.region,
        cause_mode=options.cause_mode,
        data_path=options.path,
        micro=options.micro,
        normalize=options.normalize,
    )
    bundle = dataset.load()
    print(f"[OK] Loaded dataset: {dataset.name}")
    print(f"[OK] Source: {bundle.metadata['source']}")
    print(f"[OK] Task: {bundle.metadata['task']}")
    print(f"[OK] Input dim: {bundle.feature_spec.input_dim}")
    print(f"[OK] Num targets: {bundle.label_spec.num_targets}")
    for split_name, split in bundle.splits.items():
        print(f"[OK] {split_name}: inputs={tuple(split.inputs.shape)} targets={tuple(split.targets.shape)}")
    mapping = bundle.metadata.get("label_mapping")
    if mapping:
        print(f"[OK] Label mapping: {mapping}")
    return 0


def inspect_fpa_fod_weekly(argv: list[str] | None = None) -> int:
    """CLI entry point: load the weekly dataset per *argv* and print a summary."""
    options = build_weekly_inspection_parser().parse_args(argv)
    dataset = FPAFODWeeklyDataset(
        region=options.region,
        data_path=options.path,
        micro=options.micro,
        features=options.features,
        lookback_weeks=options.lookback_weeks,
    )
    bundle = dataset.load()
    print(f"[OK] Loaded dataset: {dataset.name}")
    print(f"[OK] Source: {bundle.metadata['source']}")
    print(f"[OK] Lookback weeks: {bundle.metadata['lookback_weeks']}")
    print(f"[OK] Feature mode: {bundle.metadata['features_mode']}")
    print(f"[OK] Input dim: {bundle.feature_spec.input_dim}")
    print(f"[OK] Num targets: {bundle.label_spec.num_targets}")
    for split_name, split in bundle.splits.items():
        print(f"[OK] {split_name}: inputs={tuple(split.inputs.shape)} targets={tuple(split.targets.shape)}")
    return 0


__all__ = [
    "CAUSE_SYNONYMS",
    "PAPER5_CAUSES",
    "SIZE_GROUPS",
    "FPAFODTabularDataset",
    "FPAFODWeeklyDataset",
    "build_tabular_inspection_parser",
    "build_weekly_inspection_parser",
    "inspect_fpa_fod_tabular",
    "inspect_fpa_fod_weekly",
]
a/pyhazards/datasets/fpa_fod_weekly/__init__.py b/pyhazards/datasets/fpa_fod_weekly/__init__.py new file mode 100644 index 00000000..6d2dc803 --- /dev/null +++ b/pyhazards/datasets/fpa_fod_weekly/__init__.py @@ -0,0 +1,5 @@ +"""FPA-FOD weekly dataset utilities.""" + +from ..fpa_fod import FPAFODWeeklyDataset + +__all__ = ["FPAFODWeeklyDataset"] diff --git a/pyhazards/datasets/fpa_fod_weekly/inspection.py b/pyhazards/datasets/fpa_fod_weekly/inspection.py new file mode 100644 index 00000000..48364205 --- /dev/null +++ b/pyhazards/datasets/fpa_fod_weekly/inspection.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from pyhazards.datasets.fpa_fod import inspect_fpa_fod_weekly + + +def main(argv: list[str] | None = None) -> int: + return inspect_fpa_fod_weekly(argv) + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/pyhazards/datasets/goesr/__init__.py b/pyhazards/datasets/goesr/__init__.py new file mode 100644 index 00000000..89f93005 --- /dev/null +++ b/pyhazards/datasets/goesr/__init__.py @@ -0,0 +1,2 @@ +"""GOES-R dataset utilities.""" + diff --git a/pyhazards/datasets/goesr/inspection.py b/pyhazards/datasets/goesr/inspection.py new file mode 100644 index 00000000..9d52e85d --- /dev/null +++ b/pyhazards/datasets/goesr/inspection.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +from pyhazards.datasets._generic_inspection import run_generic_dataset_inspection + + +def main(argv: list[str] | None = None) -> int: + return run_generic_dataset_inspection( + dataset_name="goesr", + dataset_doc_url="https://www.goes-r.gov/", + argv=argv, + ) + + +if __name__ == "__main__": + raise SystemExit(main()) + diff --git a/pyhazards/datasets/graph.py b/pyhazards/datasets/graph.py new file mode 100644 index 00000000..891d90a1 --- /dev/null +++ b/pyhazards/datasets/graph.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +from typing import Any, Dict, List, Optional, Tuple + +import torch +from torch.utils.data import Dataset + + +class 
GraphTemporalDataset(Dataset): + """ + Simple container for county/day style tensors with an optional adjacency. + + Each sample is a window of shape (past_days, num_counties, num_features) and a label + of shape (num_counties,). + """ + + def __init__( + self, + x: torch.Tensor, + y: torch.Tensor, + adjacency: Optional[torch.Tensor] = None, + ): + """ + Args: + x: Tensor (samples, past_days, num_counties, num_features) + y: Tensor (samples, num_counties) or (samples, num_counties, targets) + adjacency: Optional Tensor + - (num_counties, num_counties) global adjacency + - (samples, num_counties, num_counties) per-sample adjacency + """ + if x.ndim != 4: + raise ValueError("x must be (samples, past_days, num_counties, num_features)") + if y.ndim not in (2, 3): + raise ValueError("y must be (samples, num_counties) or (samples, num_counties, targets)") + if adjacency is not None and adjacency.ndim not in (2, 3): + raise ValueError("adjacency must be None, (N,N), or (B,N,N)") + if adjacency is not None and adjacency.ndim == 2 and adjacency.size(0) != x.size(2): + raise ValueError("adjacency size mismatch with num_counties") + if adjacency is not None and adjacency.ndim == 3 and adjacency.size(1) != x.size(2): + raise ValueError("adjacency size mismatch with num_counties") + + self.x = x + self.y = y + self.adj = adjacency + + def __len__(self) -> int: + return self.x.size(0) + + def __getitem__(self, idx: int) -> Tuple[Dict[str, Any], torch.Tensor]: + adj = None + if self.adj is not None: + adj = self.adj if self.adj.ndim == 2 else self.adj[idx] + return {"x": self.x[idx], "adj": adj}, self.y[idx] + + +def graph_collate(batch: List[Tuple[Dict[str, Any], torch.Tensor]]): + """ + Collate function that stacks x and adjacency if provided. 
+ """ + xs, ys = zip(*batch) + x_tensor = torch.stack([item["x"] for item in xs], dim=0) + adj_list = [item["adj"] for item in xs] + adj = None + if any(a is not None for a in adj_list): + # If some entries are None, replace with first non-None + first = next(a for a in adj_list if a is not None) + adj = torch.stack([a if a is not None else first for a in adj_list], dim=0) + y_tensor = torch.stack(ys, dim=0) + return {"x": x_tensor, "adj": adj}, y_tensor + + +__all__ = ["GraphTemporalDataset", "graph_collate"] diff --git a/pyhazards/datasets/hazards/__init__.py b/pyhazards/datasets/hazards/__init__.py new file mode 100644 index 00000000..2ab2ccc2 --- /dev/null +++ b/pyhazards/datasets/hazards/__init__.py @@ -0,0 +1,6 @@ +""" +Namespace for hazard-specific dataset loaders (earthquake, wildfire, flood, hurricane, landslide, etc.). +Populate with concrete Dataset subclasses and register them in pyhazards.datasets.registry. +""" + +__all__ = [] diff --git a/pyhazards/datasets/inspection.py b/pyhazards/datasets/inspection.py new file mode 100644 index 00000000..6640f6b7 --- /dev/null +++ b/pyhazards/datasets/inspection.py @@ -0,0 +1,1022 @@ +# pyhazards/datasets/inspection.py +from __future__ import annotations + +import argparse +import os +import re +from pathlib import Path +from datetime import date, timezone + +import numpy as np +import pandas as pd +import xarray as xr +import h5py + + +# --------------------------------------------------------------------- +# Optional: notebook-style display, but safe for pure python +# --------------------------------------------------------------------- +try: + from IPython.display import display # type: ignore +except Exception: + def display(x): + if hasattr(x, "to_string"): + print(x.to_string(index=False)) + else: + print(x) + + +def _require_requests(): + try: + import requests # type: ignore + except ModuleNotFoundError as exc: + raise ModuleNotFoundError( + "pyhazards.datasets.inspection requires the 'requests' package 
for " + "MERRA-2 download operations." + ) from exc + return requests + + +def _require_matplotlib_pyplot(): + try: + import matplotlib.pyplot as plt # type: ignore + except ModuleNotFoundError as exc: + raise ModuleNotFoundError( + "pyhazards.datasets.inspection requires 'matplotlib' only when generating plots." + ) from exc + return plt + + +# --------------------------------------------------------------------- +# Constants / Defaults +# --------------------------------------------------------------------- +RAW_DATASETS = [ + "M2I1NXASM", # inst1_2d_asm_Nx + "M2INVASM", # alias folder name for inst3_3d_asm_Nv (normally M2I3NVASM) + "M2T1NXFLX", # tavg1_2d_flx_Nx + "M2T1NXLND", # tavg1_2d_lnd_Nx + "M2T1NXRAD", # tavg1_2d_rad_Nx + "M2C0NXCTM", # const_2d_ctm_Nx (static) +] + +PATTERN_SFC_OUT = "MERRA2_sfc_{yyyymmdd}.nc" +PATTERN_PRES_OUT = "MERRA_pres_{yyyymmdd}.nc" + +# Merge SFC variable subsets (from mergesfc.py) +ASM1_VARS = ["QV2M", "T2M", "TQI", "TQL", "TQV", "TS", "U10M", "V10M"] +FLX_VARS = ["EFLUX", "HFLUX", "Z0M"] +RAD_VARS = ["SWGNT", "SWTNT", "LWGAB", "LWGEM", "LWTUP"] +NV_VARS = ["SLP", "PS"] +LND_VARS = ["GWETROOT", "LAI"] +STATIC_VARS = ["FRACI", "FRLAND", "FROCEAN", "PHIS"] + +TARGET_N_FRAMES = 8 +TARGET_LAT = 361 +TARGET_LON = 576 +HOURS8 = [0, 3, 6, 9, 12, 15, 18, 21] + +# Merge PRES (from mergepres.py) +PRES_VARS = ["CLOUD", "H", "OMEGA", "PL", "QI", "QL", "QV", "T", "U", "V"] +PRES_LEVELS = [ + 34.0, 39.0, 41.0, 43.0, 44.0, 45.0, 48.0, + 51.0, 53.0, 56.0, 63.0, 68.0, 71.0, 72.0, +] + +# Try to import NAN_VALS (mergesfc depends on it) +try: + from PrithviWxC.definitions import NAN_VALS # type: ignore +except Exception as e: + NAN_VALS = {} + print("[WARN] Cannot import PrithviWxC.definitions.NAN_VALS. " + "NaN filling will be skipped. 
# ---------------------------------------------------------------------
# Helpers: repo root inference + date formatting
# ---------------------------------------------------------------------
def infer_repo_root() -> Path:
    """
    Infer REPO_ROOT from this file location.
    We want outputs to land at:
        REPO_ROOT/Prithvi-WxC/data/merra-2
        REPO_ROOT/M2I1NXASM, REPO_ROOT/M2INVASM, ...
    Works no matter where the repo is cloned.

    Heuristic:
      - Prefer a parent directory that contains BOTH 'Prithvi-WxC/' and 'pyhazards/'.
      - Fallback: any parent that contains 'Prithvi-WxC/'.
    """
    here = Path(__file__).resolve()
    candidates: list[Path] = []
    for p in [here] + list(here.parents):
        if (p / "Prithvi-WxC").is_dir():
            candidates.append(p)

    if not candidates:
        raise RuntimeError(
            f"Cannot infer repo root from {here}. "
            f"Expected a parent dir containing 'Prithvi-WxC/'. "
            f"Please pass --repo-root."
        )

    for p in candidates:
        if (p / "pyhazards").is_dir():
            return p

    return candidates[0]


def yyyymmdd(d: date) -> str:
    """Format *d* as a compact YYYYMMDD string."""
    return d.strftime("%Y%m%d")


def get_stream_number(d: date) -> str:
    """Return the MERRA-2 production stream for *d* (same logic as merra2.py)."""
    y = d.year
    if 1980 <= y <= 1991:
        return "100"
    elif 1992 <= y <= 2000:
        return "200"
    elif 2001 <= y <= 2010:
        return "300"
    else:
        return "400"


# ---------------------------------------------------------------------
# PART 1) Download raw datasets (adapted from merra2.py, but NO hardcoded creds)
# ---------------------------------------------------------------------
PRODUCT_INFO = {
    "M2I1NXASM": {
        "host": "https://goldsmr4.gesdisc.eosdis.nasa.gov",
        "collection": "M2I1NXASM.5.12.4",
        "prefix": "inst1_2d_asm_Nx",
        "has_date": True,
        "data_root": "MERRA2",
    },
    # Official code is M2I3NVASM, but user wants folder name M2INVASM.
    "M2I3NVASM": {
        "host": "https://goldsmr5.gesdisc.eosdis.nasa.gov",
        "collection": "M2I3NVASM.5.12.4",
        "prefix": "inst3_3d_asm_Nv",
        "has_date": True,
        "data_root": "MERRA2",
    },
    "M2T1NXFLX": {
        "host": "https://goldsmr4.gesdisc.eosdis.nasa.gov",
        "collection": "M2T1NXFLX.5.12.4",
        "prefix": "tavg1_2d_flx_Nx",
        "has_date": True,
        "data_root": "MERRA2",
    },
    "M2T1NXLND": {
        "host": "https://goldsmr4.gesdisc.eosdis.nasa.gov",
        "collection": "M2T1NXLND.5.12.4",
        "prefix": "tavg1_2d_lnd_Nx",
        "has_date": True,
        "data_root": "MERRA2",
    },
    "M2T1NXRAD": {
        "host": "https://goldsmr4.gesdisc.eosdis.nasa.gov",
        "collection": "M2T1NXRAD.5.12.4",
        "prefix": "tavg1_2d_rad_Nx",
        "has_date": True,
        "data_root": "MERRA2",
    },
    "M2C0NXCTM": {
        "host": "https://goldsmr4.gesdisc.eosdis.nasa.gov",
        "collection": "M2C0NXCTM.5.12.4",
        "prefix": "const_2d_ctm_Nx",
        "has_date": False,
        "filename": "MERRA2_101.const_2d_ctm_Nx.00000000.nc4",
        # Constant collections usually live under the MONTHLY data root.
        "data_root": "MERRA2_MONTHLY",
        # Very common sub-directory layout (missing it caused 404s before).
        "subdir": "1980",
    },
}


def build_file_url(product_code: str, d: date | None) -> tuple[str, str]:
    """
    Build a direct HTTPS URL to the granule, plus the granule filename.

    - Dated products: .../data/{root}/{collection}/{YYYY}/{MM}/{filename}
    - Const products: .../data/{root}/{collection}[/{subdir}]/{filename}

    Raises:
        ValueError: *d* is None for a dated product.
    """
    info = PRODUCT_INFO[product_code]
    host = info["host"].rstrip("/")
    collection = info["collection"].strip("/")
    data_root = info.get("data_root", "MERRA2").strip("/")
    base = f"{host}/data/{data_root}/{collection}"

    # const / no-date: a single static granule, optionally under a subdir.
    # FIX: the URL must end in the granule filename (the previous version
    # carried a redaction placeholder instead of the filename).
    if not info.get("has_date", True):
        filename = info["filename"]
        subdir = (info.get("subdir") or "").strip("/")
        if subdir:
            return f"{base}/{subdir}/{filename}", filename
        return f"{base}/{filename}", filename

    if d is None:
        raise ValueError(f"date must be provided for product {product_code}")

    stream = get_stream_number(d)
    yyyy = f"{d.year:04d}"
    mm = f"{d.month:02d}"
    datestr = f"{d.year:04d}{d.month:02d}{d.day:02d}"

    prefix = info["prefix"]
    filename = f"MERRA2_{stream}.{prefix}.{datestr}.nc4"
    # MERRA-2 dated granules are published under /YYYY/MM/.
    url = f"{base}/{yyyy}/{mm}/{filename}"
    return url, filename


def _looks_like_html_login(resp) -> bool:
    """Heuristic: does *resp* look like an Earthdata HTML login page?"""
    ctype = (resp.headers.get("Content-Type") or "").lower()
    if "text/html" in ctype:
        return True
    # sometimes ctype is octet-stream but body is still HTML; cheap check:
    head = (resp.text[:200] if resp.encoding else "")
    # NOTE(review): the tail of this function was garbled in the source diff;
    # reconstructed as a case-insensitive "<html" sniff — confirm against the
    # original implementation.
    return "<html" in head.lower()
def _cmr_get_collection_concept_id(short_name: str, version: str) -> str:
    """Look up a collection concept id in CMR by short name + version.

    NOTE(review): the original signature line was garbled in the source diff;
    it is reconstructed here from the call site
    `_cmr_get_collection_concept_id("M2C0NXCTM", "5.12.4")` — confirm.
    """
    requests = _require_requests()
    r = requests.get(
        f"{_CMR_BASE}/collections.json",
        params={"short_name": short_name, "version": version},
        timeout=30,
    )
    r.raise_for_status()
    entries = r.json().get("feed", {}).get("entry", []) or []
    if not entries:
        raise RuntimeError(f"CMR: collection not found for {short_name} v{version}")
    return entries[0]["id"]


def _cmr_pick_direct_data_href(collection_concept_id: str, temporal: str | None = None) -> str:
    """Pick a direct downloadable granule href from a CMR granule search.

    Raises:
        RuntimeError: no granule, or no direct (non-OPeNDAP/THREDDS) data link.
    """
    requests = _require_requests()
    params = {"collection_concept_id": collection_concept_id, "page_size": 200}
    if temporal:
        params["temporal"] = temporal

    r = requests.get(
        f"{_CMR_BASE}/granules.json",
        params=params,
        headers={"Accept": "application/json"},
        timeout=30,
    )
    r.raise_for_status()
    entries = r.json().get("feed", {}).get("entry", []) or []
    if not entries:
        raise RuntimeError("CMR: granule not found")

    def ok(h: str) -> bool:
        # Accept only direct file links (skip catalogs / metadata pages).
        hl = h.lower()
        if not h.startswith("http"):
            return False
        if any(x in hl for x in ["opendap", "thredds", ".html", ".xml"]):
            return False
        return hl.endswith((".nc4", ".nc", ".h5"))

    # prefer links that are "data#" rel if present
    for e in entries:
        for link in e.get("links", []) or []:
            href = link.get("href") or ""
            rel = (link.get("rel") or "").lower()
            if ok(href) and ("data#" in rel or "data" in rel):
                return href

    # fallback: any direct-looking href
    for e in entries:
        for link in e.get("links", []) or []:
            href = link.get("href") or ""
            if ok(href):
                return href

    raise RuntimeError("CMR: no direct data href")


def _resolve_const_ctm_urls() -> list[str]:
    """
    Try multiple plausible layouts for M2C0NXCTM.
    Some servers expose it as:
        .../MERRA2_MONTHLY/M2C0NXCTM.../1980/<filename>
    others may differ. We'll try a short list before CMR fallback.
    """
    info = PRODUCT_INFO["M2C0NXCTM"]
    host = info["host"].rstrip("/")
    collection = info["collection"].strip("/")
    filename = info["filename"]
    data_root_primary = info.get("data_root", "MERRA2").strip("/")
    subdir = (info.get("subdir") or "").strip("/")

    candidates: list[str] = []

    def add(root: str, suffix: str):
        candidates.append(f"{host}/data/{root}/{collection}/{suffix}")

    # Common layouts. FIX: each candidate must end in the granule filename;
    # the previous version carried a redaction placeholder instead.
    if subdir:
        add(data_root_primary, f"{subdir}/{filename}")
        add(data_root_primary, f"{subdir}/01/{filename}")
    add(data_root_primary, filename)

    # fallback other root
    other_root = "MERRA2" if data_root_primary == "MERRA2_MONTHLY" else "MERRA2_MONTHLY"
    if subdir:
        add(other_root, f"{subdir}/{filename}")
        add(other_root, f"{subdir}/01/{filename}")
    add(other_root, filename)

    # dedupe while preserving order
    out: list[str] = []
    seen = set()
    for u in candidates:
        if u not in seen:
            out.append(u)
            seen.add(u)
    return out


def _resolve_const_ctm_url_via_cmr() -> str:
    """
    Resolve M2C0NXCTM direct granule URL via CMR as a robust fallback.
    """
    cid = _cmr_get_collection_concept_id("M2C0NXCTM", "5.12.4")
    # const file exists in 1980 year range for sure
    return _cmr_pick_direct_data_href(cid, temporal="1980-01-01T00:00:00Z,1980-12-31T23:59:59Z")


def download_raw_all(raw_base: Path, d: date, *, force: bool = False):
    """
    Download required raw files for a single day + static file.
    Folder layout matches user's requirement:
        raw_base/M2I1NXASM/...
        raw_base/M2INVASM/...
        ...
        raw_base/M2C0NXCTM/...
    """
    # Session setup: support env creds and/or ~/.netrc
    requests = _require_requests()
    session = requests.Session()
    session.trust_env = True
    session.headers.update({"User-Agent": "pyhazards-merra2-inspection/1.0"})

    username = os.getenv("EARTHDATA_USERNAME")
    password = os.getenv("EARTHDATA_PASSWORD")
    if username and password:
        session.auth = (username, password)
    else:
        print("[INFO] EARTHDATA_USERNAME/PASSWORD not set. "
              "Will try ~/.netrc (machine urs.earthdata.nasa.gov).")

    # ---------- const file ----------
    const_dir = raw_base / "M2C0NXCTM"
    const_dir.mkdir(parents=True, exist_ok=True)
    # We always store with the canonical filename
    _, const_filename = build_file_url("M2C0NXCTM", None)
    const_out = const_dir / const_filename

    # First try a few deterministic URLs
    last_err: Exception | None = None
    for url in _resolve_const_ctm_urls():
        try:
            download_file(session, url, const_out, force=force)
            last_err = None
            break
        except Exception as e:
            last_err = e
            # only continue if it's a 404; otherwise fail fast
            if "HTTP 404" in str(e):
                continue
            raise

    # If still failing, use CMR to resolve a direct href
    if last_err is not None:
        cmr_url = _resolve_const_ctm_url_via_cmr()
        print(f"[CMR] resolved M2C0NXCTM URL: {cmr_url}")
        download_file(session, cmr_url, const_out, force=force)

    # ---------- daily files ----------
    day_products = ["M2I1NXASM", "M2I3NVASM", "M2T1NXFLX", "M2T1NXLND", "M2T1NXRAD"]

    # user wants NV folder named M2INVASM
    nv_folder = raw_base / "M2INVASM"
    nv_folder.mkdir(parents=True, exist_ok=True)

    for prod in day_products:
        url, filename = build_file_url(prod, d)
        if prod == "M2I3NVASM":
            out_dir = nv_folder
        else:
            out_dir = raw_base / prod
        download_file(session, url, out_dir / filename, force=force)


# ---------------------------------------------------------------------
# PART 2) timefix (shared, derived from mergesfc/mergepres timefix sections)
# ---------------------------------------------------------------------
def parse_ymd_from_name(path: Path) -> tuple[int, str]:
    """Extract the first 8-digit run (YYYYMMDD) from *path*'s filename.

    Returns the date both as an int and as the original string.

    Raises:
        ValueError: no 8-digit run in the filename.
    """
    m = re.search(r"(\d{8})", path.name)
    if not m:
        raise ValueError(f"Cannot find YYYYMMDD in filename: {path.name}")
    return int(m.group(1)), m.group(1)
+ """ + path = Path(path) + if not path.exists(): + raise FileNotFoundError(f"timefix target missing: {path}") + + ymd_int, ymd_str = parse_ymd_from_name(path) + + with h5py.File(path, "r+") as f: + if "time" not in f: + raise RuntimeError(f"{path}: missing 'time' variable for timefix") + + t = f["time"] + data = np.array(t[...]) + if data.size == 0: + raise RuntimeError(f"{path}: time length is 0") + + # step from old time + if data.size > 1: + step = int(data[1] - data[0]) + else: + step = 180 + if step <= 0: + step = 180 + + new_time = np.arange(0, step * data.size, step, dtype="int32") + t[...] = new_time.astype("int32") + + t.attrs["begin_date"] = np.array([ymd_int], dtype="int32") + t.attrs["begin_time"] = np.array([0], dtype="int32") + t.attrs["units"] = f"minutes since {ymd_str[0:4]}-{ymd_str[4:6]}-{ymd_str[6:8]} 00:00:00" + t.attrs["calendar"] = "proleptic_gregorian" + + +# --------------------------------------------------------------------- +# PART 3) Merge SFC (adapted from mergesfc.py; same merge logic, path parameterized) +# --------------------------------------------------------------------- +def _rename_latlon(ds: xr.Dataset) -> xr.Dataset: + rename_map = {} + if "latitude" in ds.coords and "lat" not in ds.coords: + rename_map["latitude"] = "lat" + if "longitude" in ds.coords and "lon" not in ds.coords: + rename_map["longitude"] = "lon" + if rename_map: + ds = ds.rename(rename_map) + return ds + + +def _ensure_order(ds: xr.Dataset) -> xr.Dataset: + if "lat" in ds.coords and ds["lat"].size > 1: + lat = ds["lat"].values + if np.nanmean(np.diff(lat)) < 0: + ds = ds.sortby("lat") + if "lon" in ds.coords and ds["lon"].size > 1: + lon = ds["lon"].values + if np.nanmean(np.diff(lon)) < 0: + ds = ds.sortby("lon") + return ds + + +def _coerce_to_shape(ds: xr.Dataset, target_lat=TARGET_LAT, target_lon=TARGET_LON) -> xr.Dataset: + if "lat" in ds.dims and ds.dims["lat"] != target_lat: + raise ValueError(f"lat size mismatch: {ds.dims['lat']} != {target_lat}") + 
if "lon" in ds.dims and ds.dims["lon"] != target_lon: + raise ValueError(f"lon size mismatch: {ds.dims['lon']} != {target_lon}") + return ds + + +def _select_8_from_24(da: xr.DataArray): + return da.isel(time=[0, 3, 6, 9, 12, 15, 18, 21]) + + +def _repeat_to_n(da: xr.DataArray, n: int = TARGET_N_FRAMES) -> xr.DataArray: + if "time" not in da.dims: + tiles = [da.expand_dims(time=[i]) for i in range(n)] + return xr.concat(tiles, dim="time") + T = da.sizes["time"] + rep = int(np.ceil(n / T)) + out = xr.concat([da] * rep, dim="time").isel(time=slice(0, n)) + return out + + +def _average_into_n(da: xr.DataArray, n: int = TARGET_N_FRAMES) -> xr.DataArray: + T = da.sizes["time"] + splits = np.array_split(np.arange(T), n) + tiles = [da.isel(time=idx).mean(dim="time") for idx in splits] + return xr.concat(tiles, dim="time") + + +def _to_8_frames_data(da: xr.DataArray) -> xr.DataArray: + if "time" not in da.dims: + return _repeat_to_n(da, TARGET_N_FRAMES) + T = da.sizes["time"] + if T == TARGET_N_FRAMES: + return da + if T == 24: + return _select_8_from_24(da) + if T == 1: + return _repeat_to_n(da, TARGET_N_FRAMES) + if T in (2, 4): + return _repeat_to_n(da, TARGET_N_FRAMES) + if T > TARGET_N_FRAMES: + return _average_into_n(da, TARGET_N_FRAMES) + return _repeat_to_n(da, TARGET_N_FRAMES) + + +def _make_time8_from_base_time(base_time: xr.DataArray) -> xr.DataArray: + t = pd.to_datetime(base_time.values) + if t.size >= 24: + idx = [0, 3, 6, 9, 12, 15, 18, 21] + t8 = t[idx] + elif t.size == 8: + t8 = t + else: + t0 = pd.Timestamp(t[0]).normalize() if t.size > 0 else pd.Timestamp("2000-01-01") + t8 = [t0 + pd.Timedelta(hours=h) for h in HOURS8] + return xr.DataArray(np.array(t8, dtype="datetime64[ns]"), dims=["time"], name="time") + + +def _align_and_put(ds_out: xr.Dataset, name: str, da: xr.DataArray): + if "lev" in da.dims: + da = da.isel(lev=0) + + da = da.transpose(*[d for d in da.dims if d in ["time", "lat", "lon"]]) + da8 = _to_8_frames_data(da) + + if set(["lat", 
"lon"]).issubset(set(da8.dims)): + order = ("time", "lat", "lon") if "time" in da8.dims else ("lat", "lon") + da8 = da8.transpose(*order) + ds_out[name] = xr.DataArray(da8.values, dims=order, attrs=da.attrs) + else: + ds_out[name] = xr.DataArray(da8.values, dims=da8.dims, attrs=da.attrs) + + +def _load_and_prepare(path: Path) -> xr.Dataset: + print(f"[DEBUG] open_dataset: {path}") + last_err = None + for engine in [None, "netcdf4", "h5netcdf"]: + try: + print(f" try engine={engine}") + ds = xr.open_dataset(path, engine=engine) if engine else xr.open_dataset(path) + ds = _rename_latlon(ds) + ds = _ensure_order(ds) + ds = _coerce_to_shape(ds, TARGET_LAT, TARGET_LON) + print(f" OK with engine={engine}") + return ds + except Exception as e: + print(f" engine={engine} FAILED:", e) + last_err = e + raise last_err + + +def _squeeze_static_2d(da: xr.DataArray, name: str) -> xr.DataArray: + if "time" in da.dims: + print(f"[STATIC] {name}: has time dim={da.sizes['time']}, taking time=0") + da = da.isel(time=0, drop=True) + if "lev" in da.dims: + print(f"[STATIC] {name}: has lev dim={da.sizes['lev']}, taking lev=0") + da = da.isel(lev=0, drop=True) + + if not {"lat", "lon"}.issubset(da.dims): + raise ValueError(f"STATIC {name} missing lat/lon dims: dims={da.dims}") + + da2 = da.transpose("lat", "lon") + if da2.sizes["lat"] != TARGET_LAT or da2.sizes["lon"] != TARGET_LON: + raise ValueError(f"STATIC {name} size not (361,576): {da2.sizes}") + return da2 + + +def _merge_global_attrs(*datasets) -> dict: + merged = {} + for ds in datasets: + if ds is None: + continue + for k, v in ds.attrs.items(): + if k not in merged: + merged[k] = v + return merged + + +def _raw_paths_for_day(raw_base: Path, d: date) -> dict[str, Path]: + datestr = yyyymmdd(d) + stream = get_stream_number(d) + + # note: NV folder is M2INVASM (user requirement) + return { + "ASM1": raw_base / "M2I1NXASM" / f"MERRA2_{stream}.inst1_2d_asm_Nx.{datestr}.nc4", + "FLX": raw_base / "M2T1NXFLX" / 
f"MERRA2_{stream}.tavg1_2d_flx_Nx.{datestr}.nc4", + "RAD": raw_base / "M2T1NXRAD" / f"MERRA2_{stream}.tavg1_2d_rad_Nx.{datestr}.nc4", + "NV": raw_base / "M2INVASM" / f"MERRA2_{stream}.inst3_3d_asm_Nv.{datestr}.nc4", + "LND": raw_base / "M2T1NXLND" / f"MERRA2_{stream}.tavg1_2d_lnd_Nx.{datestr}.nc4", + "STATIC": raw_base / "M2C0NXCTM" / "MERRA2_101.const_2d_ctm_Nx.00000000.nc4", + } + + +def merge_sfc(raw_base: Path, merged_dir: Path, d: date) -> Path: + paths = _raw_paths_for_day(raw_base, d) + for k, p in paths.items(): + if not p.exists(): + raise FileNotFoundError(f"Missing raw file for {k}: {p}") + + ds_asm1 = _load_and_prepare(paths["ASM1"]) + ds_flx = _load_and_prepare(paths["FLX"]) + ds_rad = _load_and_prepare(paths["RAD"]) + ds_nv = _load_and_prepare(paths["NV"]) + ds_lnd = _load_and_prepare(paths["LND"]) + ds_static = _load_and_prepare(paths["STATIC"]) + + keep = [v for v in ASM1_VARS if v in ds_asm1] + if not keep: + raise RuntimeError("ASM1 has none of requested variables.") + asm1_vars_8 = {v: _to_8_frames_data(ds_asm1[v]) for v in keep} + + if "time" in ds_asm1.coords and ds_asm1.sizes.get("time", 0) > 0: + time8 = _make_time8_from_base_time(ds_asm1["time"]) + else: + t0 = pd.Timestamp("2000-01-01") + time8 = xr.DataArray( + np.array([t0 + pd.Timedelta(hours=h) for h in HOURS8], dtype="datetime64[ns]"), + dims=["time"], name="time" + ) + + base = xr.DataArray( + np.empty((TARGET_N_FRAMES, TARGET_LAT, TARGET_LON), dtype="float32"), + dims=("time", "lat", "lon") + ).assign_coords( + time=time8, + lat=ds_asm1["lat"] if "lat" in ds_asm1.coords else np.arange(TARGET_LAT), + lon=ds_asm1["lon"] if "lon" in ds_asm1.coords else np.arange(TARGET_LON), + ) + + ds_out = xr.Dataset(coords={"time": base["time"], "lat": base["lat"], "lon": base["lon"]}) + + for v, da8 in asm1_vars_8.items(): + ds_out[v] = xr.DataArray(da8.values, dims=("time", "lat", "lon"), attrs=ds_asm1[v].attrs) + + for v in FLX_VARS: + if v not in ds_flx: + raise RuntimeError(f"FLX missing var: 
{v}") + _align_and_put(ds_out, v, ds_flx[v]) + + for v in RAD_VARS: + if v not in ds_rad: + raise RuntimeError(f"RAD missing var: {v}") + _align_and_put(ds_out, v, ds_rad[v]) + + for v in NV_VARS: + if v not in ds_nv: + print(f"[WARN] NV missing var: {v}; skip") + continue + _align_and_put(ds_out, v, ds_nv[v]) + + for v in LND_VARS: + if v not in ds_lnd: + print(f"[WARN] LND missing var: {v}; skip") + continue + _align_and_put(ds_out, v, ds_lnd[v]) + + for v in STATIC_VARS: + if v not in ds_static: + raise RuntimeError(f"STATIC missing var: {v}") + da2 = _squeeze_static_2d(ds_static[v], v) + ds_out[v] = xr.DataArray(da2.values, dims=("lat", "lon"), attrs=ds_static[v].attrs) + + ds_out.attrs = _merge_global_attrs(ds_asm1, ds_flx, ds_rad, ds_nv, ds_lnd, ds_static) + + # NaN fill (same intent as mergesfc.py) + if NAN_VALS: + for var in ds_out.data_vars: + if var in NAN_VALS: + nan_val = NAN_VALS[var] + ds_out[var].data[:] = np.nan_to_num(ds_out[var].data, nan=nan_val) + + # time encoding + timefix afterwards (same structure as mergesfc.py) + t0 = ds_out["time"].values[0] + ds_out["time"] = ((ds_out["time"].values - t0).astype("timedelta64[m]").astype("int32")) + ds_out.time.attrs = {"begin_time": 0, "begin_date": int(yyyymmdd(d))} + + encoding = {name: {"zlib": True} for name in ds_out.data_vars} + encoding["time"] = {"dtype": "int32"} + + merged_dir.mkdir(parents=True, exist_ok=True) + out_path = merged_dir / PATTERN_SFC_OUT.format(yyyymmdd=yyyymmdd(d)) + + try: + print("[INFO] Writing SFC with engine=h5netcdf...") + ds_out.to_netcdf(out_path, encoding=encoding, engine="h5netcdf") + except Exception as e: + print("[WARN] h5netcdf failed:", e) + print("[INFO] Falling back to engine=netcdf4") + ds_out.to_netcdf(out_path, encoding=encoding, engine="netcdf4") + + timefix_one_file(out_path) + print(f"[OK] Wrote SFC: {out_path}") + return out_path + + +# --------------------------------------------------------------------- +# PART 4) Merge PRES (adapted from mergepres.py; 
same merge logic, path parameterized) +# --------------------------------------------------------------------- +def _fmt_date_any(ts): + ts_pd = pd.to_datetime(ts) + if ts_pd.tzinfo is None: + ts_pd = ts_pd.tz_localize(timezone.utc) + else: + ts_pd = ts_pd.tz_convert(timezone.utc) + return ts_pd.strftime("%Y-%m-%d"), ts_pd.strftime("%H:%M:%S.%f") + + +def _open_nv_any_engine(path: Path) -> xr.Dataset: + print(f"[DEBUG] open_dataset: {path}") + last_err = None + for engine in [None, "netcdf4", "h5netcdf"]: + try: + print(f" try engine={engine}") + ds = xr.open_dataset(path, engine=engine) if engine else xr.open_dataset(path) + print(f" OK with engine={engine}") + return ds + except Exception as e: + print(f" engine={engine} FAILED:", e) + last_err = e + raise last_err + + +def _write_any_engine(ds: xr.Dataset, out_path: Path): + engines = [("h5netcdf", True), ("netcdf4", True), ("scipy", False)] + last_err = None + out_path.parent.mkdir(parents=True, exist_ok=True) + + for engine, can_compress in engines: + try: + print(f"[INFO] Writing to {out_path} (engine={engine}, compression={'on' if can_compress else 'off'})") + if can_compress: + comp = dict(zlib=True, complevel=1, shuffle=True) + encoding = {v: comp for v in ds.data_vars} + ds.to_netcdf(out_path, engine=engine, mode="w", encoding=encoding) + else: + ds.to_netcdf(out_path, engine=engine, mode="w") + print(f"[INFO] Write OK with engine={engine}") + return + except Exception as e: + print(f"[WARN] engine={engine} FAILED:", e) + last_err = e + raise last_err + + +def merge_pres(raw_base: Path, merged_dir: Path, d: date) -> Path: + datestr = yyyymmdd(d) + stream = get_stream_number(d) + in_vert = raw_base / "M2INVASM" / f"MERRA2_{stream}.inst3_3d_asm_Nv.{datestr}.nc4" + if not in_vert.exists(): + raise FileNotFoundError(f"Missing NV raw file for PRES merge: {in_vert}") + + merged_dir.mkdir(parents=True, exist_ok=True) + out_vert = merged_dir / PATTERN_PRES_OUT.format(yyyymmdd=datestr) + + ds = 
_open_nv_any_engine(in_vert) + + keep_vars = [v for v in PRES_VARS if v in ds.data_vars] + if not keep_vars: + raise ValueError(f"No target vars found. Wanted: {PRES_VARS}") + ds = ds[keep_vars] + + if "lev" not in ds.coords: + raise KeyError("Missing 'lev' coordinate in NV file.") + + lev_vals = np.array(ds["lev"].values, dtype=float).tolist() + missing = [lv for lv in PRES_LEVELS if lv not in lev_vals] + if missing: + raise ValueError(f"Missing levels: {missing}\nAvailable lev: {lev_vals}") + + ds = ds.sel(lev=xr.DataArray(PRES_LEVELS, dims="lev")) + ds = ds.sortby("lev", ascending=False) + + attrs = dict(ds.attrs) if ds.attrs else {} + if "time" in ds.coords and ds.sizes.get("time", 0) > 0: + tvals = ds["time"].values + tmin, tmax = tvals.min(), tvals.max() + beg_date, beg_time = _fmt_date_any(tmin) + end_date, end_time = _fmt_date_any(tmax) + attrs.update({ + "RangeBeginningDate": beg_date, + "RangeBeginningTime": beg_time, + "RangeEndingDate": end_date, + "RangeEndingTime": end_time, + }) + ds = ds.assign_attrs(attrs) + + _write_any_engine(ds, out_vert) + ds.close() + + timefix_one_file(out_vert) + + # quick check + try: + ds2 = xr.open_dataset(out_vert, engine="h5netcdf") + print(f"[OK] Wrote PRES: {out_vert}") + print("vars:", list(ds2.data_vars)) + print("shape:", {k: int(v) for k, v in ds2.sizes.items()}) + ds2.close() + except Exception as e: + print("[WARN] PRES written & timefixed, but reopen check failed:", e) + + return out_vert + + +# --------------------------------------------------------------------- +# PART 5) Inspection (SFC/PRES merged products) +# --------------------------------------------------------------------- +def list_vars(ds: xr.Dataset, max_show: int = 60) -> pd.DataFrame: + rows = [] + for name, da in ds.data_vars.items(): + rows.append({ + "var": name, + "dims": str(da.dims), + "shape": str(tuple(da.shape)), + "dtype": str(da.dtype), + }) + df = pd.DataFrame(rows).sort_values("var").reset_index(drop=True) + return df.head(max_show) 
if len(df) > max_show else df + + +def inspect_ds(ds: xr.Dataset, name: str, max_vars: int = 60): + print(f"\n=== {name} ===") + print("dims:", dict(ds.sizes)) + print("coords:", list(ds.coords)) + print("n_vars:", len(ds.data_vars)) + display(list_vars(ds, max_show=max_vars)) + + +def summarize_da(da: xr.DataArray) -> pd.Series: + s = xr.Dataset({ + "min": da.min(skipna=True), + "max": da.max(skipna=True), + "mean": da.mean(skipna=True), + "std": da.std(skipna=True), + }).compute() + return pd.Series({k: float(s[k].values) for k in s.data_vars}) + + +def run_inspection(merged_dir: Path, outdir: Path, d: date, var: str = "T2M"): + sfc_path = merged_dir / PATTERN_SFC_OUT.format(yyyymmdd=yyyymmdd(d)) + pres_path = merged_dir / PATTERN_PRES_OUT.format(yyyymmdd=yyyymmdd(d)) + + ds_sfc = xr.open_dataset(sfc_path, engine="h5netcdf") + ds_pres = xr.open_dataset(pres_path, engine="h5netcdf") + + inspect_ds(ds_sfc, "SFC (one day)") + inspect_ds(ds_pres, "PRES (one day)") + + # save var tables + outdir.mkdir(parents=True, exist_ok=True) + list_vars(ds_sfc).to_csv(outdir / f"sfc_vars_{d.isoformat()}.csv", index=False) + list_vars(ds_pres).to_csv(outdir / f"pres_vars_{d.isoformat()}.csv", index=False) + + if var not in ds_sfc: + raise KeyError(f"{var} not found in SFC merged file. 
See vars csv in {outdir}") + + da = ds_sfc[var] + print(f"\n{var} dims :", da.dims) + print(f"{var} shape:", da.shape) + print(f"\n{var} summary:") + print(summarize_da(da)) + + # plot one timestep + plt = _require_matplotlib_pyplot() + t = 0 + Z = da.isel(time=t).compute() + plt.figure() + plt.contourf(ds_sfc["lon"], ds_sfc["lat"], Z, 100) + plt.gca().set_aspect("equal") + plt.title(f"{var} (t={t})") + + out_pdf = outdir / f"{var.lower()}_{d.isoformat()}.pdf" + plt.savefig(out_pdf) + print(f"\nSaved: {out_pdf}") + + ds_sfc.close() + ds_pres.close() + + +# --------------------------------------------------------------------- +# CLI +# --------------------------------------------------------------------- +def build_parser() -> argparse.ArgumentParser: + p = argparse.ArgumentParser( + prog="python -m pyhazards.datasets.inspection", + description="One-shot pipeline: download raw MERRA-2 -> merge SFC+PRES -> inspection.", + ) + + # ✅ positional date + p.add_argument( + "date", + help="Date to run. 
Accepts YYYYMMDD (e.g., 20251111) or YYYY-MM-DD (e.g., 2025-11-11).", + ) + + # ✅ default outputs (relative to repo root unless absolute) + p.add_argument("--outdir", default="outputs", help="Output directory (default: outputs under repo root)") + + # ✅ auto infer repo root if omitted + p.add_argument("--repo-root", default=None, help="Repo root (auto-infer if omitted)") + p.add_argument("--raw-base", default=None, help="Raw datasets base dir (default: REPO_ROOT)") + p.add_argument("--merged-dir", default=None, help="Merged dir (default: REPO_ROOT/Prithvi-WxC/data/merra-2)") + + p.add_argument("--skip-download", action="store_true", help="Assume raw files already exist") + p.add_argument("--skip-merge", action="store_true", help="Assume merged nc already exist") + p.add_argument("--force-download", action="store_true", help="Force re-download even if file exists") + p.add_argument("--var", default="T2M", help="Variable to summarize/plot from SFC") + + return p + + +def main(argv=None) -> int: + args = build_parser().parse_args(argv) + + s = args.date.strip() + if len(s) == 8 and s.isdigit(): + d = date(int(s[:4]), int(s[4:6]), int(s[6:8])) + else: + d = date.fromisoformat(s) + + repo_root = Path(args.repo_root).expanduser().resolve() if args.repo_root else infer_repo_root() + raw_base = Path(args.raw_base).expanduser().resolve() if args.raw_base else repo_root + merged_dir = Path(args.merged_dir).expanduser().resolve() if args.merged_dir else (repo_root / "Prithvi-WxC" / "data" / "merra-2") + + # outdir: relative -> repo_root/outdir; absolute -> keep + outdir = (repo_root / args.outdir) if not Path(args.outdir).is_absolute() else Path(args.outdir) + outdir = outdir.expanduser().resolve() + + print("repo_root :", repo_root) + print("raw_base :", raw_base) + print("merged_dir:", merged_dir) + print("outdir :", outdir) + print("date :", d) + + raw_base.mkdir(parents=True, exist_ok=True) + merged_dir.mkdir(parents=True, exist_ok=True) + outdir.mkdir(parents=True, 
exist_ok=True) + + if not args.skip_download: + print("\n=== STEP 1: Download raw datasets ===") + download_raw_all(raw_base, d, force=args.force_download) + + if not args.skip_merge: + print("\n=== STEP 2: Merge SFC ===") + merge_sfc(raw_base, merged_dir, d) + + print("\n=== STEP 3: Merge PRES ===") + merge_pres(raw_base, merged_dir, d) + + print("\n=== STEP 4: Inspection ===") + run_inspection(merged_dir, outdir, d, var=args.var) + + print("\n[DONE] Full pipeline finished.") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/pyhazards/datasets/landfire/__init__.py b/pyhazards/datasets/landfire/__init__.py new file mode 100644 index 00000000..4e0d86c2 --- /dev/null +++ b/pyhazards/datasets/landfire/__init__.py @@ -0,0 +1,2 @@ +"""LANDFIRE dataset utilities.""" + diff --git a/pyhazards/datasets/landfire/inspection.py b/pyhazards/datasets/landfire/inspection.py new file mode 100644 index 00000000..269eb2b6 --- /dev/null +++ b/pyhazards/datasets/landfire/inspection.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +from pyhazards.datasets._generic_inspection import run_generic_dataset_inspection + + +def main(argv: list[str] | None = None) -> int: + return run_generic_dataset_inspection( + dataset_name="landfire", + dataset_doc_url="https://landfire.gov/", + argv=argv, + ) + + +if __name__ == "__main__": + raise SystemExit(main()) + diff --git a/pyhazards/datasets/merra2/__init__.py b/pyhazards/datasets/merra2/__init__.py new file mode 100644 index 00000000..0b3fc2a8 --- /dev/null +++ b/pyhazards/datasets/merra2/__init__.py @@ -0,0 +1,2 @@ +"""MERRA-2 dataset utilities.""" + diff --git a/pyhazards/datasets/merra2/inspection.py b/pyhazards/datasets/merra2/inspection.py new file mode 100644 index 00000000..b19ed6c0 --- /dev/null +++ b/pyhazards/datasets/merra2/inspection.py @@ -0,0 +1,12 @@ +from __future__ import annotations + +from pyhazards.datasets.inspection import main as merra2_main + + +def main(argv: list[str] | None = 
None) -> int: + return merra2_main(argv) + + +if __name__ == "__main__": + raise SystemExit(main()) + diff --git a/pyhazards/datasets/mtbs/__init__.py b/pyhazards/datasets/mtbs/__init__.py new file mode 100644 index 00000000..deece426 --- /dev/null +++ b/pyhazards/datasets/mtbs/__init__.py @@ -0,0 +1,2 @@ +"""MTBS dataset utilities.""" + diff --git a/pyhazards/datasets/mtbs/inspection.py b/pyhazards/datasets/mtbs/inspection.py new file mode 100644 index 00000000..9d5b8881 --- /dev/null +++ b/pyhazards/datasets/mtbs/inspection.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +from pyhazards.datasets._generic_inspection import run_generic_dataset_inspection + + +def main(argv: list[str] | None = None) -> int: + return run_generic_dataset_inspection( + dataset_name="mtbs", + dataset_doc_url="https://burnseverity.cr.usgs.gov/", + argv=argv, + ) + + +if __name__ == "__main__": + raise SystemExit(main()) + diff --git a/pyhazards/datasets/noaa_flood/__init__.py b/pyhazards/datasets/noaa_flood/__init__.py new file mode 100644 index 00000000..e632861f --- /dev/null +++ b/pyhazards/datasets/noaa_flood/__init__.py @@ -0,0 +1,2 @@ +"""NOAA flood dataset utilities.""" + diff --git a/pyhazards/datasets/noaa_flood/inspection.py b/pyhazards/datasets/noaa_flood/inspection.py new file mode 100644 index 00000000..69fe62fc --- /dev/null +++ b/pyhazards/datasets/noaa_flood/inspection.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +from pyhazards.datasets._generic_inspection import run_generic_dataset_inspection + + +def main(argv: list[str] | None = None) -> int: + return run_generic_dataset_inspection( + dataset_name="noaa_flood", + dataset_doc_url="https://www.ncei.noaa.gov/products/storm-events-database", + argv=argv, + ) + + +if __name__ == "__main__": + raise SystemExit(main()) + diff --git a/pyhazards/datasets/outputs/t2m_pred.pdf b/pyhazards/datasets/outputs/t2m_pred.pdf new file mode 100644 index 00000000..88fbb3f0 Binary files /dev/null and 
b/pyhazards/datasets/outputs/t2m_pred.pdf differ diff --git a/pyhazards/datasets/registry.py b/pyhazards/datasets/registry.py new file mode 100644 index 00000000..2b803d43 --- /dev/null +++ b/pyhazards/datasets/registry.py @@ -0,0 +1,21 @@ +from typing import Any, Callable, Dict + +from .base import Dataset + +_DATASET_REGISTRY: Dict[str, Callable[..., Dataset]] = {} + + +def register_dataset(name: str, builder: Callable[..., Dataset]) -> None: + if name in _DATASET_REGISTRY: + raise ValueError(f"Dataset '{name}' already registered.") + _DATASET_REGISTRY[name] = builder + + +def available_datasets(): + return sorted(_DATASET_REGISTRY.keys()) + + +def load_dataset(name: str, **kwargs: Any) -> Dataset: + if name not in _DATASET_REGISTRY: + raise KeyError(f"Dataset '{name}' is not registered. Known: {available_datasets()}") + return _DATASET_REGISTRY[name](**kwargs) diff --git a/pyhazards/datasets/tc/__init__.py b/pyhazards/datasets/tc/__init__.py new file mode 100644 index 00000000..c52d815a --- /dev/null +++ b/pyhazards/datasets/tc/__init__.py @@ -0,0 +1,100 @@ +from __future__ import annotations + +import torch + +from ..base import DataBundle, DataSplit, Dataset, FeatureSpec, LabelSpec + + +class SyntheticTropicalCycloneDataset(Dataset): + """Synthetic storm-history dataset for track/intensity smoke runs.""" + + name = "tc_tracks_synthetic" + + def __init__( + self, + cache_dir: str | None = None, + samples: int = 64, + history: int = 6, + horizon: int = 5, + features: int = 8, + micro: bool = False, + ): + super().__init__(cache_dir=cache_dir) + self.samples = 20 if micro else int(samples) + self.history = int(history) + self.horizon = int(horizon) + self.features = int(features) + + def _load(self) -> DataBundle: + x = torch.randn(self.samples, self.history, self.features, dtype=torch.float32) + last_state = x[:, -1, :3] + deltas = torch.linspace(0.2, 1.0, steps=self.horizon, dtype=torch.float32).view(1, self.horizon, 1) + direction = torch.tensor([0.4, 0.2, 
1.5], dtype=torch.float32).view(1, 1, 3) + y = last_state.unsqueeze(1) + deltas * direction + + train_end = max(1, int(0.7 * self.samples)) + val_end = max(train_end + 1, int(0.85 * self.samples)) + splits = { + "train": DataSplit(x[:train_end], y[:train_end]), + "val": DataSplit(x[train_end:val_end], y[train_end:val_end]), + "test": DataSplit(x[val_end:], y[val_end:]), + } + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + input_dim=self.features, + description="Synthetic storm history with environmental context features.", + extra={"history": self.history, "horizon": self.horizon}, + ), + label_spec=LabelSpec( + num_targets=3, + task_type="regression", + description="Forecast track latitude/longitude and intensity trajectory.", + ), + metadata={ + "dataset": self.name, + "source_dataset": self.name, + "hazard_task": "tc.track_intensity", + }, + ) + + +class IBTrACSTropicalCycloneDataset(SyntheticTropicalCycloneDataset): + """Synthetic-backed adapter for IBTrACS-style storm tracks.""" + + name = "ibtracs_tracks" + + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "IBTrACS", "source_dataset": self.name}) + return bundle + + +class TCBenchAlphaDataset(SyntheticTropicalCycloneDataset): + """Synthetic-backed adapter for TCBench Alpha evaluation runs.""" + + name = "tcbench_alpha" + + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "TCBench Alpha", "source_dataset": self.name}) + return bundle + + +class TropiCycloneNetDataset(SyntheticTropicalCycloneDataset): + """Synthetic-backed adapter for TropiCycloneNet-Dataset style smoke runs.""" + + name = "tropicyclonenet_dataset" + + def _load(self) -> DataBundle: + bundle = super()._load() + bundle.metadata.update({"adapter": "TropiCycloneNet-Dataset", "source_dataset": self.name}) + return bundle + + +__all__ = [ + "IBTrACSTropicalCycloneDataset", + "SyntheticTropicalCycloneDataset", + "TCBenchAlphaDataset", + 
"TropiCycloneNetDataset", +] diff --git a/pyhazards/datasets/transforms/__init__.py b/pyhazards/datasets/transforms/__init__.py new file mode 100644 index 00000000..47c4e991 --- /dev/null +++ b/pyhazards/datasets/transforms/__init__.py @@ -0,0 +1,12 @@ +""" +Reusable transforms for preprocessing hazard datasets. +Currently placeholders; implement normalization, index computation, temporal windowing, etc. +""" + +from typing import Callable + +from ..base import DataBundle + +TransformFn = Callable[[DataBundle], DataBundle] + +__all__ = ["TransformFn"] diff --git a/pyhazards/datasets/wfigs/__init__.py b/pyhazards/datasets/wfigs/__init__.py new file mode 100644 index 00000000..52317088 --- /dev/null +++ b/pyhazards/datasets/wfigs/__init__.py @@ -0,0 +1,2 @@ +"""WFIGS dataset utilities.""" + diff --git a/pyhazards/datasets/wfigs/inspection.py b/pyhazards/datasets/wfigs/inspection.py new file mode 100644 index 00000000..e09f650f --- /dev/null +++ b/pyhazards/datasets/wfigs/inspection.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +from pyhazards.datasets._generic_inspection import run_generic_dataset_inspection + + +def main(argv: list[str] | None = None) -> int: + return run_generic_dataset_inspection( + dataset_name="wfigs", + dataset_doc_url="https://data-nifc.opendata.arcgis.com/", + argv=argv, + ) + + +if __name__ == "__main__": + raise SystemExit(main()) + diff --git a/pyhazards/datasets/wildfire/__init__.py b/pyhazards/datasets/wildfire/__init__.py new file mode 100644 index 00000000..dc28b6d9 --- /dev/null +++ b/pyhazards/datasets/wildfire/__init__.py @@ -0,0 +1,160 @@ +from __future__ import annotations + +import torch + +from ..base import DataBundle, DataSplit, Dataset, FeatureSpec, LabelSpec +from .real_track_o_2024 import ( + TrackOSplitConfig, + WildfireTrackO2024RasterDataset, + WildfireTrackO2024TabularDataset, + WildfireTrackO2024TemporalDataset, +) + + +class SyntheticWildfireSpreadDataset(Dataset): + """Synthetic raster dataset for 
wildfire spread smoke runs.""" + + name = "wildfire_spread_synthetic" + + def __init__( + self, + cache_dir: str | None = None, + samples: int = 64, + channels: int = 12, + height: int = 32, + width: int = 32, + micro: bool = False, + ): + super().__init__(cache_dir=cache_dir) + self.samples = 16 if micro else int(samples) + self.channels = int(channels) + self.height = int(height) + self.width = int(width) + + def _load(self) -> DataBundle: + x = torch.randn(self.samples, self.channels, self.height, self.width, dtype=torch.float32) + y = torch.zeros(self.samples, 1, self.height, self.width, dtype=torch.float32) + rows = torch.arange(self.height).view(1, self.height, 1) + cols = torch.arange(self.width).view(1, 1, self.width) + + for idx in range(self.samples): + center_r = (idx * 3) % self.height + center_c = (idx * 5) % self.width + radius = 4 + (idx % 5) + mask = ((rows - center_r).float().pow(2) + (cols - center_c).float().pow(2)) <= radius**2 + y[idx, 0] = mask.float() + x[idx, 0] = x[idx, 0] + 2.5 * mask.float() + + train_end = max(1, int(0.7 * self.samples)) + val_end = max(train_end + 1, int(0.85 * self.samples)) + splits = { + "train": DataSplit(x[:train_end], y[:train_end]), + "val": DataSplit(x[train_end:val_end], y[train_end:val_end]), + "test": DataSplit(x[val_end:], y[val_end:]), + } + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + channels=self.channels, + description="Synthetic raster weather and fuel covariates for wildfire spread.", + ), + label_spec=LabelSpec( + num_targets=1, + task_type="segmentation", + description="Binary spread mask for the next forecast horizon.", + ), + metadata={ + "dataset": self.name, + "source_dataset": self.name, + "hazard_task": "wildfire.spread", + }, + ) + + +class SyntheticWildfireSpreadTemporalDataset(Dataset): + """Synthetic temporal wildfire spread dataset for sequence-based spread baselines.""" + + name = "wildfire_spread_temporal_synthetic" + + def __init__( + self, + cache_dir: str | None 
= None, + samples: int = 48, + history: int = 4, + channels: int = 6, + height: int = 16, + width: int = 16, + micro: bool = False, + ): + super().__init__(cache_dir=cache_dir) + self.samples = 12 if micro else int(samples) + self.history = int(history) + self.channels = int(channels) + self.height = int(height) + self.width = int(width) + + def _load(self) -> DataBundle: + x = torch.randn( + self.samples, + self.history, + self.channels, + self.height, + self.width, + dtype=torch.float32, + ) + y = torch.zeros(self.samples, 1, self.height, self.width, dtype=torch.float32) + rows = torch.arange(self.height).view(1, self.height, 1) + cols = torch.arange(self.width).view(1, 1, self.width) + + for idx in range(self.samples): + center_r = (idx * 2 + 3) % self.height + center_c = (idx * 3 + 5) % self.width + radius = 3 + (idx % 4) + final_mask = ( + ((rows - center_r).float().pow(2) + (cols - center_c).float().pow(2)) + <= radius**2 + ).float() + y[idx, 0] = final_mask + for step in range(self.history): + inner_radius = max(1, radius - (self.history - step - 1)) + history_mask = ( + ((rows - center_r).float().pow(2) + (cols - center_c).float().pow(2)) + <= inner_radius**2 + ).float() + x[idx, step, 0] = x[idx, step, 0] + history_mask + + train_end = max(1, int(0.7 * self.samples)) + val_end = max(train_end + 1, int(0.85 * self.samples)) + splits = { + "train": DataSplit(x[:train_end], y[:train_end]), + "val": DataSplit(x[train_end:val_end], y[train_end:val_end]), + "test": DataSplit(x[val_end:], y[val_end:]), + } + return DataBundle( + splits=splits, + feature_spec=FeatureSpec( + channels=self.channels, + description="Synthetic temporal wildfire spread covariates over forecast history windows.", + extra={"history": self.history}, + ), + label_spec=LabelSpec( + num_targets=1, + task_type="segmentation", + description="Binary spread mask for the next forecast horizon.", + ), + metadata={ + "dataset": self.name, + "source_dataset": self.name, + "hazard_task": 
"wildfire.spread", + }, + ) + + +__all__ = [ + "SyntheticWildfireSpreadDataset", + "SyntheticWildfireSpreadTemporalDataset", + "TrackOSplitConfig", + "WildfireTrackO2024RasterDataset", + "WildfireTrackO2024TabularDataset", + "WildfireTrackO2024TemporalDataset", +] diff --git a/pyhazards/datasets/wildfire/real_track_o_2024.py b/pyhazards/datasets/wildfire/real_track_o_2024.py new file mode 100644 index 00000000..b73444d6 --- /dev/null +++ b/pyhazards/datasets/wildfire/real_track_o_2024.py @@ -0,0 +1,451 @@ +from __future__ import annotations + +import json +import math +from dataclasses import dataclass +from pathlib import Path +from typing import Sequence + +import numpy as np +import torch + +from ..base import DataBundle, DataSplit, Dataset, FeatureSpec, LabelSpec + + +def _read_lines(path: Path) -> list[str]: + if not path.exists(): + raise FileNotFoundError(f"Expected split file not found: {path}") + return [line.strip() for line in path.read_text(encoding="utf-8").splitlines() if line.strip()] + + +def _load_weather_vars(cache_root: Path) -> list[str]: + payload = json.loads((cache_root / "metadata" / "vars.json").read_text(encoding="utf-8")) + return list(payload["weather_vars"]) + + +def _load_lat_lon(cache_root: Path) -> tuple[np.ndarray, np.ndarray]: + lat = np.load(cache_root / "metadata" / "lat.npy") + lon = np.load(cache_root / "metadata" / "lon.npy") + return np.asarray(lat, dtype=np.float32), np.asarray(lon, dtype=np.float32) + + +def _subset_dates(dates: Sequence[str], limit: int | None) -> list[str]: + if limit is None or int(limit) <= 0: + return list(dates) + return list(dates[: int(limit)]) + + +def _crop_hw_to_multiple(arr: np.ndarray, multiple: int = 4) -> np.ndarray: + if arr.ndim == 3: + _, h, w = arr.shape + h2 = h - (h % multiple) + w2 = w - (w % multiple) + return arr[:, : max(h2, multiple), : max(w2, multiple)] + if arr.ndim == 2: + h, w = arr.shape + h2 = h - (h % multiple) + w2 = w - (w % multiple) + return arr[: max(h2, multiple), : 
max(w2, multiple)] + raise ValueError(f"Unsupported array rank for cropping: {arr.ndim}") + + +def _spatial_downsample(arr: np.ndarray, factor: int) -> np.ndarray: + if factor <= 1: + out = np.asarray(arr, dtype=np.float32) + elif arr.ndim == 3: + out = np.asarray(arr[:, ::factor, ::factor], dtype=np.float32) + elif arr.ndim == 2: + out = np.asarray(arr[::factor, ::factor], dtype=np.float32) + else: + raise ValueError(f"Unsupported array rank for downsampling: {arr.ndim}") + return np.asarray(_crop_hw_to_multiple(out, multiple=4), dtype=np.float32) + + +def _compute_channel_stats(cache_root: Path, dates: Sequence[str], downsample_factor: int) -> tuple[np.ndarray, np.ndarray]: + weather_dir = cache_root / "met" + example = _spatial_downsample(np.load(weather_dir / f"{dates[0]}.npy"), downsample_factor) + channels = int(example.shape[0]) + sums = np.zeros((channels,), dtype=np.float64) + sums_sq = np.zeros((channels,), dtype=np.float64) + count = 0 + for date in dates: + arr = _spatial_downsample(np.load(weather_dir / f"{date}.npy"), downsample_factor) + flat = arr.reshape(channels, -1).astype(np.float64, copy=False) + sums += flat.sum(axis=1) + sums_sq += np.square(flat).sum(axis=1) + count += flat.shape[1] + mean = sums / max(count, 1) + var = np.maximum(sums_sq / max(count, 1) - np.square(mean), 1e-12) + std = np.sqrt(var) + return mean.astype(np.float32), std.astype(np.float32) + + +def _normalize_weather(arr: np.ndarray, mean: np.ndarray, std: np.ndarray) -> np.ndarray: + return ((arr - mean[:, None, None]) / std[:, None, None]).astype(np.float32) + + +def _load_static_fuel(cache_root: Path, downsample_factor: int) -> tuple[np.ndarray, np.ndarray] | tuple[None, None]: + fuel_path = cache_root / "static" / "fuel.npy" + fuel_mask_path = cache_root / "static" / "fuel_mask.npy" + if not fuel_path.exists(): + return None, None + fuel = np.load(fuel_path) + fuel_mask = np.load(fuel_mask_path) if fuel_mask_path.exists() else (fuel > 0).astype(np.uint8) + fuel = 
def _date_to_cyclical_features(date_text: str) -> tuple[float, float]:
    """Map an ISO ``YYYY-MM-DD`` string to (sin, cos) seasonal features.

    NOTE(review): the day-of-year is the approximation ``(month - 1) * 31 + day``
    rather than the true calendar ordinal, so the encoding is slightly
    non-uniform across months. Kept as-is to preserve the feature values
    consumed by existing benchmark runs.
    """
    month = int(date_text[5:7])
    day = int(date_text[8:10])
    day_of_year = (month - 1) * 31 + day
    angle = 2.0 * math.pi * (float(day_of_year) / 366.0)
    return float(math.sin(angle)), float(math.cos(angle))


@dataclass(frozen=True)
class TrackOSplitConfig:
    """Optional per-split caps on how many cached dates are loaded."""

    train_limit_days: int | None = None
    val_limit_days: int | None = None
    test_limit_days: int | None = None


class _WildfireTrackOBase(Dataset):
    """Shared cache resolution and split handling for the Track-O 2024 datasets."""

    name = "wildfire_track_o_2024_base"

    # Environment variable that overrides the built-in cache location.
    CACHE_ENV_VAR = "PYHAZARDS_WILDFIRE_CACHE"
    # Historical default kept only for backward compatibility with existing runs.
    _LEGACY_DEFAULT_CACHE = "/home/runyang/my-copy/data_cache/wildfire_2024_v1"

    def __init__(
        self,
        cache_dir: str | None = None,
        *,
        downsample_factor: int = 1,
        train_limit_days: int | None = None,
        val_limit_days: int | None = None,
        test_limit_days: int | None = None,
    ):
        super().__init__(cache_dir=cache_dir)
        import os  # local import keeps this fix self-contained in the class

        # BUG FIX: the fallback used to be only a hard-coded developer-machine
        # absolute path, which breaks for every other user. Prefer an
        # environment override; the legacy path remains the last resort so
        # behavior is unchanged where the variable is unset.
        default_root = os.environ.get(self.CACHE_ENV_VAR, self._LEGACY_DEFAULT_CACHE)
        self.cache_root = Path(cache_dir or default_root)
        # Clamp to >= 1 so a zero/negative factor cannot produce empty strides.
        self.downsample_factor = max(1, int(downsample_factor))
        self.split_cfg = TrackOSplitConfig(
            train_limit_days=train_limit_days,
            val_limit_days=val_limit_days,
            test_limit_days=test_limit_days,
        )

    def _load_split_dates(self) -> dict[str, list[str]]:
        """Read train/val/test date lists from ``<cache_root>/splits``.

        Raises FileNotFoundError (via ``_read_lines``) when a split file is missing.
        """
        split_root = self.cache_root / "splits"
        return {
            "train": _subset_dates(_read_lines(split_root / "train_dates.txt"), self.split_cfg.train_limit_days),
            "val": _subset_dates(_read_lines(split_root / "val_dates.txt"), self.split_cfg.val_limit_days),
            "test": _subset_dates(_read_lines(split_root / "test_dates.txt"), self.split_cfg.test_limit_days),
        }
class WildfireTrackO2024RasterDataset(_WildfireTrackOBase):
    """Single-frame raster variant: one (C, H, W) covariate stack per cached day."""

    name = "wildfire_track_o_2024_raster"

    def __init__(
        self,
        cache_dir: str | None = None,
        *,
        downsample_factor: int = 4,
        train_limit_days: int | None = None,
        val_limit_days: int | None = None,
        test_limit_days: int | None = None,
    ):
        # All knobs are forwarded to the shared base, which resolves the cache
        # root, the clamped downsample factor, and per-split date limits.
        super().__init__(
            cache_dir=cache_dir,
            downsample_factor=downsample_factor,
            train_limit_days=train_limit_days,
            val_limit_days=val_limit_days,
            test_limit_days=test_limit_days,
        )

    def _load(self) -> DataBundle:
        """Materialize train/val/test tensors from the on-disk cache.

        Normalization statistics are fit on the train split only and applied
        to every split, so there is no test-time leakage.
        """
        split_dates = self._load_split_dates()
        weather_vars = _load_weather_vars(self.cache_root)
        # Per-channel mean/std computed from the train dates only.
        mean, std = _compute_channel_stats(self.cache_root, split_dates["train"], self.downsample_factor)
        fuel, fuel_mask = _load_static_fuel(self.cache_root, self.downsample_factor)

        splits: dict[str, DataSplit] = {}
        for split_name, dates in split_dates.items():
            x_rows: list[np.ndarray] = []
            y_rows: list[np.ndarray] = []
            for date in dates:
                # Cache layout: met/<date>.npy holds the (C, H, W) weather stack.
                x = _spatial_downsample(np.load(self.cache_root / "met" / f"{date}.npy"), self.downsample_factor)
                x = _normalize_weather(x, mean, std)
                if fuel is not None and fuel_mask is not None:
                    # Static fuel layers are appended as two extra channels.
                    x = np.concatenate([x, fuel[None, :, :], fuel_mask[None, :, :]], axis=0)
                x_rows.append(x.astype(np.float32))
                # labels/<date>.npy holds the per-day occurrence grid; add a
                # leading channel axis so targets are (1, H, W).
                y = _spatial_downsample(np.load(self.cache_root / "labels" / f"{date}.npy"), self.downsample_factor)
                y_rows.append(y[None, :, :].astype(np.float32))

            x_np = np.stack(x_rows, axis=0).astype(np.float32)
            y_np = np.stack(y_rows, axis=0).astype(np.float32)
            splits[split_name] = DataSplit(
                inputs=torch.from_numpy(x_np),
                targets=torch.from_numpy(y_np),
                metadata={"dates": list(dates)},
            )

        # Spec dims are read back from the realized train tensors so they
        # always reflect the downsampled/cropped geometry.
        sample_shape = splits["train"].inputs.shape
        return DataBundle(
            splits=splits,
            feature_spec=FeatureSpec(
                channels=int(sample_shape[1]),
                description="Daily gridded wildfire covariates from the 2024 Prithvi-WxC weather cache.",
                extra={
                    "height": int(sample_shape[2]),
                    "width": int(sample_shape[3]),
                    "downsample_factor": self.downsample_factor,
                    "weather_vars": weather_vars,
                    "static_feature_names": ["fuel_class_scaled", "fuel_valid_mask"] if fuel is not None else [],
                },
            ),
            label_spec=LabelSpec(
                num_targets=1,
                task_type="segmentation",
                description="Binary daily wildfire occurrence grid aligned to the benchmark cache.",
            ),
            metadata={
                "dataset": self.name,
                "cache_root": str(self.cache_root),
                "has_static_fuel": fuel is not None,
                # Recorded so downstream consumers can reuse/undo normalization.
                "normalization": {
                    "mean": mean.tolist(),
                    "std": std.tolist(),
                    "fit_split": "train",
                },
                "splits": {k: len(v) for k, v in split_dates.items()},
            },
        )


class WildfireTrackO2024TemporalDataset(_WildfireTrackOBase):
    """Sequence variant: sliding windows of ``history`` consecutive cached days."""

    name = "wildfire_track_o_2024_temporal"

    def __init__(
        self,
        cache_dir: str | None = None,
        *,
        history: int = 6,
        downsample_factor: int = 8,
        train_limit_days: int | None = None,
        val_limit_days: int | None = None,
        test_limit_days: int | None = None,
    ):
        super().__init__(
            cache_dir=cache_dir,
            downsample_factor=downsample_factor,
            train_limit_days=train_limit_days,
            val_limit_days=val_limit_days,
            test_limit_days=test_limit_days,
        )
        # Number of consecutive days in each input window.
        self.history = int(history)

    def _load(self) -> DataBundle:
        """Build (N, T, C, H, W) windows; the target is the final day's label grid."""
        split_dates = self._load_split_dates()
        weather_vars = _load_weather_vars(self.cache_root)
        mean, std = _compute_channel_stats(self.cache_root, split_dates["train"], self.downsample_factor)
        fuel, fuel_mask = _load_static_fuel(self.cache_root, self.downsample_factor)
        static_channels = None
        if fuel is not None and fuel_mask is not None:
            # Pre-stacked so the per-frame concatenation below is a single op.
            static_channels = np.stack([fuel, fuel_mask], axis=0).astype(np.float32)

        splits: dict[str, DataSplit] = {}
        for split_name, dates in split_dates.items():
            x_rows: list[np.ndarray] = []
            y_rows: list[np.ndarray] = []
            used_dates: list[str] = []

            # Each sample ends at dates[idx] and looks back `history` days,
            # so the first history-1 dates of a split are consumed as context only.
            for idx in range(self.history - 1, len(dates)):
                seq_dates = dates[idx - self.history + 1 : idx + 1]
                seq_arrays = []
                for date in seq_dates:
                    x = _spatial_downsample(np.load(self.cache_root / "met" / f"{date}.npy"), self.downsample_factor)
                    x = _normalize_weather(x, mean, std)
                    if static_channels is not None:
                        # Static layers are repeated on every frame in the window.
                        x = np.concatenate([x, static_channels], axis=0)
                    seq_arrays.append(x.astype(np.float32))
                x_rows.append(np.stack(seq_arrays, axis=0).astype(np.float32))
                target = _spatial_downsample(np.load(self.cache_root / "labels" / f"{dates[idx]}.npy"), self.downsample_factor)
                y_rows.append(target[None, :, :].astype(np.float32))
                used_dates.append(dates[idx])

            if not x_rows:
                raise ValueError(
                    f"Temporal split '{split_name}' has no usable samples. Need at least history={self.history} dates, got {len(dates)}."
                )

            x_np = np.stack(x_rows, axis=0).astype(np.float32)
            y_np = np.stack(y_rows, axis=0).astype(np.float32)
            splits[split_name] = DataSplit(
                inputs=torch.from_numpy(x_np),
                targets=torch.from_numpy(y_np),
                metadata={"dates": used_dates, "history": self.history},
            )

        sample_shape = splits["train"].inputs.shape
        return DataBundle(
            splits=splits,
            feature_spec=FeatureSpec(
                channels=int(sample_shape[2]),
                description="Temporal weather histories for wildfire occurrence prediction.",
                extra={
                    "history": self.history,
                    "height": int(sample_shape[3]),
                    "width": int(sample_shape[4]),
                    "downsample_factor": self.downsample_factor,
                    "weather_vars": weather_vars,
                    "static_feature_names": ["fuel_class_scaled", "fuel_valid_mask"] if static_channels is not None else [],
                },
            ),
            label_spec=LabelSpec(
                num_targets=1,
                task_type="segmentation",
                description="Binary daily wildfire occurrence grid for the last frame in each history window.",
            ),
            metadata={
                "dataset": self.name,
                "cache_root": str(self.cache_root),
                "has_static_fuel": fuel is not None,
                "normalization": {
                    "mean": mean.tolist(),
                    "std": std.tolist(),
                    "fit_split": "train",
                },
                # Window counts differ from raw date counts because of the
                # history-length warm-up at the start of each split.
                "splits": {k: int(v.inputs.shape[0]) for k, v in splits.items()},
            },
        )
class WildfireTrackO2024TabularDataset(_WildfireTrackOBase):
    """Tabular variant: one row per (grid cell, day) for classical baselines."""

    name = "wildfire_track_o_2024_tabular"

    def __init__(
        self,
        cache_dir: str | None = None,
        *,
        downsample_factor: int = 8,
        include_coords: bool = True,
        include_day_of_year: bool = True,
        train_limit_days: int | None = None,
        val_limit_days: int | None = None,
        test_limit_days: int | None = None,
    ):
        super().__init__(
            cache_dir=cache_dir,
            downsample_factor=downsample_factor,
            train_limit_days=train_limit_days,
            val_limit_days=val_limit_days,
            test_limit_days=test_limit_days,
        )
        # Optional extra feature groups appended after the weather columns.
        self.include_coords = bool(include_coords)
        self.include_day_of_year = bool(include_day_of_year)

    def _load(self) -> DataBundle:
        """Flatten daily rasters into per-cell feature rows and 0/1 labels."""
        split_dates = self._load_split_dates()
        weather_vars = _load_weather_vars(self.cache_root)
        lat, lon = _load_lat_lon(self.cache_root)
        mean, std = _compute_channel_stats(self.cache_root, split_dates["train"], self.downsample_factor)
        fuel, fuel_mask = _load_static_fuel(self.cache_root, self.downsample_factor)

        # Load one train raster to discover the downsampled/cropped grid shape,
        # then truncate the lat/lon axes to match it exactly.
        sample_met = _spatial_downsample(np.load(self.cache_root / "met" / f"{split_dates['train'][0]}.npy"), self.downsample_factor)
        lat = lat[:: self.downsample_factor][: sample_met.shape[1]]
        lon = lon[:: self.downsample_factor][: sample_met.shape[2]]
        lat_grid, lon_grid = np.meshgrid(lat, lon, indexing="ij")
        # (H*W, 2) standardized coordinate columns, reused for every date.
        coord_block = np.stack([lat_grid, lon_grid], axis=-1).reshape(-1, 2).astype(np.float32)
        coord_mean = coord_block.mean(axis=0, keepdims=True)
        coord_std = coord_block.std(axis=0, keepdims=True) + 1e-6
        coord_block = (coord_block - coord_mean) / coord_std

        splits: dict[str, DataSplit] = {}
        for split_name, dates in split_dates.items():
            x_rows: list[np.ndarray] = []
            y_rows: list[np.ndarray] = []
            row_dates: list[str] = []

            for date in dates:
                met = _spatial_downsample(np.load(self.cache_root / "met" / f"{date}.npy"), self.downsample_factor)
                met = _normalize_weather(met, mean, std)
                # (C, H, W) -> (H*W, C): one row per grid cell.
                features = met.reshape(met.shape[0], -1).T.astype(np.float32)

                extras: list[np.ndarray] = []
                if self.include_coords:
                    extras.append(coord_block)
                if self.include_day_of_year:
                    # One (sin, cos) pair per date, broadcast to every cell row.
                    sin_doy, cos_doy = _date_to_cyclical_features(date)
                    extras.append(
                        np.repeat(
                            np.asarray([[sin_doy, cos_doy]], dtype=np.float32),
                            repeats=features.shape[0],
                            axis=0,
                        )
                    )
                if extras:
                    features = np.concatenate([features, *extras], axis=1)
                if fuel is not None and fuel_mask is not None:
                    fuel_cols = fuel.reshape(-1, 1).astype(np.float32)
                    fuel_mask_cols = fuel_mask.reshape(-1, 1).astype(np.float32)
                    features = np.concatenate([features, fuel_cols, fuel_mask_cols], axis=1)

                label = _spatial_downsample(np.load(self.cache_root / "labels" / f"{date}.npy"), self.downsample_factor)
                labels = label.reshape(-1).astype(np.float32)

                x_rows.append(features)
                y_rows.append(labels)
                # Track the originating date for every emitted row.
                row_dates.extend([date] * features.shape[0])

            x_np = np.concatenate(x_rows, axis=0).astype(np.float32)
            y_np = np.concatenate(y_rows, axis=0).astype(np.float32)
            splits[split_name] = DataSplit(
                inputs=torch.from_numpy(x_np),
                targets=torch.from_numpy(y_np),
                metadata={"row_dates": row_dates},
            )

        # Column names mirror the concatenation order used above.
        feature_names = list(weather_vars)
        if self.include_coords:
            feature_names.extend(["lat", "lon"])
        if self.include_day_of_year:
            feature_names.extend(["sin_doy", "cos_doy"])
        if fuel is not None and fuel_mask is not None:
            feature_names.extend(["fuel_class_scaled", "fuel_valid_mask"])

        return DataBundle(
            splits=splits,
            feature_spec=FeatureSpec(
                input_dim=int(splits["train"].inputs.shape[1]),
                description="Tabularized wildfire occurrence features from daily gridded cache values.",
                extra={
                    "downsample_factor": self.downsample_factor,
                    "feature_names": feature_names,
                },
            ),
            label_spec=LabelSpec(
                num_targets=1,
                task_type="classification",
                description="Binary wildfire occurrence for each grid cell and day in tabular form.",
            ),
            metadata={
                "dataset": self.name,
                "cache_root": str(self.cache_root),
                "has_static_fuel": fuel is not None,
                "normalization": {
                    "weather_mean": mean.tolist(),
                    "weather_std": std.tolist(),
                    "fit_split": "train",
                },
                "splits": {k: int(v.inputs.shape[0]) for k, v in splits.items()},
            },
        )


__all__ = [
    "TrackOSplitConfig",
    "WildfireTrackO2024RasterDataset",
    "WildfireTrackO2024TemporalDataset",
    "WildfireTrackO2024TabularDataset",
]
"TrackOSplitConfig", + "WildfireTrackO2024RasterDataset", + "WildfireTrackO2024TemporalDataset", + "WildfireTrackO2024TabularDataset", +] diff --git a/pyhazards/engine/__init__.py b/pyhazards/engine/__init__.py new file mode 100644 index 00000000..e4bb81aa --- /dev/null +++ b/pyhazards/engine/__init__.py @@ -0,0 +1,12 @@ +from .trainer import Trainer +from .distributed import DistributedConfig, select_strategy +from .inference import SlidingWindowInference +from .runner import BenchmarkRunner + +__all__ = [ + "BenchmarkRunner", + "Trainer", + "DistributedConfig", + "select_strategy", + "SlidingWindowInference", +] diff --git a/pyhazards/engine/distributed.py b/pyhazards/engine/distributed.py new file mode 100644 index 00000000..12a76a23 --- /dev/null +++ b/pyhazards/engine/distributed.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Literal + +import torch + +Strategy = Literal["auto", "ddp", "dp", "none"] + + +@dataclass +class DistributedConfig: + strategy: Strategy = "auto" + devices: int | None = None + + +def select_strategy(prefer: Strategy = "auto") -> Strategy: + if prefer == "auto": + if torch.cuda.is_available() and torch.cuda.device_count() > 1: + return "ddp" + if torch.cuda.is_available(): + return "none" + return "none" + return prefer diff --git a/pyhazards/engine/inference.py b/pyhazards/engine/inference.py new file mode 100644 index 00000000..210c0cbd --- /dev/null +++ b/pyhazards/engine/inference.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +from typing import Any, Callable, Iterable, List + +import torch + + +class SlidingWindowInference: + """ + Placeholder for sliding-window inference over large rasters or grids. + Implement windowing logic and stitching as needed. 
+ """ + + def __init__(self, model: torch.nn.Module, window_fn: Callable[..., Iterable[Any]] | None = None): + self.model = model + self.window_fn = window_fn + + def __call__(self, inputs: Any) -> List[torch.Tensor]: + if self.window_fn is None: + raise NotImplementedError("Provide a window_fn to generate windows from inputs.") + outputs: List[torch.Tensor] = [] + self.model.eval() + with torch.no_grad(): + for window in self.window_fn(inputs): + outputs.append(self.model(window)) + return outputs diff --git a/pyhazards/engine/runner.py b/pyhazards/engine/runner.py new file mode 100644 index 00000000..0e90b72a --- /dev/null +++ b/pyhazards/engine/runner.py @@ -0,0 +1,49 @@ +from __future__ import annotations + +from typing import Optional, Union + +import torch.nn as nn + +from ..benchmarks import Benchmark, BenchmarkRunSummary, run_benchmark +from ..configs import ExperimentConfig +from ..datasets import load_dataset +from ..datasets.base import DataBundle +from ..models import build_model + + +class BenchmarkRunner: + """High-level runner that resolves datasets/models and executes a benchmark.""" + + def __init__(self, benchmark: Optional[Union[str, Benchmark]] = None): + self.benchmark = benchmark + + def run( + self, + experiment: ExperimentConfig, + model: Optional[nn.Module] = None, + data: Optional[DataBundle] = None, + output_dir: Optional[str] = None, + ) -> BenchmarkRunSummary: + built_model = model or self._build_model(experiment) + bundle = data or self._load_data(experiment) + benchmark = self.benchmark or experiment.benchmark.name + return run_benchmark( + benchmark=benchmark, + model=built_model, + data=bundle, + config=experiment, + output_dir=output_dir, + ) + + def _build_model(self, experiment: ExperimentConfig) -> nn.Module: + return build_model( + name=experiment.model.name, + task=experiment.model.task, + **experiment.model.params, + ) + + def _load_data(self, experiment: ExperimentConfig) -> DataBundle: + return load_dataset( + 
class Trainer:
    """
    Lightweight training abstraction with a familiar API:
    fit -> evaluate -> predict.
    """

    def __init__(
        self,
        model: nn.Module,
        device: Optional[torch.device | str] = None,
        metrics: Optional[List[MetricBase]] = None,
        strategy: str = "auto",
        mixed_precision: bool = False,
    ):
        # Resolve the device once; every later call moves tensors explicitly.
        self.model = model
        self.device = torch.device(device) if device else auto_device()
        self.metrics = metrics or []
        self.strategy = select_strategy(strategy)
        self.mixed_precision = mixed_precision
        self.model.to(self.device)

    def fit(
        self,
        data: DataBundle,
        train_split: str = "train",
        val_split: Optional[str] = None,
        max_epochs: int = 1,
        optimizer: Optional[torch.optim.Optimizer] = None,
        loss_fn: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
        batch_size: int = 32,
        num_workers: int = 0,
        collate_fn: Optional[Callable[[List[Any]], Any]] = None,
    ) -> None:
        """
        Minimal fit loop that works for tensor-based splits.
        Extend/replace with custom DataLoaders for complex data.

        Raises:
            ValueError: if ``optimizer`` or ``loss_fn`` is missing.
        """
        if optimizer is None or loss_fn is None:
            raise ValueError("optimizer and loss_fn must be provided.")

        train_split_data = data.get_split(train_split)
        train_loader = self._make_loader(train_split_data.inputs, train_split_data.targets, batch_size, num_workers, collate_fn)
        # AMP is only meaningful on CUDA; elsewhere the scaler stays disabled.
        amp_enabled = self.mixed_precision and self.device.type == "cuda"
        try:
            # torch >= 2.3 device-generic AMP API.
            scaler = torch.amp.GradScaler("cuda", enabled=amp_enabled)
            use_new_amp = True
        except (AttributeError, TypeError):
            # Fallback for older torch releases.
            scaler = torch.cuda.amp.GradScaler(enabled=amp_enabled)
            use_new_amp = False

        for _ in range(max_epochs):
            # BUG FIX: re-assert train mode at every epoch start. The
            # validation pass below calls evaluate(), which switches the model
            # to eval() and previously left it there for subsequent epochs,
            # silently disabling dropout/batch-norm updates.
            self.model.train()
            for x, y in train_loader:
                x = self._to_device(x)
                y = self._to_device(y)
                optimizer.zero_grad()
                if use_new_amp:
                    with torch.amp.autocast("cuda", enabled=scaler.is_enabled()):
                        out = self.model(x)
                        loss = loss_fn(out, y)
                else:
                    with torch.cuda.amp.autocast(enabled=scaler.is_enabled()):
                        out = self.model(x)
                        loss = loss_fn(out, y)
                # With AMP disabled these are pass-throughs to backward()/step().
                scaler.scale(loss).backward()
                scaler.step(optimizer)
                scaler.update()

            if val_split:
                # Per-epoch validation; results are computed via self.metrics.
                self.evaluate(data, split=val_split)

    def evaluate(
        self,
        data: DataBundle,
        split: str = "test",
        batch_size: int = 64,
        num_workers: int = 0,
        collate_fn: Optional[Callable[[List[Any]], Any]] = None,
    ) -> Dict[str, float]:
        """Run inference over *split* and return merged metric results."""
        split_data = data.get_split(split)
        loader = self._make_loader(split_data.inputs, split_data.targets, batch_size, num_workers, collate_fn, shuffle=False)
        self.model.eval()
        for metric in self.metrics:
            metric.reset()
        with torch.no_grad():
            for x, y in loader:
                x = self._to_device(x)
                y = self._to_device(y)
                preds = self.model(x)
                for metric in self.metrics:
                    metric.update(preds, y)
        results: Dict[str, float] = {}
        for metric in self.metrics:
            results.update(metric.compute())
        return results

    def predict(
        self,
        data: DataBundle,
        split: str = "test",
        batch_size: int = 64,
        num_workers: int = 0,
        collate_fn: Optional[Callable[[List[Any]], Any]] = None,
    ) -> List[torch.Tensor]:
        """Return per-batch model outputs for *split*, moved to CPU."""
        split_data = data.get_split(split)
        loader = self._make_loader(split_data.inputs, split_data.targets, batch_size, num_workers, collate_fn, shuffle=False)
        self.model.eval()
        outputs: List[torch.Tensor] = []
        with torch.no_grad():
            for x, _ in loader:
                x = self._to_device(x)
                preds = self.model(x)
                outputs.append(preds.cpu())
        return outputs

    def save_checkpoint(self, path: str) -> None:
        """Persist the model weights (state dict only, no optimizer state)."""
        torch.save({"model_state": self.model.state_dict()}, path)

    def _make_loader(
        self,
        inputs: Any,
        targets: Any,
        batch_size: int,
        num_workers: int,
        collate_fn: Optional[Callable[[List[Any]], Any]],
        shuffle: bool = True,
    ) -> Iterable:
        """Build a DataLoader from tensor pairs or a ready-made torch Dataset."""
        # Accept torch tensors
        if isinstance(inputs, torch.Tensor) and isinstance(targets, torch.Tensor):
            dataset = TensorDataset(inputs, targets)
            return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, collate_fn=collate_fn)
        # Accept torch.utils.data.Dataset directly (for complex dict/graph batches)
        if isinstance(inputs, Dataset):
            return DataLoader(inputs, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, collate_fn=collate_fn)
        raise TypeError("Trainer only supports tensor pairs or torch Dataset inputs. Wrap custom logic in a Dataset.")

    def _to_device(self, obj: Any) -> Any:
        """Recursively move tensors (inside lists/tuples/dicts) to self.device."""
        if obj is None:
            return None
        if isinstance(obj, torch.Tensor):
            return obj.to(self.device)
        if isinstance(obj, (list, tuple)):
            return type(obj)(self._to_device(o) for o in obj)
        if isinstance(obj, dict):
            return {k: self._to_device(v) for k, v in obj.items()}
        # Non-tensor leaves (str, int, ...) pass through untouched.
        return obj
+RAI_FIRE_URL: str = "https://rai-fire.com/" + + +def _can_launch_browser() -> bool: + if sys.platform.startswith("linux"): + return bool(os.environ.get("DISPLAY") or os.environ.get("WAYLAND_DISPLAY")) + return True + + +def open_interactive_map(open_browser: bool = True) -> str: + """Open the RAI Fire map in the user's browser when possible. + + Args: + open_browser: Whether to attempt to open the default browser. + + Returns: + The canonical RAI Fire URL. + """ + + if open_browser and _can_launch_browser(): + try: + webbrowser.open(RAI_FIRE_URL, new=2) + except Exception: + # Headless and restricted environments should still get the URL. + pass + return RAI_FIRE_URL + + +__all__ = ["RAI_FIRE_URL", "open_interactive_map"] diff --git a/pyhazards/metrics/__init__.py b/pyhazards/metrics/__init__.py new file mode 100644 index 00000000..93ba370a --- /dev/null +++ b/pyhazards/metrics/__init__.py @@ -0,0 +1,84 @@ +from abc import ABC, abstractmethod +from typing import Dict, List, Optional + +import torch +import torch.nn.functional as F + + +class MetricBase(ABC): + @abstractmethod + def update(self, preds: torch.Tensor, targets: torch.Tensor) -> None: + ... + + @abstractmethod + def compute(self) -> Dict[str, float]: + ... + + @abstractmethod + def reset(self) -> None: + ... 
+ + +class ClassificationMetrics(MetricBase): + def __init__(self): + self.reset() + + def reset(self) -> None: + self._preds: List[torch.Tensor] = [] + self._targets: List[torch.Tensor] = [] + + def update(self, preds: torch.Tensor, targets: torch.Tensor) -> None: + self._preds.append(preds.detach().cpu()) + self._targets.append(targets.detach().cpu()) + + def compute(self) -> Dict[str, float]: + preds = torch.cat(self._preds) + targets = torch.cat(self._targets) + pred_labels = preds.argmax(dim=-1) + acc = (pred_labels == targets).float().mean().item() + return {"Acc": acc} + + +class RegressionMetrics(MetricBase): + def __init__(self): + self.reset() + + def reset(self) -> None: + self._preds: List[torch.Tensor] = [] + self._targets: List[torch.Tensor] = [] + + def update(self, preds: torch.Tensor, targets: torch.Tensor) -> None: + self._preds.append(preds.detach().cpu()) + self._targets.append(targets.detach().cpu()) + + def compute(self) -> Dict[str, float]: + preds = torch.cat(self._preds) + targets = torch.cat(self._targets) + mae = F.l1_loss(preds, targets).item() + rmse = torch.sqrt(F.mse_loss(preds, targets)).item() + return {"MAE": mae, "RMSE": rmse} + + +class SegmentationMetrics(MetricBase): + def __init__(self, num_classes: Optional[int] = None): + self.num_classes = num_classes + self.reset() + + def reset(self) -> None: + self._preds: List[torch.Tensor] = [] + self._targets: List[torch.Tensor] = [] + + def update(self, preds: torch.Tensor, targets: torch.Tensor) -> None: + self._preds.append(preds.detach().cpu()) + self._targets.append(targets.detach().cpu()) + + def compute(self) -> Dict[str, float]: + preds = torch.cat(self._preds) + targets = torch.cat(self._targets) + pred_labels = preds.argmax(dim=1) + # simple pixel accuracy; extend to IoU/Dice as needed + acc = (pred_labels == targets).float().mean().item() + return {"PixelAcc": acc} + + +__all__ = ["MetricBase", "ClassificationMetrics", "RegressionMetrics", "SegmentationMetrics"] diff --git 
a/pyhazards/model_cards/asufm.yaml b/pyhazards/model_cards/asufm.yaml new file mode 100644 index 00000000..7a2d8a0a --- /dev/null +++ b/pyhazards/model_cards/asufm.yaml @@ -0,0 +1,51 @@ +model_name: asufm +display_name: ASUFM +hazard: Wildfire +catalog_status: core +source_file: pyhazards/models/asufm.py +builder_name: asufm_builder +summary: > + A temporal convolution baseline for weekly wildfire activity forecasting. +description: + - > + ``asufm`` is a compact temporal convolution baseline for next-window wildfire + activity prediction. + - > + PyHazards exposes it through the shared wildfire benchmark and config workflow. +paper: + title: Wildfire Spread Prediction in North America Using Satellite Imagery and Vision Transformer + url: https://doi.ieeecomputersociety.org/10.1109/CAI59869.2024.00278 + repo_url: https://github.com/bronteee/fire-asufm +tasks: + - forecasting + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model( + name="asufm", + task="forecasting", + input_dim=7, + output_dim=5, + lookback=12, + ) + preds = model(torch.randn(2, 12, 7)) + print(preds.shape) +notes: + - "The smoke path uses weekly wildfire count windows with seasonal time features." +smoke_test: + task: forecasting + build_kwargs: + input_dim: 7 + hidden_dim: 32 + output_dim: 5 + lookback: 12 + dropout: 0.0 + input: + kind: tensor + tensor: + shape: [2, 12, 7] + expected_output: + kind: tensor + shape: [2, 5] diff --git a/pyhazards/model_cards/eqnet.yaml b/pyhazards/model_cards/eqnet.yaml new file mode 100644 index 00000000..0e00baf2 --- /dev/null +++ b/pyhazards/model_cards/eqnet.yaml @@ -0,0 +1,41 @@ +model_name: eqnet +display_name: EQNet +hazard: Earthquake +source_file: pyhazards/models/eqnet.py +builder_name: eqnet_builder +summary: > + A transformer-style earthquake phase-picking baseline for modern sequence + modeling comparisons. 
+description: + - > + ``eqnet`` extends the PyHazards earthquake benchmark stack with a + lightweight attention-based picking model. + - > + The implementation keeps the shared waveform input and two-pick output + contract so it can be evaluated alongside ``phasenet`` and ``eqtransformer``. +paper: + title: An End-To-End Earthquake Detection Method for Joint Phase Picking and Association Using Deep Learning + url: https://www.osti.gov/biblio/1978539 + repo_url: https://github.com/AI4EPS/EQNet +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="eqnet", task="regression", in_channels=3) + picks = model(torch.randn(4, 3, 256)) + print(picks.shape) +notes: + - "Outputs are P- and S-arrival sample indices." +smoke_test: + task: regression + build_kwargs: + in_channels: 3 + input: + kind: tensor + tensor: + shape: [4, 3, 256] + expected_output: + kind: tensor + shape: [4, 2] diff --git a/pyhazards/model_cards/eqtransformer.yaml b/pyhazards/model_cards/eqtransformer.yaml new file mode 100644 index 00000000..02bb1cf8 --- /dev/null +++ b/pyhazards/model_cards/eqtransformer.yaml @@ -0,0 +1,41 @@ +model_name: eqtransformer +display_name: EQTransformer +hazard: Earthquake +source_file: pyhazards/models/eqtransformer.py +builder_name: eqtransformer_builder +summary: > + A bidirectional sequence encoder for joint earthquake phase picking with + attention pooling over waveform windows. +description: + - > + ``eqtransformer`` is the second earthquake picking baseline in the staged + roadmap and shares the synthetic waveform contract used by ``phasenet``. + - > + The PyHazards adapter focuses on the shared picking interface rather than a + full reproduction of the original multitask training pipeline. 
+paper: + title: Earthquake Transformer-An attentive deep-learning model for simultaneous earthquake detection and phase picking + url: https://doi.org/10.1038/s41467-020-17591-w + repo_url: https://github.com/smousavi05/EQTransformer +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="eqtransformer", task="regression", in_channels=3) + picks = model(torch.randn(4, 3, 256)) + print(picks.shape) +notes: + - "Outputs are P- and S-arrival sample indices." +smoke_test: + task: regression + build_kwargs: + in_channels: 3 + input: + kind: tensor + tensor: + shape: [4, 3, 256] + expected_output: + kind: tensor + shape: [4, 2] diff --git a/pyhazards/model_cards/firecastnet.yaml b/pyhazards/model_cards/firecastnet.yaml new file mode 100644 index 00000000..34e2a281 --- /dev/null +++ b/pyhazards/model_cards/firecastnet.yaml @@ -0,0 +1,45 @@ +model_name: firecastnet +display_name: FireCastNet +hazard: Wildfire +catalog_status: core +source_file: pyhazards/models/firecastnet.py +builder_name: firecastnet_builder +summary: > + A compact encoder-decoder baseline for wildfire spread mask prediction. +description: + - > + ``firecastnet`` is a raster wildfire spread baseline that uses a shallow + encoder-decoder architecture. + - > + The PyHazards implementation is optimized for the shared smoke benchmark + rather than the full upstream training stack. +paper: + title: "FireCastNet: Earth-as-a-Graph for Seasonal Fire Prediction" + url: https://doi.org/10.1038/s41598-025-30645-7 + repo_url: https://github.com/SeasFire/firecastnet +tasks: + - segmentation + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="firecastnet", task="segmentation", in_channels=12) + logits = model(torch.randn(2, 12, 16, 16)) + print(logits.shape) +notes: + - "The smoke configuration uses the single-frame wildfire spread raster fixture." 
+smoke_test: + task: segmentation + build_kwargs: + in_channels: 12 + hidden_dim: 24 + out_channels: 1 + dropout: 0.0 + input: + kind: tensor + tensor: + shape: [2, 12, 16, 16] + expected_output: + kind: tensor + shape: [2, 1, 16, 16] diff --git a/pyhazards/model_cards/floodcast.yaml b/pyhazards/model_cards/floodcast.yaml new file mode 100644 index 00000000..afd5fecb --- /dev/null +++ b/pyhazards/model_cards/floodcast.yaml @@ -0,0 +1,43 @@ +model_name: floodcast +display_name: FloodCast +hazard: Flood +source_file: pyhazards/models/floodcast.py +builder_name: floodcast_builder +summary: > + A compact spatiotemporal flood-inundation baseline for raster forecast experiments. +description: + - > + ``floodcast`` is the first public inundation model in the staged PyHazards + flood roadmap. + - > + The adapter uses shared raster tensors so it can be benchmarked through the + ``flood.inundation`` evaluator without dataset-specific glue code. +paper: + title: Large-scale flood modeling and forecasting with FloodCast + url: https://doi.org/10.1038/s41586-024-08028-8 + repo_url: https://github.com/HydroPML/FloodCast +tasks: + - regression + - segmentation +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="floodcast", task="regression", in_channels=3, history=4) + preds = model(torch.randn(2, 4, 3, 16, 16)) + print(preds.shape) +notes: + - "Outputs are next-horizon inundation depth rasters." 
+smoke_test: + task: regression + build_kwargs: + in_channels: 3 + history: 4 + hidden_dim: 32 + input: + kind: tensor + tensor: + shape: [2, 4, 3, 16, 16] + expected_output: + kind: tensor + shape: [2, 1, 16, 16] diff --git a/pyhazards/model_cards/forefire.yaml b/pyhazards/model_cards/forefire.yaml new file mode 100644 index 00000000..455d34e8 --- /dev/null +++ b/pyhazards/model_cards/forefire.yaml @@ -0,0 +1,43 @@ +model_name: forefire +display_name: ForeFire Adapter +hazard: Wildfire +catalog_status: core +source_file: pyhazards/models/forefire.py +builder_name: forefire_builder +summary: > + A lightweight simulator-style wildfire spread adapter inspired by front-propagation systems. +description: + - > + ``forefire`` is a deterministic raster adapter that approximates simulator-style + front propagation through fixed diffusion kernels. + - > + PyHazards exposes it as a benchmarkable baseline through the standard model registry. +paper: + title: "ForeFire: A Modular, Scriptable C++ Simulation Engine and Library for Wildland-Fire Spread" + url: https://doi.org/10.21105/joss.08680 + repo_url: https://github.com/forefireAPI/forefire +tasks: + - segmentation + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="forefire", task="segmentation", in_channels=12) + logits = model(torch.randn(2, 12, 16, 16)) + print(logits.shape) +notes: + - "This adapter is deterministic and does not learn parameters during the smoke test." 
+smoke_test: + task: segmentation + build_kwargs: + in_channels: 12 + out_channels: 1 + diffusion_steps: 2 + input: + kind: tensor + tensor: + shape: [2, 12, 16, 16] + expected_output: + kind: tensor + shape: [2, 1, 16, 16] diff --git a/pyhazards/model_cards/fourcastnet_tc.yaml b/pyhazards/model_cards/fourcastnet_tc.yaml new file mode 100644 index 00000000..c76f4cbf --- /dev/null +++ b/pyhazards/model_cards/fourcastnet_tc.yaml @@ -0,0 +1,44 @@ +model_name: fourcastnet_tc +display_name: FourCastNet TC Adapter +hazard: Tropical Cyclone +catalog_status: experimental +source_file: pyhazards/models/fourcastnet_tc.py +builder_name: fourcastnet_tc_builder +summary: > + An experimental wrapper-style storm adapter inspired by FourCastNet forecast fields. +description: + - > + ``fourcastnet_tc`` completes the first wave of experimental + foundation-weather storm adapters in the staged roadmap. + - > + The PyHazards version is intentionally lightweight and uses the same + trajectory output contract as the other storm baselines. +paper: + title: "FourCastNet: A Global Data-driven High-resolution Weather Model using Adaptive Fourier Neural Operators" + url: https://arxiv.org/abs/2202.11214 + repo_url: https://github.com/NVlabs/FourCastNet +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="fourcastnet_tc", task="regression", input_dim=8, history=6, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) +notes: + - "Experimental adapter: intended for shared-evaluator prototyping rather than exact weather-model parity." 
+smoke_test: + task: regression + build_kwargs: + input_dim: 8 + history: 6 + horizon: 5 + output_dim: 3 + input: + kind: tensor + tensor: + shape: [2, 6, 8] + expected_output: + kind: tensor + shape: [2, 5, 3] diff --git a/pyhazards/model_cards/google_flood_forecasting.yaml b/pyhazards/model_cards/google_flood_forecasting.yaml new file mode 100644 index 00000000..f4d81c43 --- /dev/null +++ b/pyhazards/model_cards/google_flood_forecasting.yaml @@ -0,0 +1,52 @@ +model_name: google_flood_forecasting +display_name: Google Flood Forecasting +hazard: Flood +catalog_status: core +source_file: pyhazards/models/google_flood_forecasting.py +builder_name: google_flood_forecasting_builder +summary: > + A transformer-style sequence baseline for nodewise streamflow forecasting. +description: + - > + ``google_flood_forecasting`` is a compact sequence-to-node forecasting baseline + for flood streamflow prediction. + - > + The PyHazards implementation uses a transformer encoder over per-node history + windows and returns one forecast value per node. +paper: + title: Global Flood Forecasting at a Fine Catchment Resolution using Machine Learning + url: https://research.google/pubs/global-flood-forecasting-at-a-fine-catchment-resolution-using-machine-learning/ + repo_url: https://github.com/google-research/flood-forecasting +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model( + name="google_flood_forecasting", + task="regression", + input_dim=2, + out_dim=1, + history=4, + ) + preds = model({"x": torch.randn(2, 4, 6, 2)}) + print(preds.shape) +notes: + - "The smoke path uses the same streamflow-style graph fixture as the other flood baselines." 
+smoke_test: + task: regression + build_kwargs: + input_dim: 2 + hidden_dim: 32 + out_dim: 1 + history: 4 + dropout: 0.0 + input: + kind: mapping + mapping: + x: + shape: [2, 4, 6, 2] + expected_output: + kind: tensor + shape: [2, 6, 1] diff --git a/pyhazards/model_cards/gpd.yaml b/pyhazards/model_cards/gpd.yaml new file mode 100644 index 00000000..84032f7b --- /dev/null +++ b/pyhazards/model_cards/gpd.yaml @@ -0,0 +1,41 @@ +model_name: gpd +display_name: GPD +hazard: Earthquake +source_file: pyhazards/models/gpd.py +builder_name: gpd_builder +summary: > + A compact CNN baseline for generalized phase detection and historical + earthquake picking comparisons. +description: + - > + ``gpd`` provides a lightweight earthquake picking adapter with the same + waveform-to-pick interface used across the PyHazards earthquake benchmarks. + - > + This adapter is intended as a reproducible low-cost baseline rather than an + exact port of every original training detail. +paper: + title: Generalized Seismic Phase Detection with Deep Learning + url: https://doi.org/10.1785/0120180080 + repo_url: https://github.com/interseismic/generalized-phase-detection +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="gpd", task="regression", in_channels=3) + picks = model(torch.randn(4, 3, 256)) + print(picks.shape) +notes: + - "The adapter keeps a simple two-output pick interface for shared evaluation." 
+smoke_test: + task: regression + build_kwargs: + in_channels: 3 + input: + kind: tensor + tensor: + shape: [4, 3, 256] + expected_output: + kind: tensor + shape: [4, 2] diff --git a/pyhazards/model_cards/graphcast_tc.yaml b/pyhazards/model_cards/graphcast_tc.yaml new file mode 100644 index 00000000..3d6c83d4 --- /dev/null +++ b/pyhazards/model_cards/graphcast_tc.yaml @@ -0,0 +1,43 @@ +model_name: graphcast_tc +display_name: GraphCast TC Adapter +hazard: Tropical Cyclone +catalog_status: experimental +source_file: pyhazards/models/graphcast_tc.py +builder_name: graphcast_tc_builder +summary: > + An experimental wrapper-style storm adapter inspired by GraphCast/GenCast forecast fields. +description: + - > + ``graphcast_tc`` is an experimental foundation-weather adapter that keeps + the shared storm trajectory interface while remaining lightweight enough for CI. + - > + The PyHazards version is intentionally wrapper-style and should be treated as + an adapter contract rather than a full reproduction of the original weather model. +paper: + title: "GraphCast: Learning skillful medium-range global weather forecasting" + url: https://www.science.org/doi/10.1126/science.adi2336 + repo_url: https://github.com/google-deepmind/graphcast +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="graphcast_tc", task="regression", input_dim=8, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) +notes: + - "Experimental adapter: intended for shared-evaluator prototyping rather than exact weather-model parity." 
+smoke_test: + task: regression + build_kwargs: + input_dim: 8 + horizon: 5 + output_dim: 3 + input: + kind: tensor + tensor: + shape: [2, 6, 8] + expected_output: + kind: tensor + shape: [2, 5, 3] diff --git a/pyhazards/model_cards/hurricast.yaml b/pyhazards/model_cards/hurricast.yaml new file mode 100644 index 00000000..c27d34a3 --- /dev/null +++ b/pyhazards/model_cards/hurricast.yaml @@ -0,0 +1,49 @@ +model_name: hurricast +display_name: Hurricast +hazard: Tropical Cyclone +source_file: pyhazards/models/hurricast.py +builder_name: hurricast_builder +summary: > + A compact multimodal storm baseline for hurricane track and intensity forecasting. +description: + - > + ``hurricast`` is the first basin-specific storm baseline in the staged PyHazards + roadmap and operates on storm-history sequences. + - > + This initial adapter focuses on the shared tropical-cyclone forecasting interface + and is intended as a reproducible starting point before broader storm-model breadth. +paper: + title: "Hurricane Forecasting: A Novel Multimodal Machine Learning Framework" + url: https://arxiv.org/abs/2102.01204 + repo_url: https://github.com/leobix/hurricast +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model( + name="hurricast", + task="regression", + input_dim=8, + horizon=5, + output_dim=3, + ) + + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) +notes: + - "Outputs are lead-time sequences of latitude, longitude, and intensity targets." 
+smoke_test: + task: regression + build_kwargs: + input_dim: 8 + horizon: 5 + output_dim: 3 + input: + kind: tensor + tensor: + shape: [2, 6, 8] + expected_output: + kind: tensor + shape: [2, 5, 3] diff --git a/pyhazards/model_cards/hydrographnet.yaml b/pyhazards/model_cards/hydrographnet.yaml new file mode 100644 index 00000000..d6656b64 --- /dev/null +++ b/pyhazards/model_cards/hydrographnet.yaml @@ -0,0 +1,60 @@ +model_name: hydrographnet +display_name: HydroGraphNet +hazard: Flood +catalog_status: variant +source_file: pyhazards/models/hydrographnet.py +builder_name: hydrographnet_builder +summary: > + A physics-informed graph neural network for flood forecasting with interpretable + KAN-style components, residual message passing, and delta-state decoding. +description: + - > + ``hydrographnet`` is the PyHazards entrypoint for flood forecasting on irregular + meshes with graph-structured hydrologic state updates. + - > + In PyHazards, this model is typically paired with the ERA5-based hydrograph + adapter ``load_hydrograph_data`` for end-to-end smoke validation. +paper: + title: Interpretable physics-informed graph neural networks for flood forecasting + url: https://onlinelibrary.wiley.com/doi/10.1111/mice.13484 +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model( + name="hydrographnet", + task="regression", + node_in_dim=2, + edge_in_dim=3, + out_dim=1, + ) + + batch = { + "x": torch.randn(1, 3, 6, 2), + "adj": torch.eye(6).unsqueeze(0), + "coords": torch.randn(6, 2), + } + preds = model(batch) + print(preds.shape) +notes: + - "The smoke test uses a synthetic graph batch so it stays CPU-safe in CI." 
+smoke_test: + task: regression + build_kwargs: + node_in_dim: 2 + edge_in_dim: 3 + out_dim: 1 + input: + kind: mapping + mapping: + x: + shape: [1, 3, 6, 2] + adj: + shape: [1, 6, 6] + coords: + shape: [6, 2] + expected_output: + kind: tensor + shape: [1, 6, 1] diff --git a/pyhazards/model_cards/neuralhydrology_ealstm.yaml b/pyhazards/model_cards/neuralhydrology_ealstm.yaml new file mode 100644 index 00000000..e38f3075 --- /dev/null +++ b/pyhazards/model_cards/neuralhydrology_ealstm.yaml @@ -0,0 +1,42 @@ +model_name: neuralhydrology_ealstm +display_name: EA-LSTM +hazard: Flood +source_file: pyhazards/models/neuralhydrology_ealstm.py +builder_name: neuralhydrology_ealstm_builder +summary: > + An entity-aware hydrology baseline with static-feature gating over streamflow histories. +description: + - > + ``neuralhydrology_ealstm`` complements the plain LSTM adapter with a + lightweight static gating path inspired by EA-LSTM style hydrology models. + - > + It keeps the same graph-temporal input contract as the rest of the flood + streamflow roadmap. +paper: + title: Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets + url: https://doi.org/10.5194/hess-23-5089-2019 + repo_url: https://github.com/neuralhydrology/neuralhydrology +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="neuralhydrology_ealstm", task="regression", input_dim=2, out_dim=1) + preds = model({"x": torch.randn(1, 4, 6, 2)}) + print(preds.shape) +notes: + - "This adapter focuses on the entity-aware gating contract, not exact repo parity." 
+smoke_test: + task: regression + build_kwargs: + input_dim: 2 + out_dim: 1 + input: + kind: mapping + mapping: + x: + shape: [1, 4, 6, 2] + expected_output: + kind: tensor + shape: [1, 6, 1] diff --git a/pyhazards/model_cards/neuralhydrology_lstm.yaml b/pyhazards/model_cards/neuralhydrology_lstm.yaml new file mode 100644 index 00000000..dd843815 --- /dev/null +++ b/pyhazards/model_cards/neuralhydrology_lstm.yaml @@ -0,0 +1,42 @@ +model_name: neuralhydrology_lstm +display_name: NeuralHydrology LSTM +hazard: Flood +source_file: pyhazards/models/neuralhydrology_lstm.py +builder_name: neuralhydrology_lstm_builder +summary: > + An adapter-style LSTM baseline for nodewise streamflow forecasting on graph-temporal inputs. +description: + - > + ``neuralhydrology_lstm`` is the first community-style hydrology baseline in + the PyHazards flood roadmap. + - > + The adapter consumes the shared graph-temporal streamflow batch format and + produces next-step nodewise discharge predictions. +paper: + title: Towards learning universal, regional, and local hydrological behaviors via machine learning applied to large-sample datasets + url: https://doi.org/10.5194/hess-23-5089-2019 + repo_url: https://github.com/neuralhydrology/neuralhydrology +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="neuralhydrology_lstm", task="regression", input_dim=2, out_dim=1) + preds = model({"x": torch.randn(1, 4, 6, 2)}) + print(preds.shape) +notes: + - "The smoke test uses the shared synthetic streamflow dataset shape." 
+smoke_test: + task: regression + build_kwargs: + input_dim: 2 + out_dim: 1 + input: + kind: mapping + mapping: + x: + shape: [1, 4, 6, 2] + expected_output: + kind: tensor + shape: [1, 6, 1] diff --git a/pyhazards/model_cards/pangu_tc.yaml b/pyhazards/model_cards/pangu_tc.yaml new file mode 100644 index 00000000..d3896011 --- /dev/null +++ b/pyhazards/model_cards/pangu_tc.yaml @@ -0,0 +1,43 @@ +model_name: pangu_tc +display_name: Pangu TC Adapter +hazard: Tropical Cyclone +catalog_status: experimental +source_file: pyhazards/models/pangu_tc.py +builder_name: pangu_tc_builder +summary: > + An experimental wrapper-style storm adapter inspired by Pangu-Weather forecast fields. +description: + - > + ``pangu_tc`` adds a second foundation-weather reference path behind the + shared tropical-cyclone evaluator. + - > + The implementation is intentionally lightweight and should be interpreted as + an adapter contract for forecast-field driven storm evaluation. +paper: + title: Accurate medium-range global weather forecasting with 3D neural networks + url: https://www.nature.com/articles/s41586-023-06185-3 + repo_url: https://github.com/198808xc/Pangu-Weather +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="pangu_tc", task="regression", input_dim=8, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) +notes: + - "Experimental adapter: intended for shared-evaluator prototyping rather than exact weather-model parity." 
+smoke_test: + task: regression + build_kwargs: + input_dim: 8 + horizon: 5 + output_dim: 3 + input: + kind: tensor + tensor: + shape: [2, 6, 8] + expected_output: + kind: tensor + shape: [2, 5, 3] diff --git a/pyhazards/model_cards/phasenet.yaml b/pyhazards/model_cards/phasenet.yaml new file mode 100644 index 00000000..ea660566 --- /dev/null +++ b/pyhazards/model_cards/phasenet.yaml @@ -0,0 +1,46 @@ +model_name: phasenet +display_name: PhaseNet +hazard: Earthquake +source_file: pyhazards/models/phasenet.py +builder_name: phasenet_builder +summary: > + A lightweight phase-picking baseline that predicts P- and S-arrival indices from + multichannel waveform windows. +description: + - > + ``phasenet`` is the first earthquake picking baseline in the staged PyHazards + roadmap and is paired with the synthetic waveform dataset for smoke validation. + - > + This initial adapter focuses on the shared waveform-to-pick interface and does not + claim exact reproduction of the original PhaseNet training stack. +paper: + title: "PhaseNet: A Deep-Neural-Network-Based Seismic Arrival Time Picking Method" + url: https://arxiv.org/abs/1803.03211 + repo_url: https://github.com/AI4EPS/PhaseNet +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model( + name="phasenet", + task="regression", + in_channels=3, + ) + + picks = model(torch.randn(4, 3, 256)) + print(picks.shape) +notes: + - "Outputs are P- and S-arrival sample indices in the current smoke-test adapter." 
+smoke_test: + task: regression + build_kwargs: + in_channels: 3 + input: + kind: tensor + tensor: + shape: [4, 3, 256] + expected_output: + kind: tensor + shape: [4, 2] diff --git a/pyhazards/model_cards/saf_net.yaml b/pyhazards/model_cards/saf_net.yaml new file mode 100644 index 00000000..4b991630 --- /dev/null +++ b/pyhazards/model_cards/saf_net.yaml @@ -0,0 +1,41 @@ +model_name: saf_net +display_name: SAF-Net +hazard: Tropical Cyclone +source_file: pyhazards/models/saf_net.py +builder_name: saf_net_builder +summary: > + A spatiotemporal tropical-cyclone baseline with an intensity-focused head and shared trajectory output. +description: + - > + ``saf_net`` adds an intensity-oriented storm baseline to the shared + ``tc.track_intensity`` evaluator. + - > + The adapter keeps full trajectory outputs so it can use the same report + format as the other PyHazards storm models. +paper: + title: "SAF-Net: A spatio-temporal deep learning method for typhoon intensity prediction" + url: https://www.sciencedirect.com/science/article/pii/S1568494623003152 + repo_url: https://github.com/xuguangning1218/TI_Prediction +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="saf_net", task="regression", input_dim=8, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) +notes: + - "Track channels are retained so the shared storm evaluator can score all baselines consistently." 
+smoke_test: + task: regression + build_kwargs: + input_dim: 8 + horizon: 5 + input: + kind: tensor + tensor: + shape: [2, 6, 8] + expected_output: + kind: tensor + shape: [2, 5, 3] diff --git a/pyhazards/model_cards/tcif_fusion.yaml b/pyhazards/model_cards/tcif_fusion.yaml new file mode 100644 index 00000000..1c5a07cd --- /dev/null +++ b/pyhazards/model_cards/tcif_fusion.yaml @@ -0,0 +1,42 @@ +model_name: tcif_fusion +display_name: TCIF-fusion +hazard: Tropical Cyclone +source_file: pyhazards/models/tcif_fusion.py +builder_name: tcif_fusion_builder +summary: > + A knowledge-guided fusion baseline for tropical cyclone track and intensity forecasting. +description: + - > + ``tcif_fusion`` combines multiple feature streams behind the shared storm + forecasting interface used throughout the PyHazards cyclone roadmap. + - > + The adapter focuses on the fusion contract and evaluator compatibility + rather than full reproduction of the original training stack. +paper: + title: Tropical cyclone intensity forecasting using model knowledge guided deep learning model + url: https://doi.org/10.5194/egusphere-2024-250 + repo_url: https://github.com/wangchong96/TCIF-fusion +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="tcif_fusion", task="regression", input_dim=8, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) +notes: + - "Outputs are shared storm forecast trajectories over the configured horizon." 
+smoke_test: + task: regression + build_kwargs: + input_dim: 8 + horizon: 5 + output_dim: 3 + input: + kind: tensor + tensor: + shape: [2, 6, 8] + expected_output: + kind: tensor + shape: [2, 5, 3] diff --git a/pyhazards/model_cards/tropicalcyclone_mlp.yaml b/pyhazards/model_cards/tropicalcyclone_mlp.yaml new file mode 100644 index 00000000..98cb3f09 --- /dev/null +++ b/pyhazards/model_cards/tropicalcyclone_mlp.yaml @@ -0,0 +1,43 @@ +model_name: tropicalcyclone_mlp +display_name: Tropical Cyclone MLP +hazard: Tropical Cyclone +source_file: pyhazards/models/tropicalcyclone_mlp.py +builder_name: tropicalcyclone_mlp_builder +summary: > + A compact MLP baseline for hurricane track and intensity forecasting. +description: + - > + ``tropicalcyclone_mlp`` complements ``hurricast`` with a lighter-weight + hurricane baseline that uses the same storm-history input contract. + - > + The adapter is useful for practical low-cost intensity and trajectory + experiments in basin-filtered settings. +paper: + title: Deep Learning Experiments for Tropical Cyclone Intensity Forecasts + url: https://doi.org/10.1145/3447548.3467351 + repo_url: https://github.com/wenweixu/tropicalcyclone_MLP +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="tropicalcyclone_mlp", task="regression", input_dim=8, history=6) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) +notes: + - "Outputs are lead-time sequences of latitude, longitude, and intensity targets." 
+smoke_test: + task: regression + build_kwargs: + input_dim: 8 + history: 6 + horizon: 5 + output_dim: 3 + input: + kind: tensor + tensor: + shape: [2, 6, 8] + expected_output: + kind: tensor + shape: [2, 5, 3] diff --git a/pyhazards/model_cards/tropicyclonenet.yaml b/pyhazards/model_cards/tropicyclonenet.yaml new file mode 100644 index 00000000..e266ac89 --- /dev/null +++ b/pyhazards/model_cards/tropicyclonenet.yaml @@ -0,0 +1,42 @@ +model_name: tropicyclonenet +display_name: TropiCycloneNet +hazard: Tropical Cyclone +source_file: pyhazards/models/tropicyclonenet.py +builder_name: tropicyclonenet_builder +summary: > + A GRU plus attention baseline for all-basin tropical cyclone forecasting. +description: + - > + ``tropicyclonenet`` extends the shared storm benchmark stack beyond the + hurricane-only presets. + - > + The PyHazards adapter keeps a single storm-history to forecast-trajectory + interface so it can share the same evaluator as ``hurricast``. +paper: + title: Benchmark dataset and deep learning method for global tropical cyclone forecasting + url: https://www.nature.com/articles/s41597-023-02721-x + repo_url: https://github.com/xiaochengfuhuo/TropiCycloneNet +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="tropicyclonenet", task="regression", input_dim=8, horizon=5) + preds = model(torch.randn(2, 6, 8)) + print(preds.shape) +notes: + - "Outputs are lead-time sequences of latitude, longitude, and intensity targets." 
+smoke_test: + task: regression + build_kwargs: + input_dim: 8 + horizon: 5 + output_dim: 3 + input: + kind: tensor + tensor: + shape: [2, 6, 8] + expected_output: + kind: tensor + shape: [2, 5, 3] diff --git a/pyhazards/model_cards/urbanfloodcast.yaml b/pyhazards/model_cards/urbanfloodcast.yaml new file mode 100644 index 00000000..a1a1e033 --- /dev/null +++ b/pyhazards/model_cards/urbanfloodcast.yaml @@ -0,0 +1,43 @@ +model_name: urbanfloodcast +display_name: UrbanFloodCast +hazard: Flood +source_file: pyhazards/models/urbanfloodcast.py +builder_name: urbanfloodcast_builder +summary: > + A U-Net style urban inundation baseline for dense-grid flood prediction. +description: + - > + ``urbanfloodcast`` adds an urban-focused raster baseline to the PyHazards + inundation benchmark stack. + - > + The implementation keeps the shared spatiotemporal tensor contract used by + the synthetic inundation smoke dataset. +paper: + title: "UrbanFloodCast: WMO Urban Flooding Forecasting Challenge" + url: https://arxiv.org/abs/2405.21179 + repo_url: https://github.com/HydroPML/UrbanFloodCast +tasks: + - regression + - segmentation +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="urbanfloodcast", task="regression", in_channels=3, history=4) + preds = model(torch.randn(2, 4, 3, 16, 16)) + print(preds.shape) +notes: + - "Outputs are next-horizon inundation depth rasters." 
+smoke_test: + task: regression + build_kwargs: + in_channels: 3 + history: 4 + base_channels: 32 + input: + kind: tensor + tensor: + shape: [2, 4, 3, 16, 16] + expected_output: + kind: tensor + shape: [2, 1, 16, 16] diff --git a/pyhazards/model_cards/wavecastnet.yaml b/pyhazards/model_cards/wavecastnet.yaml new file mode 100644 index 00000000..c3e4d526 --- /dev/null +++ b/pyhazards/model_cards/wavecastnet.yaml @@ -0,0 +1,63 @@ +model_name: wavecastnet +display_name: WaveCastNet +hazard: Earthquake +catalog_status: variant +source_file: pyhazards/models/wavecastnet.py +builder_name: wavecastnet_builder +summary: > + A ConvLEM-based sequence-to-sequence model for dense-grid earthquake wavefield + forecasting and early-warning style rollout experiments. +description: + - > + ``wavecastnet`` is the PyHazards entrypoint for dense-grid earthquake wavefield + forecasting based on the ConvLEM encoder-decoder design described by Lyu et al. (2025). + - > + This implementation focuses on the core dense-grid forecasting path and keeps data + loading outside the model so users can adapt it to their own simulation or sensor pipelines. +paper: + title: Rapid wavefield forecasting for earthquake early warning via deep sequence to sequence learning + url: https://doi.org/10.1038/s41467-025-65435-2 +tasks: + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model( + name="wavecastnet", + task="regression", + in_channels=3, + height=32, + width=24, + temporal_in=6, + temporal_out=4, + hidden_dim=32, + num_layers=1, + dropout=0.0, + ) + + x = torch.randn(2, 3, 6, 32, 24) + y = model(x) + print(y.shape) +notes: + - "The PyHazards version currently targets dense-grid forecasting rather than the paper's sparse-sensor variants." + - "The smoke test uses reduced spatial and temporal sizes so it stays CPU-safe in CI." 
+smoke_test: + task: regression + build_kwargs: + in_channels: 3 + height: 12 + width: 10 + temporal_in: 5 + temporal_out: 4 + hidden_dim: 16 + num_layers: 1 + kernel_size: 3 + dropout: 0.0 + input: + kind: tensor + tensor: + shape: [2, 3, 5, 12, 10] + expected_output: + kind: tensor + shape: [2, 3, 4, 12, 10] diff --git a/pyhazards/model_cards/wildfire_aspp.yaml b/pyhazards/model_cards/wildfire_aspp.yaml new file mode 100644 index 00000000..5bc55cf4 --- /dev/null +++ b/pyhazards/model_cards/wildfire_aspp.yaml @@ -0,0 +1,49 @@ +model_name: wildfire_aspp +display_name: CNN-ASPP +hazard: Wildfire +catalog_status: variant +source_file: pyhazards/models/wildfire_aspp.py +builder_name: wildfire_aspp_builder +summary: > + An explainable CNN segmentation model with an ASPP mechanism for next-day wildfire + spread prediction. +description: + - > + ``wildfire_aspp`` is the backward-compatible public PyHazards entrypoint for the + CNN + ASPP wildfire spread model. + - > + PyHazards keeps the alias for compatibility while the implementation delegates to + the native ``wildfire_cnn_aspp`` builder under the hood. +paper: + title: Application of Explainable Artificial Intelligence in Predicting Wildfire Spread + url: https://ieeexplore.ieee.org/document/10568207 +tasks: + - segmentation +aliases: + - wildfire_cnn_aspp +example: | + import torch + from pyhazards.models import build_model + + model = build_model( + name="wildfire_aspp", + task="segmentation", + in_channels=12, + ) + + x = torch.randn(2, 12, 64, 64) + logits = model(x) + print(logits.shape) +notes: + - "``wildfire_cnn_aspp`` remains available as an alias for the same public model." 
+smoke_test: + task: segmentation + build_kwargs: + in_channels: 12 + input: + kind: tensor + tensor: + shape: [2, 12, 16, 16] + expected_output: + kind: tensor + shape: [2, 1, 16, 16] diff --git a/pyhazards/model_cards/wildfire_forecasting.yaml b/pyhazards/model_cards/wildfire_forecasting.yaml new file mode 100644 index 00000000..258dcd81 --- /dev/null +++ b/pyhazards/model_cards/wildfire_forecasting.yaml @@ -0,0 +1,53 @@ +model_name: wildfire_forecasting +display_name: Wildfire Forecasting +hazard: Wildfire +catalog_status: core +source_file: pyhazards/models/wildfire_forecasting.py +builder_name: wildfire_forecasting_builder +summary: > + A sequence forecasting baseline for next-window wildfire activity across weekly count features. +description: + - > + ``wildfire_forecasting`` is a compact GRU-attention forecaster for weekly wildfire + activity windows. + - > + The PyHazards implementation targets smoke-testable next-window size-group prediction + through the shared wildfire benchmark flow. +paper: + title: Wildfire Danger Prediction and Understanding with Deep Learning + url: https://doi.org/10.1029/2022GL099368 + repo_url: https://github.com/Orion-AI-Lab/wildfire_forecasting +tasks: + - forecasting + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model( + name="wildfire_forecasting", + task="forecasting", + input_dim=7, + output_dim=5, + lookback=12, + ) + preds = model(torch.randn(2, 12, 7)) + print(preds.shape) +notes: + - "This public adapter is exercised on the weekly wildfire smoke benchmark." 
+smoke_test: + task: forecasting + build_kwargs: + input_dim: 7 + hidden_dim: 32 + output_dim: 5 + lookback: 12 + num_layers: 2 + dropout: 0.0 + input: + kind: tensor + tensor: + shape: [2, 12, 7] + expected_output: + kind: tensor + shape: [2, 5] diff --git a/pyhazards/model_cards/wildfire_fpa.yaml b/pyhazards/model_cards/wildfire_fpa.yaml new file mode 100644 index 00000000..1a2a18e2 --- /dev/null +++ b/pyhazards/model_cards/wildfire_fpa.yaml @@ -0,0 +1,57 @@ +model_name: wildfire_fpa +display_name: DNN-LSTM-AutoEncoder +hazard: Wildfire +catalog_status: core +source_file: pyhazards/models/wildfire_fpa.py +builder_name: wildfire_fpa_builder +summary: > + A two-stage wildfire framework with a DNN stage for incident-level cause and size + prediction plus an LSTM + autoencoder stage for weekly forecasting. +description: + - > + ``wildfire_fpa`` is the paper-facing PyHazards entrypoint for the FPA-FOD wildfire + framework described by Shen et al. (2023). + - > + PyHazards exposes the combined DNN-LSTM-AutoEncoder workflow through one public + registry name while keeping the lower-level components internal. +paper: + title: Developing risk assessment framework for wildfire in the United States + url: https://www.sciencedirect.com/science/article/pii/S2949926723000033 +tasks: + - classification + - forecasting + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model( + name="wildfire_fpa", + task="classification", + in_dim=8, + out_dim=5, + hidden_dim=64, + depth=2, + ) + + x = torch.randn(4, 8) + logits = model(x) + print(logits.shape) +notes: + - "This is the only retained public method from Shen et al. (2023) in the PyHazards catalog." + - "Use ``task=\"classification\"`` for the DNN stage." + - "Use ``task=\"forecasting\"`` or ``task=\"regression\"`` for the sequence stage." 
+smoke_test: + task: classification + build_kwargs: + in_dim: 8 + out_dim: 5 + hidden_dim: 32 + depth: 2 + input: + kind: tensor + tensor: + shape: [4, 8] + expected_output: + kind: tensor + shape: [4, 5] diff --git a/pyhazards/model_cards/wildfire_mamba.yaml b/pyhazards/model_cards/wildfire_mamba.yaml new file mode 100644 index 00000000..6762874f --- /dev/null +++ b/pyhazards/model_cards/wildfire_mamba.yaml @@ -0,0 +1,58 @@ +model_name: wildfire_mamba +display_name: Wildfire Mamba +hazard: Wildfire +include_in_public_catalog: false +catalog_status: hidden +source_file: pyhazards/models/wildfire_mamba.py +builder_name: wildfire_mamba_builder +summary: > + A Mamba-inspired spatio-temporal wildfire model that mixes county-level temporal + encoders with a lightweight graph convolution over spatial adjacency. +description: + - > + ``wildfire_mamba`` models county-day ERA5 sequences by combining selective + state-space temporal blocks with a simple spatial graph layer. + - > + The PyHazards implementation targets binary next-day per-county wildfire + classification and supports an optional count head for multi-task extensions. +paper: + title: "Mamba: Linear-Time Sequence Modeling with Selective State Spaces" + url: https://arxiv.org/abs/2312.00752 +tasks: + - classification +example: | + import torch + from pyhazards.models import build_model + + model = build_model( + name="wildfire_mamba", + task="classification", + in_dim=3, + num_counties=4, + past_days=5, + ) + + x = torch.randn(2, 5, 4, 3) + logits = model(x) + print(logits.shape) +notes: + - "The CI smoke test validates the default binary-classification path on synthetic data." 
+smoke_test: + task: classification + build_kwargs: + in_dim: 3 + num_counties: 4 + past_days: 5 + hidden_dim: 32 + gcn_hidden: 16 + mamba_layers: 2 + state_dim: 16 + conv_kernel: 3 + dropout: 0.0 + input: + kind: tensor + tensor: + shape: [2, 5, 4, 3] + expected_output: + kind: tensor + shape: [2, 4] diff --git a/pyhazards/model_cards/wildfirespreadts.yaml b/pyhazards/model_cards/wildfirespreadts.yaml new file mode 100644 index 00000000..b06dad73 --- /dev/null +++ b/pyhazards/model_cards/wildfirespreadts.yaml @@ -0,0 +1,50 @@ +model_name: wildfirespreadts +display_name: WildfireSpreadTS +hazard: Wildfire +catalog_status: core +source_file: pyhazards/models/wildfirespreadts.py +builder_name: wildfirespreadts_builder +summary: > + A temporal convolution wildfire spread baseline over short raster history windows. +description: + - > + ``wildfirespreadts`` models wildfire spread as a sequence-to-mask prediction task. + - > + The PyHazards adapter uses a compact 3D convolution stack that consumes short + raster history windows and predicts the next spread mask. +paper: + title: "WildfireSpreadTS: A Dataset of Multi-Modal Time Series for Wildfire Spread Prediction" + url: https://openreview.net/forum?id=RgdGkPRQ03 + repo_url: https://github.com/SebastianGer/WildfireSpreadTS +tasks: + - segmentation + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model( + name="wildfirespreadts", + task="segmentation", + history=4, + in_channels=6, + ) + logits = model(torch.randn(2, 4, 6, 16, 16)) + print(logits.shape) +notes: + - "The smoke dataset uses temporal wildfire spread tensors rather than single-frame rasters." 
+smoke_test: + task: segmentation + build_kwargs: + history: 4 + in_channels: 6 + hidden_dim: 24 + out_channels: 1 + dropout: 0.0 + input: + kind: tensor + tensor: + shape: [2, 4, 6, 16, 16] + expected_output: + kind: tensor + shape: [2, 1, 16, 16] diff --git a/pyhazards/model_cards/wrf_sfire.yaml b/pyhazards/model_cards/wrf_sfire.yaml new file mode 100644 index 00000000..d3b8a8d5 --- /dev/null +++ b/pyhazards/model_cards/wrf_sfire.yaml @@ -0,0 +1,44 @@ +model_name: wrf_sfire +display_name: WRF-SFIRE Adapter +hazard: Wildfire +catalog_status: core +source_file: pyhazards/models/wrf_sfire.py +builder_name: wrf_sfire_builder +summary: > + A lightweight raster wildfire spread adapter inspired by WRF-SFIRE style transport. +description: + - > + ``wrf_sfire`` approximates simulator-style spread transport with a fixed diffusion + and terrain-moisture modulation layer. + - > + The PyHazards adapter is designed for consistent smoke benchmarking rather than + full physical simulation. +paper: + title: Coupled atmosphere-wildland fire modeling with WRF 3.3 and SFIRE 2011 + url: https://doi.org/10.5194/gmd-4-591-2011 + repo_url: https://github.com/openwfm/WRF-SFIRE +tasks: + - segmentation + - regression +example: | + import torch + from pyhazards.models import build_model + + model = build_model(name="wrf_sfire", task="segmentation", in_channels=12) + logits = model(torch.randn(2, 12, 16, 16)) + print(logits.shape) +notes: + - "This smoke-path adapter keeps the simulator slot benchmarkable without external binaries." 
+smoke_test: + task: segmentation + build_kwargs: + in_channels: 12 + out_channels: 1 + diffusion_steps: 3 + input: + kind: tensor + tensor: + shape: [2, 12, 16, 16] + expected_output: + kind: tensor + shape: [2, 1, 16, 16] diff --git a/pyhazards/model_catalog.py b/pyhazards/model_catalog.py new file mode 100644 index 00000000..06c32178 --- /dev/null +++ b/pyhazards/model_catalog.py @@ -0,0 +1,1223 @@ +from __future__ import annotations + +import inspect +from collections import defaultdict +from pathlib import Path +from typing import Any, Dict, Iterable, List, Literal, Mapping, Optional, Sequence, Set + +import torch +import yaml +from pydantic import BaseModel, Field, model_validator + + +REPO_ROOT = Path(__file__).resolve().parent.parent +MODEL_CARDS_DIR = Path(__file__).resolve().parent / "model_cards" +DOCS_SOURCE_DIR = REPO_ROOT / "docs" / "source" +MODULE_DOCS_DIR = DOCS_SOURCE_DIR / "modules" +MODEL_PAGE_PATH = DOCS_SOURCE_DIR / "pyhazards_models.rst" +API_PAGE_PATH = DOCS_SOURCE_DIR / "api" / "pyhazards.models.rst" + +GENERATED_MARKER = ( + ".. This file is generated by scripts/render_model_docs.py. Do not edit by hand." +) +MODEL_PR_MARKER = "" +MODEL_REVIEW_MARKER = "" + +NON_CATALOG_MODELS = { + "mlp", + "cnn", + "temporal", +} + +HAZARD_DISPLAY_ORDER = [ + "Wildfire", + "Earthquake", + "Flood", + "Tropical Cyclone", +] + +NORMALIZED_HAZARD_LABELS = { + "Hurricane": "Tropical Cyclone", +} + +CATALOG_STATUS_ORDER = [ + "core", + "variant", + "experimental", +] + +IMPLEMENTED_SECTION_TITLE = "Implemented Models" +IMPLEMENTED_SECTION_SUMMARY = ( + "This table includes both core baselines and public variants or additional " + "implementations for the hazard family." +) +EXPERIMENTAL_SECTION_TITLE = "Experimental Adapters" +EXPERIMENTAL_SECTION_SUMMARY = ( + "These entries remain public as lightweight wrapper or prototype integrations " + "and should not be counted as stable implemented methods." 
+) + +HAZARD_SECTION_SUMMARIES = { + "Wildfire": ( + "Wildfire models cover danger forecasting, weekly activity forecasting, " + "and spread prediction under the shared wildfire benchmark family." + ), + "Earthquake": ( + "Earthquake models span phase picking and dense-grid forecasting, with " + "detail pages linked to the shared earthquake benchmark coverage." + ), + "Flood": ( + "Flood models cover streamflow and inundation forecasting, ranging from " + "sequence baselines to dense-grid flood-mapping architectures." + ), + "Tropical Cyclone": ( + "Storm models are organized under one tropical-cyclone family, including " + "basin-specific hurricane baselines and shared all-basin forecasting models." + ), +} + +TASK_DISPLAY_LABELS = { + "wildfire.danger": "Danger", + "wildfire.spread": "Spread", + "earthquake.picking": "Phase Picking", + "earthquake.forecasting": "Wavefield Forecasting", + "flood.streamflow": "Streamflow", + "flood.inundation": "Inundation", + "tc.track_intensity": "Track + Intensity", + "classification": "Classification", + "regression": "Forecasting", + "segmentation": "Segmentation", +} + +MATURITY_LABELS = { + "core": "Implemented", + "variant": "Implemented", + "experimental": "Experimental Adapter", + "hidden": "Hidden", +} + +MATURITY_BADGE_ROLES = { + "core": "success", + "variant": "success", + "experimental": "warning", + "hidden": "secondary", +} + +STARTER_MODELS = { + "Wildfire": "firecastnet", + "Earthquake": "phasenet", + "Flood": "floodcast", + "Tropical Cyclone": "hurricast", +} + + +class PaperReference(BaseModel): + title: str + url: str + repo_url: Optional[str] = None + + +class SmokeTensorSpec(BaseModel): + shape: List[int] + dtype: str = "float32" + + +class SmokeInputSpec(BaseModel): + kind: str + tensor: Optional[SmokeTensorSpec] = None + mapping: Dict[str, SmokeTensorSpec] = Field(default_factory=dict) + kwargs: Dict[str, SmokeTensorSpec] = Field(default_factory=dict) + + @model_validator(mode="after") + def 
validate_payload(self) -> "SmokeInputSpec": + kind = self.kind.lower() + if kind == "tensor" and self.tensor is None: + raise ValueError("tensor smoke inputs require the 'tensor' field") + if kind == "mapping" and not self.mapping: + raise ValueError("mapping smoke inputs require the 'mapping' field") + if kind == "kwargs" and not self.kwargs: + raise ValueError("kwargs smoke inputs require the 'kwargs' field") + if kind not in {"tensor", "mapping", "kwargs"}: + raise ValueError("smoke input kind must be one of: tensor, mapping, kwargs") + return self + + +class SmokeOutputSpec(BaseModel): + kind: str = "tensor" + shape: Optional[List[int]] = None + shapes: List[List[int]] = Field(default_factory=list) + + @model_validator(mode="after") + def validate_payload(self) -> "SmokeOutputSpec": + kind = self.kind.lower() + if kind == "tensor" and self.shape is None: + raise ValueError("tensor smoke outputs require the 'shape' field") + if kind == "sequence" and not self.shapes: + raise ValueError("sequence smoke outputs require the 'shapes' field") + if kind not in {"tensor", "sequence"}: + raise ValueError("smoke output kind must be one of: tensor, sequence") + return self + + +class SmokeTestSpec(BaseModel): + task: str + build_kwargs: Dict[str, Any] = Field(default_factory=dict) + input: SmokeInputSpec + expected_output: SmokeOutputSpec + + +class ModelCard(BaseModel): + model_name: str + display_name: str + hazard: str + include_in_public_catalog: bool = True + catalog_status: Literal["core", "variant", "experimental", "hidden"] = "core" + family_key: Optional[str] = None + family_label: Optional[str] = None + source_file: str + builder_name: str + summary: str + description: List[str] + paper: PaperReference + tasks: List[str] + example: str + notes: List[str] = Field(default_factory=list) + aliases: List[str] = Field(default_factory=list) + doc_slug: Optional[str] = None + smoke_test: SmokeTestSpec + + @model_validator(mode="after") + def 
validate_catalog_metadata(self) -> "ModelCard": + if self.catalog_status == "hidden" and self.include_in_public_catalog: + raise ValueError("hidden catalog_status requires include_in_public_catalog: false") + if self.family_key and not self.family_label: + raise ValueError("family_key requires family_label") + if self.family_label and not self.family_key: + raise ValueError("family_label requires family_key") + return self + + @property + def registry_names(self) -> List[str]: + return [self.model_name] + list(self.aliases) + + @property + def module_doc_name(self) -> str: + return self.doc_slug or "models_{name}".format(name=self.model_name) + + @property + def module_doc_path(self) -> Path: + return MODULE_DOCS_DIR / "{name}.rst".format(name=self.module_doc_name) + + @property + def source_path(self) -> Path: + return REPO_ROOT / self.source_file + + +def load_model_cards(cards_dir: Path = MODEL_CARDS_DIR) -> List[ModelCard]: + cards: List[ModelCard] = [] + seen_registry_names: Set[str] = set() + for path in sorted(cards_dir.glob("*.y*ml")): + raw = yaml.safe_load(path.read_text(encoding="utf-8")) or {} + card = ModelCard.model_validate(raw) + if path.stem != card.model_name: + raise ValueError( + "Model card filename must match model_name: " + "{path} vs {name}".format(path=path.name, name=card.model_name) + ) + overlap = set(card.registry_names) & seen_registry_names + if overlap: + raise ValueError( + "Duplicate model catalog names detected: {names}".format( + names=", ".join(sorted(overlap)) + ) + ) + seen_registry_names.update(card.registry_names) + cards.append(card) + return cards + + +def card_by_registry_name(cards: Sequence[ModelCard]) -> Dict[str, ModelCard]: + mapping: Dict[str, ModelCard] = {} + for card in cards: + for name in card.registry_names: + mapping[name] = card + return mapping + + +def group_cards_by_hazard(cards: Sequence[ModelCard]) -> Dict[str, List[ModelCard]]: + grouped: Dict[str, List[ModelCard]] = defaultdict(list) + for card in 
cards: + grouped[_display_hazard_label(card.hazard)].append(card) + + def order_key(hazard: str) -> tuple[int, str]: + if hazard in HAZARD_DISPLAY_ORDER: + return (HAZARD_DISPLAY_ORDER.index(hazard), hazard.lower()) + return (len(HAZARD_DISPLAY_ORDER), hazard.lower()) + + return { + hazard: sorted(hazard_cards, key=lambda item: item.display_name.lower()) + for hazard, hazard_cards in sorted(grouped.items(), key=lambda item: order_key(item[0])) + } + + +def public_catalog_cards(cards: Sequence[ModelCard]) -> List[ModelCard]: + return [card for card in cards if card.include_in_public_catalog] + + +def _cards_by_status(cards: Sequence[ModelCard]) -> Dict[str, List[ModelCard]]: + grouped: Dict[str, List[ModelCard]] = {status: [] for status in CATALOG_STATUS_ORDER} + for card in cards: + if card.catalog_status in grouped: + grouped[card.catalog_status].append(card) + for status in grouped: + grouped[status] = sorted(grouped[status], key=lambda item: item.display_name.lower()) + return grouped + + +def _grouped_catalog_entries(cards: Sequence[ModelCard]) -> List[List[ModelCard]]: + entries: List[List[ModelCard]] = [] + family_seen: Set[str] = set() + ordered_cards = sorted(cards, key=lambda item: item.display_name.lower()) + for card in ordered_cards: + if card.family_key: + if card.family_key in family_seen: + continue + family_seen.add(card.family_key) + family_cards = [ + member + for member in ordered_cards + if member.family_key == card.family_key + ] + entries.append(family_cards) + continue + entries.append([card]) + return entries + + +def _paper_sentence(card: ModelCard) -> str: + sentence = "`{title} <{url}>`_".format( + title=card.paper.title, + url=card.paper.url, + ) + if card.paper.repo_url: + sentence += " (`repo <{url}>`__)".format(url=card.paper.repo_url) + return sentence + "." 
+ + +def _single_line(text: str) -> str: + return " ".join(text.split()) + + +def _indent_block(text: str, prefix: str = " ") -> str: + lines = text.rstrip().splitlines() + return "\n".join(prefix + line if line else prefix.rstrip() for line in lines) + + +def _indent_lines(lines: Sequence[str], prefix: str = " ") -> List[str]: + return [prefix + line if line else "" for line in lines] + + +def _display_hazard_label(hazard: str) -> str: + return NORMALIZED_HAZARD_LABELS.get(hazard, hazard) + + +def _badge(role: str, text: str) -> str: + return f":bdg-{role}:`{text}`" + + +def _task_display_labels(tasks: Sequence[str]) -> List[str]: + labels = [] + seen: Set[str] = set() + for task in tasks: + label = TASK_DISPLAY_LABELS.get(task, task.replace("_", " ").title()) + if label not in seen: + labels.append(label) + seen.add(label) + return labels + + +def _ordered_unique(items: Iterable[str]) -> List[str]: + seen: Set[str] = set() + ordered: List[str] = [] + for item in items: + if item in seen: + continue + seen.add(item) + ordered.append(item) + return ordered + + +def _doc_link(card: ModelCard, absolute: bool = False) -> str: + target = "/modules/{slug}".format(slug=card.module_doc_name) if absolute else "modules/{slug}".format(slug=card.module_doc_name) + return ":doc:`{name} <{target}>`".format( + name=card.display_name, + target=target, + ) + + +def _benchmark_link(slug: str, display_name: str, absolute: bool = False) -> str: + target = f"/benchmarks/{slug}" if absolute else f"benchmarks/{slug}" + return f":doc:`{display_name} <{target}>`" + + +def _paper_links(card: ModelCard) -> str: + links = [f"**Paper:** `{card.paper.title} <{card.paper.url}>`_"] + if card.paper.repo_url: + links.append(f"**Repo:** `Repository <{card.paper.repo_url}>`__") + return " | ".join(links) + + +def _benchmark_links_by_model() -> Dict[str, Dict[str, Any]]: + from .benchmark_catalog import load_benchmark_cards + + mapping: Dict[str, Dict[str, Any]] = {} + for benchmark_card in 
load_benchmark_cards(): + for model_name in benchmark_card.linked_models: + entry = mapping.setdefault( + model_name, + { + "family": None, + "ecosystems": [], + "tasks": [], + }, + ) + if benchmark_card.kind == "family": + entry["family"] = benchmark_card + else: + entry["ecosystems"].append(benchmark_card) + entry["tasks"].extend(benchmark_card.tasks) + + for entry in mapping.values(): + entry["tasks"] = _ordered_unique(entry["tasks"]) + entry["ecosystems"] = sorted( + entry["ecosystems"], + key=lambda item: item.display_name.lower(), + ) + return mapping + + +def _family_row_name(cards: Sequence[ModelCard]) -> str: + label = cards[0].family_label or cards[0].display_name + return label + + +def _family_row_summary(cards: Sequence[ModelCard], status: str, absolute_links: bool = False) -> str: + members = ", ".join( + _doc_link(card, absolute=absolute_links) + for card in cards + ) + if status == "variant": + prefix = ( + "Family variants: {members}. These entries come from the same source " + "paper and are grouped here so they do not count as separate core " + "methods." + ) + else: + prefix = "Family members: {members}." + return "{prefix} {paper}".format( + prefix=prefix.format(members=members), + paper=_paper_sentence(cards[0]), + ) + + +def _row_name(cards: Sequence[ModelCard]) -> str: + if len(cards) == 1: + return _doc_link(cards[0]) + return _family_row_name(cards) + + +def _row_summary(cards: Sequence[ModelCard], status: str, absolute_links: bool = False) -> str: + if len(cards) == 1: + card = cards[0] + return "{summary} {paper}".format( + summary=_single_line(card.summary).rstrip(".") + ".", + paper=_paper_sentence(card), + ) + return _family_row_summary(cards, status, absolute_links=absolute_links) + + +def _stat_card(title: str, value: str, note: str) -> List[str]: + return [ + ".. grid-item-card:: {title}".format(title=title), + " :class-card: catalog-stat-card", + "", + " .. 
container:: catalog-stat-value", + "", + " {value}".format(value=value), + "", + " .. container:: catalog-stat-note", + "", + " {note}".format(note=note), + "", + ] + + +def _entry_task_labels(card: ModelCard, benchmark_links: Dict[str, Dict[str, Any]]) -> List[str]: + entry = benchmark_links.get(card.model_name) + ecosystems = entry.get("ecosystems") if entry else [] + ecosystem_tasks = [ + task + for ecosystem in ecosystems + for task in ecosystem.tasks + ] + if ecosystem_tasks: + return _task_display_labels(_ordered_unique(ecosystem_tasks)) + return _task_display_labels(card.tasks) + + +def _entry_benchmark_family(card: ModelCard, benchmark_links: Dict[str, Dict[str, Any]], absolute: bool = False) -> Optional[str]: + entry = benchmark_links.get(card.model_name) + family = entry.get("family") if entry else None + if family is None: + return None + return _benchmark_link(family.slug, family.display_name, absolute=absolute) + + +def _entry_ecosystem_links(card: ModelCard, benchmark_links: Dict[str, Dict[str, Any]], absolute: bool = False) -> List[str]: + entry = benchmark_links.get(card.model_name) + ecosystems = entry.get("ecosystems") if entry else [] + return [ + _benchmark_link(ecosystem.slug, ecosystem.display_name, absolute=absolute) + for ecosystem in ecosystems + ] + + +def _render_model_card(card: ModelCard, benchmark_links: Dict[str, Dict[str, Any]], absolute: bool = False) -> List[str]: + maturity_label = MATURITY_LABELS[card.catalog_status] + maturity_role = MATURITY_BADGE_ROLES[card.catalog_status] + task_badges = " ".join( + _badge("secondary", label) + for label in _entry_task_labels(card, benchmark_links) + ) + hazard_badge = _badge("primary", _display_hazard_label(card.hazard)) + maturity_badge = _badge(maturity_role, maturity_label) + benchmark_family = _entry_benchmark_family(card, benchmark_links, absolute=absolute) + ecosystems = _entry_ecosystem_links(card, benchmark_links, absolute=absolute) + detail_link = _doc_link(card, absolute=absolute) 
+ + lines = [ + ".. grid-item-card:: {title}".format(title=card.display_name), + " :class-card: catalog-entry-card", + "", + " .. container:: catalog-entry-summary", + "", + " {summary}".format(summary=_single_line(card.summary).rstrip(".") + "."), + "", + " .. container:: catalog-chip-row", + "", + " {chips}".format( + chips=" ".join( + chip for chip in [hazard_badge, task_badges, maturity_badge] if chip + ) + ), + "", + " .. container:: catalog-meta-row", + "", + " **Details:** {detail}".format(detail=detail_link), + "", + ] + + if benchmark_family: + lines.extend( + [ + " .. container:: catalog-meta-row", + "", + " **Benchmark Family:** {link}".format(link=benchmark_family), + "", + ] + ) + if ecosystems: + lines.extend( + [ + " .. container:: catalog-meta-row", + "", + " **Benchmark Ecosystems:** {links}".format( + links=", ".join(ecosystems), + ), + "", + ] + ) + + lines.extend( + [ + " .. container:: catalog-link-row", + "", + " {links}".format(links=_paper_links(card)), + "", + ] + ) + return lines + + +def _render_model_grid(cards: Sequence[ModelCard], benchmark_links: Dict[str, Dict[str, Any]], absolute: bool = False) -> List[str]: + lines: List[str] = [ + ".. grid:: 1 1 2 2", + " :gutter: 2", + " :class-container: catalog-grid", + "", + ] + for card in cards: + lines.extend(_indent_lines(_render_model_card(card, benchmark_links, absolute=absolute))) + return lines + + +def _render_recommended_grid(cards: Sequence[ModelCard], benchmark_links: Dict[str, Dict[str, Any]]) -> List[str]: + by_name = {card.model_name: card for card in cards} + lines: List[str] = [ + ".. grid:: 1 1 2 4", + " :gutter: 2", + " :class-container: catalog-recommend-grid", + "", + ] + for hazard in HAZARD_DISPLAY_ORDER: + model_name = STARTER_MODELS.get(hazard) + card = by_name.get(model_name) if model_name else None + if card is None: + continue + benchmark_family = _entry_benchmark_family(card, benchmark_links) + note = _single_line(card.summary).rstrip(".") + "." 
+ lines.extend( + _indent_lines( + [ + ".. grid-item-card:: {hazard}".format(hazard=hazard), + " :class-card: catalog-detail-card", + "", + " **Start with:** {detail}".format(detail=_doc_link(card)), + "", + " {note}".format(note=note), + "", + ( + " **Benchmark:** {benchmark}".format(benchmark=benchmark_family) + if benchmark_family + else " **Benchmark:** See the model detail page for compatible benchmark coverage." + ), + "", + ] + ) + ) + return lines + + +def render_model_page(cards: Sequence[ModelCard]) -> str: + public_cards = public_catalog_cards(cards) + grouped = group_cards_by_hazard(public_cards) + benchmark_links = _benchmark_links_by_model() + implemented_cards = [ + card for card in public_cards if card.catalog_status in {"core", "variant"} + ] + experimental_cards = [ + card for card in public_cards if card.catalog_status == "experimental" + ] + benchmark_linked_count = len( + [card for card in public_cards if card.model_name in benchmark_links] + ) + + lines: List[str] = [ + GENERATED_MARKER, + "", + "Models", + "===================", + "", + "Browse PyHazards model implementations across hazard families, compare", + "scope and maturity, and navigate to model-specific detail pages.", + "", + "At a Glance", + "-----------", + "", + ".. 
grid:: 1 2 4 4", + " :gutter: 2", + " :class-container: catalog-grid", + "", + ] + lines.extend( + _indent_lines( + _stat_card( + "Hazard Families", + str(len(grouped)), + "Catalog tabs grouped by the normalized public hazard taxonomy.", + ) + ) + ) + lines.extend( + _indent_lines( + _stat_card( + "Implemented Models", + str(len(implemented_cards)), + "Public core baselines plus additional implemented variants.", + ) + ) + ) + lines.extend( + _indent_lines( + _stat_card( + "Experimental Adapters", + str(len(experimental_cards)), + "Prototype weather-model integrations kept separate from the stable catalog.", + ) + ) + ) + lines.extend( + _indent_lines( + _stat_card( + "Benchmark-linked Models", + str(benchmark_linked_count), + "Models with explicit benchmark-family or ecosystem links on this page.", + ) + ) + ) + + lines.extend( + [ + "", + "Catalog by Hazard", + "-----------------", + "", + "Use the hazard tabs below to browse the public catalog. Each card keeps", + "the index-page summary short, then links into model-specific detail", + "pages and compatible benchmark coverage.", + "", + ".. tab-set::", + " :class: catalog-tabs", + "", + ] + ) + + for hazard, hazard_cards in grouped.items(): + cards_by_status = _cards_by_status(hazard_cards) + implemented = cards_by_status["core"] + cards_by_status["variant"] + experimental = cards_by_status["experimental"] + tab_lines: List[str] = [ + ".. tab-item:: {hazard}".format(hazard=hazard), + "", + " .. container:: catalog-section-note", + "", + " {summary}".format(summary=HAZARD_SECTION_SUMMARIES[hazard]), + "", + " .. rubric:: {title}".format(title=IMPLEMENTED_SECTION_TITLE), + "", + " .. 
container:: catalog-section-note", + "", + " {summary}".format(summary=IMPLEMENTED_SECTION_SUMMARY), + "", + ] + if implemented: + tab_lines.extend(_indent_lines(_render_model_grid(implemented, benchmark_links))) + else: + tab_lines.extend( + [ + " No public implemented methods are currently available for this hazard family.", + "", + ] + ) + + if experimental: + tab_lines.extend( + [ + " .. rubric:: {title}".format(title=EXPERIMENTAL_SECTION_TITLE), + "", + " .. container:: catalog-section-note", + "", + " {summary}".format(summary=EXPERIMENTAL_SECTION_SUMMARY), + "", + ] + ) + tab_lines.extend(_indent_lines(_render_model_grid(experimental, benchmark_links))) + tab_lines.append("") + lines.extend(_indent_lines(tab_lines)) + + lines.extend( + [ + "", + "Recommended Entry Points", + "------------------------", + "", + "If you are new to PyHazards, these four models provide the clearest", + "starting point for each hazard family.", + "", + ] + ) + lines.extend(_render_recommended_grid(public_cards, benchmark_links)) + + lines.extend( + [ + "", + "Programmatic Use", + "----------------", + "", + "Use :doc:`api/pyhazards.models` for the developer registry workflow,", + "builder examples, and package-level API lookup. Use", + ":doc:`pyhazards_benchmarks` to compare compatible benchmark families", + "before selecting a model for evaluation.", + "", + ".. 
toctree::", + " :maxdepth: 1", + " :hidden:", + "", + ] + ) + for card in public_cards: + lines.append(" modules/{slug}".format(slug=card.module_doc_name)) + + lines.append("") + return "\n".join(lines) + + +def render_api_page(cards: Sequence[ModelCard]) -> str: + grouped = group_cards_by_hazard(public_catalog_cards(cards)) + lines: List[str] = [ + GENERATED_MARKER, + "", + "pyhazards.models package", + "========================", + "", + "Catalog Summary", + "---------------", + "", + "This page links the public model catalog, the developer registry", + "workflow, and the package submodules used to implement model builders.", + "", + "For the curated browsing experience, use :doc:`/pyhazards_models`.", + "", + ] + for hazard, hazard_cards in grouped.items(): + lines.extend([hazard, "~" * len(hazard), ""]) + cards_by_status = _cards_by_status(hazard_cards) + implemented_cards = cards_by_status["core"] + cards_by_status["variant"] + experimental_cards = cards_by_status["experimental"] + lines.extend([IMPLEMENTED_SECTION_TITLE, "+" * len(IMPLEMENTED_SECTION_TITLE), ""]) + if not implemented_cards: + lines.extend( + [ + "No public implemented methods are currently available for this hazard family.", + "", + ] + ) + else: + links = ", ".join(_doc_link(card, absolute=True) for card in implemented_cards) + lines.extend([links + ".", ""]) + + if experimental_cards: + lines.extend([EXPERIMENTAL_SECTION_TITLE, "+" * len(EXPERIMENTAL_SECTION_TITLE), ""]) + links = ", ".join(_doc_link(card, absolute=True) for card in experimental_cards) + lines.extend([links + ".", ""]) + + lines.extend( + [ + "Developer Registry Workflow", + "---------------------------", + "", + "Use this section when you need the package-level builder and registry", + "interface rather than the public catalog presentation.", + "", + "Build a Registered Model", + "~~~~~~~~~~~~~~~~~~~~~~~~", + "", + ".. 
code-block:: python", + "", + " from pyhazards.models import build_model", + "", + " model = build_model(", + ' name="phasenet",', + ' task="regression",', + " in_channels=3,", + " )", + "", + "Register a Custom Model", + "~~~~~~~~~~~~~~~~~~~~~~~", + "", + ".. code-block:: python", + "", + " import torch.nn as nn", + " from pyhazards.models import build_model, register_model", + "", + " def my_custom_builder(task: str, in_dim: int, out_dim: int, **kwargs) -> nn.Module:", + ' hidden = kwargs.get("hidden_dim", 128)', + " return nn.Sequential(", + " nn.Linear(in_dim, hidden),", + " nn.ReLU(),", + " nn.Linear(hidden, out_dim),", + " )", + "", + ' register_model("my_mlp", my_custom_builder, defaults={"hidden_dim": 128})', + ' model = build_model(name="my_mlp", task="regression", in_dim=16, out_dim=1)', + "", + "Notes", + "~~~~~", + "", + "- Builders receive ``task`` plus any kwargs you pass.", + "- ``register_model`` stores optional defaults so configs can stay small.", + "- Use :doc:`/implementation` for the full contributor workflow.", + "", + "Submodules", + "----------", + "", + "pyhazards.models.backbones module", + "----------------------------------", + "", + ".. automodule:: pyhazards.models.backbones", + " :members:", + " :undoc-members:", + " :show-inheritance:", + "", + "pyhazards.models.heads module", + "------------------------------", + "", + ".. automodule:: pyhazards.models.heads", + " :members:", + " :undoc-members:", + " :show-inheritance:", + "", + "pyhazards.models.builder module", + "-------------------------------", + "", + ".. automodule:: pyhazards.models.builder", + " :members:", + " :undoc-members:", + " :show-inheritance:", + "", + "pyhazards.models.registry module", + "--------------------------------", + "", + ".. automodule:: pyhazards.models.registry", + " :members:", + " :undoc-members:", + " :show-inheritance:", + "", + "Module contents", + "---------------", + "", + ".. 
automodule:: pyhazards.models", + " :members:", + " :undoc-members:", + " :show-inheritance:", + "", + ] + ) + return "\n".join(lines) + + +def render_module_page(card: ModelCard) -> str: + title = card.display_name + benchmark_links = _benchmark_links_by_model() + task_labels = _entry_task_labels(card, benchmark_links) + benchmark_family = _entry_benchmark_family(card, benchmark_links, absolute=True) + ecosystems = _entry_ecosystem_links(card, benchmark_links, absolute=True) + lines: List[str] = [ + GENERATED_MARKER, + "", + ] + if not card.include_in_public_catalog: + lines.extend([":orphan:", ""]) + + lines.extend( + [ + title, + "=" * len(title), + "", + "Overview", + "--------", + "", + _single_line(card.description[0]), + "", + "At a Glance", + "-----------", + "", + ".. grid:: 1 2 4 4", + " :gutter: 2", + " :class-container: catalog-grid", + "", + ] + ) + lines.extend( + _indent_lines( + _stat_card("Hazard Family", _display_hazard_label(card.hazard), "Public catalog grouping used for this model.") + ) + ) + lines.extend( + _indent_lines( + _stat_card("Maturity", MATURITY_LABELS[card.catalog_status], "Catalog maturity label used on the index page.") + ) + ) + lines.extend( + _indent_lines( + _stat_card("Tasks", str(len(task_labels)), ", ".join(task_labels)) + ) + ) + lines.extend( + _indent_lines( + _stat_card( + "Benchmark Family", + benchmark_family or "Unmapped", + "Primary benchmark-family link used for compatible evaluation coverage.", + ) + ) + ) + lines.extend( + [ + "", + "Description", + "-----------", + "", + ] + ) + for paragraph in card.description: + lines.append(_single_line(paragraph)) + lines.append("") + + lines.extend( + [ + "Benchmark Compatibility", + "-----------------------", + "", + "**Primary benchmark family:** {family}".format( + family=benchmark_family or "Not yet mapped." 
+ ), + "", + ] + ) + if ecosystems: + lines.extend( + [ + "**Mapped benchmark ecosystems:** {ecosystems}".format( + ecosystems=", ".join(ecosystems) + ), + "", + ] + ) + lines.extend( + [ + "External References", + "-------------------", + "", + _paper_links(card), + "", + "Registry Name", + "-------------", + "", + "Primary entrypoint: ``{name}``".format(name=card.model_name), + "", + ] + ) + + if card.family_label: + lines.append("Family grouping: ``{family}``".format(family=card.family_label)) + lines.append("") + + if card.aliases: + lines.append("Aliases: {aliases}".format( + aliases=", ".join("``{name}``".format(name=name) for name in card.aliases) + )) + lines.append("") + + lines.extend( + [ + "Supported Tasks", + "---------------", + "", + ] + ) + for task in task_labels: + lines.append("- {task}".format(task=task)) + lines.append("") + lines.extend( + [ + "Programmatic Use", + "----------------", + "", + ".. code-block:: python", + "", + _indent_block(card.example), + "", + ] + ) + + if card.notes: + lines.extend(["Notes", "-----", ""]) + for note in card.notes: + lines.append("- {note}".format(note=note)) + lines.append("") + + return "\n".join(lines) + + +def rendered_docs(cards: Sequence[ModelCard]) -> Dict[Path, str]: + targets: Dict[Path, str] = { + MODEL_PAGE_PATH: render_model_page(cards), + API_PAGE_PATH: render_api_page(cards), + } + for card in cards: + targets[card.module_doc_path] = render_module_page(card) + return targets + + +def sync_generated_docs(cards: Sequence[ModelCard], check: bool = False) -> List[Path]: + changes: List[Path] = [] + targets = rendered_docs(cards) + + for path, content in targets.items(): + current = path.read_text(encoding="utf-8") if path.exists() else None + if current != content: + changes.append(path) + if not check: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(content, encoding="utf-8") + + managed_paths = set(targets.keys()) + for path in MODULE_DOCS_DIR.glob("models_*.rst"): + if path in 
managed_paths or not path.exists(): + continue + text = path.read_text(encoding="utf-8") + if GENERATED_MARKER not in text: + continue + changes.append(path) + if not check: + path.unlink() + + return changes + + +def model_catalog_alignment_issues(cards: Sequence[ModelCard]) -> List[str]: + from pyhazards.models import available_models + + issues: List[str] = [] + registered = set(available_models()) + for card in cards: + for name in card.registry_names: + if name not in registered: + issues.append("Model '{name}' is documented but not registered.".format(name=name)) + return issues + + +def builder_contract_issues(card: ModelCard) -> List[str]: + from pyhazards.models.registry import get_model_config + + issues: List[str] = [] + config = get_model_config(card.model_name) + if config is None: + return ["Model '{name}' is not registered.".format(name=card.model_name)] + + builder = config["builder"] + if builder.__name__ != card.builder_name: + issues.append( + "Model card expects builder '{expected}' but registry uses '{actual}'.".format( + expected=card.builder_name, + actual=builder.__name__, + ) + ) + + params = inspect.signature(builder).parameters + has_var_kwargs = any( + parameter.kind == inspect.Parameter.VAR_KEYWORD + for parameter in params.values() + ) + if "task" not in params and not has_var_kwargs: + issues.append( + "Builder '{name}' must accept a 'task' argument or **kwargs.".format( + name=builder.__name__ + ) + ) + if not has_var_kwargs: + issues.append( + "Builder '{name}' should accept **kwargs for registry compatibility.".format( + name=builder.__name__ + ) + ) + return issues + + +def source_contract_issues(card: ModelCard) -> List[str]: + issues: List[str] = [] + if not card.source_path.exists(): + return [ + "Documented source file does not exist: {path}".format(path=card.source_file) + ] + + source = card.source_path.read_text(encoding="utf-8") + if "ValueError" not in source or ("shape" not in source and "ndim" not in source): + 
issues.append( + "Source file '{path}' should include explicit input-shape validation with a clear error.".format( + path=card.source_file + ) + ) + return issues + + +def _dtype_for_name(dtype_name: str) -> torch.dtype: + if not hasattr(torch, dtype_name): + raise ValueError("Unsupported torch dtype in smoke test: {name}".format(name=dtype_name)) + dtype = getattr(torch, dtype_name) + if not isinstance(dtype, torch.dtype): + raise ValueError("Unsupported torch dtype in smoke test: {name}".format(name=dtype_name)) + return dtype + + +def _make_tensor(spec: SmokeTensorSpec) -> torch.Tensor: + return torch.randn(*spec.shape, dtype=_dtype_for_name(spec.dtype)) + + +def _prepare_smoke_input(spec: SmokeInputSpec) -> Any: + kind = spec.kind.lower() + if kind == "tensor": + return _make_tensor(spec.tensor) + if kind == "mapping": + return {name: _make_tensor(tensor_spec) for name, tensor_spec in spec.mapping.items()} + return {name: _make_tensor(tensor_spec) for name, tensor_spec in spec.kwargs.items()} + + +def _shape_of_output(output: Any) -> Any: + if isinstance(output, torch.Tensor): + return list(output.shape) + if isinstance(output, (list, tuple)): + return [_shape_of_output(item) for item in output] + return type(output).__name__ + + +def run_smoke_test(card: ModelCard) -> Dict[str, Any]: + from pyhazards.models import build_model + + torch.manual_seed(0) + model = build_model( + name=card.model_name, + task=card.smoke_test.task, + **card.smoke_test.build_kwargs + ) + model.eval() + prepared = _prepare_smoke_input(card.smoke_test.input) + with torch.no_grad(): + if card.smoke_test.input.kind.lower() == "tensor": + output = model(prepared) + elif card.smoke_test.input.kind.lower() == "mapping": + output = model(prepared) + else: + output = model(**prepared) + + actual_shape = _shape_of_output(output) + expected = card.smoke_test.expected_output + if expected.kind.lower() == "tensor": + ok = actual_shape == expected.shape + else: + ok = actual_shape == 
expected.shapes + + return { + "ok": ok, + "actual_shape": actual_shape, + "expected_shape": expected.shape if expected.kind.lower() == "tensor" else expected.shapes, + "summary": "{name}: output shape {actual}".format( + name=card.model_name, + actual=actual_shape, + ), + } + + +def touched_card_names(cards: Sequence[ModelCard], changed_files: Iterable[str]) -> List[str]: + source_to_name = { + str(card.source_file): card.model_name + for card in cards + } + names: Set[str] = set() + for path in changed_files: + if path.startswith("pyhazards/model_cards/") and path.endswith((".yaml", ".yml")): + names.add(Path(path).stem) + if path in source_to_name: + names.add(source_to_name[path]) + return sorted(name for name in names if name) diff --git a/pyhazards/models/__init__.py b/pyhazards/models/__init__.py new file mode 100644 index 00000000..55d0f5cb --- /dev/null +++ b/pyhazards/models/__init__.py @@ -0,0 +1,1020 @@ +from .backbones import CNNPatchEncoder, MLPBackbone, TemporalEncoder +from .asufm import ASUFM, asufm_builder +from .builder import build_model, default_builder +from .cnn_aspp import WildfireCNNASPP, cnn_aspp_builder +from .eqnet import EQNet, eqnet_builder +from .eqtransformer import EQTransformer, eqtransformer_builder +from .firecastnet import FireCastNet, firecastnet_builder +from .firemm_ir import FireMMIR, firemm_ir_builder +from .firepred import FirePred, firepred_builder +from .gemini_25_pro_wildfire_prompted import ( + Gemini25ProWildfirePrompted, + gemini_25_pro_wildfire_prompted_builder, +) +from .internvl3_wildfire_prompted import ( + InternVL3WildfirePrompted, + internvl3_wildfire_prompted_builder, +) +from .llama4_wildfire_prompted import ( + Llama4WildfirePrompted, + llama4_wildfire_prompted_builder, +) +from .modis_active_fire_c61 import MODISActiveFireC61, modis_active_fire_c61_builder +from .prithvi_burnscars import PrithviBurnScars, prithvi_burnscars_builder +from .prithvi_eo_2_tl import PrithviEO2TL, prithvi_eo_2_tl_builder +from 
.prithvi_wxc import PrithviWxC, prithvi_wxc_builder +from .qwen25_vl_wildfire_prompted import ( + Qwen25VLWildfirePrompted, + qwen25_vl_wildfire_prompted_builder, +) +from .ts_satfire import TSSatFire, ts_satfire_builder +from .viirs_375m_active_fire import VIIRS375mActiveFire, viirs_375m_active_fire_builder +from .floodcast import FloodCast, floodcast_builder +from .forefire import ForeFireAdapter, forefire_builder +from .fourcastnet_tc import FourCastNetTC, fourcastnet_tc_builder +from .gpd import GPD, gpd_builder +from .google_flood_forecasting import GoogleFloodForecasting, google_flood_forecasting_builder +from .graphcast_tc import GraphCastTC, graphcast_tc_builder +from .heads import ClassificationHead, RegressionHead, SegmentationHead +from .hurricast import Hurricast, hurricast_builder +from .hydrographnet import HydroGraphNet, HydroGraphNetLoss, hydrographnet_builder +from .neuralhydrology_ealstm import NeuralHydrologyEALSTM, neuralhydrology_ealstm_builder +from .neuralhydrology_lstm import NeuralHydrologyLSTM, neuralhydrology_lstm_builder +from .pangu_tc import PanguTC, pangu_tc_builder +from .phasenet import PhaseNet, phasenet_builder +from .registry import available_models, register_model +from .saf_net import SAFNet, saf_net_builder +from .tcif_fusion import TCIFFusion, tcif_fusion_builder +from .tropicalcyclone_mlp import TropicalCycloneMLP, tropicalcyclone_mlp_builder +from .tropicyclonenet import TropiCycloneNet, tropicyclonenet_builder +from .urbanfloodcast import UrbanFloodCast, urbanfloodcast_builder +from .wavecastnet import ( + ConvLEMCell, + WaveCastNet, + WaveCastNetLoss, + WavefieldMetrics, + wavecastnet_builder, +) +from .wildfire_aspp import TverskyLoss, WildfireASPP, wildfire_aspp_builder +from .wildfire_fpa import WildfireFPA, wildfire_fpa_builder +from .wildfire_mamba import WildfireMamba, wildfire_mamba_builder +from .wildfiregpt import WildfireGPTReasoner, wildfiregpt_builder +from .logistic_regression import LogisticRegressionModel, 
logistic_regression_builder +from .random_forest import RandomForestModel, random_forest_builder +from .xgboost import XGBoostModel, xgboost_builder +from .lightgbm import LightGBMModel, lightgbm_builder +from .unet import TinyUNet, unet_builder +from .resnet18_unet import TinyResNet18UNet, resnet18_unet_builder +from .attention_unet import TinyAttentionUNet, attention_unet_builder +from .deeplabv3p import TinyDeepLabV3P, deeplabv3p_builder +from .convlstm import TinyConvLSTM, convlstm_builder +from .mau import TinyMAU, mau_builder +from .predrnn_v2 import TinyPredRNNv2, predrnn_v2_builder +from .rainformer import TinyRainformer, rainformer_builder +from .earthformer import TinyEarthFormer, earthformer_builder +from .swinlstm import TinySwinLSTM, swinlstm_builder +from .earthfarseer import TinyEarthFarseer, earthfarseer_builder +from .convgru_trajgru import TinyConvGRTrajGRU, convgru_trajgru_builder +from .tcn import TinyTCN, tcn_builder +from .utae import TinyUTAE, utae_builder +from .segformer import TinySegFormer, segformer_builder +from .swin_unet import TinySwinUNet, swin_unet_builder +from .vit_segmenter import TinyViTSegmenter, vit_segmenter_builder +from .deep_ensemble import DeepEnsemble, deep_ensemble_builder +from .wildfirespreadts import WildfireSpreadTS, wildfirespreadts_builder +from .wrf_sfire import WRFSFireAdapter, wrf_sfire_builder + + +__all__ = [ + "build_model", + "available_models", + "register_model", + "MLPBackbone", + "CNNPatchEncoder", + "TemporalEncoder", + "ASUFM", + "asufm_builder", + "ClassificationHead", + "RegressionHead", + "SegmentationHead", + "EQNet", + "eqnet_builder", + "EQTransformer", + "eqtransformer_builder", + "FireCastNet", + "firecastnet_builder", + "FireMMIR", + "firemm_ir_builder", + "FirePred", + "firepred_builder", + "Gemini25ProWildfirePrompted", + "gemini_25_pro_wildfire_prompted_builder", + "InternVL3WildfirePrompted", + "internvl3_wildfire_prompted_builder", + "Llama4WildfirePrompted", + 
"llama4_wildfire_prompted_builder", + "MODISActiveFireC61", + "modis_active_fire_c61_builder", + "PrithviBurnScars", + "prithvi_burnscars_builder", + "PrithviEO2TL", + "prithvi_eo_2_tl_builder", + "PrithviWxC", + "prithvi_wxc_builder", + "Qwen25VLWildfirePrompted", + "qwen25_vl_wildfire_prompted_builder", + "TSSatFire", + "ts_satfire_builder", + "VIIRS375mActiveFire", + "viirs_375m_active_fire_builder", + "FloodCast", + "floodcast_builder", + "ForeFireAdapter", + "forefire_builder", + "FourCastNetTC", + "fourcastnet_tc_builder", + "GPD", + "gpd_builder", + "GoogleFloodForecasting", + "google_flood_forecasting_builder", + "GraphCastTC", + "graphcast_tc_builder", + "Hurricast", + "hurricast_builder", + "HydroGraphNet", + "HydroGraphNetLoss", + "hydrographnet_builder", + "NeuralHydrologyEALSTM", + "neuralhydrology_ealstm_builder", + "NeuralHydrologyLSTM", + "neuralhydrology_lstm_builder", + "PanguTC", + "pangu_tc_builder", + "PhaseNet", + "phasenet_builder", + "SAFNet", + "saf_net_builder", + "TCIFFusion", + "tcif_fusion_builder", + "TropicalCycloneMLP", + "tropicalcyclone_mlp_builder", + "TropiCycloneNet", + "tropicyclonenet_builder", + "UrbanFloodCast", + "urbanfloodcast_builder", + "WildfireASPP", + "TverskyLoss", + "wildfire_aspp_builder", + "WildfireCNNASPP", + "cnn_aspp_builder", + "WildfireFPA", + "wildfire_fpa_builder", + "WildfireMamba", + "wildfire_mamba_builder", + "WildfireGPTReasoner", + "wildfiregpt_builder", + "WildfireSpreadTS", + "wildfirespreadts_builder", + "WRFSFireAdapter", + "wrf_sfire_builder", + "ConvLEMCell", + "WaveCastNet", + "WaveCastNetLoss", + "WavefieldMetrics", + "wavecastnet_builder", +] + + +register_model( + "mlp", + default_builder, + defaults={"hidden_dim": 256, "depth": 2}, +) + +register_model( + "cnn", + default_builder, + defaults={"hidden_dim": 64, "in_channels": 3}, +) + +register_model( + "temporal", + default_builder, + defaults={"hidden_dim": 128, "num_layers": 1}, +) + +register_model( + "wildfire_fpa", + 
wildfire_fpa_builder, + defaults={ + "in_dim": 8, + "out_dim": 1, + "input_dim": 7, + "output_dim": 1, + "depth": 2, + "hidden_dim": 64, + "activation": "relu", + "dropout": 0.1, + "latent_dim": 32, + "num_layers": 2, + "lookback": 12, + }, +) + +register_model( + "wildfire_mamba", + wildfire_mamba_builder, + defaults={ + "hidden_dim": 128, + "gcn_hidden": 64, + "mamba_layers": 2, + "state_dim": 64, + "conv_kernel": 5, + "dropout": 0.1, + "with_count_head": False, + }, +) + +register_model( + "wildfire_aspp", + wildfire_aspp_builder, + defaults={"in_channels": 12}, +) + +register_model( + "asufm", + asufm_builder, + defaults={ + "image_size": 64, + "patch_size": 4, + "in_channels": 6, + "out_dim": 1, + "embed_dim": 96, + "depths": (2, 2, 2, 2), + "num_heads": (3, 6, 12, 24), + "window_size": 8, + "mlp_ratio": 4.0, + "dropout": 0.0, + "drop_path_rate": 0.1, + "focal_window": 3, + "focal_level": 2, + "use_focal_modulation": True, + "spatial_attention": True, + "skip_num": 3, + "use_checkpoint": True, + }, +) + +register_model( + "wildfirespreadts", + wildfirespreadts_builder, + defaults={ + "history": 4, + "in_channels": 6, + "hidden_dim": 32, + "out_channels": 1, + "dropout": 0.1, + }, +) + +register_model( + "forefire", + forefire_builder, + defaults={ + "in_channels": 12, + "out_channels": 1, + "diffusion_steps": 2, + }, +) + +register_model( + "wrf_sfire", + wrf_sfire_builder, + defaults={ + "in_channels": 12, + "out_channels": 1, + "diffusion_steps": 3, + }, +) + +register_model( + "firecastnet", + firecastnet_builder, + defaults={ + "in_channels": 12, + "hidden_dim": 32, + "out_channels": 1, + "dropout": 0.1, + }, +) + +register_model( + "firepred", + firepred_builder, + defaults={ + "history": 5, + "in_channels": 8, + "hidden_dim": 32, + "out_channels": 1, + "dropout": 0.1, + }, +) + +register_model( + "modis_active_fire_c61", + modis_active_fire_c61_builder, + defaults={ + "in_channels": 5, + "hidden_dim": 24, + "out_dim": 1, + "context_kernel": 9, + 
"dropout": 0.1, + }, +) + +register_model( + "prithvi_eo_2_tl", + prithvi_eo_2_tl_builder, + defaults={ + "image_size": 32, + "in_channels": 6, + "out_dim": 1, + "patch_size": 4, + "embed_dim": 128, + "depth": 4, + "num_heads": 4, + "mlp_ratio": 4.0, + "dropout": 0.1, + "time_dim": 1, + "location_dim": 2, + "decoder_channels": 64, + }, +) + +register_model( + "prithvi_burnscars", + prithvi_burnscars_builder, + defaults={ + "image_size": 32, + "in_channels": 6, + "out_dim": 1, + "patch_size": 4, + "embed_dim": 128, + "depth": 4, + "num_heads": 4, + "mlp_ratio": 4.0, + "dropout": 0.1, + "time_dim": 1, + "location_dim": 2, + "decoder_channels": 64, + }, +) + +register_model( + "prithvi_wxc", + prithvi_wxc_builder, + defaults={ + "image_size": 32, + "in_channels": 8, + "out_dim": 1, + "patch_size": 4, + "embed_dim": 128, + "depth": 4, + "num_heads": 4, + "mlp_ratio": 4.0, + "dropout": 0.1, + "lead_time_dim": 1, + "variable_summary_dim": 8, + "decoder_channels": 64, + }, +) + +register_model( + "gemini_25_pro_wildfire_prompted", + gemini_25_pro_wildfire_prompted_builder, + defaults={ + "in_channels": 6, + "out_dim": 1, + "hidden_dim": 96, + "prompt_dim": 32, + "num_prompt_tokens": 6, + "num_heads": 8, + "dropout": 0.1, + }, +) + +register_model( + "internvl3_wildfire_prompted", + internvl3_wildfire_prompted_builder, + defaults={ + "in_channels": 6, + "out_dim": 1, + "hidden_dim": 96, + "prompt_dim": 32, + "num_prompt_tokens": 5, + "num_heads": 6, + "dropout": 0.1, + }, +) + +register_model( + "llama4_wildfire_prompted", + llama4_wildfire_prompted_builder, + defaults={ + "in_channels": 6, + "out_dim": 1, + "hidden_dim": 80, + "prompt_dim": 32, + "num_prompt_tokens": 4, + "num_heads": 8, + "dropout": 0.1, + }, +) + +register_model( + "qwen25_vl_wildfire_prompted", + qwen25_vl_wildfire_prompted_builder, + defaults={ + "in_channels": 6, + "out_dim": 1, + "hidden_dim": 64, + "prompt_dim": 24, + "num_prompt_tokens": 4, + "num_heads": 4, + "dropout": 0.1, + }, +) + 
+register_model( + "ts_satfire", + ts_satfire_builder, + defaults={ + "history": 5, + "in_channels": 8, + "hidden_dim": 32, + "out_channels": 1, + "dropout": 0.1, + }, +) + +register_model( + "viirs_375m_active_fire", + viirs_375m_active_fire_builder, + defaults={ + "in_channels": 5, + "hidden_dim": 24, + "out_dim": 1, + "context_kernel": 7, + "dropout": 0.1, + }, +) + +register_model( + "wildfiregpt", + wildfiregpt_builder, + defaults={ + "in_channels": 12, + "out_dim": 1, + "base_channels": 32, + "hidden_dim": 64, + "profile_dim": 8, + "retrieved_dim": 16, + "num_heads": 4, + "dropout": 0.1, + }, +) + +register_model( + "firemm_ir", + firemm_ir_builder, + defaults={ + "in_channels": 6, + "out_dim": 1, + "hidden_dim": 64, + "instruction_dim": 16, + "num_memory_slots": 3, + "num_heads": 4, + "dropout": 0.1, + }, +) + +register_model( + "wildfire_cnn_aspp", + cnn_aspp_builder, + defaults={ + "in_channels": 12, + "base_channels": 32, + "aspp_channels": 32, + "dilations": (1, 3, 6, 12), + "dropout": 0.0, + }, +) + +register_model( + "hydrographnet", + hydrographnet_builder, + defaults={ + "hidden_dim": 64, + "harmonics": 5, + "num_gn_blocks": 5, + }, +) + +register_model( + "neuralhydrology_lstm", + neuralhydrology_lstm_builder, + defaults={ + "input_dim": 2, + "hidden_dim": 64, + "num_layers": 2, + "out_dim": 1, + "dropout": 0.1, + }, +) + +register_model( + "neuralhydrology_ealstm", + neuralhydrology_ealstm_builder, + defaults={ + "input_dim": 2, + "hidden_dim": 64, + "num_layers": 1, + "out_dim": 1, + "dropout": 0.1, + }, +) + +register_model( + "floodcast", + floodcast_builder, + defaults={ + "in_channels": 3, + "history": 4, + "hidden_dim": 32, + "out_channels": 1, + "dropout": 0.1, + }, +) + +register_model( + "urbanfloodcast", + urbanfloodcast_builder, + defaults={ + "in_channels": 3, + "history": 4, + "base_channels": 32, + "out_channels": 1, + }, +) + +register_model( + "google_flood_forecasting", + google_flood_forecasting_builder, + defaults={ + 
"input_dim": 2, + "hidden_dim": 64, + "out_dim": 1, + "history": 4, + "dropout": 0.1, + }, +) + +register_model( + "phasenet", + phasenet_builder, + defaults={ + "in_channels": 3, + "hidden_dim": 32, + }, +) + +register_model( + "eqtransformer", + eqtransformer_builder, + defaults={ + "in_channels": 3, + "hidden_dim": 48, + "num_layers": 2, + "dropout": 0.1, + }, +) + +register_model( + "gpd", + gpd_builder, + defaults={ + "in_channels": 3, + "hidden_dim": 32, + "dropout": 0.1, + }, +) + +register_model( + "eqnet", + eqnet_builder, + defaults={ + "in_channels": 3, + "hidden_dim": 48, + "num_heads": 4, + "num_layers": 2, + "dropout": 0.1, + }, +) + +register_model( + "wavecastnet", + wavecastnet_builder, + defaults={ + "hidden_dim": 144, + "num_layers": 2, + "kernel_size": 3, + "dt": 1.0, + "activation": "tanh", + "dropout": 0.1, + }, +) + +register_model( + "tropicalcyclone_mlp", + tropicalcyclone_mlp_builder, + defaults={ + "input_dim": 8, + "history": 6, + "hidden_dim": 64, + "horizon": 5, + "output_dim": 3, + "dropout": 0.1, + }, +) + +register_model( + "hurricast", + hurricast_builder, + defaults={ + "input_dim": 8, + "hidden_dim": 64, + "num_layers": 2, + "horizon": 5, + "output_dim": 3, + "dropout": 0.1, + }, +) + +register_model( + "tropicyclonenet", + tropicyclonenet_builder, + defaults={ + "input_dim": 8, + "hidden_dim": 64, + "horizon": 5, + "output_dim": 3, + "num_layers": 2, + "dropout": 0.1, + }, +) + +register_model( + "saf_net", + saf_net_builder, + defaults={ + "input_dim": 8, + "hidden_dim": 64, + "horizon": 5, + "dropout": 0.1, + }, +) + +register_model( + "tcif_fusion", + tcif_fusion_builder, + defaults={ + "input_dim": 8, + "hidden_dim": 64, + "horizon": 5, + "output_dim": 3, + "dropout": 0.1, + }, +) + +register_model( + "graphcast_tc", + graphcast_tc_builder, + defaults={ + "input_dim": 8, + "hidden_dim": 96, + "horizon": 5, + "output_dim": 3, + "num_layers": 2, + "num_heads": 4, + "dropout": 0.1, + }, +) + +register_model( + "pangu_tc", + 
pangu_tc_builder, + defaults={ + "input_dim": 8, + "hidden_dim": 96, + "horizon": 5, + "output_dim": 3, + "dropout": 0.1, + }, +) + +register_model( + "fourcastnet_tc", + fourcastnet_tc_builder, + defaults={ + "input_dim": 8, + "history": 6, + "hidden_dim": 96, + "horizon": 5, + "output_dim": 3, + "dropout": 0.1, + }, +) + + +__all__.extend([ + "LogisticRegressionModel", "logistic_regression_builder", + "RandomForestModel", "random_forest_builder", + "XGBoostModel", "xgboost_builder", + "LightGBMModel", "lightgbm_builder", + "TinyUNet", "unet_builder", + "TinyResNet18UNet", "resnet18_unet_builder", + "TinyAttentionUNet", "attention_unet_builder", + "TinyDeepLabV3P", "deeplabv3p_builder", + "TinyConvLSTM", "convlstm_builder", + "TinyMAU", "mau_builder", + "TinyPredRNNv2", "predrnn_v2_builder", + "TinyRainformer", "rainformer_builder", + "TinyEarthFormer", "earthformer_builder", + "TinySwinLSTM", "swinlstm_builder", + "TinyEarthFarseer", "earthfarseer_builder", + "TinyConvGRTrajGRU", "convgru_trajgru_builder", + "TinyTCN", "tcn_builder", + "TinyUTAE", "utae_builder", + "TinySegFormer", "segformer_builder", + "TinySwinUNet", "swin_unet_builder", + "TinyViTSegmenter", "vit_segmenter_builder", + "DeepEnsemble", "deep_ensemble_builder", +]) + + +register_model( + "logistic_regression", + logistic_regression_builder, + defaults={ + "solver": "lbfgs", + "max_iter": 500, + "class_weight": "balanced", + }, +) + +register_model( + "random_forest", + random_forest_builder, + defaults={ + "n_estimators": 500, + "max_depth": None, + "class_weight": "balanced_subsample", + }, +) + +register_model( + "xgboost", + xgboost_builder, + defaults={ + "max_depth": 8, + "eta": 0.05, + "subsample": 0.8, + "colsample_bytree": 0.8, + "num_boost_round": 800, + }, +) + +register_model( + "lightgbm", + lightgbm_builder, + defaults={ + "num_leaves": 63, + "learning_rate": 0.05, + "feature_fraction": 0.8, + "bagging_fraction": 0.8, + "num_boost_round": 800, + }, +) + +register_model( + "unet", + 
unet_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "base_channels": 16, + }, +) + +register_model( + "resnet18_unet", + resnet18_unet_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "stem_channels": 16, + }, +) + +register_model( + "attention_unet", + attention_unet_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "base_channels": 8, + }, +) + +register_model( + "deeplabv3p", + deeplabv3p_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "base_channels": 16, + }, +) + +register_model( + "convlstm", + convlstm_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "enc_channels": 16, + "hidden_channels": 16, + "num_layers": 2, + "kernel_size": 3, + }, +) + +register_model( + "mau", + mau_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "hidden_channels": 12, + }, +) + +register_model( + "predrnn_v2", + predrnn_v2_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "hidden_channels": 12, + }, +) + +register_model( + "rainformer", + rainformer_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "hidden_channels": 16, + "num_heads": 4, + "num_layers": 2, + }, +) + +register_model( + "earthformer", + earthformer_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "hidden_channels": 16, + "num_heads": 4, + "num_layers": 2, + }, +) + +register_model( + "swinlstm", + swinlstm_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "embed_dim": 16, + "hidden_channels": 16, + "num_heads": 4, + "window_size": 3, + }, +) + +register_model( + "earthfarseer", + earthfarseer_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "hidden_channels": 16, + "num_heads": 4, + "num_layers": 2, + }, +) + +register_model( + "convgru_trajgru", + convgru_trajgru_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "enc_channels": 16, + "hidden_channels": 16, + "kernel_size": 3, + }, +) + +register_model( + "tcn", + tcn_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + 
"embed_dim": 16, + "hidden_channels": 16, + "kernel_size": 3, + "num_levels": 3, + "dropout": 0.1, + }, +) + +register_model( + "utae", + utae_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "hidden_channels": 16, + "num_heads": 4, + }, +) + +register_model( + "segformer", + segformer_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "embed_dims": (16, 32), + "num_heads": (1, 2), + "sr_ratios": (4, 2), + "mlp_ratio": 2.0, + "dropout": 0.1, + }, +) + +register_model( + "swin_unet", + swin_unet_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "embed_dims": (16, 32), + "num_heads": (1, 2), + "window_size": 3, + "mlp_ratio": 2.0, + "dropout": 0.1, + }, +) + +register_model( + "vit_segmenter", + vit_segmenter_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "patch_size": 4, + "embed_dim": 64, + "depth": 4, + "num_heads": 4, + "mlp_ratio": 2.0, + "dropout": 0.1, + }, +) + +register_model( + "deep_ensemble", + deep_ensemble_builder, + defaults={ + "in_channels": 1, + "out_dim": 1, + "base_channels": 8, + "ensemble_size": 5, + }, +) diff --git a/pyhazards/models/_wildfire_benchmark_utils.py b/pyhazards/models/_wildfire_benchmark_utils.py new file mode 100644 index 00000000..9b8d0b8e --- /dev/null +++ b/pyhazards/models/_wildfire_benchmark_utils.py @@ -0,0 +1,110 @@ +from __future__ import annotations + +import inspect +from typing import Any, Optional + +import numpy as np +import torch +import torch.nn as nn + +from ..datasets.base import DataBundle + + +def require_task(task: str, allowed: set[str], model_name: str) -> None: + normalized = task.lower() + if normalized not in allowed: + allowed_text = ", ".join(sorted(allowed)) + raise ValueError(f"Model '{model_name}' does not support task={task!r}. 
Allowed tasks: {allowed_text}") + + +def filter_init_kwargs(callable_obj: Any, kwargs: dict[str, Any]) -> dict[str, Any]: + sig = inspect.signature(callable_obj) + accepts_kwargs = any(p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()) + if accepts_kwargs: + return dict(kwargs) + allowed = {name for name in sig.parameters if name != 'self'} + return {k: v for k, v in kwargs.items() if k in allowed} + + +class SegmentationPort(nn.Module): + def __init__(self, model: nn.Module, out_channels: int = 1): + super().__init__() + self.model = model + self.out_channels = int(out_channels) + self.output_head = nn.Identity() if self.out_channels == 1 else nn.Conv2d(1, self.out_channels, kernel_size=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + logits = self.model(x) + return self.output_head(logits) + + +def _to_numpy_2d(x: torch.Tensor) -> np.ndarray: + if not isinstance(x, torch.Tensor): + raise TypeError('Expected torch.Tensor inputs for estimator-based models.') + x_np = x.detach().cpu().float().numpy() + if x_np.ndim == 1: + x_np = x_np[:, None] + if x_np.ndim > 2: + x_np = x_np.reshape(x_np.shape[0], -1) + return x_np + + +def _to_numpy_labels(y: torch.Tensor) -> np.ndarray: + if not isinstance(y, torch.Tensor): + raise TypeError('Expected torch.Tensor targets for estimator-based models.') + y_np = y.detach().cpu().numpy() + if y_np.ndim > 1: + y_np = y_np.reshape(y_np.shape[0], -1) + if y_np.shape[1] != 1: + raise ValueError('Estimator-based models expect a single target column.') + y_np = y_np[:, 0] + return y_np.astype(np.int64) + + +class EstimatorPort(nn.Module): + def __init__(self): + super().__init__() + self._is_fitted = False + + def fit_bundle( + self, + data: DataBundle, + train_split: str = 'train', + val_split: Optional[str] = None, + **_: Any, + ) -> None: + train_data = data.get_split(train_split) + x_train = _to_numpy_2d(train_data.inputs) + y_train = _to_numpy_labels(train_data.targets) + + x_val = None + y_val = 
None + if val_split: + val_data = data.get_split(val_split) + x_val = _to_numpy_2d(val_data.inputs) + y_val = _to_numpy_labels(val_data.targets) + + self._fit_numpy(x_train=x_train, y_train=y_train, x_val=x_val, y_val=y_val) + self._is_fitted = True + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if not self._is_fitted: + raise RuntimeError( + f'{self.__class__.__name__} has not been fitted. Use Trainer.fit(...) with a tensor-backed DataBundle first.' + ) + x_np = _to_numpy_2d(x) + probs_pos = self._predict_positive_proba(x_np) + probs = np.stack([1.0 - probs_pos, probs_pos], axis=-1).astype(np.float32) + return torch.from_numpy(probs).to(x.device) + + def _fit_numpy( + self, + x_train: np.ndarray, + y_train: np.ndarray, + x_val: Optional[np.ndarray], + y_val: Optional[np.ndarray], + ) -> None: + raise NotImplementedError + + def _predict_positive_proba(self, x: np.ndarray) -> np.ndarray: + raise NotImplementedError diff --git a/pyhazards/models/asufm.py b/pyhazards/models/asufm.py new file mode 100644 index 00000000..f19fff14 --- /dev/null +++ b/pyhazards/models/asufm.py @@ -0,0 +1,583 @@ +from __future__ import annotations + +from typing import Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class DropPath(nn.Module): + def __init__(self, drop_prob: float = 0.0): + super().__init__() + self.drop_prob = float(drop_prob) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if self.drop_prob == 0.0 or not self.training: + return x + keep_prob = 1.0 - self.drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) + random_tensor = x.new_empty(shape).bernoulli_(keep_prob) + if keep_prob > 0.0: + random_tensor.div_(keep_prob) + return x * random_tensor + + +def _window_partition(x: torch.Tensor, window_size: int) -> tuple[torch.Tensor, int, int]: + b, h, w, c = x.shape + pad_h = (window_size - h % window_size) % window_size + pad_w = (window_size - w % window_size) % window_size + if pad_h > 0 or pad_w > 0: + x = 
class PatchEmbed(nn.Module):
    """Non-overlapping convolutional patch embedding with optional LayerNorm.

    Maps (B, in_channels, image_size, image_size) to
    (B, embed_dim, image_size // patch_size, image_size // patch_size).
    Raises ValueError for wrong rank, channel count, or spatial size.
    """

    def __init__(
        self,
        image_size: int = 64,
        patch_size: int = 4,
        in_channels: int = 6,
        embed_dim: int = 96,
        patch_norm: bool = True,
    ):
        super().__init__()
        self.image_size = int(image_size)
        self.patch_size = int(patch_size)
        self.in_channels = int(in_channels)
        self.embed_dim = int(embed_dim)
        # stride == kernel size => non-overlapping patches.
        self.proj = nn.Conv2d(
            self.in_channels,
            self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
        )
        self.norm = nn.LayerNorm(self.embed_dim) if patch_norm else nn.Identity()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if x.ndim != 4:
            raise ValueError(f"PatchEmbed expected (B,C,H,W), got {tuple(x.shape)}")
        _, channels, height, width = x.shape
        if channels != self.in_channels:
            raise ValueError(f"PatchEmbed expected {self.in_channels} channels, got {channels}")
        if height != self.image_size or width != self.image_size:
            raise ValueError(
                f"PatchEmbed expected spatial size ({self.image_size}, {self.image_size}), "
                f"got ({height}, {width})"
            )
        patches = self.proj(x)
        # LayerNorm normalizes the channel axis, so move channels last first.
        patches = self.norm(patches.permute(0, 2, 3, 1).contiguous())
        return patches.permute(0, 3, 1, 2).contiguous()
class PatchExpand(nn.Module):
    """2x spatial upsampling that halves the channel count.

    A 1x1 conv widens dim -> 2*dim channels, then pixel_shuffle(2) trades a
    factor of 4 in channels for a 2x2 spatial expansion, leaving dim // 2
    output channels, which are then LayerNorm-ed channels-last.
    """

    def __init__(self, dim: int):
        super().__init__()
        self.expand = nn.Conv2d(dim, 2 * dim, kernel_size=1, bias=False)
        self.norm = nn.LayerNorm(dim // 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        shuffled = F.pixel_shuffle(self.expand(x), upscale_factor=2)
        channels_last = shuffled.permute(0, 2, 3, 1).contiguous()
        return self.norm(channels_last).permute(0, 3, 1, 2).contiguous()


class FinalPatchExpandX4(nn.Module):
    """4x spatial upsampling that preserves the channel count.

    dim -> 16*dim channels via 1x1 conv, then pixel_shuffle(4) restores dim
    channels at 4x the spatial resolution.
    """

    def __init__(self, dim: int):
        super().__init__()
        self.expand = nn.Conv2d(dim, 16 * dim, kernel_size=1, bias=False)
        self.norm = nn.LayerNorm(dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        shuffled = F.pixel_shuffle(self.expand(x), upscale_factor=4)
        channels_last = shuffled.permute(0, 2, 3, 1).contiguous()
        return self.norm(channels_last).permute(0, 3, 1, 2).contiguous()
class MLP(nn.Module):
    """Transformer feed-forward block: Linear -> GELU -> Dropout -> Linear -> Dropout.

    The hidden width is ``int(dim * mlp_ratio)``; input and output widths are
    both ``dim``.
    """

    def __init__(self, dim: int, mlp_ratio: float = 4.0, dropout: float = 0.0):
        super().__init__()
        hidden = int(dim * mlp_ratio)
        self.net = nn.Sequential(
            nn.Linear(dim, hidden),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden, dim),
            nn.Dropout(dropout),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.net(x)
class SwinFocalBlock(nn.Module):
    """Swin-style transformer block operating on (B, C, H, W) feature maps.

    Pipeline: LayerNorm -> optional focal modulation -> (shifted) window
    multi-head self-attention with a DropPath residual -> LayerNorm + MLP with
    a second DropPath residual.

    NOTE(review): unlike canonical Swin, the shifted windows are NOT given an
    attention mask, so after the cyclic roll tokens from opposite image edges
    can attend to each other — confirm this simplification is intended.
    """

    def __init__(
        self,
        dim: int,
        num_heads: int,
        window_size: int,
        shift_size: int,
        mlp_ratio: float = 4.0,
        dropout: float = 0.0,
        drop_path: float = 0.0,
        use_focal: bool = False,
        focal_window: int = 3,
        focal_level: int = 2,
    ):
        super().__init__()
        self.dim = int(dim)
        self.window_size = int(window_size)
        self.shift_size = int(shift_size)
        self.use_focal = bool(use_focal)
        self.norm1 = nn.LayerNorm(self.dim)
        # Focal modulation is applied to the normalized channels-last tensor
        # before window attention (encoder-only feature in this port).
        self.focal = (
            FocalModulation(
                dim=self.dim,
                focal_window=focal_window,
                focal_level=focal_level,
                dropout=dropout,
            )
            if self.use_focal
            else None
        )
        # batch_first=True so windows can be fed as (num_windows, tokens, dim).
        self.attn = nn.MultiheadAttention(self.dim, num_heads=int(num_heads), dropout=dropout, batch_first=True)
        self.drop_path = DropPath(drop_path)
        self.norm2 = nn.LayerNorm(self.dim)
        self.mlp = MLP(dim=self.dim, mlp_ratio=mlp_ratio, dropout=dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if x.ndim != 4:
            raise ValueError(f"SwinFocalBlock expected (B,C,H,W), got {tuple(x.shape)}")
        b, c, h, w = x.shape
        if c != self.dim:
            raise ValueError(f"SwinFocalBlock expected {self.dim} channels, got {c}")

        # Work channels-last for LayerNorm / focal modulation / windowing.
        x_hw = x.permute(0, 2, 3, 1).contiguous()
        x_hw = self.norm1(x_hw)
        if self.focal is not None:
            x_hw = self.focal(x_hw)

        # Clamp the window to the feature map; disable shifting for 1-sized
        # windows and cap the shift at half the effective window.
        effective_window = min(self.window_size, h, w)
        effective_shift = 0 if effective_window <= 1 else min(self.shift_size, effective_window // 2)
        if effective_shift > 0:
            # Cyclic shift (Swin "shifted window" scheme, unmasked here).
            x_hw = torch.roll(x_hw, shifts=(-effective_shift, -effective_shift), dims=(1, 2))

        # _window_partition pads H/W up to a multiple of the window size and
        # returns (num_windows*B, window*window, C) plus the padded dims.
        windows, hp, wp = _window_partition(x_hw, effective_window)
        attn_out, _ = self.attn(windows, windows, windows, need_weights=False)
        # Residual inside the window token space.
        windows = windows + attn_out
        x_hw = _window_reverse(windows, effective_window, hp, wp, b)

        if effective_shift > 0:
            # Undo the cyclic shift.
            x_hw = torch.roll(x_hw, shifts=(effective_shift, effective_shift), dims=(1, 2))
        # Crop away any padding added by _window_partition.
        x_hw = x_hw[:, :h, :w, :]
        x_attn = x_hw.permute(0, 3, 1, 2).contiguous()
        x = x + self.drop_path(x_attn)

        # MLP branch runs on flattened (B, H*W, C) tokens with its own residual.
        tokens = x.permute(0, 2, 3, 1).reshape(b, h * w, c).contiguous()
        tokens = tokens + self.drop_path(self.mlp(self.norm2(tokens)))
        return tokens.view(b, h, w, c).permute(0, 3, 1, 2).contiguous()
class SpatialAttentionGate(nn.Module):
    """Gate a skip connection with a spatial mask computed from skip + gating.

    The gating tensor is bilinearly resized to the skip's spatial size when
    needed; the two are concatenated on channels, projected to a single-channel
    sigmoid mask, and the skip is multiplied by that mask.
    """

    def __init__(self, dim: int):
        super().__init__()
        self.project = nn.Sequential(
            nn.Conv2d(2 * dim, dim, kernel_size=1, bias=False),
            nn.GELU(),
            nn.Conv2d(dim, 1, kernel_size=1),
        )

    def forward(self, skip: torch.Tensor, gating: torch.Tensor) -> torch.Tensor:
        if gating.shape[-2:] != skip.shape[-2:]:
            gating = F.interpolate(gating, size=skip.shape[-2:], mode="bilinear", align_corners=False)
        fused = torch.cat([skip, gating], dim=1)
        attention = torch.sigmoid(self.project(fused))
        return skip * attention
class ASUFM(nn.Module):
    """
    Self-contained ASUFM port for PyHazards.

    This implementation follows the official ASUFM design at a high level:
    patch embedding, hierarchical Swin-style encoder stages, focal modulation in
    the encoder, and an attention-gated U-Net-style decoder. It intentionally
    avoids external dependencies such as `timm` and `einops` so the model can be
    built directly inside the main PyHazards library.
    """

    def __init__(
        self,
        image_size: int = 64,
        patch_size: int = 4,
        in_channels: int = 6,
        out_dim: int = 1,
        embed_dim: int = 96,
        depths: Sequence[int] = (2, 2, 2, 2),
        num_heads: Sequence[int] = (3, 6, 12, 24),
        window_size: int = 8,
        mlp_ratio: float = 4.0,
        dropout: float = 0.0,
        drop_path_rate: float = 0.1,
        focal_window: int = 3,
        focal_level: int = 2,
        use_focal_modulation: bool = True,
        spatial_attention: bool = True,
        skip_num: int = 3,
        use_checkpoint: bool = False,
    ):
        super().__init__()
        # use_checkpoint is accepted for interface compatibility only; gradient
        # checkpointing is not implemented in this port.
        _ = use_checkpoint

        if len(depths) != 4:
            raise ValueError(f"ASUFM expects 4 encoder depths, got {tuple(depths)}")
        if len(num_heads) != len(depths):
            raise ValueError("num_heads must have the same length as depths")
        if skip_num < 0 or skip_num > 3:
            raise ValueError(f"skip_num must be in [0, 3], got {skip_num}")

        self.image_size = int(image_size)
        self.patch_size = int(patch_size)
        self.in_channels = int(in_channels)
        self.out_dim = int(out_dim)
        self.skip_num = int(skip_num)

        # Channel width doubles at every encoder stage: embed_dim * 2**stage.
        dims = [int(embed_dim * (2**idx)) for idx in range(len(depths))]
        for dim, heads in zip(dims, num_heads):
            if dim % int(heads) != 0:
                raise ValueError(f"Channel dim {dim} must be divisible by num_heads={heads}")

        total_blocks = sum(int(depth) for depth in depths)
        # Linearly increasing stochastic-depth rates across all encoder blocks.
        drop_path_values = torch.linspace(0.0, float(drop_path_rate), total_blocks).tolist()

        self.patch_embed = PatchEmbed(
            image_size=self.image_size,
            patch_size=self.patch_size,
            in_channels=self.in_channels,
            embed_dim=int(embed_dim),
            patch_norm=True,
        )

        # Slice the per-block drop-path schedule stage by stage.
        cursor = 0
        self.encoder_stages = nn.ModuleList()
        for stage_idx, (dim, depth, heads) in enumerate(zip(dims, depths, num_heads)):
            stage_dpr = drop_path_values[cursor : cursor + depth]
            cursor += depth
            self.encoder_stages.append(
                EncoderStage(
                    dim=dim,
                    depth=int(depth),
                    num_heads=int(heads),
                    window_size=int(window_size),
                    mlp_ratio=float(mlp_ratio),
                    dropout=float(dropout),
                    drop_path_rates=stage_dpr,
                    use_focal=bool(use_focal_modulation),
                    focal_window=int(focal_window),
                    focal_level=int(focal_level),
                    # The last (bottleneck) stage does not downsample.
                    downsample=stage_idx < len(depths) - 1,
                )
            )

        # Decoder mirrors the first three encoder stages in reverse order.
        reverse_depths = list(reversed(depths[:-1]))
        reverse_heads = list(reversed(num_heads[:-1]))
        reverse_dims = list(reversed(dims[:-1]))
        # NOTE(review): decoder blocks reuse the encoder drop-path schedule
        # (minus the bottleneck) reversed — confirm this matches the reference.
        reverse_drop_paths = list(reversed(drop_path_values[:-depths[-1]]))

        self.upsamplers = nn.ModuleList(
            [
                PatchExpand(dim=dims[-1]),
                PatchExpand(dim=dims[-2]),
                PatchExpand(dim=dims[-3]),
            ]
        )
        self.decoder_stages = nn.ModuleList()
        cursor = 0
        for dim, depth, heads in zip(reverse_dims, reverse_depths, reverse_heads):
            stage_dpr = reverse_drop_paths[cursor : cursor + depth]
            cursor += depth
            self.decoder_stages.append(
                DecoderStage(
                    dim=dim,
                    depth=int(depth),
                    num_heads=int(heads),
                    window_size=int(window_size),
                    mlp_ratio=float(mlp_ratio),
                    dropout=float(dropout),
                    drop_path_rates=stage_dpr,
                    spatial_attention=bool(spatial_attention),
                )
            )

        self.norm_up = nn.LayerNorm(dims[0])
        # Restore the original resolution (undo the patch_size=4 embedding).
        self.final_up = FinalPatchExpandX4(dim=dims[0])
        self.output_head = nn.Conv2d(dims[0], self.out_dim, kernel_size=1, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if x.ndim != 4:
            raise ValueError(f"ASUFM expected input of shape (B,C,H,W), got {tuple(x.shape)}")
        _, channels, height, width = x.shape
        if channels != self.in_channels:
            raise ValueError(f"ASUFM expected {self.in_channels} input channels, got {channels}")

        # Input must survive the patch embedding plus 3 halvings in the encoder.
        required_factor = self.patch_size * (2 ** (len(self.encoder_stages) - 1))
        if height != self.image_size or width != self.image_size:
            raise ValueError(
                f"ASUFM expected image_size={self.image_size}, got spatial size ({height}, {width})"
            )
        if height % required_factor != 0 or width % required_factor != 0:
            raise ValueError(
                f"ASUFM requires H and W divisible by {required_factor}, got ({height}, {width})"
            )

        x = self.patch_embed(x)
        # Collect pre-downsample features from every non-bottleneck stage.
        skips: list[torch.Tensor] = []
        for stage_idx, stage in enumerate(self.encoder_stages):
            x, skip = stage(x)
            if stage_idx < len(self.encoder_stages) - 1:
                skips.append(skip)

        # Decode: upsample, then fuse the matching skip (deepest first). Only
        # the first `skip_num` decoder stages receive a skip connection.
        for decoder_idx, (upsample, decoder_stage) in enumerate(zip(self.upsamplers, self.decoder_stages), start=1):
            x = upsample(x)
            skip = skips[-decoder_idx] if decoder_idx <= self.skip_num else None
            x = decoder_stage(x, skip)

        # Final LayerNorm is channels-last, like the rest of the norms.
        x = x.permute(0, 2, 3, 1).contiguous()
        x = self.norm_up(x)
        x = x.permute(0, 3, 1, 2).contiguous()
        x = self.final_up(x)
        return self.output_head(x)
def asufm_builder(
    task: str,
    image_size: int = 64,
    patch_size: int = 4,
    in_channels: int = 6,
    out_dim: int = 1,
    embed_dim: int = 96,
    depths: Sequence[int] = (2, 2, 2, 2),
    num_heads: Sequence[int] = (3, 6, 12, 24),
    window_size: int = 8,
    mlp_ratio: float = 4.0,
    dropout: float = 0.0,
    drop_path_rate: float = 0.1,
    focal_window: int = 3,
    focal_level: int = 2,
    use_focal_modulation: bool = True,
    spatial_attention: bool = True,
    skip_num: int = 3,
    use_checkpoint: bool = False,
    in_chans: int | None = None,
    num_classes: int | None = None,
    focal: bool | None = None,
    **kwargs,
) -> nn.Module:
    """Catalog builder for ASUFM (segmentation only).

    The legacy aliases ``in_chans`` / ``num_classes`` / ``focal`` take
    precedence over ``in_channels`` / ``out_dim`` / ``use_focal_modulation``
    when provided. Unknown keyword arguments are ignored.
    """
    _ = kwargs
    if task.lower() != "segmentation":
        raise ValueError(f"ASUFM is segmentation-only. Got task='{task}'")

    # Aliases win over the canonical parameters when given.
    resolved_in_channels = in_channels if in_chans is None else int(in_chans)
    resolved_out_dim = out_dim if num_classes is None else int(num_classes)
    resolved_focal = use_focal_modulation if focal is None else bool(focal)

    return ASUFM(
        image_size=image_size,
        patch_size=patch_size,
        in_channels=resolved_in_channels,
        out_dim=resolved_out_dim,
        embed_dim=embed_dim,
        depths=tuple(int(v) for v in depths),
        num_heads=tuple(int(v) for v in num_heads),
        window_size=window_size,
        mlp_ratio=mlp_ratio,
        dropout=dropout,
        drop_path_rate=drop_path_rate,
        focal_window=focal_window,
        focal_level=focal_level,
        use_focal_modulation=resolved_focal,
        spatial_attention=spatial_attention,
        skip_num=skip_num,
        use_checkpoint=use_checkpoint,
    )
class ConvBlock(nn.Module):
    """Two 3x3 conv + ReLU layers at fixed spatial resolution."""

    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        self.block = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.block(x)


class AttentionGate(nn.Module):
    """Additive attention gate for U-Net skip connections.

    Projects the skip and gating features to a shared space, combines them
    additively, and produces a single-channel sigmoid mask that rescales the
    skip features element-wise.
    """

    def __init__(self, skip_channels: int, gate_channels: int, inter_channels: int):
        super().__init__()
        self.w_skip = nn.Conv2d(skip_channels, inter_channels, kernel_size=1)
        self.w_gate = nn.Conv2d(gate_channels, inter_channels, kernel_size=1)
        self.psi = nn.Conv2d(inter_channels, 1, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x_skip: torch.Tensor, x_gate: torch.Tensor) -> torch.Tensor:
        combined = self.relu(self.w_skip(x_skip) + self.w_gate(x_gate))
        mask = self.sigmoid(self.psi(combined))
        return x_skip * mask
+ self.dec1 = ConvBlock(c1 + c1, c1) + + self.head = nn.Conv2d(c1, 1, kernel_size=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x1 = self.enc1(x) + x2 = self.enc2(self.pool1(x1)) + xb = self.bottleneck(self.pool2(x2)) + + y2 = self.up2(xb) + x2_att = self.att2(x2, y2) + y2 = torch.cat([y2, x2_att], dim=1) + y2 = self.dec2(y2) + + y1 = self.up1(y2) + x1_att = self.att1(x1, y1) + y1 = torch.cat([y1, x1_att], dim=1) + y1 = self.dec1(y1) + + return self.head(y1) + + +def _choose_device(device_text: str) -> torch.device: + if device_text == "cuda" and torch.cuda.is_available(): + return torch.device("cuda") + return torch.device("cpu") + + +def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader: + ds = TensorDataset( + torch.from_numpy(x.astype(np.float32)), + torch.from_numpy(y.astype(np.float32)), + ) + return DataLoader(ds, batch_size=batch_size, shuffle=shuffle) + + +def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray: + probs: List[np.ndarray] = [] + model.eval() + with torch.no_grad(): + for xb, _ in loader: + xb = xb.to(device) + logits = model(xb) + p = torch.sigmoid(logits).detach().cpu().numpy() + probs.append(p) + if not probs: + return np.zeros((0,), dtype=np.float32) + return np.concatenate(probs, axis=0).reshape(-1) + + +def train_attention_unet_track_o( + x_train: np.ndarray, + y_train: np.ndarray, + x_val: np.ndarray, + y_val: np.ndarray, + cfg: AttentionUNetTrackOConfig, +): + if x_train.ndim != 4 or x_val.ndim != 4: + raise ValueError("x_train and x_val must be 4D arrays [N,C,H,W]") + if y_train.ndim != 4 or y_val.ndim != 4: + raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]") + + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + + device = _choose_device(cfg.device) + + model = TinyAttentionUNet(in_channels=cfg.in_channels, base_channels=cfg.base_channels).to(device) + optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, 
def train_attention_unet_track_o(
    x_train: np.ndarray,
    y_train: np.ndarray,
    x_val: np.ndarray,
    y_val: np.ndarray,
    cfg: AttentionUNetTrackOConfig,
):
    """Train a TinyAttentionUNet on [N,C,H,W] arrays with early stopping.

    Returns (model, history, val_metrics, best_epoch, pos_weight) where the
    model carries the best-validation-loss weights.
    """
    if x_train.ndim != 4 or x_val.ndim != 4:
        raise ValueError("x_train and x_val must be 4D arrays [N,C,H,W]")
    if y_train.ndim != 4 or y_val.ndim != 4:
        raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]")

    torch.manual_seed(cfg.seed)
    np.random.seed(cfg.seed)

    device = _choose_device(cfg.device)

    model = TinyAttentionUNet(in_channels=cfg.in_channels, base_channels=cfg.base_channels).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay)

    # Positive-class pixel weight = neg/pos ratio, clamped to
    # [1, pos_weight_clip_max] to keep the loss stable on rare-fire maps.
    total_px = float(y_train.size)
    pos_px = float(np.sum(y_train))
    neg_px = max(1.0, total_px - pos_px)
    raw_pos_weight = neg_px / max(pos_px, 1.0)
    pos_weight = float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max))

    criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device))

    train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True)
    val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False)

    history: List[Dict[str, float]] = []
    best_epoch = 1
    best_val_loss = float("inf")
    best_state: Dict[str, torch.Tensor] | None = None
    wait = 0  # epochs since the last validation improvement

    for epoch in range(1, cfg.max_epochs + 1):
        model.train()
        train_losses: List[float] = []

        for xb, yb in train_loader:
            xb = xb.to(device)
            yb = yb.to(device)

            optimizer.zero_grad(set_to_none=True)
            logits = model(xb)
            loss = criterion(logits, yb)
            loss.backward()
            optimizer.step()
            train_losses.append(float(loss.item()))

        model.eval()
        val_losses: List[float] = []
        with torch.no_grad():
            for xb, yb in val_loader:
                xb = xb.to(device)
                yb = yb.to(device)
                logits = model(xb)
                loss = criterion(logits, yb)
                val_losses.append(float(loss.item()))

        tr_loss = float(np.mean(train_losses)) if train_losses else float("nan")
        va_loss = float(np.mean(val_losses)) if val_losses else float("nan")

        history.append(
            {
                "epoch": float(epoch),
                "train_loss": tr_loss,
                "val_loss": va_loss,
                "learning_rate": float(optimizer.param_groups[0]["lr"]),
            }
        )

        # Early stopping: improvement must beat min_delta to reset patience.
        if va_loss < best_val_loss - cfg.min_delta:
            best_val_loss = va_loss
            best_epoch = epoch
            best_state = deepcopy(model.state_dict())
            wait = 0
        else:
            wait += 1

        if wait >= cfg.early_stopping_rounds:
            break

    # Restore the checkpoint with the best validation loss.
    if best_state is not None:
        model.load_state_dict(best_state)

    # Clip probabilities so log_loss / NLL stay finite.
    val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7)
    val_true = y_val.reshape(-1).astype(np.float32)

    # NOTE(review): the "day-to-day change" is computed on SORTED probabilities,
    # which discards temporal ordering — confirm this matches the intended
    # Track-O consistency metric.
    mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0
    metrics = {
        "auprc": float(average_precision_score(val_true, val_prob)),
        "auroc": float(roc_auc_score(val_true, val_prob)),
        "brier": float(brier_score_loss(val_true, val_prob)),
        "nll": float(log_loss(val_true, val_prob)),
        "ece": float(binary_ece(val_true, val_prob, n_bins=15)),
        "mean_day_to_day_change": mean_change,
        "normalized_consistency_score": normalized_consistency_score(mean_change),
    }

    return model, history, metrics, best_epoch, pos_weight
["mean_day_to_day_change", "normalized_consistency_score"], + }, + "training": { + "train_unit": "epoch", + "max_epochs": cfg.max_epochs, + "early_stopping_rounds": cfg.early_stopping_rounds, + "best_epoch": best_epoch, + "seed": cfg.seed, + "batch_size": cfg.batch_size, + }, + "optimizer": { + "name": "AdamW", + "lr": cfg.lr, + "weight_decay": cfg.weight_decay, + }, + "learning_weight": { + "type": "pixel_pos_weight", + "value": pos_weight, + "clip_max": cfg.pos_weight_clip_max, + }, + "params": asdict(cfg), + "val_metrics": metrics, + "note": "This module supports both real data and synthetic smoke demonstration.", + } + + +def run_synthetic_demo( + output_dir: Path, + seed: int = 42, + n_samples: int = 192, + image_size: int = 24, + max_epochs: int = 60, + early_stopping_rounds: int = 12, +) -> None: + x, y = make_synthetic_fire_maps(n_samples=n_samples, image_size=image_size, seed=seed) + x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed) + + cfg = AttentionUNetTrackOConfig( + seed=seed, + max_epochs=max_epochs, + early_stopping_rounds=early_stopping_rounds, + device="cpu", + ) + + model, history, val_metrics, best_epoch, pos_weight = train_attention_unet_track_o( + x_train, y_train, x_val, y_val, cfg + ) + + test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False) + test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 1e-7) + test_true = y_test.reshape(-1).astype(np.float32) + + test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0 + test_metrics = { + "auprc": float(average_precision_score(test_true, test_prob)), + "auroc": float(roc_auc_score(test_true, test_prob)), + "brier": float(brier_score_loss(test_true, test_prob)), + "nll": float(log_loss(test_true, test_prob)), + "ece": float(binary_ece(test_true, test_prob, n_bins=15)), + "mean_day_to_day_change": test_mean_change, + 
"normalized_consistency_score": normalized_consistency_score(test_mean_change), + } + + output_dir.mkdir(parents=True, exist_ok=True) + save_history_and_plot(history, output_dir) + + torch.save( + { + "state_dict": model.state_dict(), + "config": asdict(cfg), + "best_epoch": best_epoch, + }, + output_dir / "attention_unet_model.pt", + ) + + setting = build_experiment_setting(cfg, best_epoch=best_epoch, pos_weight=pos_weight, metrics=val_metrics) + setting["data"] = { + "n_samples": n_samples, + "image_size": image_size, + "split": {"train": int(x_train.shape[0]), "val": int(x_val.shape[0]), "test": int(x_test.shape[0])}, + } + setting["test_metrics"] = test_metrics + + (output_dir / "experiment_setting.json").write_text(json.dumps(setting, indent=2), encoding="utf-8") + (output_dir / "metrics.json").write_text( + json.dumps({"val": val_metrics, "test": test_metrics, "best_epoch": best_epoch}, indent=2), + encoding="utf-8", + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Run Attention U-Net Track-O synthetic smoke demo") + parser.add_argument("--output_dir", default=None) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--n_samples", type=int, default=192) + parser.add_argument("--image_size", type=int, default=24) + parser.add_argument("--max_epochs", type=int, default=60) + parser.add_argument("--early_stopping_rounds", type=int, default=12) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + base = Path(__file__).resolve().parents[1] / "runs_scaffold" + out = ( + Path(args.output_dir) + if args.output_dir + else base / f"attention_unet_synthetic_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + ) + + run_synthetic_demo( + output_dir=out, + seed=args.seed, + n_samples=args.n_samples, + image_size=args.image_size, + max_epochs=args.max_epochs, + early_stopping_rounds=args.early_stopping_rounds, + ) + print(f"[done] attention unet synthetic demo saved to: {out}") + + 
class MLPBackbone(nn.Module):
    """Simple MLP for tabular features.

    Stacks `depth` Linear+ReLU layers; with depth == 0 the module is an
    empty Sequential and acts as the identity.
    """

    def __init__(self, input_dim: int, hidden_dim: int = 256, depth: int = 2):
        super().__init__()
        layers = []
        width = input_dim
        for _ in range(depth):
            layers.append(nn.Linear(width, hidden_dim))
            layers.append(nn.ReLU())
            width = hidden_dim
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)


class CNNPatchEncoder(nn.Module):
    """Lightweight CNN encoder for raster patches.

    Two 3x3 conv + ReLU layers followed by global average pooling; output is
    a flat (batch, hidden_dim) feature vector.
    """

    def __init__(self, in_channels: int = 3, hidden_dim: int = 64):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(in_channels, hidden_dim, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d(1),
        )

    def forward(self, x):
        pooled = self.features(x)
        return torch.flatten(pooled, 1)
def build_model(name: str, task: str, **kwargs: Any) -> nn.Module:
    """Build a registered model by name and task.

    Delegates to the registry's builder callable, merging the registered
    defaults with caller kwargs (caller wins) plus the lowercased task.
    `name` is forwarded only to builders that can accept it.
    """
    cfg = get_model_config(name)
    if cfg is None:
        raise KeyError(f"Model '{name}' is not registered.")

    merged: Dict[str, Any] = dict(cfg.get("defaults", {}))
    merged.update(kwargs)
    merged["task"] = task.lower()

    builder = cfg["builder"]
    params = inspect.signature(builder).parameters
    takes_var_kwargs = any(p.kind == inspect.Parameter.VAR_KEYWORD for p in params.values())
    # Some builders (e.g. default_builder) require `name`; others reject it.
    if "name" in params or takes_var_kwargs:
        merged["name"] = name
    return builder(**merged)
+ """ + task = task.lower() + if name == "mlp": + backbone = MLPBackbone(kwargs["in_dim"], hidden_dim=kwargs.get("hidden_dim", 256), depth=kwargs.get("depth", 2)) + head = _make_head(task, kwargs) + return _combine(backbone, head) + if name == "cnn": + backbone = CNNPatchEncoder(kwargs.get("in_channels", 3), hidden_dim=kwargs.get("hidden_dim", 64)) + head = _make_head(task, kwargs, backbone_out_dim=kwargs.get("hidden_dim", 64)) + return _combine(backbone, head) + if name == "temporal": + backbone = TemporalEncoder(kwargs["in_dim"], hidden_dim=kwargs.get("hidden_dim", 128), num_layers=kwargs.get("num_layers", 1)) + head = _make_head(task, kwargs) + return _combine(backbone, head) + raise ValueError(f"Unknown backbone '{name}'.") + + +def _make_head(task: str, kwargs: Dict[str, Any], backbone_out_dim: int | None = None) -> nn.Module: + if task == "classification": + in_dim = backbone_out_dim or kwargs.get("hidden_dim") or kwargs["in_dim"] + return ClassificationHead(in_dim=in_dim, num_classes=kwargs["out_dim"]) + if task == "regression": + in_dim = backbone_out_dim or kwargs.get("hidden_dim") or kwargs["in_dim"] + return RegressionHead(in_dim=in_dim, out_dim=kwargs.get("out_dim", 1)) + if task == "segmentation": + in_channels = kwargs.get("hidden_dim") or backbone_out_dim or kwargs.get("in_channels", 1) + return SegmentationHead(in_channels=in_channels, num_classes=kwargs["out_dim"]) + raise ValueError(f"Unsupported task '{task}'.") + + +def _combine(backbone: nn.Module, head: nn.Module) -> nn.Module: + return nn.Sequential(backbone, head) + + +__all__ = ["build_model", "default_builder"] diff --git a/pyhazards/models/cnn_aspp.py b/pyhazards/models/cnn_aspp.py new file mode 100644 index 00000000..d2dc8f10 --- /dev/null +++ b/pyhazards/models/cnn_aspp.py @@ -0,0 +1,172 @@ +from __future__ import annotations + +from typing import Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +# 
--------------------------------------------------------------------- +# Basic blocks +# --------------------------------------------------------------------- + +class ConvBNReLU(nn.Module): + def __init__( + self, + in_ch: int, + out_ch: int, + k: int = 3, + s: int = 1, + p: int = 1, + d: int = 1, + ): + super().__init__() + self.conv = nn.Conv2d( + in_ch, + out_ch, + kernel_size=k, + stride=s, + padding=p, + dilation=d, + bias=False, + ) + self.bn = nn.BatchNorm2d(out_ch) + self.act = nn.ReLU(inplace=True) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.act(self.bn(self.conv(x))) + + +# --------------------------------------------------------------------- +# ASPP +# --------------------------------------------------------------------- + +class ASPP(nn.Module): + """ + Atrous Spatial Pyramid Pooling (ASPP). + + Parallel atrous convolutions + image pooling branch, + followed by projection. + """ + + def __init__( + self, + in_ch: int, + out_ch: int, + dilations: Sequence[int] = (1, 3, 6, 12), + ): + super().__init__() + + if len(dilations) != 4: + raise ValueError("ASPP expects exactly 4 dilation rates") + + d1, d2, d3, d4 = dilations + + self.b1 = ConvBNReLU(in_ch, out_ch, k=1, p=0, d=d1) + self.b2 = ConvBNReLU(in_ch, out_ch, k=3, p=d2, d=d2) + self.b3 = ConvBNReLU(in_ch, out_ch, k=3, p=d3, d=d3) + self.b4 = ConvBNReLU(in_ch, out_ch, k=3, p=d4, d=d4) + + self.pool = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + ConvBNReLU(in_ch, out_ch, k=1, p=0), + ) + + self.proj = ConvBNReLU(out_ch * 5, out_ch, k=1, p=0) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + b, c, h, w = x.shape + + p = self.pool(x) + p = F.interpolate(p, size=(h, w), mode="bilinear", align_corners=False) + + y = torch.cat( + [self.b1(x), self.b2(x), self.b3(x), self.b4(x), p], + dim=1, + ) + return self.proj(y) + + +# --------------------------------------------------------------------- +# CNN + ASPP model +# 
--------------------------------------------------------------------- + +class WildfireCNNASPP(nn.Module): + """ + CNN + ASPP wildfire segmentation model. + + Input: + x : (B, C, H, W) float tensor + + Output: + logits : (B, 1, H, W) float tensor + (sigmoid applied externally) + """ + + def __init__( + self, + in_channels: int = 12, + base_channels: int = 32, + aspp_channels: int = 32, + dilations: Sequence[int] = (1, 3, 6, 12), + dropout: float = 0.0, + ): + super().__init__() + + self.stem = nn.Sequential( + ConvBNReLU(in_channels, base_channels, k=3, p=1), + ConvBNReLU(base_channels, base_channels, k=3, p=1), + ) + + self.aspp = ASPP( + in_ch=base_channels, + out_ch=aspp_channels, + dilations=dilations, + ) + + self.drop = nn.Dropout2d(dropout) if dropout > 0 else nn.Identity() + self.head = nn.Conv2d(aspp_channels, 1, kernel_size=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 4: + raise ValueError( + f"Expected input of shape (B,C,H,W), got {tuple(x.shape)}" + ) + + f = self.stem(x) + y = self.aspp(f) + y = self.drop(y) + return self.head(y) + + +# --------------------------------------------------------------------- +# PyHazards model builder +# --------------------------------------------------------------------- + +def cnn_aspp_builder( + task: str, + in_channels: int = 12, + base_channels: int = 32, + aspp_channels: int = 32, + dilations: Sequence[int] = (1, 3, 6, 12), + dropout: float = 0.0, + **kwargs, +) -> nn.Module: + """ + PyHazards-style model builder. + """ + _ = kwargs # explicitly ignore unused builder args + + if "segmentation" not in task: + raise ValueError( + f"WildfireCNNASPP is segmentation-only. 
Got task='{task}'" + ) + + return WildfireCNNASPP( + in_channels=in_channels, + base_channels=base_channels, + aspp_channels=aspp_channels, + dilations=dilations, + dropout=dropout, + ) diff --git a/pyhazards/models/convgru_trajgru.py b/pyhazards/models/convgru_trajgru.py new file mode 100644 index 00000000..1066c1b8 --- /dev/null +++ b/pyhazards/models/convgru_trajgru.py @@ -0,0 +1,481 @@ +from __future__ import annotations + +import argparse +import csv +import json +import sys +from copy import deepcopy +from dataclasses import asdict, dataclass +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List, Tuple + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np +import torch +import torch.nn.functional as F +from sklearn.metrics import average_precision_score, brier_score_loss, log_loss, roc_auc_score +from torch import nn +from torch.utils.data import DataLoader, TensorDataset + +if __package__ is None or __package__ == "": + sys.path.insert(0, str(Path(__file__).resolve().parents[3])) + from pyhazards.models.mau import ( + binary_ece, + make_synthetic_fire_sequences, + normalized_consistency_score, + split_train_val_test, + ) +else: + from .mau import ( + binary_ece, + make_synthetic_fire_sequences, + normalized_consistency_score, + split_train_val_test, + ) + + +@dataclass +class ConvGRTrajGRUTrackOConfig: + seq_len: int = 6 + in_channels: int = 1 + enc_channels: int = 16 + hidden_channels: int = 16 + kernel_size: int = 3 + lr: float = 3e-4 + weight_decay: float = 1e-4 + batch_size: int = 8 + max_epochs: int = 120 + early_stopping_rounds: int = 16 + min_delta: float = 1e-4 + seed: int = 42 + pos_weight_clip_max: float = 50.0 + device: str = "cpu" + + +def _normalized_base_grid(h: int, w: int, device: torch.device, dtype: torch.dtype) -> torch.Tensor: + ys = torch.linspace(-1.0, 1.0, steps=h, device=device, dtype=dtype) + xs = torch.linspace(-1.0, 1.0, steps=w, device=device, 
dtype=dtype) + yy, xx = torch.meshgrid(ys, xs, indexing="ij") + return torch.stack((xx, yy), dim=-1) # [H,W,2] + + +def _warp_hidden(hidden: torch.Tensor, flow_xy: torch.Tensor) -> torch.Tensor: + # hidden: [B,C,H,W], flow_xy: [B,2,H,W] in pixel space + b, _, h, w = hidden.shape + base = _normalized_base_grid(h, w, hidden.device, hidden.dtype).unsqueeze(0).repeat(b, 1, 1, 1) + + fx = flow_xy[:, 0] / max((w - 1) / 2.0, 1.0) + fy = flow_xy[:, 1] / max((h - 1) / 2.0, 1.0) + flow = torch.stack((fx, fy), dim=-1) # [B,H,W,2] + + grid = base + flow + return F.grid_sample(hidden, grid, mode="bilinear", padding_mode="border", align_corners=True) + + +class ConvGRUCell(nn.Module): + def __init__(self, input_channels: int, hidden_channels: int, kernel_size: int = 3): + super().__init__() + padding = kernel_size // 2 + self.conv_zr = nn.Conv2d(input_channels + hidden_channels, hidden_channels * 2, kernel_size, padding=padding) + self.conv_n = nn.Conv2d(input_channels + hidden_channels, hidden_channels, kernel_size, padding=padding) + + def forward(self, x_t: torch.Tensor, h_prev: torch.Tensor) -> torch.Tensor: + fused = torch.cat([x_t, h_prev], dim=1) + z, r = torch.chunk(self.conv_zr(fused), 2, dim=1) + z = torch.sigmoid(z) + r = torch.sigmoid(r) + + n = torch.tanh(self.conv_n(torch.cat([x_t, r * h_prev], dim=1))) + return (1.0 - z) * h_prev + z * n + + +class TrajGRUCell(nn.Module): + def __init__(self, input_channels: int, hidden_channels: int, kernel_size: int = 3): + super().__init__() + padding = kernel_size // 2 + self.flow_net = nn.Conv2d(input_channels + hidden_channels, 2, kernel_size=3, padding=1) + self.conv_zr = nn.Conv2d(input_channels + hidden_channels, hidden_channels * 2, kernel_size, padding=padding) + self.conv_n = nn.Conv2d(input_channels + hidden_channels, hidden_channels, kernel_size, padding=padding) + + def forward(self, x_t: torch.Tensor, h_prev: torch.Tensor) -> torch.Tensor: + flow = self.flow_net(torch.cat([x_t, h_prev], dim=1)) + h_warp = 
_warp_hidden(h_prev, flow) + + fused = torch.cat([x_t, h_warp], dim=1) + z, r = torch.chunk(self.conv_zr(fused), 2, dim=1) + z = torch.sigmoid(z) + r = torch.sigmoid(r) + + n = torch.tanh(self.conv_n(torch.cat([x_t, r * h_warp], dim=1))) + return (1.0 - z) * h_warp + z * n + + +class TinyConvGRTrajGRU(nn.Module): + def __init__(self, in_channels: int = 1, enc_channels: int = 16, hidden_channels: int = 16, kernel_size: int = 3): + super().__init__() + self.hidden_channels = hidden_channels + + self.encoder = nn.Sequential( + nn.Conv2d(in_channels, enc_channels, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(enc_channels, enc_channels, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + ) + self.convgru = ConvGRUCell(input_channels=enc_channels, hidden_channels=hidden_channels, kernel_size=kernel_size) + self.trajgru = TrajGRUCell(input_channels=hidden_channels, hidden_channels=hidden_channels, kernel_size=kernel_size) + + self.decoder = nn.Sequential( + nn.Conv2d(hidden_channels, hidden_channels, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(hidden_channels, 1, kernel_size=1), + ) + + def forward(self, x_seq: torch.Tensor) -> torch.Tensor: + # x_seq: [B,T,C,H,W] + b, _, _, h, w = x_seq.shape + device = x_seq.device + dtype = x_seq.dtype + + h_conv = torch.zeros((b, self.hidden_channels, h, w), device=device, dtype=dtype) + h_traj = torch.zeros((b, self.hidden_channels, h, w), device=device, dtype=dtype) + + for t in range(x_seq.shape[1]): + x_t = self.encoder(x_seq[:, t]) + h_conv = self.convgru(x_t, h_conv) + h_traj = self.trajgru(h_conv, h_traj) + + return self.decoder(h_traj) + + +def _choose_device(device_text: str) -> torch.device: + if device_text == "cuda" and torch.cuda.is_available(): + return torch.device("cuda") + return torch.device("cpu") + + +def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader: + ds = TensorDataset( + torch.from_numpy(x.astype(np.float32)), + 
torch.from_numpy(y.astype(np.float32)), + ) + return DataLoader(ds, batch_size=batch_size, shuffle=shuffle) + + +def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray: + probs: List[np.ndarray] = [] + model.eval() + with torch.no_grad(): + for xb, _ in loader: + xb = xb.to(device) + logits = model(xb) + p = torch.sigmoid(logits).detach().cpu().numpy() + probs.append(p) + if not probs: + return np.zeros((0,), dtype=np.float32) + return np.concatenate(probs, axis=0).reshape(-1) + + +def train_convgru_trajgru_track_o( + x_train: np.ndarray, + y_train: np.ndarray, + x_val: np.ndarray, + y_val: np.ndarray, + cfg: ConvGRTrajGRUTrackOConfig, +): + if x_train.ndim != 5 or x_val.ndim != 5: + raise ValueError("x_train and x_val must be 5D arrays [N,T,C,H,W]") + if y_train.ndim != 4 or y_val.ndim != 4: + raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]") + + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + + device = _choose_device(cfg.device) + + model = TinyConvGRTrajGRU( + in_channels=cfg.in_channels, + enc_channels=cfg.enc_channels, + hidden_channels=cfg.hidden_channels, + kernel_size=cfg.kernel_size, + ).to(device) + optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay) + + total_px = float(y_train.size) + pos_px = float(np.sum(y_train)) + neg_px = max(1.0, total_px - pos_px) + raw_pos_weight = neg_px / max(pos_px, 1.0) + pos_weight = float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max)) + + criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device)) + + train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True) + val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False) + + history: List[Dict[str, float]] = [] + best_epoch = 1 + best_val_loss = float("inf") + best_state: Dict[str, torch.Tensor] | None = None + wait = 0 + + for epoch in range(1, cfg.max_epochs + 1): + 
model.train() + train_losses: List[float] = [] + + for xb, yb in train_loader: + xb = xb.to(device) + yb = yb.to(device) + + optimizer.zero_grad(set_to_none=True) + logits = model(xb) + loss = criterion(logits, yb) + loss.backward() + optimizer.step() + train_losses.append(float(loss.item())) + + model.eval() + val_losses: List[float] = [] + with torch.no_grad(): + for xb, yb in val_loader: + xb = xb.to(device) + yb = yb.to(device) + logits = model(xb) + loss = criterion(logits, yb) + val_losses.append(float(loss.item())) + + tr_loss = float(np.mean(train_losses)) if train_losses else float("nan") + va_loss = float(np.mean(val_losses)) if val_losses else float("nan") + + history.append( + { + "epoch": float(epoch), + "train_loss": tr_loss, + "val_loss": va_loss, + "learning_rate": float(optimizer.param_groups[0]["lr"]), + } + ) + + if va_loss < best_val_loss - cfg.min_delta: + best_val_loss = va_loss + best_epoch = epoch + best_state = deepcopy(model.state_dict()) + wait = 0 + else: + wait += 1 + + if wait >= cfg.early_stopping_rounds: + break + + if best_state is not None: + model.load_state_dict(best_state) + + val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7) + val_true = y_val.reshape(-1).astype(np.float32) + + mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0 + metrics = { + "auprc": float(average_precision_score(val_true, val_prob)), + "auroc": float(roc_auc_score(val_true, val_prob)), + "brier": float(brier_score_loss(val_true, val_prob)), + "nll": float(log_loss(val_true, val_prob)), + "ece": float(binary_ece(val_true, val_prob, n_bins=15)), + "mean_day_to_day_change": mean_change, + "normalized_consistency_score": normalized_consistency_score(mean_change), + } + + return model, history, metrics, best_epoch, pos_weight + + +def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None: + output_dir.mkdir(parents=True, exist_ok=True) + + 
history_csv = output_dir / "history.csv" + with history_csv.open("w", encoding="utf-8", newline="") as f: + writer = csv.DictWriter(f, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"]) + writer.writeheader() + writer.writerows(history) + + x = [int(r["epoch"]) for r in history] + y_tr = [float(r["train_loss"]) for r in history] + y_va = [float(r["val_loss"]) for r in history] + + plt.figure(figsize=(8, 5)) + plt.plot(x, y_tr, label="train_bce", marker="o", linewidth=1.4) + plt.plot(x, y_va, label="val_bce", marker="s", linewidth=1.2) + plt.xlabel("epoch") + plt.ylabel("loss") + plt.title("ConvGRU/TrajGRU Track-O: train loss vs epoch") + plt.grid(alpha=0.3) + plt.legend() + plt.tight_layout() + plt.savefig(output_dir / "loss_curve.png", dpi=150) + plt.close() + + +def build_experiment_setting( + cfg: ConvGRTrajGRUTrackOConfig, + best_epoch: int, + pos_weight: float, + metrics: Dict[str, float], +) -> Dict[str, Any]: + return { + "benchmark": { + "task": "Track-O", + "model_name": "convgru_trajgru", + "run_time": datetime.now().isoformat(), + }, + "evaluation_protocol": { + "discrimination": {"primary": "auprc", "secondary": "auroc"}, + "reliability": ["brier", "nll", "ece"], + "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"], + }, + "training": { + "train_unit": "epoch", + "max_epochs": cfg.max_epochs, + "early_stopping_rounds": cfg.early_stopping_rounds, + "best_epoch": best_epoch, + "seed": cfg.seed, + "batch_size": cfg.batch_size, + "seq_len": cfg.seq_len, + }, + "optimizer": { + "name": "AdamW", + "lr": cfg.lr, + "weight_decay": cfg.weight_decay, + }, + "learning_weight": { + "type": "pixel_pos_weight", + "value": pos_weight, + "clip_max": cfg.pos_weight_clip_max, + }, + "params": asdict(cfg), + "val_metrics": metrics, + "note": "This module supports both real data and synthetic smoke demonstration.", + } + + +def run_synthetic_demo( + output_dir: Path, + seed: int = 42, + n_samples: int = 160, + seq_len: int = 6, 
+ image_size: int = 24, + max_epochs: int = 60, + early_stopping_rounds: int = 12, +) -> None: + x, y = make_synthetic_fire_sequences( + n_samples=n_samples, + seq_len=seq_len, + image_size=image_size, + seed=seed, + ) + x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed) + + cfg = ConvGRTrajGRUTrackOConfig( + seed=seed, + seq_len=seq_len, + max_epochs=max_epochs, + early_stopping_rounds=early_stopping_rounds, + device="cpu", + ) + + model, history, val_metrics, best_epoch, pos_weight = train_convgru_trajgru_track_o( + x_train, + y_train, + x_val, + y_val, + cfg, + ) + + test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False) + test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 1e-7) + test_true = y_test.reshape(-1).astype(np.float32) + + test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0 + test_metrics = { + "auprc": float(average_precision_score(test_true, test_prob)), + "auroc": float(roc_auc_score(test_true, test_prob)), + "brier": float(brier_score_loss(test_true, test_prob)), + "nll": float(log_loss(test_true, test_prob)), + "ece": float(binary_ece(test_true, test_prob, n_bins=15)), + "mean_day_to_day_change": test_mean_change, + "normalized_consistency_score": normalized_consistency_score(test_mean_change), + } + + output_dir.mkdir(parents=True, exist_ok=True) + save_history_and_plot(history, output_dir) + + torch.save( + { + "state_dict": model.state_dict(), + "config": asdict(cfg), + "best_epoch": best_epoch, + }, + output_dir / "convgru_trajgru_model.pt", + ) + + setting = build_experiment_setting(cfg, best_epoch=best_epoch, pos_weight=pos_weight, metrics=val_metrics) + setting["data"] = { + "n_samples": n_samples, + "image_size": image_size, + "seq_len": seq_len, + "split": {"train": int(x_train.shape[0]), "val": int(x_val.shape[0]), "test": int(x_test.shape[0])}, + } + 
setting["test_metrics"] = test_metrics + + (output_dir / "experiment_setting.json").write_text(json.dumps(setting, indent=2), encoding="utf-8") + (output_dir / "metrics.json").write_text( + json.dumps({"val": val_metrics, "test": test_metrics, "best_epoch": best_epoch}, indent=2), + encoding="utf-8", + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Run ConvGRU/TrajGRU Track-O synthetic smoke demo") + parser.add_argument("--output_dir", default=None) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--n_samples", type=int, default=160) + parser.add_argument("--seq_len", type=int, default=6) + parser.add_argument("--image_size", type=int, default=24) + parser.add_argument("--max_epochs", type=int, default=60) + parser.add_argument("--early_stopping_rounds", type=int, default=12) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + base = Path(__file__).resolve().parents[1] / "runs_scaffold" + out = ( + Path(args.output_dir) + if args.output_dir + else base / f"convgru_trajgru_synthetic_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + ) + + run_synthetic_demo( + output_dir=out, + seed=args.seed, + n_samples=args.n_samples, + seq_len=args.seq_len, + image_size=args.image_size, + max_epochs=args.max_epochs, + early_stopping_rounds=args.early_stopping_rounds, + ) + print(f"[done] convgru_trajgru synthetic demo saved to: {out}") + + +if __name__ == "__main__": + main() + +from ._wildfire_benchmark_utils import SegmentationPort, filter_init_kwargs, require_task + + +def convgru_trajgru_builder(task: str, in_channels: int = 1, out_dim: int = 1, **kwargs: Any) -> nn.Module: + require_task(task, {"segmentation"}, "convgru_trajgru") + init_kwargs = filter_init_kwargs(TinyConvGRTrajGRU, {"in_channels": int(in_channels), **kwargs}) + model = TinyConvGRTrajGRU(**init_kwargs) + return SegmentationPort(model=model, out_channels=int(out_dim)) + + +__all__ = ["TinyConvGRTrajGRU", 
diff --git a/pyhazards/models/convlstm.py b/pyhazards/models/convlstm.py
new file mode 100644
index 00000000..9b52dd84
--- /dev/null
+++ b/pyhazards/models/convlstm.py
@@ -0,0 +1,470 @@
from __future__ import annotations

import argparse
import csv
import json
import sys
from copy import deepcopy
from dataclasses import asdict, dataclass
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Tuple

import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import torch
from sklearn.metrics import average_precision_score, brier_score_loss, log_loss, roc_auc_score
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

# Dual import path: as a standalone script (repo root injected onto sys.path)
# or as a package module.
if __package__ is None or __package__ == "":
    sys.path.insert(0, str(Path(__file__).resolve().parents[3]))
    from pyhazards.models.mau import (
        binary_ece,
        make_synthetic_fire_sequences,
        normalized_consistency_score,
        split_train_val_test,
    )
else:
    from .mau import (
        binary_ece,
        make_synthetic_fire_sequences,
        normalized_consistency_score,
        split_train_val_test,
    )


@dataclass
class ConvLSTMTrackOConfig:
    """Hyperparameters for the ConvLSTM Track-O training run."""

    seq_len: int = 6
    in_channels: int = 1
    enc_channels: int = 16
    hidden_channels: int = 16
    num_layers: int = 2
    kernel_size: int = 3
    lr: float = 3e-4
    weight_decay: float = 1e-4
    batch_size: int = 8
    max_epochs: int = 120
    early_stopping_rounds: int = 16
    min_delta: float = 1e-4
    seed: int = 42
    pos_weight_clip_max: float = 50.0
    device: str = "cpu"


class ConvLSTMCell(nn.Module):
    """Convolutional LSTM cell; one fused conv produces all four gates."""

    def __init__(self, input_channels: int, hidden_channels: int, kernel_size: int = 3):
        super().__init__()
        padding = kernel_size // 2
        self.hidden_channels = hidden_channels
        # 4 * hidden channels: input, forget, output gates and candidate state.
        self.conv = nn.Conv2d(
            input_channels + hidden_channels,
            hidden_channels * 4,
            kernel_size=kernel_size,
            padding=padding,
        )

    def forward(self, x_t: torch.Tensor, h_prev: torch.Tensor, c_prev: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        fused = torch.cat([x_t, h_prev], dim=1)
        gates = self.conv(fused)
        i, f, o, g = torch.chunk(gates, 4, dim=1)
        i = torch.sigmoid(i)
        f = torch.sigmoid(f)
        o = torch.sigmoid(o)
        g = torch.tanh(g)

        c = f * c_prev + i * g
        h = o * torch.tanh(c)
        return h, c


class TinyConvLSTM(nn.Module):
    """Small encoder -> stacked ConvLSTM -> decoder sequence model.

    Consumes [B,T,C,H,W] and emits single-channel logits [B,1,H,W] from the
    last layer's final hidden state.
    """

    def __init__(
        self,
        in_channels: int = 1,
        enc_channels: int = 16,
        hidden_channels: int = 16,
        num_layers: int = 2,
        kernel_size: int = 3,
    ):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.num_layers = num_layers

        self.encoder = nn.Sequential(
            nn.Conv2d(in_channels, enc_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(enc_channels, enc_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )

        cells: List[nn.Module] = []
        for i in range(num_layers):
            # First layer consumes encoder features; deeper layers consume
            # the hidden state of the layer below.
            in_ch = enc_channels if i == 0 else hidden_channels
            cells.append(ConvLSTMCell(input_channels=in_ch, hidden_channels=hidden_channels, kernel_size=kernel_size))
        self.cells = nn.ModuleList(cells)

        self.decoder = nn.Sequential(
            nn.Conv2d(hidden_channels, hidden_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden_channels, 1, kernel_size=1),
        )

    def forward(self, x_seq: torch.Tensor) -> torch.Tensor:
        # x_seq: [B,T,C,H,W]
        b, _, _, h, w = x_seq.shape
        device = x_seq.device

        # Zero-initialized hidden and cell states, one pair per layer.
        h_states = [
            torch.zeros((b, self.hidden_channels, h, w), device=device, dtype=x_seq.dtype)
            for _ in range(self.num_layers)
        ]
        c_states = [
            torch.zeros((b, self.hidden_channels, h, w), device=device, dtype=x_seq.dtype)
            for _ in range(self.num_layers)
        ]

        for t in range(x_seq.shape[1]):
            x_t = self.encoder(x_seq[:, t])  # [B,enc,H,W]
            for i, cell in enumerate(self.cells):
                h_i, c_i = cell(x_t, h_states[i], c_states[i])
                h_states[i], c_states[i] = h_i, c_i
                x_t = h_i

        return self.decoder(h_states[-1])


def _choose_device(device_text: str) -> torch.device:
    """Resolve a device string ("cuda", "cuda:0", ...), falling back to CPU."""
    normalized = str(device_text).strip().lower()
    if normalized.startswith("cuda") and torch.cuda.is_available():
        return torch.device(device_text)
    return torch.device("cpu")


def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader:
    """Wrap float32 copies of x/y in a DataLoader."""
    ds = TensorDataset(
        torch.from_numpy(x.astype(np.float32)),
        torch.from_numpy(y.astype(np.float32)),
    )
    return DataLoader(ds, batch_size=batch_size, shuffle=shuffle)


def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray:
    """Run the model over `loader` and return flattened sigmoid probabilities."""
    probs: List[np.ndarray] = []
    model.eval()
    with torch.no_grad():
        for xb, _ in loader:
            xb = xb.to(device)
            logits = model(xb)
            p = torch.sigmoid(logits).detach().cpu().numpy()
            probs.append(p)
    if not probs:
        return np.zeros((0,), dtype=np.float32)
    return np.concatenate(probs, axis=0).reshape(-1)


def train_convlstm_track_o(
    x_train: np.ndarray,
    y_train: np.ndarray,
    x_val: np.ndarray,
    y_val: np.ndarray,
    cfg: ConvLSTMTrackOConfig,
):
    """Train with early stopping on validation BCE; restore the best weights.

    Returns (model, history, val_metrics, best_epoch, pos_weight).
    """
    if x_train.ndim != 5 or x_val.ndim != 5:
        raise ValueError("x_train and x_val must be 5D arrays [N,T,C,H,W]")
    if y_train.ndim != 4 or y_val.ndim != 4:
        raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]")

    torch.manual_seed(cfg.seed)
    np.random.seed(cfg.seed)

    device = _choose_device(cfg.device)

    model = TinyConvLSTM(
        in_channels=cfg.in_channels,
        enc_channels=cfg.enc_channels,
        hidden_channels=cfg.hidden_channels,
        num_layers=cfg.num_layers,
        kernel_size=cfg.kernel_size,
    ).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay)

    # Clipped negative/positive pixel ratio counters class imbalance.
    total_px = float(y_train.size)
    pos_px = float(np.sum(y_train))
    neg_px = max(1.0, total_px - pos_px)
    raw_pos_weight = neg_px / max(pos_px, 1.0)
    pos_weight = float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max))

    criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device))

    train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True)
    val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False)

    history: List[Dict[str, float]] = []
    best_epoch = 1
    best_val_loss = float("inf")
    best_state: Dict[str, torch.Tensor] | None = None
    wait = 0

    for epoch in range(1, cfg.max_epochs + 1):
        model.train()
        train_losses: List[float] = []

        for xb, yb in train_loader:
            xb = xb.to(device)
            yb = yb.to(device)

            optimizer.zero_grad(set_to_none=True)
            logits = model(xb)
            loss = criterion(logits, yb)
            loss.backward()
            optimizer.step()
            train_losses.append(float(loss.item()))

        model.eval()
        val_losses: List[float] = []
        with torch.no_grad():
            for xb, yb in val_loader:
                xb = xb.to(device)
                yb = yb.to(device)
                logits = model(xb)
                loss = criterion(logits, yb)
                val_losses.append(float(loss.item()))

        tr_loss = float(np.mean(train_losses)) if train_losses else float("nan")
        va_loss = float(np.mean(val_losses)) if val_losses else float("nan")

        history.append(
            {
                "epoch": float(epoch),
                "train_loss": tr_loss,
                "val_loss": va_loss,
                "learning_rate": float(optimizer.param_groups[0]["lr"]),
            }
        )

        # Early stopping: require improvement by at least min_delta.
        if va_loss < best_val_loss - cfg.min_delta:
            best_val_loss = va_loss
            best_epoch = epoch
            best_state = deepcopy(model.state_dict())
            wait = 0
        else:
            wait += 1

        if wait >= cfg.early_stopping_rounds:
            break

    if best_state is not None:
        model.load_state_dict(best_state)

    # Clip to avoid log(0) in log_loss.
    val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7)
    val_true = y_val.reshape(-1).astype(np.float32)

    # NOTE(review): probabilities are sorted before diffing, so this measures
    # the spread of the probability distribution rather than a literal
    # day-to-day temporal change — confirm against the benchmark spec.
    mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0
    metrics = {
        "auprc": float(average_precision_score(val_true, val_prob)),
        "auroc": float(roc_auc_score(val_true, val_prob)),
        "brier": float(brier_score_loss(val_true, val_prob)),
        "nll": float(log_loss(val_true, val_prob)),
        "ece": float(binary_ece(val_true, val_prob, n_bins=15)),
        "mean_day_to_day_change": mean_change,
        "normalized_consistency_score": normalized_consistency_score(mean_change),
    }

    return model, history, metrics, best_epoch, pos_weight


def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None:
    """Write history.csv and a train/val loss curve PNG into `output_dir`."""
    output_dir.mkdir(parents=True, exist_ok=True)

    history_csv = output_dir / "history.csv"
    with history_csv.open("w", encoding="utf-8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"])
        writer.writeheader()
        writer.writerows(history)

    x = [int(r["epoch"]) for r in history]
    y_tr = [float(r["train_loss"]) for r in history]
    y_va = [float(r["val_loss"]) for r in history]

    plt.figure(figsize=(8, 5))
    plt.plot(x, y_tr, label="train_bce", marker="o", linewidth=1.4)
    plt.plot(x, y_va, label="val_bce", marker="s", linewidth=1.2)
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.title("ConvLSTM Track-O: train loss vs epoch")
    plt.grid(alpha=0.3)
    plt.legend()
    plt.tight_layout()
    plt.savefig(output_dir / "loss_curve.png", dpi=150)
    plt.close()


def build_experiment_setting(
    cfg: ConvLSTMTrackOConfig,
    best_epoch: int,
    pos_weight: float,
    metrics: Dict[str, float],
) -> Dict[str, Any]:
    """Assemble the experiment-settings dict serialized next to the run."""
    return {
        "benchmark": {
            "task": "Track-O",
            "model_name": "convlstm",
            "run_time": datetime.now().isoformat(),
        },
        "evaluation_protocol": {
            "discrimination": {"primary": "auprc", "secondary": "auroc"},
            "reliability": ["brier", "nll", "ece"],
            "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"],
        },
        "training": {
            "train_unit": "epoch",
            "max_epochs": cfg.max_epochs,
            "early_stopping_rounds": cfg.early_stopping_rounds,
            "best_epoch": best_epoch,
            "seed": cfg.seed,
            "batch_size": cfg.batch_size,
            "seq_len": cfg.seq_len,
        },
        "optimizer": {
            "name": "AdamW",
            "lr": cfg.lr,
            "weight_decay": cfg.weight_decay,
        },
        "learning_weight": {
            "type": "pixel_pos_weight",
            "value": pos_weight,
            "clip_max": cfg.pos_weight_clip_max,
        },
        "params": asdict(cfg),
        "val_metrics": metrics,
        "note": "This module supports both real data and synthetic smoke demonstration.",
    }


def run_synthetic_demo(
    output_dir: Path,
    seed: int = 42,
    n_samples: int = 160,
    seq_len: int = 6,
    image_size: int = 24,
    max_epochs: int = 60,
    early_stopping_rounds: int = 12,
) -> None:
    """Train/evaluate on synthetic fire sequences and persist all artifacts."""
    x, y = make_synthetic_fire_sequences(
        n_samples=n_samples,
        seq_len=seq_len,
        image_size=image_size,
        seed=seed,
    )
    x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed)

    cfg = ConvLSTMTrackOConfig(
        seed=seed,
        seq_len=seq_len,
        max_epochs=max_epochs,
        early_stopping_rounds=early_stopping_rounds,
        device="cpu",
    )

    model, history, val_metrics, best_epoch, pos_weight = train_convlstm_track_o(
        x_train,
        y_train,
        x_val,
        y_val,
        cfg,
    )

    test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False)
    test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 1e-7)
    test_true = y_test.reshape(-1).astype(np.float32)

    # NOTE(review): same sorted-diff caveat as the validation metric above.
    test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0
    test_metrics = {
        "auprc": float(average_precision_score(test_true, test_prob)),
        "auroc": float(roc_auc_score(test_true, test_prob)),
        "brier": float(brier_score_loss(test_true, test_prob)),
        "nll": float(log_loss(test_true, test_prob)),
        "ece": float(binary_ece(test_true, test_prob, n_bins=15)),
        "mean_day_to_day_change": test_mean_change,
        "normalized_consistency_score": normalized_consistency_score(test_mean_change),
    }

    output_dir.mkdir(parents=True, exist_ok=True)
    save_history_and_plot(history, output_dir)

    torch.save(
        {
            "state_dict": model.state_dict(),
            "config": asdict(cfg),
            "best_epoch": best_epoch,
        },
        output_dir / "convlstm_model.pt",
    )

    setting = build_experiment_setting(cfg, best_epoch=best_epoch, pos_weight=pos_weight, metrics=val_metrics)
    setting["data"] = {
        "n_samples": n_samples,
        "image_size": image_size,
        "seq_len": seq_len,
        "split": {"train": int(x_train.shape[0]), "val": int(x_val.shape[0]), "test": int(x_test.shape[0])},
    }
    setting["test_metrics"] = test_metrics

    (output_dir / "experiment_setting.json").write_text(json.dumps(setting, indent=2), encoding="utf-8")
    (output_dir / "metrics.json").write_text(
        json.dumps({"val": val_metrics, "test": test_metrics, "best_epoch": best_epoch}, indent=2),
        encoding="utf-8",
    )


def parse_args() -> argparse.Namespace:
    """CLI arguments for the synthetic smoke demo."""
    parser = argparse.ArgumentParser(description="Run ConvLSTM Track-O synthetic smoke demo")
    parser.add_argument("--output_dir", default=None)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--n_samples", type=int, default=160)
    parser.add_argument("--seq_len", type=int, default=6)
    parser.add_argument("--image_size", type=int, default=24)
    parser.add_argument("--max_epochs", type=int, default=60)
    parser.add_argument("--early_stopping_rounds", type=int, default=12)
    return parser.parse_args()


def main() -> None:
    """Entry point: run the synthetic demo into a timestamped output dir."""
    args = parse_args()
    # parents[1] of pyhazards/models/<file> is the pyhazards package dir.
    base = Path(__file__).resolve().parents[1] / "runs_scaffold"
    out = (
        Path(args.output_dir)
        if args.output_dir
        else base / f"convlstm_synthetic_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
    )

    run_synthetic_demo(
        output_dir=out,
        seed=args.seed,
        n_samples=args.n_samples,
        seq_len=args.seq_len,
        image_size=args.image_size,
        max_epochs=args.max_epochs,
        early_stopping_rounds=args.early_stopping_rounds,
    )
    print(f"[done] convlstm synthetic demo saved to: {out}")


if __name__ == "__main__":
    main()

from ._wildfire_benchmark_utils import SegmentationPort, filter_init_kwargs, require_task
{"segmentation"}, "convlstm") + init_kwargs = filter_init_kwargs(TinyConvLSTM, {"in_channels": int(in_channels), **kwargs}) + model = TinyConvLSTM(**init_kwargs) + return SegmentationPort(model=model, out_channels=int(out_dim)) + + +__all__ = ["TinyConvLSTM", "convlstm_builder"] diff --git a/pyhazards/models/deep_ensemble.py b/pyhazards/models/deep_ensemble.py new file mode 100644 index 00000000..fbe06a1a --- /dev/null +++ b/pyhazards/models/deep_ensemble.py @@ -0,0 +1,464 @@ +from __future__ import annotations + +import argparse +import csv +import json +import sys +from copy import deepcopy +from dataclasses import asdict, dataclass +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List, Tuple + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np +import torch +from sklearn.metrics import average_precision_score, brier_score_loss, log_loss, roc_auc_score +from torch import nn +from torch.utils.data import DataLoader, TensorDataset + +if __package__ is None or __package__ == "": + sys.path.insert(0, str(Path(__file__).resolve().parents[3])) + from pyhazards.models.unet import ( + binary_ece, + make_synthetic_fire_maps, + normalized_consistency_score, + split_train_val_test, + ) +else: + from .unet import ( + binary_ece, + make_synthetic_fire_maps, + normalized_consistency_score, + split_train_val_test, + ) + + +@dataclass +class DeepEnsembleTrackOConfig: + in_channels: int = 1 + base_channels: int = 8 + ensemble_size: int = 5 + lr: float = 1e-3 + weight_decay: float = 1e-4 + batch_size: int = 8 + max_epochs: int = 120 + early_stopping_rounds: int = 16 + min_delta: float = 1e-4 + seed: int = 42 + pos_weight_clip_max: float = 50.0 + device: str = "cpu" + + +class ConvBlock(nn.Module): + def __init__(self, in_channels: int, out_channels: int): + super().__init__() + self.block = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + 
nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.block(x) + + +class TinyEnsembleMember(nn.Module): + def __init__(self, in_channels: int = 1, base_channels: int = 8): + super().__init__() + c1, c2 = base_channels, base_channels * 2 + self.enc1 = ConvBlock(in_channels, c1) + self.pool = nn.MaxPool2d(kernel_size=2) + self.enc2 = ConvBlock(c1, c2) + self.up = nn.ConvTranspose2d(c2, c1, kernel_size=2, stride=2) + self.dec = ConvBlock(c1 + c1, c1) + self.head = nn.Conv2d(c1, 1, kernel_size=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x1 = self.enc1(x) + x2 = self.enc2(self.pool(x1)) + y = self.up(x2) + y = torch.cat([y, x1], dim=1) + y = self.dec(y) + return self.head(y) + + +def _choose_device(device_text: str) -> torch.device: + if device_text == "cuda" and torch.cuda.is_available(): + return torch.device("cuda") + return torch.device("cpu") + + +def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader: + ds = TensorDataset( + torch.from_numpy(x.astype(np.float32)), + torch.from_numpy(y.astype(np.float32)), + ) + return DataLoader(ds, batch_size=batch_size, shuffle=shuffle) + + +def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray: + probs: List[np.ndarray] = [] + model.eval() + with torch.no_grad(): + for xb, _ in loader: + xb = xb.to(device) + logits = model(xb) + p = torch.sigmoid(logits).detach().cpu().numpy() + probs.append(p) + if not probs: + return np.zeros((0,), dtype=np.float32) + return np.concatenate(probs, axis=0).reshape(-1) + + +def _predict_ensemble_probabilities(models: List[nn.Module], loader: DataLoader, device: torch.device) -> Tuple[np.ndarray, float]: + member_probs: List[np.ndarray] = [] + for model in models: + member_probs.append(_predict_probabilities(model, loader, device)) + if not member_probs: + return np.zeros((0,), 
dtype=np.float32), 0.0 + stacked = np.stack(member_probs, axis=0) # [M,N] + return np.mean(stacked, axis=0), float(np.mean(np.std(stacked, axis=0))) + + +def _train_one_epoch(model: nn.Module, loader: DataLoader, optimizer: torch.optim.Optimizer, criterion: nn.Module, device: torch.device) -> float: + model.train() + losses: List[float] = [] + for xb, yb in loader: + xb = xb.to(device) + yb = yb.to(device) + + optimizer.zero_grad(set_to_none=True) + logits = model(xb) + loss = criterion(logits, yb) + loss.backward() + optimizer.step() + losses.append(float(loss.item())) + return float(np.mean(losses)) if losses else float("nan") + + +def _eval_loss(model: nn.Module, loader: DataLoader, criterion: nn.Module, device: torch.device) -> float: + model.eval() + losses: List[float] = [] + with torch.no_grad(): + for xb, yb in loader: + xb = xb.to(device) + yb = yb.to(device) + logits = model(xb) + loss = criterion(logits, yb) + losses.append(float(loss.item())) + return float(np.mean(losses)) if losses else float("nan") + + +def train_deep_ensemble_track_o( + x_train: np.ndarray, + y_train: np.ndarray, + x_val: np.ndarray, + y_val: np.ndarray, + cfg: DeepEnsembleTrackOConfig, +): + if x_train.ndim != 4 or x_val.ndim != 4: + raise ValueError("x_train and x_val must be 4D arrays [N,C,H,W]") + if y_train.ndim != 4 or y_val.ndim != 4: + raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]") + if cfg.ensemble_size < 1: + raise ValueError("ensemble_size must be >= 1") + + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + + device = _choose_device(cfg.device) + + total_px = float(y_train.size) + pos_px = float(np.sum(y_train)) + neg_px = max(1.0, total_px - pos_px) + raw_pos_weight = neg_px / max(pos_px, 1.0) + pos_weight = float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max)) + criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device)) + + train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True) 
+ val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False) + + models: List[nn.Module] = [] + optimizers: List[torch.optim.Optimizer] = [] + for m in range(cfg.ensemble_size): + member_seed = cfg.seed + 1000 + m + torch.manual_seed(member_seed) + np.random.seed(member_seed) + member = TinyEnsembleMember(in_channels=cfg.in_channels, base_channels=cfg.base_channels).to(device) + opt = torch.optim.AdamW(member.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay) + models.append(member) + optimizers.append(opt) + + history: List[Dict[str, float]] = [] + best_epoch = 1 + best_val_loss = float("inf") + best_states: List[Dict[str, torch.Tensor]] | None = None + wait = 0 + + for epoch in range(1, cfg.max_epochs + 1): + train_member_losses: List[float] = [] + val_member_losses: List[float] = [] + + for model, opt in zip(models, optimizers): + tr = _train_one_epoch(model, train_loader, opt, criterion, device) + va = _eval_loss(model, val_loader, criterion, device) + train_member_losses.append(tr) + val_member_losses.append(va) + + tr_loss = float(np.mean(train_member_losses)) if train_member_losses else float("nan") + va_loss = float(np.mean(val_member_losses)) if val_member_losses else float("nan") + + history.append( + { + "epoch": float(epoch), + "train_loss": tr_loss, + "val_loss": va_loss, + "learning_rate": float(np.mean([opt.param_groups[0]["lr"] for opt in optimizers])), + } + ) + + if va_loss < best_val_loss - cfg.min_delta: + best_val_loss = va_loss + best_epoch = epoch + best_states = [deepcopy(model.state_dict()) for model in models] + wait = 0 + else: + wait += 1 + + if wait >= cfg.early_stopping_rounds: + break + + if best_states is not None: + for model, state in zip(models, best_states): + model.load_state_dict(state) + + val_prob, mean_ensemble_std = _predict_ensemble_probabilities(models, val_loader, device=device) + val_prob = np.clip(val_prob, 1e-7, 1.0 - 1e-7) + val_true = y_val.reshape(-1).astype(np.float32) + + mean_change 
= float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0 + metrics = { + "auprc": float(average_precision_score(val_true, val_prob)), + "auroc": float(roc_auc_score(val_true, val_prob)), + "brier": float(brier_score_loss(val_true, val_prob)), + "nll": float(log_loss(val_true, val_prob)), + "ece": float(binary_ece(val_true, val_prob, n_bins=15)), + "mean_day_to_day_change": mean_change, + "normalized_consistency_score": normalized_consistency_score(mean_change), + "mean_ensemble_std": float(mean_ensemble_std), + } + + return models, history, metrics, best_epoch, pos_weight + + +def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None: + output_dir.mkdir(parents=True, exist_ok=True) + + history_csv = output_dir / "history.csv" + with history_csv.open("w", encoding="utf-8", newline="") as f: + writer = csv.DictWriter(f, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"]) + writer.writeheader() + writer.writerows(history) + + x = [int(r["epoch"]) for r in history] + y_tr = [float(r["train_loss"]) for r in history] + y_va = [float(r["val_loss"]) for r in history] + + plt.figure(figsize=(8, 5)) + plt.plot(x, y_tr, label="train_bce", marker="o", linewidth=1.4) + plt.plot(x, y_va, label="val_bce", marker="s", linewidth=1.2) + plt.xlabel("epoch") + plt.ylabel("loss") + plt.title("Deep Ensemble Track-O: train loss vs epoch") + plt.grid(alpha=0.3) + plt.legend() + plt.tight_layout() + plt.savefig(output_dir / "loss_curve.png", dpi=150) + plt.close() + + +def build_experiment_setting( + cfg: DeepEnsembleTrackOConfig, + best_epoch: int, + pos_weight: float, + metrics: Dict[str, float], +) -> Dict[str, Any]: + return { + "benchmark": { + "task": "Track-O", + "model_name": "deep_ensemble", + "run_time": datetime.now().isoformat(), + }, + "evaluation_protocol": { + "discrimination": {"primary": "auprc", "secondary": "auroc"}, + "reliability": ["brier", "nll", "ece"], + "temporal_consistency": 
["mean_day_to_day_change", "normalized_consistency_score"], + "uncertainty": ["mean_ensemble_std"], + }, + "training": { + "train_unit": "epoch", + "max_epochs": cfg.max_epochs, + "early_stopping_rounds": cfg.early_stopping_rounds, + "best_epoch": best_epoch, + "seed": cfg.seed, + "batch_size": cfg.batch_size, + "ensemble_size": cfg.ensemble_size, + }, + "optimizer": { + "name": "AdamW", + "lr": cfg.lr, + "weight_decay": cfg.weight_decay, + }, + "learning_weight": { + "type": "pixel_pos_weight", + "value": pos_weight, + "clip_max": cfg.pos_weight_clip_max, + }, + "params": asdict(cfg), + "val_metrics": metrics, + "note": "This module supports both real data and synthetic smoke demonstration.", + } + + +def run_synthetic_demo( + output_dir: Path, + seed: int = 42, + n_samples: int = 192, + image_size: int = 24, + max_epochs: int = 60, + early_stopping_rounds: int = 12, +) -> None: + x, y = make_synthetic_fire_maps(n_samples=n_samples, image_size=image_size, seed=seed) + x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed) + + cfg = DeepEnsembleTrackOConfig( + seed=seed, + max_epochs=max_epochs, + early_stopping_rounds=early_stopping_rounds, + device="cpu", + ) + + models, history, val_metrics, best_epoch, pos_weight = train_deep_ensemble_track_o( + x_train, + y_train, + x_val, + y_val, + cfg, + ) + + test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False) + test_prob, test_std = _predict_ensemble_probabilities(models, test_loader, _choose_device(cfg.device)) + test_prob = np.clip(test_prob, 1e-7, 1.0 - 1e-7) + test_true = y_test.reshape(-1).astype(np.float32) + + test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0 + test_metrics = { + "auprc": float(average_precision_score(test_true, test_prob)), + "auroc": float(roc_auc_score(test_true, test_prob)), + "brier": float(brier_score_loss(test_true, test_prob)), + "nll": float(log_loss(test_true, test_prob)), 
+ "ece": float(binary_ece(test_true, test_prob, n_bins=15)), + "mean_day_to_day_change": test_mean_change, + "normalized_consistency_score": normalized_consistency_score(test_mean_change), + "mean_ensemble_std": float(test_std), + } + + output_dir.mkdir(parents=True, exist_ok=True) + save_history_and_plot(history, output_dir) + + torch.save( + { + "members": [model.state_dict() for model in models], + "config": asdict(cfg), + "best_epoch": best_epoch, + }, + output_dir / "deep_ensemble_model.pt", + ) + + setting = build_experiment_setting(cfg, best_epoch=best_epoch, pos_weight=pos_weight, metrics=val_metrics) + setting["data"] = { + "n_samples": n_samples, + "image_size": image_size, + "split": {"train": int(x_train.shape[0]), "val": int(x_val.shape[0]), "test": int(x_test.shape[0])}, + } + setting["test_metrics"] = test_metrics + + (output_dir / "experiment_setting.json").write_text(json.dumps(setting, indent=2), encoding="utf-8") + (output_dir / "metrics.json").write_text( + json.dumps({"val": val_metrics, "test": test_metrics, "best_epoch": best_epoch}, indent=2), + encoding="utf-8", + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Run Deep Ensemble Track-O synthetic smoke demo") + parser.add_argument("--output_dir", default=None) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--n_samples", type=int, default=192) + parser.add_argument("--image_size", type=int, default=24) + parser.add_argument("--max_epochs", type=int, default=60) + parser.add_argument("--early_stopping_rounds", type=int, default=12) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + base = Path(__file__).resolve().parents[1] / "runs_scaffold" + out = ( + Path(args.output_dir) + if args.output_dir + else base / f"deep_ensemble_synthetic_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + ) + + run_synthetic_demo( + output_dir=out, + seed=args.seed, + n_samples=args.n_samples, + 
image_size=args.image_size, + max_epochs=args.max_epochs, + early_stopping_rounds=args.early_stopping_rounds, + ) + print(f"[done] deep_ensemble synthetic demo saved to: {out}") + + +if __name__ == "__main__": + main() + +from ._wildfire_benchmark_utils import SegmentationPort, require_task + + +class DeepEnsemble(nn.Module): + """Benchmark-facing deep ensemble that averages member logits.""" + + def __init__(self, in_channels: int = 1, base_channels: int = 8, ensemble_size: int = 5): + super().__init__() + if ensemble_size < 1: + raise ValueError('ensemble_size must be >= 1') + self.members = nn.ModuleList( + [TinyEnsembleMember(in_channels=in_channels, base_channels=base_channels) for _ in range(int(ensemble_size))] + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + logits = torch.stack([member(x) for member in self.members], dim=0).mean(dim=0) + return logits + + +def deep_ensemble_builder( + task: str, + in_channels: int = 1, + out_dim: int = 1, + base_channels: int = 8, + ensemble_size: int = 5, + **kwargs: Any, +) -> nn.Module: + _ = kwargs + require_task(task, {"segmentation"}, "deep_ensemble") + model = DeepEnsemble(in_channels=int(in_channels), base_channels=int(base_channels), ensemble_size=int(ensemble_size)) + return SegmentationPort(model=model, out_channels=int(out_dim)) + + +__all__ = ["DeepEnsemble", "TinyEnsembleMember", "deep_ensemble_builder"] diff --git a/pyhazards/models/deeplabv3p.py b/pyhazards/models/deeplabv3p.py new file mode 100644 index 00000000..0c04437a --- /dev/null +++ b/pyhazards/models/deeplabv3p.py @@ -0,0 +1,452 @@ +from __future__ import annotations + +import argparse +import csv +import json +import sys +from copy import deepcopy +from dataclasses import asdict, dataclass +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np +import torch +import torch.nn.functional as F +from 
sklearn.metrics import average_precision_score, brier_score_loss, log_loss, roc_auc_score +from torch import nn +from torch.utils.data import DataLoader, TensorDataset + +if __package__ is None or __package__ == "": + sys.path.insert(0, str(Path(__file__).resolve().parents[3])) + from pyhazards.models.unet import ( + binary_ece, + make_synthetic_fire_maps, + normalized_consistency_score, + split_train_val_test, + ) +else: + from .unet import ( + binary_ece, + make_synthetic_fire_maps, + normalized_consistency_score, + split_train_val_test, + ) + + +@dataclass +class DeepLabV3PTrackOConfig: + in_channels: int = 1 + base_channels: int = 16 + lr: float = 3e-4 + weight_decay: float = 1e-4 + batch_size: int = 8 + max_epochs: int = 120 + early_stopping_rounds: int = 16 + min_delta: float = 1e-4 + seed: int = 42 + pos_weight_clip_max: float = 50.0 + device: str = "cpu" + + +class ConvBNReLU(nn.Module): + def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, dilation: int = 1): + super().__init__() + padding = dilation * (kernel_size // 2) + self.block = nn.Sequential( + nn.Conv2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=False, + ), + nn.BatchNorm2d(out_channels), + nn.ReLU(inplace=True), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.block(x) + + +class TinyBackbone(nn.Module): + def __init__(self, in_channels: int = 1, base_channels: int = 16): + super().__init__() + c1, c2, c3 = base_channels, base_channels * 2, base_channels * 4 + self.low = nn.Sequential( + ConvBNReLU(in_channels, c1, kernel_size=3, stride=1), + ConvBNReLU(c1, c1, kernel_size=3, stride=1), + ) + self.mid = nn.Sequential( + ConvBNReLU(c1, c2, kernel_size=3, stride=2), + ConvBNReLU(c2, c2, kernel_size=3, stride=1), + ) + self.high = nn.Sequential( + ConvBNReLU(c2, c3, kernel_size=3, stride=2), + ConvBNReLU(c3, c3, kernel_size=3, stride=1), + ) + + def 
forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: + low = self.low(x) + x = self.mid(low) + high = self.high(x) + return low, high + + +class ASPP(nn.Module): + def __init__(self, in_channels: int, out_channels: int): + super().__init__() + self.branch1 = ConvBNReLU(in_channels, out_channels, kernel_size=1, stride=1, dilation=1) + self.branch2 = ConvBNReLU(in_channels, out_channels, kernel_size=3, stride=1, dilation=2) + self.branch3 = ConvBNReLU(in_channels, out_channels, kernel_size=3, stride=1, dilation=4) + self.branch4 = ConvBNReLU(in_channels, out_channels, kernel_size=3, stride=1, dilation=6) + self.project = ConvBNReLU(out_channels * 4, out_channels, kernel_size=1, stride=1, dilation=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + b1 = self.branch1(x) + b2 = self.branch2(x) + b3 = self.branch3(x) + b4 = self.branch4(x) + return self.project(torch.cat([b1, b2, b3, b4], dim=1)) + + +class TinyDeepLabV3P(nn.Module): + def __init__(self, in_channels: int = 1, base_channels: int = 16): + super().__init__() + c1, c3 = base_channels, base_channels * 4 + self.backbone = TinyBackbone(in_channels=in_channels, base_channels=base_channels) + self.aspp = ASPP(in_channels=c3, out_channels=c1 * 2) + self.low_proj = ConvBNReLU(c1, c1, kernel_size=1, stride=1, dilation=1) + self.decoder = nn.Sequential( + ConvBNReLU(c1 * 3, c1 * 2, kernel_size=3, stride=1, dilation=1), + ConvBNReLU(c1 * 2, c1, kernel_size=3, stride=1, dilation=1), + nn.Conv2d(c1, 1, kernel_size=1), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + low, high = self.backbone(x) + aspp = self.aspp(high) + aspp_up = F.interpolate(aspp, size=low.shape[-2:], mode="bilinear", align_corners=False) + low = self.low_proj(low) + logits = self.decoder(torch.cat([aspp_up, low], dim=1)) + return logits + + +def _choose_device(device_text: str) -> torch.device: + if device_text == "cuda" and torch.cuda.is_available(): + return torch.device("cuda") + return torch.device("cpu") + 
+ +def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader: + ds = TensorDataset( + torch.from_numpy(x.astype(np.float32)), + torch.from_numpy(y.astype(np.float32)), + ) + return DataLoader(ds, batch_size=batch_size, shuffle=shuffle) + + +def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray: + probs: List[np.ndarray] = [] + model.eval() + with torch.no_grad(): + for xb, _ in loader: + xb = xb.to(device) + logits = model(xb) + p = torch.sigmoid(logits).detach().cpu().numpy() + probs.append(p) + if not probs: + return np.zeros((0,), dtype=np.float32) + return np.concatenate(probs, axis=0).reshape(-1) + + +def train_deeplabv3p_track_o( + x_train: np.ndarray, + y_train: np.ndarray, + x_val: np.ndarray, + y_val: np.ndarray, + cfg: DeepLabV3PTrackOConfig, +): + if x_train.ndim != 4 or x_val.ndim != 4: + raise ValueError("x_train and x_val must be 4D arrays [N,C,H,W]") + if y_train.ndim != 4 or y_val.ndim != 4: + raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]") + + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + + device = _choose_device(cfg.device) + + model = TinyDeepLabV3P(in_channels=cfg.in_channels, base_channels=cfg.base_channels).to(device) + optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay) + + total_px = float(y_train.size) + pos_px = float(np.sum(y_train)) + neg_px = max(1.0, total_px - pos_px) + raw_pos_weight = neg_px / max(pos_px, 1.0) + pos_weight = float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max)) + + criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device)) + + train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True) + val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False) + + history: List[Dict[str, float]] = [] + best_epoch = 1 + best_val_loss = float("inf") + best_state: Dict[str, torch.Tensor] | None = 
None + wait = 0 + + for epoch in range(1, cfg.max_epochs + 1): + model.train() + train_losses: List[float] = [] + + for xb, yb in train_loader: + xb = xb.to(device) + yb = yb.to(device) + + optimizer.zero_grad(set_to_none=True) + logits = model(xb) + loss = criterion(logits, yb) + loss.backward() + optimizer.step() + train_losses.append(float(loss.item())) + + model.eval() + val_losses: List[float] = [] + with torch.no_grad(): + for xb, yb in val_loader: + xb = xb.to(device) + yb = yb.to(device) + logits = model(xb) + loss = criterion(logits, yb) + val_losses.append(float(loss.item())) + + tr_loss = float(np.mean(train_losses)) if train_losses else float("nan") + va_loss = float(np.mean(val_losses)) if val_losses else float("nan") + + history.append( + { + "epoch": float(epoch), + "train_loss": tr_loss, + "val_loss": va_loss, + "learning_rate": float(optimizer.param_groups[0]["lr"]), + } + ) + + if va_loss < best_val_loss - cfg.min_delta: + best_val_loss = va_loss + best_epoch = epoch + best_state = deepcopy(model.state_dict()) + wait = 0 + else: + wait += 1 + + if wait >= cfg.early_stopping_rounds: + break + + if best_state is not None: + model.load_state_dict(best_state) + + val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7) + val_true = y_val.reshape(-1).astype(np.float32) + + mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0 + metrics = { + "auprc": float(average_precision_score(val_true, val_prob)), + "auroc": float(roc_auc_score(val_true, val_prob)), + "brier": float(brier_score_loss(val_true, val_prob)), + "nll": float(log_loss(val_true, val_prob)), + "ece": float(binary_ece(val_true, val_prob, n_bins=15)), + "mean_day_to_day_change": mean_change, + "normalized_consistency_score": normalized_consistency_score(mean_change), + } + + return model, history, metrics, best_epoch, pos_weight + + +def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> 
None: + output_dir.mkdir(parents=True, exist_ok=True) + + history_csv = output_dir / "history.csv" + with history_csv.open("w", encoding="utf-8", newline="") as f: + writer = csv.DictWriter(f, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"]) + writer.writeheader() + writer.writerows(history) + + x = [int(r["epoch"]) for r in history] + y_tr = [float(r["train_loss"]) for r in history] + y_va = [float(r["val_loss"]) for r in history] + + plt.figure(figsize=(8, 5)) + plt.plot(x, y_tr, label="train_bce", marker="o", linewidth=1.4) + plt.plot(x, y_va, label="val_bce", marker="s", linewidth=1.2) + plt.xlabel("epoch") + plt.ylabel("loss") + plt.title("DeepLabv3+ Track-O: train loss vs epoch") + plt.grid(alpha=0.3) + plt.legend() + plt.tight_layout() + plt.savefig(output_dir / "loss_curve.png", dpi=150) + plt.close() + + +def build_experiment_setting( + cfg: DeepLabV3PTrackOConfig, + best_epoch: int, + pos_weight: float, + metrics: Dict[str, float], +) -> Dict[str, Any]: + return { + "benchmark": { + "task": "Track-O", + "model_name": "deeplabv3p", + "run_time": datetime.now().isoformat(), + }, + "evaluation_protocol": { + "discrimination": {"primary": "auprc", "secondary": "auroc"}, + "reliability": ["brier", "nll", "ece"], + "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"], + }, + "training": { + "train_unit": "epoch", + "max_epochs": cfg.max_epochs, + "early_stopping_rounds": cfg.early_stopping_rounds, + "best_epoch": best_epoch, + "seed": cfg.seed, + "batch_size": cfg.batch_size, + }, + "optimizer": { + "name": "AdamW", + "lr": cfg.lr, + "weight_decay": cfg.weight_decay, + }, + "learning_weight": { + "type": "pixel_pos_weight", + "value": pos_weight, + "clip_max": cfg.pos_weight_clip_max, + }, + "params": asdict(cfg), + "val_metrics": metrics, + "note": "This module supports both real data and synthetic smoke demonstration.", + } + + +def run_synthetic_demo( + output_dir: Path, + seed: int = 42, + n_samples: int = 192, 
+ image_size: int = 24, + max_epochs: int = 60, + early_stopping_rounds: int = 12, +) -> None: + x, y = make_synthetic_fire_maps(n_samples=n_samples, image_size=image_size, seed=seed) + x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed) + + cfg = DeepLabV3PTrackOConfig( + seed=seed, + max_epochs=max_epochs, + early_stopping_rounds=early_stopping_rounds, + device="cpu", + ) + + model, history, val_metrics, best_epoch, pos_weight = train_deeplabv3p_track_o( + x_train, + y_train, + x_val, + y_val, + cfg, + ) + + test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False) + test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 1e-7) + test_true = y_test.reshape(-1).astype(np.float32) + + test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0 + test_metrics = { + "auprc": float(average_precision_score(test_true, test_prob)), + "auroc": float(roc_auc_score(test_true, test_prob)), + "brier": float(brier_score_loss(test_true, test_prob)), + "nll": float(log_loss(test_true, test_prob)), + "ece": float(binary_ece(test_true, test_prob, n_bins=15)), + "mean_day_to_day_change": test_mean_change, + "normalized_consistency_score": normalized_consistency_score(test_mean_change), + } + + output_dir.mkdir(parents=True, exist_ok=True) + save_history_and_plot(history, output_dir) + + torch.save( + { + "state_dict": model.state_dict(), + "config": asdict(cfg), + "best_epoch": best_epoch, + }, + output_dir / "deeplabv3p_model.pt", + ) + + setting = build_experiment_setting(cfg, best_epoch=best_epoch, pos_weight=pos_weight, metrics=val_metrics) + setting["data"] = { + "n_samples": n_samples, + "image_size": image_size, + "split": {"train": int(x_train.shape[0]), "val": int(x_val.shape[0]), "test": int(x_test.shape[0])}, + } + setting["test_metrics"] = test_metrics + + (output_dir / 
"experiment_setting.json").write_text(json.dumps(setting, indent=2), encoding="utf-8") + (output_dir / "metrics.json").write_text( + json.dumps({"val": val_metrics, "test": test_metrics, "best_epoch": best_epoch}, indent=2), + encoding="utf-8", + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Run DeepLabv3+ Track-O synthetic smoke demo") + parser.add_argument("--output_dir", default=None) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--n_samples", type=int, default=192) + parser.add_argument("--image_size", type=int, default=24) + parser.add_argument("--max_epochs", type=int, default=60) + parser.add_argument("--early_stopping_rounds", type=int, default=12) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + base = Path(__file__).resolve().parents[1] / "runs_scaffold" + out = ( + Path(args.output_dir) + if args.output_dir + else base / f"deeplabv3p_synthetic_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + ) + + run_synthetic_demo( + output_dir=out, + seed=args.seed, + n_samples=args.n_samples, + image_size=args.image_size, + max_epochs=args.max_epochs, + early_stopping_rounds=args.early_stopping_rounds, + ) + print(f"[done] deeplabv3p synthetic demo saved to: {out}") + + +if __name__ == "__main__": + main() + +from ._wildfire_benchmark_utils import SegmentationPort, filter_init_kwargs, require_task + + +def deeplabv3p_builder(task: str, in_channels: int = 1, out_dim: int = 1, **kwargs: Any) -> nn.Module: + require_task(task, {"segmentation"}, "deeplabv3p") + init_kwargs = filter_init_kwargs(TinyDeepLabV3P, {"in_channels": int(in_channels), **kwargs}) + model = TinyDeepLabV3P(**init_kwargs) + return SegmentationPort(model=model, out_channels=int(out_dim)) + + +__all__ = ["TinyDeepLabV3P", "deeplabv3p_builder"] diff --git a/pyhazards/models/earthfarseer.py b/pyhazards/models/earthfarseer.py new file mode 100644 index 00000000..65b14fc3 --- /dev/null +++ 
b/pyhazards/models/earthfarseer.py @@ -0,0 +1,453 @@ +from __future__ import annotations + +import argparse +import csv +import json +import sys +from copy import deepcopy +from dataclasses import asdict, dataclass +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np +import torch +from sklearn.metrics import average_precision_score, brier_score_loss, log_loss, roc_auc_score +from torch import nn +from torch.utils.data import DataLoader, TensorDataset + +if __package__ is None or __package__ == "": + sys.path.insert(0, str(Path(__file__).resolve().parents[3])) + from pyhazards.models.mau import ( + binary_ece, + make_synthetic_fire_sequences, + normalized_consistency_score, + split_train_val_test, + ) +else: + from .mau import ( + binary_ece, + make_synthetic_fire_sequences, + normalized_consistency_score, + split_train_val_test, + ) + + +@dataclass +class EarthFarseerTrackOConfig: + seq_len: int = 6 + in_channels: int = 1 + hidden_channels: int = 16 + num_heads: int = 4 + num_layers: int = 2 + lr: float = 3e-4 + weight_decay: float = 1e-4 + batch_size: int = 8 + max_epochs: int = 120 + early_stopping_rounds: int = 16 + min_delta: float = 1e-4 + seed: int = 42 + pos_weight_clip_max: float = 50.0 + device: str = "cpu" + + +class TemporalBlock(nn.Module): + def __init__(self, dim: int, num_heads: int, dropout: float = 0.0): + super().__init__() + self.norm1 = nn.LayerNorm(dim) + self.attn = nn.MultiheadAttention(embed_dim=dim, num_heads=num_heads, dropout=dropout, batch_first=True) + self.norm2 = nn.LayerNorm(dim) + self.ffn = nn.Sequential( + nn.Linear(dim, dim * 2), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(dim * 2, dim), + nn.Dropout(dropout), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + y = self.norm1(x) + y, _ = self.attn(y, y, y, need_weights=False) + x = x + y + x = x + self.ffn(self.norm2(x)) + return 
class TinyEarthFarseer(nn.Module):
    """Minimal EarthFarseer-style next-frame predictor.

    A shared CNN encodes each frame, two stacks of temporal attention blocks
    (a dense branch and a strided "far" branch) mix information across time
    independently per spatial location, and a CNN decoder maps the fused
    last-step features to a single-channel logit map.
    """

    def __init__(self, in_channels: int = 1, hidden_channels: int = 16, num_heads: int = 4, num_layers: int = 2):
        super().__init__()
        self.hidden_channels = hidden_channels

        # Per-frame spatial encoder (shared across all time steps).
        self.encoder = nn.Sequential(
            nn.Conv2d(in_channels, hidden_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden_channels, hidden_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )

        # Dense temporal branch plus a shallower branch fed with strided steps.
        self.temporal_blocks = nn.ModuleList(
            TemporalBlock(hidden_channels, num_heads) for _ in range(num_layers)
        )
        self.far_blocks = nn.ModuleList(
            TemporalBlock(hidden_channels, num_heads) for _ in range(max(1, num_layers - 1))
        )

        self.decoder = nn.Sequential(
            nn.Conv2d(hidden_channels, hidden_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden_channels, 1, kernel_size=1),
        )

    def forward(self, x_seq: torch.Tensor) -> torch.Tensor:
        """Map a [B,T,C,H,W] input sequence to a [B,1,H,W] logit map."""
        batch, steps, chans, height, width = x_seq.shape

        # Encode every frame with the shared CNN.
        feats = self.encoder(x_seq.reshape(batch * steps, chans, height, width))
        dim = feats.shape[1]

        # Regroup into one temporal token sequence per pixel: [B*H*W, T, D].
        feats = feats.reshape(batch, steps, dim, height, width)
        tokens = feats.permute(0, 3, 4, 1, 2).contiguous().reshape(batch * height * width, steps, dim)

        dense = tokens
        for block in self.temporal_blocks:
            dense = block(dense)

        # Far-seer branch emphasizes farther temporal gaps.
        far = tokens[:, ::2, :]
        for block in self.far_blocks:
            far = block(far)

        fused = dense[:, -1, :] + far[:, -1, :]
        fused = fused.reshape(batch, height, width, dim).permute(0, 3, 1, 2).contiguous()
        return self.decoder(fused)
pos_weight = float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max)) + + criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device)) + + train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True) + val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False) + + history: List[Dict[str, float]] = [] + best_epoch = 1 + best_val_loss = float("inf") + best_state: Dict[str, torch.Tensor] | None = None + wait = 0 + + for epoch in range(1, cfg.max_epochs + 1): + model.train() + train_losses: List[float] = [] + + for xb, yb in train_loader: + xb = xb.to(device) + yb = yb.to(device) + + optimizer.zero_grad(set_to_none=True) + logits = model(xb) + loss = criterion(logits, yb) + loss.backward() + optimizer.step() + train_losses.append(float(loss.item())) + + model.eval() + val_losses: List[float] = [] + with torch.no_grad(): + for xb, yb in val_loader: + xb = xb.to(device) + yb = yb.to(device) + logits = model(xb) + loss = criterion(logits, yb) + val_losses.append(float(loss.item())) + + tr_loss = float(np.mean(train_losses)) if train_losses else float("nan") + va_loss = float(np.mean(val_losses)) if val_losses else float("nan") + + history.append( + { + "epoch": float(epoch), + "train_loss": tr_loss, + "val_loss": va_loss, + "learning_rate": float(optimizer.param_groups[0]["lr"]), + } + ) + + if va_loss < best_val_loss - cfg.min_delta: + best_val_loss = va_loss + best_epoch = epoch + best_state = deepcopy(model.state_dict()) + wait = 0 + else: + wait += 1 + + if wait >= cfg.early_stopping_rounds: + break + + if best_state is not None: + model.load_state_dict(best_state) + + val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7) + val_true = y_val.reshape(-1).astype(np.float32) + + mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0 + metrics = { + "auprc": float(average_precision_score(val_true, val_prob)), + 
"auroc": float(roc_auc_score(val_true, val_prob)), + "brier": float(brier_score_loss(val_true, val_prob)), + "nll": float(log_loss(val_true, val_prob)), + "ece": float(binary_ece(val_true, val_prob, n_bins=15)), + "mean_day_to_day_change": mean_change, + "normalized_consistency_score": normalized_consistency_score(mean_change), + } + + return model, history, metrics, best_epoch, pos_weight + + +def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None: + output_dir.mkdir(parents=True, exist_ok=True) + + history_csv = output_dir / "history.csv" + with history_csv.open("w", encoding="utf-8", newline="") as f: + writer = csv.DictWriter(f, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"]) + writer.writeheader() + writer.writerows(history) + + x = [int(r["epoch"]) for r in history] + y_tr = [float(r["train_loss"]) for r in history] + y_va = [float(r["val_loss"]) for r in history] + + plt.figure(figsize=(8, 5)) + plt.plot(x, y_tr, label="train_bce", marker="o", linewidth=1.4) + plt.plot(x, y_va, label="val_bce", marker="s", linewidth=1.2) + plt.xlabel("epoch") + plt.ylabel("loss") + plt.title("EarthFarseer Track-O: train loss vs epoch") + plt.grid(alpha=0.3) + plt.legend() + plt.tight_layout() + plt.savefig(output_dir / "loss_curve.png", dpi=150) + plt.close() + + +def build_experiment_setting( + cfg: EarthFarseerTrackOConfig, + best_epoch: int, + pos_weight: float, + metrics: Dict[str, float], +) -> Dict[str, Any]: + return { + "benchmark": { + "task": "Track-O", + "model_name": "earthfarseer", + "run_time": datetime.now().isoformat(), + }, + "evaluation_protocol": { + "discrimination": {"primary": "auprc", "secondary": "auroc"}, + "reliability": ["brier", "nll", "ece"], + "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"], + }, + "training": { + "train_unit": "epoch", + "max_epochs": cfg.max_epochs, + "early_stopping_rounds": cfg.early_stopping_rounds, + "best_epoch": best_epoch, + "seed": 
cfg.seed, + "batch_size": cfg.batch_size, + "seq_len": cfg.seq_len, + }, + "optimizer": { + "name": "AdamW", + "lr": cfg.lr, + "weight_decay": cfg.weight_decay, + }, + "learning_weight": { + "type": "pixel_pos_weight", + "value": pos_weight, + "clip_max": cfg.pos_weight_clip_max, + }, + "params": asdict(cfg), + "val_metrics": metrics, + "note": "This module supports both real data and synthetic smoke demonstration.", + } + + +def run_synthetic_demo( + output_dir: Path, + seed: int = 42, + n_samples: int = 160, + seq_len: int = 6, + image_size: int = 24, + max_epochs: int = 60, + early_stopping_rounds: int = 12, +) -> None: + x, y = make_synthetic_fire_sequences( + n_samples=n_samples, + seq_len=seq_len, + image_size=image_size, + seed=seed, + ) + x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed) + + cfg = EarthFarseerTrackOConfig( + seed=seed, + seq_len=seq_len, + max_epochs=max_epochs, + early_stopping_rounds=early_stopping_rounds, + device="cpu", + ) + + model, history, val_metrics, best_epoch, pos_weight = train_earthfarseer_track_o( + x_train, + y_train, + x_val, + y_val, + cfg, + ) + + test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False) + test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 1e-7) + test_true = y_test.reshape(-1).astype(np.float32) + + test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0 + test_metrics = { + "auprc": float(average_precision_score(test_true, test_prob)), + "auroc": float(roc_auc_score(test_true, test_prob)), + "brier": float(brier_score_loss(test_true, test_prob)), + "nll": float(log_loss(test_true, test_prob)), + "ece": float(binary_ece(test_true, test_prob, n_bins=15)), + "mean_day_to_day_change": test_mean_change, + "normalized_consistency_score": normalized_consistency_score(test_mean_change), + } + + output_dir.mkdir(parents=True, exist_ok=True) + 
def parse_args() -> argparse.Namespace:
    """Parse CLI options for the EarthFarseer Track-O synthetic smoke demo."""
    parser = argparse.ArgumentParser(description="Run EarthFarseer Track-O synthetic smoke demo")
    parser.add_argument("--output_dir", default=None)
    # Integer knobs: RNG seed, synthetic dataset shape, and training budget.
    int_options = (
        ("--seed", 42),
        ("--n_samples", 160),
        ("--seq_len", 6),
        ("--image_size", 24),
        ("--max_epochs", 60),
        ("--early_stopping_rounds", 12),
    )
    for flag, default in int_options:
        parser.add_argument(flag, type=int, default=default)
    return parser.parse_args()
def earthfarseer_builder(task: str, in_channels: int = 1, out_dim: int = 1, **kwargs: Any) -> nn.Module:
    """Benchmark-registry entry point: build a segmentation-ported TinyEarthFarseer.

    `require_task` rejects any task other than "segmentation"; extra kwargs are
    filtered down to TinyEarthFarseer's constructor signature before use.
    """
    require_task(task, {"segmentation"}, "earthfarseer")
    requested = {"in_channels": int(in_channels), **kwargs}
    accepted = filter_init_kwargs(TinyEarthFarseer, requested)
    return SegmentationPort(model=TinyEarthFarseer(**accepted), out_channels=int(out_dim))


__all__ = ["TinyEarthFarseer", "earthfarseer_builder"]
class TemporalTransformerBlock(nn.Module):
    """Pre-norm transformer encoder block applied along the time axis.

    Input and output share the shape [B*H*W, T, D]: one token sequence per
    spatial cell, attended over the T time steps.
    """

    def __init__(self, dim: int, num_heads: int, mlp_ratio: float = 2.0, dropout: float = 0.0):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(embed_dim=dim, num_heads=num_heads, dropout=dropout, batch_first=True)
        self.norm2 = nn.LayerNorm(dim)
        hidden = int(dim * mlp_ratio)
        self.ffn = nn.Sequential(
            nn.Linear(dim, hidden),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden, dim),
            nn.Dropout(dropout),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Self-attention sub-layer, pre-norm with residual connection.
        normed = self.norm1(x)
        attended, _ = self.attn(normed, normed, normed, need_weights=False)
        residual = x + attended
        # Position-wise feed-forward sub-layer with residual connection.
        return residual + self.ffn(self.norm2(residual))
x.reshape(b, h, w, d).permute(0, 3, 1, 2).contiguous() # [B,D,H,W] + + return self.decoder(x) + + +def _choose_device(device_text: str) -> torch.device: + if device_text == "cuda" and torch.cuda.is_available(): + return torch.device("cuda") + return torch.device("cpu") + + +def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader: + ds = TensorDataset( + torch.from_numpy(x.astype(np.float32)), + torch.from_numpy(y.astype(np.float32)), + ) + return DataLoader(ds, batch_size=batch_size, shuffle=shuffle) + + +def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray: + probs: List[np.ndarray] = [] + model.eval() + with torch.no_grad(): + for xb, _ in loader: + xb = xb.to(device) + logits = model(xb) + p = torch.sigmoid(logits).detach().cpu().numpy() + probs.append(p) + if not probs: + return np.zeros((0,), dtype=np.float32) + return np.concatenate(probs, axis=0).reshape(-1) + + +def train_earthformer_track_o( + x_train: np.ndarray, + y_train: np.ndarray, + x_val: np.ndarray, + y_val: np.ndarray, + cfg: EarthFormerTrackOConfig, +): + if x_train.ndim != 5 or x_val.ndim != 5: + raise ValueError("x_train and x_val must be 5D arrays [N,T,C,H,W]") + if y_train.ndim != 4 or y_val.ndim != 4: + raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]") + + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + + device = _choose_device(cfg.device) + + model = TinyEarthFormer( + in_channels=cfg.in_channels, + hidden_channels=cfg.hidden_channels, + num_heads=cfg.num_heads, + num_layers=cfg.num_layers, + ).to(device) + optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay) + + total_px = float(y_train.size) + pos_px = float(np.sum(y_train)) + neg_px = max(1.0, total_px - pos_px) + raw_pos_weight = neg_px / max(pos_px, 1.0) + pos_weight = float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max)) + + criterion = 
nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device)) + + train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True) + val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False) + + history: List[Dict[str, float]] = [] + best_epoch = 1 + best_val_loss = float("inf") + best_state: Dict[str, torch.Tensor] | None = None + wait = 0 + + for epoch in range(1, cfg.max_epochs + 1): + model.train() + train_losses: List[float] = [] + + for xb, yb in train_loader: + xb = xb.to(device) + yb = yb.to(device) + + optimizer.zero_grad(set_to_none=True) + logits = model(xb) + loss = criterion(logits, yb) + loss.backward() + optimizer.step() + train_losses.append(float(loss.item())) + + model.eval() + val_losses: List[float] = [] + with torch.no_grad(): + for xb, yb in val_loader: + xb = xb.to(device) + yb = yb.to(device) + logits = model(xb) + loss = criterion(logits, yb) + val_losses.append(float(loss.item())) + + tr_loss = float(np.mean(train_losses)) if train_losses else float("nan") + va_loss = float(np.mean(val_losses)) if val_losses else float("nan") + + history.append( + { + "epoch": float(epoch), + "train_loss": tr_loss, + "val_loss": va_loss, + "learning_rate": float(optimizer.param_groups[0]["lr"]), + } + ) + + if va_loss < best_val_loss - cfg.min_delta: + best_val_loss = va_loss + best_epoch = epoch + best_state = deepcopy(model.state_dict()) + wait = 0 + else: + wait += 1 + + if wait >= cfg.early_stopping_rounds: + break + + if best_state is not None: + model.load_state_dict(best_state) + + val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7) + val_true = y_val.reshape(-1).astype(np.float32) + + mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0 + metrics = { + "auprc": float(average_precision_score(val_true, val_prob)), + "auroc": float(roc_auc_score(val_true, val_prob)), + "brier": 
def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None:
    """Persist the per-epoch training history as CSV and render the loss-curve PNG."""
    output_dir.mkdir(parents=True, exist_ok=True)

    # Tabular record of the run, one row per epoch.
    with (output_dir / "history.csv").open("w", encoding="utf-8", newline="") as handle:
        writer = csv.DictWriter(handle, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"])
        writer.writeheader()
        writer.writerows(history)

    epochs = [int(row["epoch"]) for row in history]
    train_curve = [float(row["train_loss"]) for row in history]
    val_curve = [float(row["val_loss"]) for row in history]

    plt.figure(figsize=(8, 5))
    plt.plot(epochs, train_curve, label="train_bce", marker="o", linewidth=1.4)
    plt.plot(epochs, val_curve, label="val_bce", marker="s", linewidth=1.2)
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.title("Earthformer Track-O: train loss vs epoch")
    plt.grid(alpha=0.3)
    plt.legend()
    plt.tight_layout()
    plt.savefig(output_dir / "loss_curve.png", dpi=150)
    plt.close()
}, + "optimizer": { + "name": "AdamW", + "lr": cfg.lr, + "weight_decay": cfg.weight_decay, + }, + "learning_weight": { + "type": "pixel_pos_weight", + "value": pos_weight, + "clip_max": cfg.pos_weight_clip_max, + }, + "params": asdict(cfg), + "val_metrics": metrics, + "note": "This module supports both real data and synthetic smoke demonstration.", + } + + +def run_synthetic_demo( + output_dir: Path, + seed: int = 42, + n_samples: int = 160, + seq_len: int = 6, + image_size: int = 24, + max_epochs: int = 60, + early_stopping_rounds: int = 12, +) -> None: + x, y = make_synthetic_fire_sequences( + n_samples=n_samples, + seq_len=seq_len, + image_size=image_size, + seed=seed, + ) + x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed) + + cfg = EarthFormerTrackOConfig( + seed=seed, + seq_len=seq_len, + max_epochs=max_epochs, + early_stopping_rounds=early_stopping_rounds, + device="cpu", + ) + + model, history, val_metrics, best_epoch, pos_weight = train_earthformer_track_o( + x_train, + y_train, + x_val, + y_val, + cfg, + ) + + test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False) + test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 1e-7) + test_true = y_test.reshape(-1).astype(np.float32) + + test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0 + test_metrics = { + "auprc": float(average_precision_score(test_true, test_prob)), + "auroc": float(roc_auc_score(test_true, test_prob)), + "brier": float(brier_score_loss(test_true, test_prob)), + "nll": float(log_loss(test_true, test_prob)), + "ece": float(binary_ece(test_true, test_prob, n_bins=15)), + "mean_day_to_day_change": test_mean_change, + "normalized_consistency_score": normalized_consistency_score(test_mean_change), + } + + output_dir.mkdir(parents=True, exist_ok=True) + save_history_and_plot(history, output_dir) + + torch.save( + { + "state_dict": 
def parse_args() -> argparse.Namespace:
    """Parse CLI options for the Earthformer Track-O synthetic smoke demo."""
    parser = argparse.ArgumentParser(description="Run Earthformer Track-O synthetic smoke demo")
    parser.add_argument("--output_dir", default=None)
    # Integer knobs: RNG seed, synthetic dataset shape, and training budget.
    int_options = (
        ("--seed", 42),
        ("--n_samples", 160),
        ("--seq_len", 6),
        ("--image_size", 24),
        ("--max_epochs", 60),
        ("--early_stopping_rounds", 12),
    )
    for flag, default in int_options:
        parser.add_argument(flag, type=int, default=default)
    return parser.parse_args()
class EQNet(nn.Module):
    """Transformer-style earthquake phase-picking baseline."""

    def __init__(
        self,
        in_channels: int = 3,
        hidden_dim: int = 48,
        num_heads: int = 4,
        num_layers: int = 2,
        dropout: float = 0.1,
    ):
        super().__init__()
        # Project waveform channels to the transformer width.
        self.proj = nn.Conv1d(in_channels, hidden_dim, kernel_size=5, padding=2)
        self.encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(
                d_model=hidden_dim,
                nhead=num_heads,
                dim_feedforward=2 * hidden_dim,
                dropout=dropout,
                batch_first=True,
                activation="gelu",
            ),
            num_layers=num_layers,
        )
        self.head = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, 2),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map (batch, channels, length) waveforms to two mean-pooled outputs.

        Raises ValueError when the input is not 3-D.
        """
        if x.ndim != 3:
            raise ValueError("EQNet expects inputs shaped (batch, channels, length).")
        tokens = self.proj(x).transpose(1, 2)      # (batch, length, hidden)
        pooled = self.encoder(tokens).mean(dim=1)  # global average over time
        return self.head(pooled)
class EQTransformer(nn.Module):
    """Compact sequence model for joint earthquake phase picking."""

    def __init__(
        self,
        in_channels: int = 3,
        hidden_dim: int = 48,
        num_layers: int = 2,
        dropout: float = 0.1,
    ):
        super().__init__()
        # Convolutional front-end over the raw waveform channels.
        self.encoder = nn.Sequential(
            nn.Conv1d(in_channels, hidden_dim, kernel_size=11, padding=5),
            nn.ReLU(),
            nn.Conv1d(hidden_dim, hidden_dim, kernel_size=7, padding=3),
            nn.ReLU(),
        )
        # Bidirectional recurrence over time; inter-layer dropout needs >1 layer.
        self.temporal = nn.LSTM(
            input_size=hidden_dim,
            hidden_size=hidden_dim,
            num_layers=num_layers,
            batch_first=True,
            bidirectional=True,
            dropout=dropout if num_layers > 1 else 0.0,
        )
        self.attention = nn.Linear(2 * hidden_dim, 1)
        self.head = nn.Sequential(
            nn.Linear(2 * hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 2),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map (batch, channels, length) waveforms to two attention-pooled outputs.

        Raises ValueError when the input is not 3-D.
        """
        if x.ndim != 3:
            raise ValueError("EQTransformer expects inputs shaped (batch, channels, length).")
        features = self.encoder(x).transpose(1, 2)  # (batch, length, hidden)
        recurrent, _ = self.temporal(features)      # (batch, length, 2*hidden)
        scores = torch.softmax(self.attention(recurrent), dim=1)
        context = torch.sum(scores * recurrent, dim=1)  # attention-weighted pooling
        return self.head(context)
class FireCastNet(nn.Module):
    """Compact encoder-decoder wildfire forecasting network."""

    def __init__(
        self,
        in_channels: int = 12,
        hidden_dim: int = 32,
        out_channels: int = 1,
        dropout: float = 0.1,
    ):
        super().__init__()
        # Fail fast on invalid hyper-parameters.
        for name, value in (
            ("in_channels", in_channels),
            ("hidden_dim", hidden_dim),
            ("out_channels", out_channels),
        ):
            if value <= 0:
                raise ValueError(f"{name} must be positive, got {value}")
        if not 0.0 <= dropout < 1.0:
            raise ValueError(f"dropout must be in [0, 1), got {dropout}")

        self.in_channels = int(in_channels)
        self.encoder = nn.Sequential(
            nn.Conv2d(in_channels, hidden_dim, kernel_size=3, padding=1),
            nn.GELU(),
            nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, padding=1),
            nn.GELU(),
        )
        self.decoder = nn.Sequential(
            nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, padding=1),
            nn.GELU(),
            # Dropout2d only when requested; Identity keeps the module count stable.
            nn.Dropout2d(dropout) if dropout > 0 else nn.Identity(),
            nn.Conv2d(hidden_dim, out_channels, kernel_size=1),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map (batch, in_channels, H, W) inputs to (batch, out_channels, H, W) logits.

        Raises ValueError on a non-4-D input or a channel-count mismatch.
        """
        if x.ndim != 4:
            raise ValueError(
                "FireCastNet expects input shape (batch, channels, height, width), "
                f"got {tuple(x.shape)}."
            )
        if x.size(1) != self.in_channels:
            raise ValueError(f"FireCastNet expected in_channels={self.in_channels}, got {x.size(1)}.")
        return self.decoder(self.encoder(x))
self.memory.t()) / max(1.0, self.hidden_dim ** 0.5) + weights = torch.softmax(scores, dim=-1) + retrieved = torch.matmul(weights, self.memory).transpose(1, 2).reshape(batch, channels, height, width) + return x + retrieved + + +class FireMMIR(nn.Module): + """Dual-modality wildfire scene model inspired by FireMM-IR.""" + + def __init__( + self, + in_channels: int = 6, + out_dim: int = 1, + hidden_dim: int = 64, + instruction_dim: int = 16, + num_memory_slots: int = 3, + num_heads: int = 4, + dropout: float = 0.1, + ): + super().__init__() + if in_channels < 2 or in_channels % 2 != 0: + raise ValueError(f"in_channels must be an even number >= 2, got {in_channels}") + if out_dim <= 0: + raise ValueError(f"out_dim must be positive, got {out_dim}") + if hidden_dim % num_heads != 0: + raise ValueError(f"hidden_dim={hidden_dim} must be divisible by num_heads={num_heads}") + + self.in_channels = int(in_channels) + self.hidden_dim = int(hidden_dim) + self.instruction_dim = int(instruction_dim) + self.optical_channels = self.in_channels // 2 + self.infrared_channels = self.in_channels - self.optical_channels + + self.optical_encoder = ModalityEncoder(self.optical_channels, hidden_dim) + self.infrared_encoder = ModalityEncoder(self.infrared_channels, hidden_dim) + self.fusion_gate = nn.Sequential( + nn.Conv2d(hidden_dim * 2, hidden_dim, kernel_size=1), + nn.GELU(), + nn.Conv2d(hidden_dim, hidden_dim, kernel_size=1), + nn.Sigmoid(), + ) + self.memory = ClassAwareMemory(hidden_dim=hidden_dim, num_memory_slots=num_memory_slots) + + self.instruction_proj = nn.Linear(self.instruction_dim, hidden_dim) + self.segmentation_token = nn.Parameter(torch.randn(1, hidden_dim) * 0.02) + self.token_attn = nn.MultiheadAttention( + embed_dim=hidden_dim, + num_heads=num_heads, + dropout=dropout, + batch_first=True, + ) + self.ffn = nn.Sequential( + nn.LayerNorm(hidden_dim), + nn.Linear(hidden_dim, hidden_dim * 2), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(hidden_dim * 2, hidden_dim), + ) 
+ self.decoder = nn.Sequential( + nn.Conv2d(hidden_dim * 2, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + nn.Conv2d(hidden_dim, hidden_dim // 2, kernel_size=3, padding=1), + nn.GELU(), + ) + self.head = nn.Conv2d(hidden_dim // 2, out_dim, kernel_size=1) + + def _unpack_inputs(self, inputs: torch.Tensor | Dict[str, Any]) -> Tuple[torch.Tensor, torch.Tensor | None]: + if isinstance(inputs, dict): + x = inputs.get("x") + instruction = inputs.get("instruction_context") + else: + x = inputs + instruction = None + + if not isinstance(x, torch.Tensor): + raise ValueError("FireMMIR expects a tensor input or a dict containing key 'x'.") + if x.ndim != 4: + raise ValueError(f"FireMMIR expects input shape (B, C, H, W), got {tuple(x.shape)}") + if x.size(1) != self.in_channels: + raise ValueError(f"FireMMIR expected in_channels={self.in_channels}, got {x.size(1)}") + return x, instruction + + def _coerce_instruction(self, instruction: torch.Tensor | None, batch: int, device: torch.device) -> torch.Tensor: + if instruction is None: + return torch.zeros(batch, self.instruction_dim, device=device) + if instruction.ndim != 2 or instruction.size(0) != batch: + raise ValueError(f"instruction_context must have shape (B,D), got {tuple(instruction.shape)}") + if instruction.size(1) == self.instruction_dim: + return instruction.to(device=device, dtype=torch.float32) + if instruction.size(1) > self.instruction_dim: + return instruction[:, : self.instruction_dim].to(device=device, dtype=torch.float32) + pad = torch.zeros(batch, self.instruction_dim - instruction.size(1), device=device) + return torch.cat([instruction.to(device=device, dtype=torch.float32), pad], dim=1) + + def forward(self, inputs: torch.Tensor | Dict[str, Any]) -> torch.Tensor: + x, instruction = self._unpack_inputs(inputs) + batch = x.size(0) + device = x.device + + optical = x[:, : self.optical_channels] + infrared = x[:, self.optical_channels :] + optical_feat = self.optical_encoder(optical) + infrared_feat = 
self.infrared_encoder(infrared) + gate = self.fusion_gate(torch.cat([optical_feat, infrared_feat], dim=1)) + fused = optical_feat + gate * infrared_feat + fused = self.memory(fused) + + visual_tokens = fused.flatten(2).transpose(1, 2) + instruction_token = self.instruction_proj(self._coerce_instruction(instruction, batch, device)).unsqueeze(1) + seg_token = self.segmentation_token.unsqueeze(0).expand(batch, -1, -1) + query_tokens = torch.cat([seg_token, instruction_token], dim=1) + attn_out, _ = self.token_attn(query_tokens, visual_tokens, visual_tokens, need_weights=False) + query_tokens = attn_out + self.ffn(attn_out) + global_token = query_tokens.mean(dim=1) + + context_map = global_token.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, fused.size(-2), fused.size(-1)) + decoded = self.decoder(torch.cat([fused, context_map], dim=1)) + return self.head(decoded) + + +def firemm_ir_builder( + task: str, + in_channels: int = 6, + out_dim: int = 1, + hidden_dim: int = 64, + instruction_dim: int = 16, + num_memory_slots: int = 3, + num_heads: int = 4, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "segmentation": + raise ValueError(f"firemm_ir is segmentation-only in PyHazards, got task={task!r}.") + return FireMMIR( + in_channels=in_channels, + out_dim=out_dim, + hidden_dim=hidden_dim, + instruction_dim=instruction_dim, + num_memory_slots=num_memory_slots, + num_heads=num_heads, + dropout=dropout, + ) + + +__all__ = ["FireMMIR", "firemm_ir_builder"] diff --git a/pyhazards/models/firepred.py b/pyhazards/models/firepred.py new file mode 100644 index 00000000..0cf67033 --- /dev/null +++ b/pyhazards/models/firepred.py @@ -0,0 +1,98 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class FirePred(nn.Module): + """Hybrid multi-temporal CNN wildfire spread predictor inspired by FirePred.""" + + def __init__( + self, + history: int = 5, + in_channels: int = 8, + hidden_dim: int = 32, + out_channels: int = 1, + 
dropout: float = 0.1, + ): + super().__init__() + if history <= 0: + raise ValueError(f"history must be positive, got {history}") + if in_channels <= 0: + raise ValueError(f"in_channels must be positive, got {in_channels}") + if hidden_dim <= 0: + raise ValueError(f"hidden_dim must be positive, got {hidden_dim}") + if out_channels <= 0: + raise ValueError(f"out_channels must be positive, got {out_channels}") + if not 0.0 <= dropout < 1.0: + raise ValueError(f"dropout must be in [0, 1), got {dropout}") + + self.history = int(history) + self.in_channels = int(in_channels) + + self.recent_branch = nn.Sequential( + nn.Conv3d(in_channels, hidden_dim, kernel_size=(3, 3, 3), padding=1), + nn.GELU(), + nn.Conv3d(hidden_dim, hidden_dim, kernel_size=(3, 3, 3), padding=1), + nn.GELU(), + ) + self.daily_branch = nn.Sequential( + nn.Conv2d(in_channels, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + ) + self.snapshot_branch = nn.Sequential( + nn.Conv2d(in_channels, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, padding=2, dilation=2), + nn.GELU(), + ) + self.fusion = nn.Sequential( + nn.Conv2d(hidden_dim * 3, hidden_dim * 2, kernel_size=3, padding=1), + nn.GELU(), + nn.Dropout2d(dropout) if dropout > 0 else nn.Identity(), + nn.Conv2d(hidden_dim * 2, out_channels, kernel_size=1), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 5: + raise ValueError( + "FirePred expects input shape (batch, history, channels, height, width), " + f"got {tuple(x.shape)}." 
+ ) + if x.size(1) != self.history: + raise ValueError(f"FirePred expected history={self.history}, got {x.size(1)}.") + if x.size(2) != self.in_channels: + raise ValueError(f"FirePred expected in_channels={self.in_channels}, got {x.size(2)}.") + + x_3d = x.permute(0, 2, 1, 3, 4) + recent = torch.mean(self.recent_branch(x_3d), dim=2) + daily = self.daily_branch(torch.mean(x, dim=1)) + snapshot = self.snapshot_branch(x[:, -1]) + fused = torch.cat([recent, daily, snapshot], dim=1) + return self.fusion(fused) + + +def firepred_builder( + task: str, + history: int = 5, + in_channels: int = 8, + hidden_dim: int = 32, + out_channels: int = 1, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() not in {"segmentation", "regression"}: + raise ValueError(f"firepred supports task='segmentation' or 'regression', got {task!r}.") + return FirePred( + history=history, + in_channels=in_channels, + hidden_dim=hidden_dim, + out_channels=out_channels, + dropout=dropout, + ) + + +__all__ = ["FirePred", "firepred_builder"] diff --git a/pyhazards/models/floodcast.py b/pyhazards/models/floodcast.py new file mode 100644 index 00000000..94139d69 --- /dev/null +++ b/pyhazards/models/floodcast.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class FloodCast(nn.Module): + """Compact spatiotemporal inundation baseline.""" + + def __init__( + self, + in_channels: int = 3, + history: int = 4, + hidden_dim: int = 32, + out_channels: int = 1, + dropout: float = 0.1, + ): + super().__init__() + self.history = int(history) + self.encoder = nn.Sequential( + nn.Conv3d(in_channels, hidden_dim, kernel_size=(3, 3, 3), padding=1), + nn.ReLU(), + nn.Dropout3d(dropout) if dropout > 0 else nn.Identity(), + nn.Conv3d(hidden_dim, hidden_dim, kernel_size=(3, 3, 3), padding=1), + nn.ReLU(), + ) + self.head = nn.Conv2d(hidden_dim, out_channels, kernel_size=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 5: 
+ raise ValueError("FloodCast expects inputs shaped (batch, history, channels, height, width).") + if x.size(1) != self.history: + raise ValueError(f"FloodCast expected history={self.history}, got {x.size(1)}.") + encoded = self.encoder(x.permute(0, 2, 1, 3, 4)) + fused = encoded.mean(dim=2) + return self.head(fused) + + +def floodcast_builder( + task: str, + in_channels: int = 3, + history: int = 4, + hidden_dim: int = 32, + out_channels: int = 1, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() not in {"regression", "segmentation"}: + raise ValueError("FloodCast only supports regression or segmentation-style inundation outputs.") + return FloodCast( + in_channels=in_channels, + history=history, + hidden_dim=hidden_dim, + out_channels=out_channels, + dropout=dropout, + ) + + +__all__ = ["FloodCast", "floodcast_builder"] diff --git a/pyhazards/models/forefire.py b/pyhazards/models/forefire.py new file mode 100644 index 00000000..d117bc77 --- /dev/null +++ b/pyhazards/models/forefire.py @@ -0,0 +1,68 @@ +from __future__ import annotations + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class ForeFireAdapter(nn.Module): + """Lightweight deterministic spread adapter inspired by simulator-style fronts.""" + + def __init__( + self, + in_channels: int = 12, + out_channels: int = 1, + diffusion_steps: int = 2, + ): + super().__init__() + if in_channels <= 0: + raise ValueError(f"in_channels must be positive, got {in_channels}") + if out_channels != 1: + raise ValueError(f"ForeFireAdapter only supports out_channels=1, got {out_channels}") + if diffusion_steps <= 0: + raise ValueError(f"diffusion_steps must be positive, got {diffusion_steps}") + + self.in_channels = int(in_channels) + self.diffusion_steps = int(diffusion_steps) + kernel = torch.tensor( + [[0.05, 0.15, 0.05], [0.15, 0.20, 0.15], [0.05, 0.15, 0.05]], + dtype=torch.float32, + ).view(1, 1, 3, 3) + self.register_buffer("spread_kernel", kernel) + + 
def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 4: + raise ValueError( + "ForeFireAdapter expects input shape (batch, channels, height, width), " + f"got {tuple(x.shape)}." + ) + if x.size(1) != self.in_channels: + raise ValueError(f"ForeFireAdapter expected in_channels={self.in_channels}, got {x.size(1)}.") + + state = torch.sigmoid(x[:, :1]) + fuel = torch.sigmoid(x[:, 1:2]) + wind = torch.tanh(x[:, 2:3]).abs() + for _ in range(self.diffusion_steps): + neighborhood = F.conv2d(state, self.spread_kernel, padding=1) + state = torch.clamp(0.45 * state + 0.40 * neighborhood + 0.10 * fuel + 0.05 * wind, 0.0, 1.0) + return state + + +def forefire_builder( + task: str, + in_channels: int = 12, + out_channels: int = 1, + diffusion_steps: int = 2, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() not in {"segmentation", "regression"}: + raise ValueError(f"forefire supports task='segmentation' or 'regression', got {task!r}.") + return ForeFireAdapter( + in_channels=in_channels, + out_channels=out_channels, + diffusion_steps=diffusion_steps, + ) + + +__all__ = ["ForeFireAdapter", "forefire_builder"] diff --git a/pyhazards/models/fourcastnet_tc.py b/pyhazards/models/fourcastnet_tc.py new file mode 100644 index 00000000..4230d095 --- /dev/null +++ b/pyhazards/models/fourcastnet_tc.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class FourCastNetTC(nn.Module): + """Experimental wrapper-style FourCastNet storm adapter.""" + + def __init__( + self, + input_dim: int = 8, + history: int = 6, + hidden_dim: int = 96, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + ): + super().__init__() + self.history = int(history) + self.horizon = int(horizon) + self.output_dim = int(output_dim) + self.net = nn.Sequential( + nn.Linear(self.history * input_dim, hidden_dim), + nn.GELU(), + nn.Dropout(dropout) if dropout > 0 else nn.Identity(), + nn.Linear(hidden_dim, hidden_dim), + nn.GELU(), + 
nn.Linear(hidden_dim, self.horizon * self.output_dim), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("FourCastNetTC expects inputs shaped (batch, history, features).") + if x.size(1) != self.history: + raise ValueError(f"FourCastNetTC expected history={self.history}, got {x.size(1)}.") + preds = self.net(x.reshape(x.size(0), -1)) + return preds.view(x.size(0), self.horizon, self.output_dim) + + +def fourcastnet_tc_builder( + task: str, + input_dim: int = 8, + history: int = 6, + hidden_dim: int = 96, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("FourCastNetTC only supports regression for track/intensity forecasting.") + return FourCastNetTC( + input_dim=input_dim, + history=history, + hidden_dim=hidden_dim, + horizon=horizon, + output_dim=output_dim, + dropout=dropout, + ) + + +__all__ = ["FourCastNetTC", "fourcastnet_tc_builder"] diff --git a/pyhazards/models/gemini_25_pro_wildfire_prompted.py b/pyhazards/models/gemini_25_pro_wildfire_prompted.py new file mode 100644 index 00000000..6fcac692 --- /dev/null +++ b/pyhazards/models/gemini_25_pro_wildfire_prompted.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +import torch.nn as nn + +from .qwen25_vl_wildfire_prompted import Qwen25VLWildfirePrompted + + +class Gemini25ProWildfirePrompted(Qwen25VLWildfirePrompted): + """Benchmark-facing wildfire VLM baseline inspired by Gemini 2.5 Pro.""" + + +def gemini_25_pro_wildfire_prompted_builder( + task: str, + in_channels: int = 6, + out_dim: int = 1, + hidden_dim: int = 96, + prompt_dim: int = 32, + num_prompt_tokens: int = 6, + num_heads: int = 8, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "segmentation": + raise ValueError( + f"gemini_25_pro_wildfire_prompted is segmentation-only in PyHazards, got task={task!r}." 
+ ) + return Gemini25ProWildfirePrompted( + in_channels=in_channels, + out_dim=out_dim, + hidden_dim=hidden_dim, + prompt_dim=prompt_dim, + num_prompt_tokens=num_prompt_tokens, + num_heads=num_heads, + dropout=dropout, + ) + + +__all__ = ["Gemini25ProWildfirePrompted", "gemini_25_pro_wildfire_prompted_builder"] diff --git a/pyhazards/models/google_flood_forecasting.py b/pyhazards/models/google_flood_forecasting.py new file mode 100644 index 00000000..f3722272 --- /dev/null +++ b/pyhazards/models/google_flood_forecasting.py @@ -0,0 +1,92 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class GoogleFloodForecasting(nn.Module): + """Sequence baseline for streamflow-style flood forecasting.""" + + def __init__( + self, + input_dim: int = 2, + hidden_dim: int = 64, + out_dim: int = 1, + history: int = 4, + dropout: float = 0.1, + ): + super().__init__() + if input_dim <= 0: + raise ValueError(f"input_dim must be positive, got {input_dim}") + if hidden_dim <= 0: + raise ValueError(f"hidden_dim must be positive, got {hidden_dim}") + if out_dim <= 0: + raise ValueError(f"out_dim must be positive, got {out_dim}") + if history <= 0: + raise ValueError(f"history must be positive, got {history}") + if not 0.0 <= dropout < 1.0: + raise ValueError(f"dropout must be in [0, 1), got {dropout}") + + self.history = int(history) + self.proj = nn.Linear(input_dim, hidden_dim) + self.temporal = nn.TransformerEncoder( + nn.TransformerEncoderLayer( + d_model=hidden_dim, + nhead=4, + dim_feedforward=hidden_dim * 2, + dropout=dropout, + batch_first=True, + activation="gelu", + ), + num_layers=2, + ) + self.head = nn.Sequential( + nn.Linear(hidden_dim, hidden_dim), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(hidden_dim, out_dim), + ) + + def forward(self, batch) -> torch.Tensor: + if not isinstance(batch, dict) or "x" not in batch: + raise ValueError("GoogleFloodForecasting expects a mapping input with key 'x'.") + x = batch["x"] + if x.ndim != 4: + 
raise ValueError( + "GoogleFloodForecasting expects input shape (batch, history, nodes, features), " + f"got {tuple(x.shape)}." + ) + if x.size(1) != self.history: + raise ValueError(f"GoogleFloodForecasting expected history={self.history}, got {x.size(1)}.") + encoded = self.proj(x) + temporal = encoded.permute(0, 2, 1, 3).reshape(-1, self.history, encoded.size(-1)) + hidden = self.temporal(temporal)[:, -1] + preds = self.head(hidden) + return preds.view(x.size(0), x.size(2), -1) + + +def google_flood_forecasting_builder( + task: str, + input_dim: int = 2, + hidden_dim: int = 64, + out_dim: int = 1, + history: int = 4, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError( + "google_flood_forecasting only supports task='regression', " + f"got {task!r}." + ) + return GoogleFloodForecasting( + input_dim=input_dim, + hidden_dim=hidden_dim, + out_dim=out_dim, + history=history, + dropout=dropout, + ) + + +__all__ = ["GoogleFloodForecasting", "google_flood_forecasting_builder"] diff --git a/pyhazards/models/gpd.py b/pyhazards/models/gpd.py new file mode 100644 index 00000000..bb034f49 --- /dev/null +++ b/pyhazards/models/gpd.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class GPD(nn.Module): + """Simple CNN baseline for generalized phase detection style picking.""" + + def __init__(self, in_channels: int = 3, hidden_dim: int = 32, dropout: float = 0.1): + super().__init__() + self.features = nn.Sequential( + nn.Conv1d(in_channels, hidden_dim, kernel_size=9, padding=4), + nn.ReLU(), + nn.MaxPool1d(kernel_size=2), + nn.Conv1d(hidden_dim, hidden_dim, kernel_size=7, padding=3), + nn.ReLU(), + nn.MaxPool1d(kernel_size=2), + nn.Conv1d(hidden_dim, hidden_dim, kernel_size=5, padding=2), + nn.ReLU(), + nn.AdaptiveAvgPool1d(1), + ) + self.head = nn.Sequential( + nn.Flatten(), + nn.Dropout(dropout), + nn.Linear(hidden_dim, hidden_dim), + nn.ReLU(), + 
nn.Linear(hidden_dim, 2), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("GPD expects inputs shaped (batch, channels, length).") + return self.head(self.features(x)) + + +def gpd_builder( + task: str, + in_channels: int = 3, + hidden_dim: int = 32, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("GPD only supports regression-style phase picking outputs.") + return GPD(in_channels=in_channels, hidden_dim=hidden_dim, dropout=dropout) + + +__all__ = ["GPD", "gpd_builder"] diff --git a/pyhazards/models/graphcast_tc.py b/pyhazards/models/graphcast_tc.py new file mode 100644 index 00000000..f61fb445 --- /dev/null +++ b/pyhazards/models/graphcast_tc.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class GraphCastTC(nn.Module): + """Experimental wrapper-style GraphCast storm adapter.""" + + def __init__( + self, + input_dim: int = 8, + hidden_dim: int = 96, + horizon: int = 5, + output_dim: int = 3, + num_layers: int = 2, + num_heads: int = 4, + dropout: float = 0.1, + ): + super().__init__() + self.horizon = int(horizon) + self.output_dim = int(output_dim) + self.proj = nn.Linear(input_dim, hidden_dim) + encoder_layer = nn.TransformerEncoderLayer( + d_model=hidden_dim, + nhead=num_heads, + dim_feedforward=2 * hidden_dim, + dropout=dropout, + batch_first=True, + ) + self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers) + self.head = nn.Linear(hidden_dim, self.horizon * self.output_dim) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("GraphCastTC expects inputs shaped (batch, history, features).") + encoded = self.encoder(self.proj(x)) + preds = self.head(encoded.mean(dim=1)) + return preds.view(x.size(0), self.horizon, self.output_dim) + + +def graphcast_tc_builder( + task: str, + input_dim: int = 8, + hidden_dim: int = 96, + horizon: int 
import torch.nn as nn


class ClassificationHead(nn.Module):
    """Linear head mapping pooled features to class logits."""

    def __init__(self, in_dim: int, num_classes: int):
        super().__init__()
        # Attribute name kept as `fc` for checkpoint/state-dict compatibility.
        self.fc = nn.Linear(in_dim, num_classes)

    def forward(self, x):
        logits = self.fc(x)
        return logits


class RegressionHead(nn.Module):
    """Linear head for scalar or multi-target regression outputs."""

    def __init__(self, in_dim: int, out_dim: int = 1):
        super().__init__()
        self.fc = nn.Linear(in_dim, out_dim)

    def forward(self, x):
        preds = self.fc(x)
        return preds


class SegmentationHead(nn.Module):
    """1x1 convolution head projecting feature maps to per-pixel class scores."""

    def __init__(self, in_channels: int, num_classes: int):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, num_classes, kernel_size=1)

    def forward(self, x):
        masks = self.conv(x)
        return masks


__all__ = ["ClassificationHead", "RegressionHead", "SegmentationHead"]
def hurricast_builder(
    task: str,
    input_dim: int = 8,
    hidden_dim: int = 64,
    num_layers: int = 2,
    horizon: int = 5,
    output_dim: int = 3,
    dropout: float = 0.1,
    **kwargs,
) -> nn.Module:
    """Catalog builder for Hurricast; regression only, extra kwargs ignored."""
    _ = kwargs
    if task.lower() != "regression":
        raise ValueError("Hurricast only supports regression for track/intensity forecasting.")
    return Hurricast(
        input_dim=input_dim,
        hidden_dim=hidden_dim,
        num_layers=num_layers,
        horizon=horizon,
        output_dim=output_dim,
        dropout=dropout,
    )


# --- pyhazards/models/hydrographnet.py ---
class MLP(nn.Module):
    """Two-hidden-layer ReLU perceptron with optional dropout after each ReLU."""

    def __init__(self, in_dim: int, out_dim: int, hidden_dim: int = 64, dropout: float = 0.0):
        super().__init__()
        stages = []
        for fan_in, fan_out in ((in_dim, hidden_dim), (hidden_dim, hidden_dim)):
            stages += [
                nn.Linear(fan_in, fan_out),
                nn.ReLU(),
                nn.Dropout(dropout) if dropout > 0 else nn.Identity(),
            ]
        stages.append(nn.Linear(hidden_dim, out_dim))
        self.layers = nn.Sequential(*stages)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.layers(x)
hidden_dim), + nn.ReLU(), + nn.Dropout(dropout) if dropout > 0 else nn.Identity(), + nn.Linear(hidden_dim, out_dim), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.layers(x) + + +class KAN(nn.Module): + """ + Lightweight KAN-style harmonic basis encoder for node features. + """ + + def __init__(self, in_dim: int, harmonics: int = 5, hidden_dim: int = 64): + super().__init__() + self.in_dim = in_dim + self.harmonics = harmonics + self.feature_proj = nn.ModuleList( + [nn.Linear(2 * harmonics + 1, hidden_dim) for _ in range(in_dim)] + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # x: (B, N, F) + outputs = [] + for i in range(self.in_dim): + xi = x[:, :, i].unsqueeze(-1) + basis = [torch.ones_like(xi)] + for k in range(1, self.harmonics + 1): + basis.append(torch.sin(k * xi)) + basis.append(torch.cos(k * xi)) + basis = torch.cat(basis, dim=-1) + outputs.append(self.feature_proj[i](basis)) + return torch.stack(outputs, dim=0).sum(dim=0) + + +class GNBlock(nn.Module): + """ + Message-passing block with residual edge and node updates. + """ + + def __init__(self, hidden_dim: int, dropout: float = 0.0): + super().__init__() + self.edge_mlp = MLP(3 * hidden_dim, hidden_dim, hidden_dim, dropout=dropout) + self.node_mlp = MLP(2 * hidden_dim, hidden_dim, hidden_dim, dropout=dropout) + + def forward( + self, + node: torch.Tensor, + edge: torch.Tensor, + senders: torch.Tensor, + receivers: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + sender_feat = node[:, senders, :] + receiver_feat = node[:, receivers, :] + + edge_input = torch.cat([edge, sender_feat, receiver_feat], dim=-1) + edge = edge + self.edge_mlp(edge_input) + + agg = torch.zeros_like(node) + agg.index_add_(1, receivers, edge) + + # Degree-normalized aggregation improves stability when graph density changes. 
class HydroGraphNet(nn.Module):
    """
    PhysicsNeMo-inspired HydroGraphNet:
    encoder -> message-passing processor -> residual delta-state decoder.

    Supports one-step forward prediction and autoregressive rollout.
    The leading ``state_dim`` node-feature channels are the physical state
    that the decoder evolves residually; the first ``out_dim`` of those are
    returned as predictions.
    """

    def __init__(
        self,
        node_in_dim: int,
        edge_in_dim: int,
        out_dim: int,
        hidden_dim: int = 64,
        harmonics: int = 5,
        num_gn_blocks: int = 5,
        state_dim: Optional[int] = None,
        rollout_steps: int = 1,
        enforce_nonnegative: bool = False,
        dropout: float = 0.0,
    ):
        super().__init__()
        self.node_in_dim = int(node_in_dim)
        self.edge_in_dim = int(edge_in_dim)
        self.out_dim = int(out_dim)
        # Default residual state: at most two leading channels, never more
        # than the available node features, never fewer than one.
        self.state_dim = int(state_dim) if state_dim is not None else min(2, self.node_in_dim)
        self.state_dim = max(1, min(self.state_dim, self.node_in_dim))
        if self.out_dim > self.state_dim:
            raise ValueError(
                f"out_dim={self.out_dim} cannot exceed residual state_dim={self.state_dim}."
            )
        self.rollout_steps = max(1, int(rollout_steps))
        self.enforce_nonnegative = bool(enforce_nonnegative)

        # Encoder
        self.node_encoder = KAN(
            in_dim=self.node_in_dim,
            hidden_dim=hidden_dim,
            harmonics=harmonics,
        )
        self.edge_encoder = MLP(
            in_dim=self.edge_in_dim,
            out_dim=hidden_dim,
            hidden_dim=hidden_dim,
            dropout=dropout,
        )

        # Processor
        self.processor = nn.ModuleList(
            [GNBlock(hidden_dim=hidden_dim, dropout=dropout) for _ in range(num_gn_blocks)]
        )

        # Decoder predicts delta of physically meaningful states.
        self.decoder = MLP(
            in_dim=hidden_dim,
            out_dim=self.state_dim,
            hidden_dim=hidden_dim,
            dropout=dropout,
        )

    def _edge_index(self, adj: torch.Tensor, batch_size: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Derive (senders, receivers) index tensors from a dense adjacency.

        Accepts a shared (N, N) matrix or an identical (B, N, N) stack; the
        diagonal is forced on so every node receives a self-message.
        """
        if adj.dim() == 2:
            shared = adj
        elif adj.dim() == 3:
            if adj.size(0) != batch_size:
                raise ValueError(f"adj batch size mismatch: got {adj.size(0)}, expected {batch_size}")
            shared = adj[0]
            for sample in range(1, batch_size):
                if not torch.allclose(adj[sample], shared):
                    raise ValueError(
                        "Per-sample varying adjacency is not supported yet. "
                        "Provide a shared (N, N) adjacency or identical (B, N, N) adjacency."
                    )
        else:
            raise ValueError("adj must be shaped (N, N) or (B, N, N).")

        mask = (shared > 0).to(dtype=torch.bool)
        mask.fill_diagonal_(True)
        return mask.nonzero(as_tuple=True)

    def _match_edge_dim(self, edge_feat: torch.Tensor) -> torch.Tensor:
        """Truncate or zero-pad raw edge features to ``edge_in_dim`` channels."""
        # edge_feat: (B, E, F_edge_raw)
        raw_dim = edge_feat.size(-1)
        if raw_dim == self.edge_in_dim:
            return edge_feat
        if raw_dim > self.edge_in_dim:
            return edge_feat[..., : self.edge_in_dim]
        padding = torch.zeros(
            edge_feat.size(0),
            edge_feat.size(1),
            self.edge_in_dim - raw_dim,
            device=edge_feat.device,
            dtype=edge_feat.dtype,
        )
        return torch.cat([edge_feat, padding], dim=-1)

    def _prepare_edge_inputs(
        self,
        batch: Dict[str, torch.Tensor],
        senders: torch.Tensor,
        receivers: torch.Tensor,
        batch_size: int,
        device: torch.device,
        dtype: torch.dtype,
    ) -> torch.Tensor:
        """Build per-edge inputs: explicit ``edge_attr`` if given, else
        geometric features [dx, dy, distance] from ``coords`` (zeros if
        neither is present)."""
        edge_attr = batch.get("edge_attr")
        if edge_attr is not None:
            edge_attr = edge_attr.to(device=device, dtype=dtype)
            if edge_attr.dim() == 2:
                edge_attr = edge_attr.unsqueeze(0).expand(batch_size, -1, -1)
            elif edge_attr.dim() == 3 and edge_attr.size(0) == 1 and batch_size > 1:
                edge_attr = edge_attr.expand(batch_size, -1, -1)
            if edge_attr.dim() != 3:
                raise ValueError("edge_attr must be shaped (E, F_edge) or (B, E, F_edge).")
            if edge_attr.size(1) != senders.numel():
                raise ValueError(
                    f"edge_attr edge count mismatch: got {edge_attr.size(1)}, expected {senders.numel()}."
                )
            return self._match_edge_dim(edge_attr)

        coords = batch.get("coords")
        if coords is None:
            fallback = torch.zeros(batch_size, senders.numel(), 3, device=device, dtype=dtype)
            return self._match_edge_dim(fallback)

        coords = coords.to(device=device, dtype=dtype)
        if coords.dim() == 2:
            coords = coords.unsqueeze(0).expand(batch_size, -1, -1)
        elif coords.dim() == 3 and coords.size(0) == 1 and batch_size > 1:
            coords = coords.expand(batch_size, -1, -1)
        if coords.dim() != 3:
            raise ValueError("coords must be shaped (N, 2) or (B, N, 2).")

        offsets = coords[:, senders, :] - coords[:, receivers, :]
        distances = torch.norm(offsets, dim=-1, keepdim=True)
        return self._match_edge_dim(torch.cat([offsets, distances], dim=-1))

    def _one_step(
        self,
        node_x: torch.Tensor,
        batch: Dict[str, torch.Tensor],
    ) -> torch.Tensor:
        """Encode, message-pass, and apply one residual state update."""
        # node_x: (B, N, F)
        if node_x.ndim != 3:
            raise ValueError(f"Expected node_x with shape (B,N,F), got {tuple(node_x.shape)}")
        if node_x.size(-1) < self.state_dim:
            raise ValueError(
                f"Input feature dim {node_x.size(-1)} is smaller than state_dim {self.state_dim}."
            )

        adj = batch.get("adj")
        if adj is None:
            raise ValueError("HydroGraphNet requires `adj` in the batch.")
        adj = adj.to(device=node_x.device)

        senders, receivers = self._edge_index(adj, batch_size=node_x.size(0))

        # ---- encoder ----
        node = self.node_encoder(node_x)
        edge = self.edge_encoder(
            self._prepare_edge_inputs(
                batch=batch,
                senders=senders,
                receivers=receivers,
                batch_size=node.size(0),
                device=node.device,
                dtype=node.dtype,
            )
        )

        # ---- processor ----
        for block in self.processor:
            node, edge = block(node, edge, senders, receivers)

        # ---- decoder: residual state update ----
        delta_state = self.decoder(node)  # (B, N, state_dim)
        next_state = node_x[..., : self.state_dim] + delta_state
        if self.enforce_nonnegative:
            # e.g. water depth/volume cannot go below zero
            next_state = next_state.clamp_min(0.0)

        # Return requested targets from the evolved state.
        return next_state[..., : self.out_dim]

    def rollout(self, batch: Dict[str, torch.Tensor], predict_steps: int) -> torch.Tensor:
        """Autoregressive multi-step prediction; returns (B, S, N, out_dim)."""
        batch_roll = dict(batch)
        batch_roll["predict_steps"] = int(predict_steps)
        out = self.forward(batch_roll)
        if out.ndim != 4:
            raise RuntimeError("rollout expected stacked output with shape (B, S, N, out_dim).")
        return out

    def forward(self, batch: Dict[str, torch.Tensor]) -> torch.Tensor:
        """Predict from batch["x"] shaped (B, T, N, F).

        Returns (B, N, out_dim) for one step, or (B, S, N, out_dim) when
        ``predict_steps`` > 1 (predictions are fed back into the history;
        non-predicted state channels of the last frame are carried forward).
        """
        x = batch["x"]
        if x.ndim != 4:
            raise ValueError(
                f"HydroGraphNet expects x shaped (B, T, N, F), got {tuple(x.shape)}"
            )

        predict_steps = max(1, int(batch.get("predict_steps", self.rollout_steps)))

        history = x
        preds = []
        for _ in range(predict_steps):
            step_pred = self._one_step(node_x=history[:, -1], batch=batch)  # (B, N, out_dim)
            preds.append(step_pred)

            if predict_steps > 1:
                # Feed the prediction back: overwrite only the predicted
                # channels of the newest frame, then slide the window.
                next_frame = history[:, -1].clone()
                next_frame[..., : self.out_dim] = step_pred
                history = torch.cat([history[:, 1:], next_frame.unsqueeze(1)], dim=1)

        if predict_steps == 1:
            return preds[0]
        return torch.stack(preds, dim=1)
class HydroGraphNetLoss(nn.Module):
    """
    Supervised regression loss with optional continuity regularization.

    The continuity term (enabled by a positive weight plus ``prev_state`` and
    ``cell_area``) penalizes mismatch between per-cell depth change times
    area and the predicted volume change.
    """

    def __init__(self, supervised_weight: float = 1.0, continuity_weight: float = 0.0):
        super().__init__()
        self.supervised_weight = float(supervised_weight)
        self.continuity_weight = float(continuity_weight)

    def forward(
        self,
        preds: torch.Tensor,
        targets: torch.Tensor,
        prev_state: Optional[torch.Tensor] = None,
        cell_area: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Dict[str, float]]:
        """Return (total loss, scalar metrics dict with "mse"/"total" and
        optionally "continuity")."""
        mse = F.mse_loss(preds, targets)
        total = self.supervised_weight * mse
        metrics: Dict[str, float] = {"mse": float(mse.detach().cpu())}

        continuity_active = (
            self.continuity_weight > 0
            and prev_state is not None
            and cell_area is not None
            and preds.size(-1) >= 2
            and prev_state.size(-1) >= 2
        )
        if continuity_active:
            # Approximate local continuity: depth-change * area ~= volume-change
            depth_delta = preds[..., 0] - prev_state[..., 0]
            volume_delta = preds[..., 1] - prev_state[..., 1]
            area = cell_area.to(device=preds.device, dtype=preds.dtype)
            if area.dim() == 1:
                area = area.unsqueeze(0)  # broadcast over the batch axis
            continuity = F.mse_loss(depth_delta * area, volume_delta)
            total = total + self.continuity_weight * continuity
            metrics["continuity"] = float(continuity.detach().cpu())

        metrics["total"] = float(total.detach().cpu())
        return total, metrics


def hydrographnet_builder(
    task: str,
    node_in_dim: int,
    edge_in_dim: int,
    out_dim: int,
    **kwargs,
) -> "HydroGraphNet":
    """Catalog builder for HydroGraphNet; regression only."""
    if task != "regression":
        raise ValueError("HydroGraphNet only supports regression")

    return HydroGraphNet(
        node_in_dim=node_in_dim,
        edge_in_dim=edge_in_dim,
        out_dim=out_dim,
        hidden_dim=kwargs.get("hidden_dim", 64),
        harmonics=kwargs.get("harmonics", 5),
        num_gn_blocks=kwargs.get("num_gn_blocks", 5),
        state_dim=kwargs.get("state_dim"),
        rollout_steps=kwargs.get("rollout_steps", 1),
        enforce_nonnegative=kwargs.get("enforce_nonnegative", False),
        dropout=kwargs.get("dropout", 0.0),
    )
# --- pyhazards/models/internvl3_wildfire_prompted.py ---
from .qwen25_vl_wildfire_prompted import Qwen25VLWildfirePrompted


class InternVL3WildfirePrompted(Qwen25VLWildfirePrompted):
    """Benchmark-facing wildfire VLM baseline inspired by InternVL3.

    Architecture is inherited unchanged from the Qwen2.5-VL-style prompted
    baseline; only the defaults (and catalog identity) differ.
    """


def internvl3_wildfire_prompted_builder(
    task: str,
    in_channels: int = 6,
    out_dim: int = 1,
    hidden_dim: int = 96,
    prompt_dim: int = 32,
    num_prompt_tokens: int = 5,
    num_heads: int = 6,
    dropout: float = 0.1,
    **kwargs,
) -> nn.Module:
    """Catalog builder; segmentation only, extra kwargs ignored."""
    _ = kwargs
    if task.lower() != "segmentation":
        raise ValueError(
            f"internvl3_wildfire_prompted is segmentation-only in PyHazards, got task={task!r}."
        )
    return InternVL3WildfirePrompted(
        in_channels=in_channels,
        out_dim=out_dim,
        hidden_dim=hidden_dim,
        prompt_dim=prompt_dim,
        num_prompt_tokens=num_prompt_tokens,
        num_heads=num_heads,
        dropout=dropout,
    )
# --- pyhazards/models/lightgbm.py ---
from ._wildfire_benchmark_utils import EstimatorPort, filter_init_kwargs, require_task


class LightGBMModel(EstimatorPort):
    """A boosted-tree wildfire occurrence baseline using LightGBM binary classification."""

    def __init__(
        self,
        num_leaves: int = 63,
        learning_rate: float = 0.05,
        feature_fraction: float = 0.8,
        bagging_fraction: float = 0.8,
        num_boost_round: int = 800,
    ):
        super().__init__()
        self.params = {
            "objective": "binary",
            "metric": "binary_logloss",
            "num_leaves": int(num_leaves),
            "learning_rate": float(learning_rate),
            "feature_fraction": float(feature_fraction),
            "bagging_fraction": float(bagging_fraction),
            "verbose": -1,
        }
        self.num_boost_round = int(num_boost_round)
        self.booster = None  # set by _fit_numpy

    def _fit_numpy(
        self,
        x_train: np.ndarray,
        y_train: np.ndarray,
        x_val: Optional[np.ndarray],
        y_val: Optional[np.ndarray],
    ) -> None:
        """Train a booster; the validation split is tracked when provided."""
        # Imported lazily so the models package loads without lightgbm installed.
        import lightgbm as lgb

        train_set = lgb.Dataset(x_train, label=y_train)
        eval_sets = [train_set]
        eval_names = ["train"]
        if x_val is not None and y_val is not None:
            eval_sets.append(lgb.Dataset(x_val, label=y_val, reference=train_set))
            eval_names.append("val")
        self.booster = lgb.train(
            params=self.params,
            train_set=train_set,
            num_boost_round=self.num_boost_round,
            valid_sets=eval_sets,
            valid_names=eval_names,
            # period=0 silences per-iteration evaluation logging.
            callbacks=[lgb.log_evaluation(period=0)],
        )

    def _predict_positive_proba(self, x: np.ndarray) -> np.ndarray:
        """Positive-class probabilities; requires a fitted booster."""
        if self.booster is None:
            raise RuntimeError("LightGBM booster is not fitted.")
        return np.asarray(self.booster.predict(x), dtype=np.float32)


def lightgbm_builder(task: str, **kwargs: Any) -> nn.Module:
    """Catalog builder; classification only, unknown kwargs filtered out."""
    require_task(task, {"classification"}, "lightgbm")
    return LightGBMModel(**filter_init_kwargs(LightGBMModel, kwargs))


# --- pyhazards/models/llama4_wildfire_prompted.py ---
from .qwen25_vl_wildfire_prompted import Qwen25VLWildfirePrompted


class Llama4WildfirePrompted(Qwen25VLWildfirePrompted):
    """Benchmark-facing wildfire multimodal baseline inspired by Meta Llama 4.

    Architecture inherited unchanged from the Qwen2.5-VL-style baseline; only
    the builder defaults differ.
    """


def llama4_wildfire_prompted_builder(
    task: str,
    in_channels: int = 6,
    out_dim: int = 1,
    hidden_dim: int = 80,
    prompt_dim: int = 32,
    num_prompt_tokens: int = 4,
    num_heads: int = 8,
    dropout: float = 0.1,
    **kwargs,
) -> nn.Module:
    """Catalog builder; segmentation only, extra kwargs ignored."""
    _ = kwargs
    if task.lower() != "segmentation":
        raise ValueError(
            f"llama4_wildfire_prompted is segmentation-only in PyHazards, got task={task!r}."
        )
    return Llama4WildfirePrompted(
        in_channels=in_channels,
        out_dim=out_dim,
        hidden_dim=hidden_dim,
        prompt_dim=prompt_dim,
        num_prompt_tokens=num_prompt_tokens,
        num_heads=num_heads,
        dropout=dropout,
    )
# --- pyhazards/models/logistic_regression.py ---
from ._wildfire_benchmark_utils import EstimatorPort, filter_init_kwargs, require_task


class LogisticRegressionModel(EstimatorPort):
    """A classical tabular binary-classification baseline for wildfire occurrence probability."""

    def __init__(self, solver: str = "lbfgs", max_iter: int = 500, class_weight: Any = "balanced"):
        super().__init__()
        # Imported lazily so the models package loads without scikit-learn installed.
        from sklearn.linear_model import LogisticRegression

        self.estimator = LogisticRegression(
            solver=solver,
            max_iter=int(max_iter),
            class_weight=class_weight,
        )

    def _fit_numpy(
        self,
        x_train: np.ndarray,
        y_train: np.ndarray,
        x_val: Optional[np.ndarray],
        y_val: Optional[np.ndarray],
    ) -> None:
        """Fit on the training split; the validation split is intentionally unused."""
        _ = x_val, y_val
        self.estimator.fit(x_train, y_train)

    def _predict_positive_proba(self, x: np.ndarray) -> np.ndarray:
        """Positive-class column of predict_proba."""
        return self.estimator.predict_proba(x)[:, 1]


def logistic_regression_builder(task: str, **kwargs: Any) -> nn.Module:
    """Catalog builder; classification only, unknown kwargs filtered out."""
    require_task(task, {"classification"}, "logistic_regression")
    return LogisticRegressionModel(**filter_init_kwargs(LogisticRegressionModel, kwargs))
# --- pyhazards/models/mau.py ---
# Track-O wildfire baseline: a tiny MAU-style recurrent video model, plus the
# training/evaluation scaffolding for the synthetic smoke demo.
from __future__ import annotations

import argparse
import csv
import json
from copy import deepcopy
from dataclasses import asdict, dataclass
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Tuple

import matplotlib
matplotlib.use("Agg")  # headless backend: figures are written to disk, never shown
import matplotlib.pyplot as plt
import numpy as np
import torch
from sklearn.metrics import average_precision_score, brier_score_loss, log_loss, roc_auc_score
from torch import nn
from torch.utils.data import DataLoader, TensorDataset


@dataclass
class MAUTrackOConfig:
    """Hyper-parameters for the TinyMAU Track-O training loop."""

    seq_len: int = 6  # number of input frames per sample
    in_channels: int = 1
    hidden_channels: int = 12
    lr: float = 1e-3
    weight_decay: float = 1e-4
    batch_size: int = 8
    max_epochs: int = 120
    early_stopping_rounds: int = 16  # patience (epochs) on validation loss
    min_delta: float = 1e-4  # minimum val-loss improvement that resets patience
    seed: int = 42
    pos_weight_clip_max: float = 50.0  # cap for the BCE positive-class weight
    device: str = "cpu"


def binary_ece(y_true: np.ndarray, y_prob: np.ndarray, n_bins: int = 15) -> float:
    """Expected calibration error over equal-width probability bins.

    Bins are half-open except the last, which is closed on the right so a
    probability of exactly 1.0 is counted. Empty bins contribute nothing.
    """
    bins = np.linspace(0.0, 1.0, n_bins + 1)
    ece = 0.0
    n = float(len(y_true))
    for i in range(n_bins):
        lo, hi = bins[i], bins[i + 1]
        if i == n_bins - 1:
            mask = (y_prob >= lo) & (y_prob <= hi)
        else:
            mask = (y_prob >= lo) & (y_prob < hi)
        if not np.any(mask):
            continue
        acc = float(np.mean(y_true[mask]))
        conf = float(np.mean(y_prob[mask]))
        # Weighted |accuracy - confidence| gap for this bin.
        ece += (float(np.sum(mask)) / n) * abs(acc - conf)
    return float(ece)


def normalized_consistency_score(mean_day_to_day_change: float) -> float:
    """Map a mean day-to-day change (lower = steadier) to a [0, 1] score."""
    return float(np.clip(1.0 - float(mean_day_to_day_change), 0.0, 1.0))


class MAUCell(nn.Module):
    """Single recurrent cell: gated blend of previous hidden state and a
    candidate computed from the fused input/hidden/memory features; the
    memory is a running average of the hidden state."""

    def __init__(self, hidden_channels: int):
        super().__init__()
        in_channels = hidden_channels * 3  # concat of x_t, h_prev, m_prev
        self.conv_gate = nn.Conv2d(in_channels, hidden_channels, kernel_size=3, padding=1)
        self.conv_cand = nn.Conv2d(in_channels, hidden_channels, kernel_size=3, padding=1)

    def forward(self, x_t: torch.Tensor, h_prev: torch.Tensor, m_prev: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return the updated (hidden, memory) pair; all tensors are (B, C, H, W)."""
        fused = torch.cat([x_t, h_prev, m_prev], dim=1)
        gate = torch.sigmoid(self.conv_gate(fused))
        candidate = torch.tanh(self.conv_cand(fused))

        # Convex gate between carrying the old state and the new candidate.
        h = gate * h_prev + (1.0 - gate) * candidate
        # Memory: equal-weight running average of hidden states.
        m = 0.5 * m_prev + 0.5 * h
        return h, m
class TinyMAU(nn.Module):
    """Minimal MAU-style sequence model: conv encoder per frame, one MAUCell
    unrolled over time, conv decoder from the final hidden state to a
    single-channel logit map."""

    def __init__(self, in_channels: int = 1, hidden_channels: int = 12):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.encoder = nn.Sequential(
            nn.Conv2d(in_channels, hidden_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden_channels, hidden_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )
        self.cell = MAUCell(hidden_channels=hidden_channels)
        self.decoder = nn.Sequential(
            nn.Conv2d(hidden_channels, hidden_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden_channels, 1, kernel_size=1),
        )

    def forward(self, x_seq: torch.Tensor) -> torch.Tensor:
        """Map (B, T, C, H, W) frame sequences to (B, 1, H, W) logits."""
        batch, _, _, height, width = x_seq.shape
        device = x_seq.device

        hidden = torch.zeros((batch, self.hidden_channels, height, width), device=device)
        memory = torch.zeros((batch, self.hidden_channels, height, width), device=device)

        for step in range(x_seq.shape[1]):
            hidden, memory = self.cell(self.encoder(x_seq[:, step]), hidden, memory)

        return self.decoder(hidden)


def make_synthetic_fire_sequences(
    n_samples: int,
    seq_len: int,
    image_size: int,
    seed: int,
) -> Tuple[np.ndarray, np.ndarray]:
    """Generate synthetic drifting-plume sequences plus next-frame fire masks.

    Returns (x, y) with x: (N, T, 1, H, W) globally normalized observations and
    y: (N, 1, H, W) binary masks thresholding the clean field at its 90th
    percentile one step beyond the input window. Deterministic for a seed
    (the RNG call order is part of the contract).
    """
    rng = np.random.default_rng(seed)
    yy, xx = np.meshgrid(np.arange(image_size), np.arange(image_size), indexing="ij")

    x = np.zeros((n_samples, seq_len, 1, image_size, image_size), dtype=np.float32)
    y = np.zeros((n_samples, 1, image_size, image_size), dtype=np.float32)

    for sample in range(n_samples):
        n_sources = int(rng.integers(1, 4))

        # Each source is a drifting Gaussian plume; parameter draw order is
        # fixed so results stay reproducible per seed.
        sources = []
        for _ in range(n_sources):
            sources.append(
                {
                    "cx0": float(rng.uniform(0, image_size - 1)),
                    "cy0": float(rng.uniform(0, image_size - 1)),
                    "vx": float(rng.uniform(-1.2, 1.2)),
                    "vy": float(rng.uniform(-1.2, 1.2)),
                    "sigma": float(rng.uniform(1.8, 4.2)),
                    "amp": float(rng.uniform(0.8, 2.2)),
                }
            )

        # Static per-sample gradients standing in for terrain and wind.
        terrain = (yy / max(1, image_size - 1)) * rng.uniform(-0.15, 0.15)
        wind = (xx / max(1, image_size - 1)) * rng.uniform(-0.25, 0.25)

        last_field = None
        for t in range(seq_len + 1):
            field = rng.normal(0.0, 0.12, size=(image_size, image_size))
            for src in sources:
                cx = float(np.clip(src["cx0"] + src["vx"] * t, 0.0, image_size - 1))
                cy = float(np.clip(src["cy0"] + src["vy"] * t, 0.0, image_size - 1))
                spread = src["sigma"]
                dist2 = (xx - cx) ** 2 + (yy - cy) ** 2
                field += src["amp"] * np.exp(-dist2 / (2.0 * spread * spread))

            signal = field + terrain + wind + rng.normal(0.0, 0.08, size=(image_size, image_size))
            if t < seq_len:
                x[sample, t, 0] = signal.astype(np.float32)
            else:
                # Label frame uses the plume field alone (no terrain/wind/noise).
                last_field = field

        if last_field is None:
            raise RuntimeError("Synthetic generation failed to produce final frame")

        threshold = float(np.quantile(last_field, 0.90))
        y[sample, 0] = (last_field > threshold).astype(np.float32)

    # Global z-normalization of the inputs.
    x_mean = float(np.mean(x))
    x_std = float(np.std(x) + 1e-6)
    x = (x - x_mean) / x_std
    return x, y
def split_train_val_test(
    x: np.ndarray,
    y: np.ndarray,
    seed: int,
    train_ratio: float = 0.7,
    val_ratio: float = 0.15,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Shuffle along axis 0 and split into train/val/test.

    Fix: the original applied ``min(n_train, n - 2)`` AFTER the ``max(1, ...)``
    floor, so n <= 2 produced an EMPTY training split. Clamping before
    flooring guarantees at least one training sample for any n >= 1; behavior
    is unchanged for n >= 3. The validation split may be empty for tiny n;
    the test split receives the remainder.
    """
    n = x.shape[0]
    order = np.random.default_rng(seed).permutation(n)

    n_train = int(n * train_ratio)
    n_val = int(n * val_ratio)
    n_train = max(1, min(n_train, n - 2))
    n_val = max(0, min(max(1, n_val), n - n_train - 1))

    train_idx = order[:n_train]
    val_idx = order[n_train : n_train + n_val]
    test_idx = order[n_train + n_val :]

    return (
        x[train_idx],
        y[train_idx],
        x[val_idx],
        y[val_idx],
        x[test_idx],
        y[test_idx],
    )


def _choose_device(device_text: str) -> torch.device:
    """Resolve "cuda" to a CUDA device only when one is available; else CPU."""
    if device_text == "cuda" and torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")


def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader:
    """Wrap float32 copies of (x, y) arrays in a DataLoader."""
    ds = TensorDataset(
        torch.from_numpy(x.astype(np.float32)),
        torch.from_numpy(y.astype(np.float32)),
    )
    return DataLoader(ds, batch_size=batch_size, shuffle=shuffle)
def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray:
    """Run the model over a loader and return flattened sigmoid probabilities."""
    collected: List[np.ndarray] = []
    model.eval()
    with torch.no_grad():
        for xb, _ in loader:
            logits = model(xb.to(device))
            collected.append(torch.sigmoid(logits).detach().cpu().numpy())
    if not collected:
        return np.zeros((0,), dtype=np.float32)
    return np.concatenate(collected, axis=0).reshape(-1)


def train_mau_track_o(
    x_train: np.ndarray,
    y_train: np.ndarray,
    x_val: np.ndarray,
    y_val: np.ndarray,
    cfg: "MAUTrackOConfig",
):
    """Train TinyMAU with class-weighted BCE and val-loss early stopping.

    Returns (model, history, val metrics dict, best epoch, positive-class
    weight). The best-epoch weights (lowest validation loss) are restored
    before metrics are computed.
    """
    if x_train.ndim != 5 or x_val.ndim != 5:
        raise ValueError("x_train and x_val must be 5D arrays [N,T,C,H,W]")
    if y_train.ndim != 4 or y_val.ndim != 4:
        raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]")

    torch.manual_seed(cfg.seed)
    np.random.seed(cfg.seed)

    device = _choose_device(cfg.device)

    model = TinyMAU(in_channels=cfg.in_channels, hidden_channels=cfg.hidden_channels).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay)

    # Pixel-level class imbalance: weight positives by the neg/pos ratio,
    # clipped so extreme imbalance does not destabilize training.
    total_px = float(y_train.size)
    pos_px = float(np.sum(y_train))
    neg_px = max(1.0, total_px - pos_px)
    raw_pos_weight = neg_px / max(pos_px, 1.0)
    pos_weight = float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max))

    criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device))

    train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True)
    val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False)

    history: List[Dict[str, float]] = []
    best_epoch = 1
    best_val_loss = float("inf")
    best_state: Dict[str, torch.Tensor] | None = None
    wait = 0

    for epoch in range(1, cfg.max_epochs + 1):
        # ---- one training epoch ----
        model.train()
        epoch_train: List[float] = []
        for xb, yb in train_loader:
            xb = xb.to(device)
            yb = yb.to(device)

            optimizer.zero_grad(set_to_none=True)
            loss = criterion(model(xb), yb)
            loss.backward()
            optimizer.step()
            epoch_train.append(float(loss.item()))

        # ---- validation pass ----
        model.eval()
        epoch_val: List[float] = []
        with torch.no_grad():
            for xb, yb in val_loader:
                xb = xb.to(device)
                yb = yb.to(device)
                epoch_val.append(float(criterion(model(xb), yb).item()))

        tr_loss = float(np.mean(epoch_train)) if epoch_train else float("nan")
        va_loss = float(np.mean(epoch_val)) if epoch_val else float("nan")

        history.append(
            {
                "epoch": float(epoch),
                "train_loss": tr_loss,
                "val_loss": va_loss,
                "learning_rate": float(optimizer.param_groups[0]["lr"]),
            }
        )

        # Early stopping: improvement must beat min_delta to reset patience.
        if va_loss < best_val_loss - cfg.min_delta:
            best_val_loss = va_loss
            best_epoch = epoch
            best_state = deepcopy(model.state_dict())
            wait = 0
        else:
            wait += 1

        if wait >= cfg.early_stopping_rounds:
            break

    if best_state is not None:
        model.load_state_dict(best_state)

    # Probabilities are clipped away from {0, 1} so log-loss stays finite.
    val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7)
    val_true = y_val.reshape(-1).astype(np.float32)

    # "Day-to-day change" proxy: mean gap between adjacent sorted probabilities.
    mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0
    metrics = {
        "auprc": float(average_precision_score(val_true, val_prob)),
        "auroc": float(roc_auc_score(val_true, val_prob)),
        "brier": float(brier_score_loss(val_true, val_prob)),
        "nll": float(log_loss(val_true, val_prob)),
        "ece": float(binary_ece(val_true, val_prob, n_bins=15)),
        "mean_day_to_day_change": mean_change,
        "normalized_consistency_score": normalized_consistency_score(mean_change),
    }

    return model, history, metrics, best_epoch, pos_weight


def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None:
    """Write history.csv and a train/val loss curve PNG into output_dir."""
    output_dir.mkdir(parents=True, exist_ok=True)

    history_csv = output_dir / "history.csv"
    with history_csv.open("w", encoding="utf-8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"])
        writer.writeheader()
        writer.writerows(history)

    epochs = [int(row["epoch"]) for row in history]
    train_curve = [float(row["train_loss"]) for row in history]
    val_curve = [float(row["val_loss"]) for row in history]

    plt.figure(figsize=(8, 5))
    plt.plot(epochs, train_curve, label="train_bce", marker="o", linewidth=1.4)
    plt.plot(epochs, val_curve, label="val_bce", marker="s", linewidth=1.2)
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.title("MAU Track-O: train loss vs epoch")
    plt.grid(alpha=0.3)
    plt.legend()
    plt.tight_layout()
    plt.savefig(output_dir / "loss_curve.png", dpi=150)
    plt.close()
def build_experiment_setting(
    cfg: "MAUTrackOConfig",
    best_epoch: int,
    pos_weight: float,
    metrics: Dict[str, float],
) -> Dict[str, Any]:
    """Assemble the JSON-serializable experiment card for a Track-O run.

    Reads only scalar fields from *cfg* (plus ``asdict(cfg)``); *metrics* is
    embedded verbatim under ``val_metrics``.
    """
    return {
        "benchmark": {
            "task": "Track-O",
            "model_name": "mau",
            "run_time": datetime.now().isoformat(),
        },
        "evaluation_protocol": {
            "discrimination": {"primary": "auprc", "secondary": "auroc"},
            "reliability": ["brier", "nll", "ece"],
            "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"],
        },
        "training": {
            "train_unit": "epoch",
            "max_epochs": cfg.max_epochs,
            "early_stopping_rounds": cfg.early_stopping_rounds,
            "best_epoch": best_epoch,
            "seed": cfg.seed,
            "batch_size": cfg.batch_size,
            "seq_len": cfg.seq_len,
        },
        "optimizer": {
            "name": "AdamW",
            "lr": cfg.lr,
            "weight_decay": cfg.weight_decay,
        },
        "learning_weight": {
            "type": "pixel_pos_weight",
            "value": pos_weight,
            "clip_max": cfg.pos_weight_clip_max,
        },
        "params": asdict(cfg),
        "val_metrics": metrics,
        "note": "This module supports both real data and synthetic smoke demonstration.",
    }


def run_synthetic_demo(
    output_dir: Path,
    seed: int = 42,
    n_samples: int = 160,
    seq_len: int = 6,
    image_size: int = 24,
    max_epochs: int = 60,
    early_stopping_rounds: int = 12,
) -> None:
    """Generate synthetic data, train TinyMAU, and save artifacts.

    Writes history.csv, loss_curve.png, mau_model.pt, experiment_setting.json,
    and metrics.json (val + test) into *output_dir*.
    """
    x, y = make_synthetic_fire_sequences(
        n_samples=n_samples,
        seq_len=seq_len,
        image_size=image_size,
        seed=seed,
    )
    x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed)

    cfg = MAUTrackOConfig(
        seed=seed,
        seq_len=seq_len,
        max_epochs=max_epochs,
        early_stopping_rounds=early_stopping_rounds,
        device="cpu",
    )

    model, history, val_metrics, best_epoch, pos_weight = train_mau_track_o(x_train, y_train, x_val, y_val, cfg)

    # Held-out evaluation mirrors the validation metric suite.
    test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False)
    test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 1e-7)
    test_true = y_test.reshape(-1).astype(np.float32)

    test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0
    test_metrics = {
        "auprc": float(average_precision_score(test_true, test_prob)),
        "auroc": float(roc_auc_score(test_true, test_prob)),
        "brier": float(brier_score_loss(test_true, test_prob)),
        "nll": float(log_loss(test_true, test_prob)),
        "ece": float(binary_ece(test_true, test_prob, n_bins=15)),
        "mean_day_to_day_change": test_mean_change,
        "normalized_consistency_score": normalized_consistency_score(test_mean_change),
    }

    output_dir.mkdir(parents=True, exist_ok=True)
    save_history_and_plot(history, output_dir)

    torch.save(
        {
            "state_dict": model.state_dict(),
            "config": asdict(cfg),
            "best_epoch": best_epoch,
        },
        output_dir / "mau_model.pt",
    )

    setting = build_experiment_setting(cfg, best_epoch=best_epoch, pos_weight=pos_weight, metrics=val_metrics)
    setting["data"] = {
        "n_samples": n_samples,
        "image_size": image_size,
        "seq_len": seq_len,
        "split": {"train": int(x_train.shape[0]), "val": int(x_val.shape[0]), "test": int(x_test.shape[0])},
    }
    setting["test_metrics"] = test_metrics

    (output_dir / "experiment_setting.json").write_text(json.dumps(setting, indent=2), encoding="utf-8")
    (output_dir / "metrics.json").write_text(
        json.dumps({"val": val_metrics, "test": test_metrics, "best_epoch": best_epoch}, indent=2),
        encoding="utf-8",
    )


def parse_args() -> argparse.Namespace:
    """CLI arguments for the synthetic smoke demo."""
    parser = argparse.ArgumentParser(description="Run MAU Track-O synthetic smoke demo")
    parser.add_argument("--output_dir", default=None)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--n_samples", type=int, default=160)
    parser.add_argument("--seq_len", type=int, default=6)
    parser.add_argument("--image_size", type=int, default=24)
    parser.add_argument("--max_epochs", type=int, default=60)
    parser.add_argument("--early_stopping_rounds", type=int, default=12)
    return parser.parse_args()


def main() -> None:
    """Entry point: run the demo into a timestamped (or supplied) directory."""
    args = parse_args()
    base = Path(__file__).resolve().parents[1] / "runs_scaffold"
    out = Path(args.output_dir) if args.output_dir else base / f"mau_synthetic_{datetime.now().strftime('%Y%m%d_%H%M%S')}"

    run_synthetic_demo(
        output_dir=out,
        seed=args.seed,
        n_samples=args.n_samples,
        seq_len=args.seq_len,
        image_size=args.image_size,
        max_epochs=args.max_epochs,
        early_stopping_rounds=args.early_stopping_rounds,
    )
    print(f"[done] mau synthetic demo saved to: {out}")


if __name__ == "__main__":
    main()


def mau_builder(task: str, in_channels: int = 1, out_dim: int = 1, **kwargs: Any) -> nn.Module:
    """Catalog builder wrapping TinyMAU behind the shared segmentation port.

    Fix: the benchmark-utils import used to sit at module level AFTER the
    __main__ guard, which broke running/importing this module standalone;
    it is now deferred to call time so only catalog use requires the package.
    """
    from ._wildfire_benchmark_utils import SegmentationPort, filter_init_kwargs, require_task

    require_task(task, {"segmentation"}, "mau")
    init_kwargs = filter_init_kwargs(TinyMAU, {"in_channels": int(in_channels), **kwargs})
    model = TinyMAU(**init_kwargs)
    return SegmentationPort(model=model, out_channels=int(out_dim))
class MODISActiveFireC61(nn.Module):
    """Algorithm-inspired MODIS Collection 6.1 active-fire detector with learnable calibration.

    The first five input channels are read as mid-IR brightness, long-IR
    brightness, an FRP proxy, a cloud-free indicator and a dryness indicator;
    hand-crafted contextual fire evidence derived from them is concatenated to
    the raw bands and calibrated by a small convolutional head.
    """

    def __init__(
        self,
        in_channels: int = 5,
        hidden_dim: int = 24,
        out_dim: int = 1,
        context_kernel: int = 9,
        dropout: float = 0.1,
    ):
        super().__init__()
        # Validate hyperparameters up front with explicit messages.
        if in_channels < 5:
            raise ValueError(
                "MODISActiveFireC61 expects at least 5 channels: "
                "mid_ir, long_ir, frp_proxy, cloud_free, dryness."
            )
        if hidden_dim <= 0:
            raise ValueError(f"hidden_dim must be positive, got {hidden_dim}")
        if out_dim <= 0:
            raise ValueError(f"out_dim must be positive, got {out_dim}")
        if context_kernel <= 1 or context_kernel % 2 == 0:
            raise ValueError(f"context_kernel must be an odd integer > 1, got {context_kernel}")
        if not 0.0 <= dropout < 1.0:
            raise ValueError(f"dropout must be in [0,1), got {dropout}")

        self.in_channels = int(in_channels)
        # Sliding local-background estimate used by the contextual test below.
        self.context_pool = nn.AvgPool2d(kernel_size=context_kernel, stride=1, padding=context_kernel // 2)

        n_evidence = self.in_channels + 5  # raw bands + 5 derived evidence maps
        self.evidence_encoder = nn.Sequential(
            nn.Conv2d(n_evidence, hidden_dim, kernel_size=1),
            nn.GELU(),
            nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, padding=1),
            nn.GELU(),
        )
        regularizer = nn.Dropout2d(dropout) if dropout > 0 else nn.Identity()
        self.calibration_head = nn.Sequential(
            nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, padding=1),
            nn.GELU(),
            regularizer,
            nn.Conv2d(hidden_dim, out_dim, kernel_size=1),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return per-pixel fire logits of shape (B, out_dim, H, W)."""
        if x.ndim != 4:
            raise ValueError(
                "MODISActiveFireC61 expects input shape (batch, channels, height, width), "
                f"got {tuple(x.shape)}."
            )
        if x.size(1) < 5:
            raise ValueError(f"MODISActiveFireC61 expected at least 5 channels, got {x.size(1)}.")

        x = x[:, : self.in_channels]
        mid_ir, long_ir, frp_proxy, cloud_free, dryness = (
            x[:, 0:1],
            x[:, 1:2],
            x[:, 2:3],
            x[:, 3:4],
            x[:, 4:5],
        )

        background = self.context_pool(mid_ir)
        excess = mid_ir - background                    # thermal anomaly over local background
        dual_band = mid_ir - long_ir                    # split-window difference
        signal = F.relu(excess) + 0.4 * F.relu(dual_band)
        ratio = signal / (torch.abs(background) + 1.0)  # contextual normalization
        gate = torch.sigmoid(cloud_free) * torch.sigmoid(dryness)

        # NOTE(review): the last evidence map *adds* the confidence gate to the
        # raw FRP proxy rather than gating it -- confirm this is intended.
        evidence = torch.cat([x, excess, dual_band, signal, ratio, gate + frp_proxy], dim=1)
        return self.calibration_head(self.evidence_encoder(evidence))


def modis_active_fire_c61_builder(
    task: str,
    in_channels: int = 5,
    hidden_dim: int = 24,
    out_dim: int = 1,
    context_kernel: int = 9,
    dropout: float = 0.1,
    **kwargs,
) -> nn.Module:
    """Catalog builder; extra keyword arguments are accepted and ignored."""
    _ = kwargs
    if task.lower() != "segmentation":
        raise ValueError(
            f"modis_active_fire_c61 is segmentation-only in PyHazards, got task={task!r}."
        )
    return MODISActiveFireC61(
        in_channels=in_channels,
        hidden_dim=hidden_dim,
        out_dim=out_dim,
        context_kernel=context_kernel,
        dropout=dropout,
    )
+ ) + return MODISActiveFireC61( + in_channels=in_channels, + hidden_dim=hidden_dim, + out_dim=out_dim, + context_kernel=context_kernel, + dropout=dropout, + ) + + +__all__ = ["MODISActiveFireC61", "modis_active_fire_c61_builder"] diff --git a/pyhazards/models/neuralhydrology_ealstm.py b/pyhazards/models/neuralhydrology_ealstm.py new file mode 100644 index 00000000..4dcac1fd --- /dev/null +++ b/pyhazards/models/neuralhydrology_ealstm.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +from typing import Any + +import torch +import torch.nn as nn + + +def _streamflow_inputs(batch: Any) -> torch.Tensor: + x = batch["x"] if isinstance(batch, dict) else batch + if x.ndim != 4: + raise ValueError("EA-LSTM expects inputs shaped (batch, history, nodes, features).") + return x + + +class NeuralHydrologyEALSTM(nn.Module): + """Entity-aware LSTM style streamflow baseline.""" + + def __init__( + self, + input_dim: int = 2, + hidden_dim: int = 64, + num_layers: int = 1, + out_dim: int = 1, + dropout: float = 0.1, + ): + super().__init__() + self.hidden_dim = int(hidden_dim) + self.out_dim = int(out_dim) + self.dynamic_encoder = nn.LSTM( + input_size=input_dim, + hidden_size=hidden_dim, + num_layers=num_layers, + batch_first=True, + dropout=dropout if num_layers > 1 else 0.0, + ) + self.static_gate = nn.Sequential( + nn.Linear(input_dim, hidden_dim), + nn.Sigmoid(), + ) + self.head = nn.Linear(hidden_dim, self.out_dim) + + def forward(self, batch: Any) -> torch.Tensor: + x = _streamflow_inputs(batch) + bsz, history, nodes, features = x.shape + series = x.permute(0, 2, 1, 3).reshape(bsz * nodes, history, features) + encoded, _ = self.dynamic_encoder(series) + static_features = series.mean(dim=1) + gated = encoded[:, -1] * self.static_gate(static_features) + preds = self.head(gated) + return preds.view(bsz, nodes, self.out_dim) + + +def neuralhydrology_ealstm_builder( + task: str, + input_dim: int = 2, + hidden_dim: int = 64, + num_layers: int = 1, + out_dim: int = 1, + 
dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("NeuralHydrologyEALSTM only supports regression for streamflow forecasting.") + return NeuralHydrologyEALSTM( + input_dim=input_dim, + hidden_dim=hidden_dim, + num_layers=num_layers, + out_dim=out_dim, + dropout=dropout, + ) + + +__all__ = ["NeuralHydrologyEALSTM", "neuralhydrology_ealstm_builder"] diff --git a/pyhazards/models/neuralhydrology_lstm.py b/pyhazards/models/neuralhydrology_lstm.py new file mode 100644 index 00000000..f353caf8 --- /dev/null +++ b/pyhazards/models/neuralhydrology_lstm.py @@ -0,0 +1,68 @@ +from __future__ import annotations + +from typing import Any + +import torch +import torch.nn as nn + + +def _streamflow_inputs(batch: Any) -> torch.Tensor: + x = batch["x"] if isinstance(batch, dict) else batch + if x.ndim != 4: + raise ValueError("NeuralHydrology-style models expect inputs shaped (batch, history, nodes, features).") + return x + + +class NeuralHydrologyLSTM(nn.Module): + """Adapter-style LSTM streamflow baseline.""" + + def __init__( + self, + input_dim: int = 2, + hidden_dim: int = 64, + num_layers: int = 2, + out_dim: int = 1, + dropout: float = 0.1, + ): + super().__init__() + self.out_dim = int(out_dim) + self.encoder = nn.LSTM( + input_size=input_dim, + hidden_size=hidden_dim, + num_layers=num_layers, + batch_first=True, + dropout=dropout if num_layers > 1 else 0.0, + ) + self.head = nn.Linear(hidden_dim, self.out_dim) + + def forward(self, batch: Any) -> torch.Tensor: + x = _streamflow_inputs(batch) + bsz, history, nodes, features = x.shape + series = x.permute(0, 2, 1, 3).reshape(bsz * nodes, history, features) + encoded, _ = self.encoder(series) + preds = self.head(encoded[:, -1]) + return preds.view(bsz, nodes, self.out_dim) + + +def neuralhydrology_lstm_builder( + task: str, + input_dim: int = 2, + hidden_dim: int = 64, + num_layers: int = 2, + out_dim: int = 1, + dropout: float = 0.1, + **kwargs, +) -> 
nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("NeuralHydrologyLSTM only supports regression for streamflow forecasting.") + return NeuralHydrologyLSTM( + input_dim=input_dim, + hidden_dim=hidden_dim, + num_layers=num_layers, + out_dim=out_dim, + dropout=dropout, + ) + + +__all__ = ["NeuralHydrologyLSTM", "neuralhydrology_lstm_builder"] diff --git a/pyhazards/models/pangu_tc.py b/pyhazards/models/pangu_tc.py new file mode 100644 index 00000000..9335c263 --- /dev/null +++ b/pyhazards/models/pangu_tc.py @@ -0,0 +1,63 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class PanguTC(nn.Module): + """Experimental wrapper-style Pangu-Weather storm adapter.""" + + def __init__( + self, + input_dim: int = 8, + hidden_dim: int = 96, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + ): + super().__init__() + self.horizon = int(horizon) + self.output_dim = int(output_dim) + self.temporal = nn.Sequential( + nn.Conv1d(input_dim, hidden_dim, kernel_size=5, padding=2), + nn.GELU(), + nn.Conv1d(hidden_dim, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + ) + self.head = nn.Sequential( + nn.Dropout(dropout) if dropout > 0 else nn.Identity(), + nn.Linear(hidden_dim, hidden_dim), + nn.GELU(), + nn.Linear(hidden_dim, self.horizon * self.output_dim), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("PanguTC expects inputs shaped (batch, history, features).") + encoded = self.temporal(x.transpose(1, 2)).mean(dim=-1) + preds = self.head(encoded) + return preds.view(x.size(0), self.horizon, self.output_dim) + + +def pangu_tc_builder( + task: str, + input_dim: int = 8, + hidden_dim: int = 96, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("PanguTC only supports regression for track/intensity forecasting.") + return PanguTC( + 
input_dim=input_dim, + hidden_dim=hidden_dim, + horizon=horizon, + output_dim=output_dim, + dropout=dropout, + ) + + +__all__ = ["PanguTC", "pangu_tc_builder"] diff --git a/pyhazards/models/phasenet.py b/pyhazards/models/phasenet.py new file mode 100644 index 00000000..eb3197bb --- /dev/null +++ b/pyhazards/models/phasenet.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class PhaseNet(nn.Module): + """Lightweight phase-picking network for synthetic waveform smoke runs.""" + + def __init__(self, in_channels: int = 3, hidden_dim: int = 32): + super().__init__() + self.encoder = nn.Sequential( + nn.Conv1d(in_channels, hidden_dim, kernel_size=9, padding=4), + nn.ReLU(), + nn.Conv1d(hidden_dim, hidden_dim, kernel_size=7, padding=3), + nn.ReLU(), + nn.Conv1d(hidden_dim, hidden_dim, kernel_size=5, padding=2), + nn.ReLU(), + ) + self.head = nn.Sequential( + nn.AdaptiveAvgPool1d(1), + nn.Flatten(), + nn.Linear(hidden_dim, hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, 2), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("PhaseNet expects inputs shaped (batch, channels, length).") + return self.head(self.encoder(x)) + + +def phasenet_builder( + task: str, + in_channels: int = 3, + hidden_dim: int = 32, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("PhaseNet only supports regression-style phase picking outputs.") + return PhaseNet(in_channels=in_channels, hidden_dim=hidden_dim) + + +__all__ = ["PhaseNet", "phasenet_builder"] diff --git a/pyhazards/models/predrnn_v2.py b/pyhazards/models/predrnn_v2.py new file mode 100644 index 00000000..1726c8b9 --- /dev/null +++ b/pyhazards/models/predrnn_v2.py @@ -0,0 +1,452 @@ +from __future__ import annotations + +import argparse +import csv +import json +import sys +from copy import deepcopy +from dataclasses import asdict, dataclass +from datetime import datetime +from pathlib import 
@dataclass
class PredRNNv2TrackOConfig:
    """Hyperparameters for the PredRNN-v2 Track-O smoke training run."""

    seq_len: int = 6
    in_channels: int = 1
    hidden_channels: int = 12
    lr: float = 1e-3
    weight_decay: float = 1e-4
    batch_size: int = 8
    max_epochs: int = 120
    early_stopping_rounds: int = 16
    min_delta: float = 1e-4
    seed: int = 42
    pos_weight_clip_max: float = 50.0
    device: str = "cpu"


class SpatioTemporalLSTMCell(nn.Module):
    """Single ST-LSTM cell with a temporal cell state and a spatiotemporal memory."""

    def __init__(self, in_channels: int, hidden_channels: int):
        super().__init__()
        self.hidden_channels = hidden_channels

        # Input contributes to 7 gate maps, hidden to 4, memory to 3.
        self.conv_x = nn.Conv2d(in_channels, hidden_channels * 7, kernel_size=3, padding=1)
        self.conv_h = nn.Conv2d(hidden_channels, hidden_channels * 4, kernel_size=3, padding=1)
        self.conv_m = nn.Conv2d(hidden_channels, hidden_channels * 3, kernel_size=3, padding=1)

        self.conv_o = nn.Conv2d(hidden_channels * 2, hidden_channels, kernel_size=1)
        self.conv_last = nn.Conv2d(hidden_channels * 2, hidden_channels, kernel_size=1)

    def forward(
        self,
        x_t: torch.Tensor,
        h_prev: torch.Tensor,
        c_prev: torch.Tensor,
        m_prev: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Advance one step; returns the new (hidden, cell, memory) triple."""
        x_i, x_f, x_g, x_i_m, x_f_m, x_g_m, x_o = torch.split(
            self.conv_x(x_t), self.hidden_channels, dim=1
        )
        h_i, h_f, h_g, h_o = torch.split(self.conv_h(h_prev), self.hidden_channels, dim=1)
        m_i, m_f, m_g = torch.split(self.conv_m(m_prev), self.hidden_channels, dim=1)

        # Temporal cell update (forget-gate bias of +1.0 eases early training).
        c_t = torch.sigmoid(x_f + h_f + 1.0) * c_prev + torch.sigmoid(x_i + h_i) * torch.tanh(x_g + h_g)
        # Spatiotemporal memory update, driven by input and previous memory only.
        m_t = torch.sigmoid(x_f_m + m_f + 1.0) * m_prev + torch.sigmoid(x_i_m + m_i) * torch.tanh(x_g_m + m_g)

        mem = torch.cat([c_t, m_t], dim=1)
        o_t = torch.sigmoid(x_o + h_o + self.conv_o(mem))
        h_t = o_t * torch.tanh(self.conv_last(mem))
        return h_t, c_t, m_t


class TinyPredRNNv2(nn.Module):
    """Minimal PredRNN-v2-style predictor: encode, roll one ST-LSTM cell, decode."""

    def __init__(self, in_channels: int = 1, hidden_channels: int = 12):
        super().__init__()
        self.hidden_channels = hidden_channels

        self.encoder = nn.Sequential(
            nn.Conv2d(in_channels, hidden_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden_channels, hidden_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )
        self.cell = SpatioTemporalLSTMCell(in_channels=hidden_channels, hidden_channels=hidden_channels)
        self.decoder = nn.Sequential(
            nn.Conv2d(hidden_channels, hidden_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden_channels, 1, kernel_size=1),
        )

    def forward(self, x_seq: torch.Tensor) -> torch.Tensor:
        """Map a (B, T, C, H, W) sequence to (B, 1, H, W) logits for the next frame."""
        batch, _, _, height, width = x_seq.shape
        state_shape = (batch, self.hidden_channels, height, width)
        device = x_seq.device
        hidden = torch.zeros(state_shape, device=device)
        cell_state = torch.zeros(state_shape, device=device)
        memory = torch.zeros(state_shape, device=device)

        for step in range(x_seq.shape[1]):
            hidden, cell_state, memory = self.cell(
                self.encoder(x_seq[:, step]), hidden, cell_state, memory
            )

        return self.decoder(hidden)
torch.device("cuda") + return torch.device("cpu") + + +def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader: + ds = TensorDataset( + torch.from_numpy(x.astype(np.float32)), + torch.from_numpy(y.astype(np.float32)), + ) + return DataLoader(ds, batch_size=batch_size, shuffle=shuffle) + + +def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray: + probs: List[np.ndarray] = [] + model.eval() + with torch.no_grad(): + for xb, _ in loader: + xb = xb.to(device) + logits = model(xb) + p = torch.sigmoid(logits).detach().cpu().numpy() + probs.append(p) + if not probs: + return np.zeros((0,), dtype=np.float32) + return np.concatenate(probs, axis=0).reshape(-1) + + +def train_predrnn_v2_track_o( + x_train: np.ndarray, + y_train: np.ndarray, + x_val: np.ndarray, + y_val: np.ndarray, + cfg: PredRNNv2TrackOConfig, +): + if x_train.ndim != 5 or x_val.ndim != 5: + raise ValueError("x_train and x_val must be 5D arrays [N,T,C,H,W]") + if y_train.ndim != 4 or y_val.ndim != 4: + raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]") + + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + + device = _choose_device(cfg.device) + + model = TinyPredRNNv2(in_channels=cfg.in_channels, hidden_channels=cfg.hidden_channels).to(device) + optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay) + + total_px = float(y_train.size) + pos_px = float(np.sum(y_train)) + neg_px = max(1.0, total_px - pos_px) + raw_pos_weight = neg_px / max(pos_px, 1.0) + pos_weight = float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max)) + + criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device)) + + train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True) + val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False) + + history: List[Dict[str, float]] = [] + best_epoch = 1 + best_val_loss = 
float("inf") + best_state: Dict[str, torch.Tensor] | None = None + wait = 0 + + for epoch in range(1, cfg.max_epochs + 1): + model.train() + train_losses: List[float] = [] + + for xb, yb in train_loader: + xb = xb.to(device) + yb = yb.to(device) + + optimizer.zero_grad(set_to_none=True) + logits = model(xb) + loss = criterion(logits, yb) + loss.backward() + optimizer.step() + train_losses.append(float(loss.item())) + + model.eval() + val_losses: List[float] = [] + with torch.no_grad(): + for xb, yb in val_loader: + xb = xb.to(device) + yb = yb.to(device) + logits = model(xb) + loss = criterion(logits, yb) + val_losses.append(float(loss.item())) + + tr_loss = float(np.mean(train_losses)) if train_losses else float("nan") + va_loss = float(np.mean(val_losses)) if val_losses else float("nan") + + history.append( + { + "epoch": float(epoch), + "train_loss": tr_loss, + "val_loss": va_loss, + "learning_rate": float(optimizer.param_groups[0]["lr"]), + } + ) + + if va_loss < best_val_loss - cfg.min_delta: + best_val_loss = va_loss + best_epoch = epoch + best_state = deepcopy(model.state_dict()) + wait = 0 + else: + wait += 1 + + if wait >= cfg.early_stopping_rounds: + break + + if best_state is not None: + model.load_state_dict(best_state) + + val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7) + val_true = y_val.reshape(-1).astype(np.float32) + + mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0 + metrics = { + "auprc": float(average_precision_score(val_true, val_prob)), + "auroc": float(roc_auc_score(val_true, val_prob)), + "brier": float(brier_score_loss(val_true, val_prob)), + "nll": float(log_loss(val_true, val_prob)), + "ece": float(binary_ece(val_true, val_prob, n_bins=15)), + "mean_day_to_day_change": mean_change, + "normalized_consistency_score": normalized_consistency_score(mean_change), + } + + return model, history, metrics, best_epoch, pos_weight + + +def 
def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None:
    """Persist the per-epoch history as CSV and render the loss-curve PNG."""
    output_dir.mkdir(parents=True, exist_ok=True)

    columns = ["epoch", "train_loss", "val_loss", "learning_rate"]
    with (output_dir / "history.csv").open("w", encoding="utf-8", newline="") as handle:
        writer = csv.DictWriter(handle, fieldnames=columns)
        writer.writeheader()
        writer.writerows(history)

    epochs = [int(row["epoch"]) for row in history]
    train_curve = [float(row["train_loss"]) for row in history]
    val_curve = [float(row["val_loss"]) for row in history]

    plt.figure(figsize=(8, 5))
    plt.plot(epochs, train_curve, label="train_bce", marker="o", linewidth=1.4)
    plt.plot(epochs, val_curve, label="val_bce", marker="s", linewidth=1.2)
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.title("PredRNN-v2 Track-O: train loss vs epoch")
    plt.grid(alpha=0.3)
    plt.legend()
    plt.tight_layout()
    plt.savefig(output_dir / "loss_curve.png", dpi=150)
    plt.close()


def build_experiment_setting(
    cfg: PredRNNv2TrackOConfig,
    best_epoch: int,
    pos_weight: float,
    metrics: Dict[str, float],
) -> Dict[str, Any]:
    """Bundle all PredRNN-v2 Track-O run metadata into one JSON-serializable dict.

    Args:
        cfg: Training configuration (dumped verbatim under ``"params"``).
        best_epoch: Epoch selected by early stopping.
        pos_weight: Positive-pixel weight applied by the BCE loss.
        metrics: Validation metrics recorded for the best checkpoint.
    """
    benchmark = {
        "task": "Track-O",
        "model_name": "predrnn_v2",
        "run_time": datetime.now().isoformat(),
    }
    protocol = {
        "discrimination": {"primary": "auprc", "secondary": "auroc"},
        "reliability": ["brier", "nll", "ece"],
        "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"],
    }
    training = {
        "train_unit": "epoch",
        "max_epochs": cfg.max_epochs,
        "early_stopping_rounds": cfg.early_stopping_rounds,
        "best_epoch": best_epoch,
        "seed": cfg.seed,
        "batch_size": cfg.batch_size,
        "seq_len": cfg.seq_len,
    }
    return {
        "benchmark": benchmark,
        "evaluation_protocol": protocol,
        "training": training,
        "optimizer": {"name": "AdamW", "lr": cfg.lr, "weight_decay": cfg.weight_decay},
        "learning_weight": {
            "type": "pixel_pos_weight",
            "value": pos_weight,
            "clip_max": cfg.pos_weight_clip_max,
        },
        "params": asdict(cfg),
        "val_metrics": metrics,
        "note": "This module supports both real data and synthetic smoke demonstration.",
    }
"split": {"train": int(x_train.shape[0]), "val": int(x_val.shape[0]), "test": int(x_test.shape[0])}, + } + setting["test_metrics"] = test_metrics + + (output_dir / "experiment_setting.json").write_text(json.dumps(setting, indent=2), encoding="utf-8") + (output_dir / "metrics.json").write_text( + json.dumps({"val": val_metrics, "test": test_metrics, "best_epoch": best_epoch}, indent=2), + encoding="utf-8", + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Run PredRNN-v2 Track-O synthetic smoke demo") + parser.add_argument("--output_dir", default=None) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--n_samples", type=int, default=160) + parser.add_argument("--seq_len", type=int, default=6) + parser.add_argument("--image_size", type=int, default=24) + parser.add_argument("--max_epochs", type=int, default=60) + parser.add_argument("--early_stopping_rounds", type=int, default=12) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + base = Path(__file__).resolve().parents[1] / "runs_scaffold" + out = Path(args.output_dir) if args.output_dir else base / f"predrnn_v2_synthetic_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + + run_synthetic_demo( + output_dir=out, + seed=args.seed, + n_samples=args.n_samples, + seq_len=args.seq_len, + image_size=args.image_size, + max_epochs=args.max_epochs, + early_stopping_rounds=args.early_stopping_rounds, + ) + print(f"[done] predrnn_v2 synthetic demo saved to: {out}") + + +if __name__ == "__main__": + main() + +from ._wildfire_benchmark_utils import SegmentationPort, filter_init_kwargs, require_task + + +def predrnn_v2_builder(task: str, in_channels: int = 1, out_dim: int = 1, **kwargs: Any) -> nn.Module: + require_task(task, {"segmentation"}, "predrnn_v2") + init_kwargs = filter_init_kwargs(TinyPredRNNv2, {"in_channels": int(in_channels), **kwargs}) + model = TinyPredRNNv2(**init_kwargs) + return SegmentationPort(model=model, 
class PrithviBurnScars(nn.Module):
    """Burn-scar segmentation model built on a Prithvi-EO-style temporal backbone.

    The temporal backbone produces a coarse feature map which is upsampled and
    fused with a convolutional skip branch computed from the time-averaged raw
    input before a 1x1 head emits per-pixel logits.
    """

    def __init__(
        self,
        image_size: int = 32,
        in_channels: int = 6,
        out_dim: int = 1,
        patch_size: int = 4,
        embed_dim: int = 128,
        depth: int = 4,
        num_heads: int = 4,
        mlp_ratio: float = 4.0,
        dropout: float = 0.1,
        time_dim: int = 1,
        location_dim: int = 2,
        decoder_channels: int = 64,
    ):
        super().__init__()
        if out_dim <= 0:
            raise ValueError(f"out_dim must be positive, got {out_dim}")
        self.in_channels = int(in_channels)
        self.backbone = PrithviEOBackbone(
            image_size=image_size,
            in_channels=in_channels,
            patch_size=patch_size,
            embed_dim=embed_dim,
            depth=depth,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            dropout=dropout,
            time_dim=time_dim,
            location_dim=location_dim,
        )
        self.skip = nn.Sequential(
            nn.Conv2d(in_channels, decoder_channels, kernel_size=3, padding=1),
            nn.GELU(),
            nn.Conv2d(decoder_channels, decoder_channels, kernel_size=3, padding=1),
            nn.GELU(),
        )
        self.decoder = nn.Sequential(
            nn.Conv2d(embed_dim + decoder_channels, decoder_channels, kernel_size=3, padding=1),
            nn.GELU(),
            nn.Conv2d(decoder_channels, decoder_channels // 2, kernel_size=3, padding=1),
            nn.GELU(),
        )
        self.head = nn.Conv2d(decoder_channels // 2, out_dim, kernel_size=1)

    def _extract_x(self, inputs: torch.Tensor | Dict[str, Any]) -> torch.Tensor:
        """Validate and return the raw (B, T, C, H, W) tensor from ``inputs``."""
        x = inputs.get("x") if isinstance(inputs, dict) else inputs
        if not isinstance(x, torch.Tensor):
            raise ValueError("PrithviBurnScars expects a tensor input or a dict containing key 'x'.")
        if x.ndim != 5:
            raise ValueError(f"PrithviBurnScars expects input shape (B,T,C,H,W), got {tuple(x.shape)}")
        if x.size(2) != self.in_channels:
            raise ValueError(f"PrithviBurnScars expected in_channels={self.in_channels}, got {x.size(2)}")
        return x

    def forward(self, inputs: torch.Tensor | Dict[str, Any]) -> torch.Tensor:
        """Return per-pixel logits of shape (B, out_dim, H, W)."""
        x = self._extract_x(inputs)
        coarse = self.backbone(inputs)
        residual = self.skip(x.mean(dim=1))  # time-averaged raw input as skip branch
        upsampled = F.interpolate(coarse, size=residual.shape[-2:], mode="bilinear", align_corners=False)
        return self.head(self.decoder(torch.cat([upsampled, residual], dim=1)))


def prithvi_burnscars_builder(
    task: str,
    image_size: int = 32,
    in_channels: int = 6,
    out_dim: int = 1,
    patch_size: int = 4,
    embed_dim: int = 128,
    depth: int = 4,
    num_heads: int = 4,
    mlp_ratio: float = 4.0,
    dropout: float = 0.1,
    time_dim: int = 1,
    location_dim: int = 2,
    decoder_channels: int = 64,
    **kwargs,
) -> nn.Module:
    """Catalog builder for the burn-scar segmentation model."""
    _ = kwargs
    if task.lower() != "segmentation":
        raise ValueError(f"prithvi_burnscars is segmentation-only, got task={task!r}.")
    return PrithviBurnScars(
        image_size=image_size,
        in_channels=in_channels,
        out_dim=out_dim,
        patch_size=patch_size,
        embed_dim=embed_dim,
        depth=depth,
        num_heads=num_heads,
        mlp_ratio=mlp_ratio,
        dropout=dropout,
        time_dim=time_dim,
        location_dim=location_dim,
        decoder_channels=decoder_channels,
    )


__all__ = ["PrithviBurnScars", "prithvi_burnscars_builder"]
EOSequencePatchEmbed(nn.Module): + def __init__(self, in_channels: int, embed_dim: int, patch_size: int): + super().__init__() + self.proj = nn.Conv3d( + in_channels, + embed_dim, + kernel_size=(1, patch_size, patch_size), + stride=(1, patch_size, patch_size), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # x: (B, T, C, H, W) + x = x.permute(0, 2, 1, 3, 4) + return self.proj(x) + + +class PrithviEOBackbone(nn.Module): + """Lightweight temporal-location-aware EO backbone inspired by Prithvi-EO-2.0-TL.""" + + def __init__( + self, + image_size: int = 32, + in_channels: int = 6, + patch_size: int = 4, + embed_dim: int = 128, + depth: int = 4, + num_heads: int = 4, + mlp_ratio: float = 4.0, + dropout: float = 0.1, + time_dim: int = 1, + location_dim: int = 2, + ): + super().__init__() + if image_size % patch_size != 0: + raise ValueError(f"image_size={image_size} must be divisible by patch_size={patch_size}") + self.image_size = int(image_size) + self.in_channels = int(in_channels) + self.patch_size = int(patch_size) + self.embed_dim = int(embed_dim) + self.time_dim = int(time_dim) + self.location_dim = int(location_dim) + self.grid_size = self.image_size // self.patch_size + + self.patch_embed = EOSequencePatchEmbed( + in_channels=self.in_channels, + embed_dim=self.embed_dim, + patch_size=self.patch_size, + ) + self.spatial_pos_embed = nn.Parameter( + torch.zeros(1, self.grid_size * self.grid_size, self.embed_dim) + ) + nn.init.trunc_normal_(self.spatial_pos_embed, std=0.02) + + self.time_proj = nn.Linear(self.time_dim, self.embed_dim) + self.location_proj = nn.Linear(self.location_dim, self.embed_dim) + + encoder_layer = nn.TransformerEncoderLayer( + d_model=self.embed_dim, + nhead=int(num_heads), + dim_feedforward=int(self.embed_dim * mlp_ratio), + dropout=float(dropout), + activation="gelu", + batch_first=True, + norm_first=True, + ) + self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=int(depth)) + self.norm = 
nn.LayerNorm(self.embed_dim) + + def _unpack_inputs(self, inputs: torch.Tensor | Dict[str, Any]) -> Tuple[torch.Tensor, torch.Tensor | None, torch.Tensor | None]: + if isinstance(inputs, dict): + x = inputs.get("x") + time_metadata = inputs.get("time_metadata") + location_metadata = inputs.get("location_metadata") + else: + x = inputs + time_metadata = None + location_metadata = None + + if not isinstance(x, torch.Tensor): + raise ValueError("PrithviEOBackbone expects a tensor input or a dict containing key 'x'.") + if x.ndim != 5: + raise ValueError( + "PrithviEOBackbone expects input shape (B, T, C, H, W), " + f"got {tuple(x.shape)}." + ) + if x.size(2) != self.in_channels: + raise ValueError( + f"PrithviEOBackbone expected in_channels={self.in_channels}, got {x.size(2)}." + ) + if x.size(-1) != self.image_size or x.size(-2) != self.image_size: + raise ValueError( + f"PrithviEOBackbone expected spatial size {self.image_size}x{self.image_size}, " + f"got {tuple(x.shape[-2:])}." + ) + return x, time_metadata, location_metadata + + def _build_time_metadata(self, batch: int, timesteps: int, device: torch.device, meta: torch.Tensor | None) -> torch.Tensor: + if meta is None: + base = torch.linspace(0.0, 1.0, timesteps, device=device).view(1, timesteps, 1) + return base.expand(batch, -1, -1) + if meta.ndim == 2: + meta = meta.unsqueeze(-1) + if meta.ndim != 3: + raise ValueError(f"time_metadata must have shape (B,T) or (B,T,D), got {tuple(meta.shape)}") + if meta.size(0) != batch or meta.size(1) != timesteps: + raise ValueError( + f"time_metadata expected batch/timestep=({batch},{timesteps}), got ({meta.size(0)},{meta.size(1)})" + ) + if meta.size(-1) == self.time_dim: + return meta.to(device=device, dtype=torch.float32) + if meta.size(-1) > self.time_dim: + return meta[..., : self.time_dim].to(device=device, dtype=torch.float32) + pad = torch.zeros(batch, timesteps, self.time_dim - meta.size(-1), device=device) + return torch.cat([meta.to(device=device, 
dtype=torch.float32), pad], dim=-1) + + def _build_location_metadata(self, batch: int, device: torch.device, meta: torch.Tensor | None) -> torch.Tensor: + if meta is None: + return torch.zeros(batch, self.location_dim, device=device) + if meta.ndim != 2: + raise ValueError(f"location_metadata must have shape (B,D), got {tuple(meta.shape)}") + if meta.size(0) != batch: + raise ValueError(f"location_metadata expected batch={batch}, got {meta.size(0)}") + if meta.size(-1) == self.location_dim: + return meta.to(device=device, dtype=torch.float32) + if meta.size(-1) > self.location_dim: + return meta[..., : self.location_dim].to(device=device, dtype=torch.float32) + pad = torch.zeros(batch, self.location_dim - meta.size(-1), device=device) + return torch.cat([meta.to(device=device, dtype=torch.float32), pad], dim=-1) + + def forward(self, inputs: torch.Tensor | Dict[str, Any]) -> torch.Tensor: + x, time_metadata, location_metadata = self._unpack_inputs(inputs) + batch, timesteps, _, height, width = x.shape + device = x.device + + feat = self.patch_embed(x) + _, _, _, h_tokens, w_tokens = feat.shape + tokens = feat.permute(0, 2, 3, 4, 1).reshape(batch, timesteps * h_tokens * w_tokens, self.embed_dim) + + spatial_pos = self.spatial_pos_embed.unsqueeze(1).expand(-1, timesteps, -1, -1) + spatial_pos = spatial_pos.reshape(1, timesteps * h_tokens * w_tokens, self.embed_dim) + tokens = tokens + spatial_pos + + time_meta = self._build_time_metadata(batch, timesteps, device, time_metadata) + time_tokens = self.time_proj(time_meta).unsqueeze(2).expand(-1, -1, h_tokens * w_tokens, -1) + time_tokens = time_tokens.reshape(batch, timesteps * h_tokens * w_tokens, self.embed_dim) + tokens = tokens + time_tokens + + location_meta = self._build_location_metadata(batch, device, location_metadata) + tokens = tokens + self.location_proj(location_meta).unsqueeze(1) + + encoded = self.norm(self.encoder(tokens)) + encoded = encoded.reshape(batch, timesteps, h_tokens, w_tokens, 
self.embed_dim).mean(dim=1) + return encoded.permute(0, 3, 1, 2).contiguous() + + +class PrithviEO2TL(nn.Module): + """Temporal-location-aware EO segmentation model inspired by Prithvi-EO-2.0-TL.""" + + def __init__( + self, + image_size: int = 32, + in_channels: int = 6, + out_dim: int = 1, + patch_size: int = 4, + embed_dim: int = 128, + depth: int = 4, + num_heads: int = 4, + mlp_ratio: float = 4.0, + dropout: float = 0.1, + time_dim: int = 1, + location_dim: int = 2, + decoder_channels: int = 64, + ): + super().__init__() + if out_dim <= 0: + raise ValueError(f"out_dim must be positive, got {out_dim}") + self.image_size = int(image_size) + self.backbone = PrithviEOBackbone( + image_size=image_size, + in_channels=in_channels, + patch_size=patch_size, + embed_dim=embed_dim, + depth=depth, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + dropout=dropout, + time_dim=time_dim, + location_dim=location_dim, + ) + self.decoder = nn.Sequential( + nn.Conv2d(embed_dim, decoder_channels, kernel_size=3, padding=1), + nn.GELU(), + nn.Conv2d(decoder_channels, decoder_channels, kernel_size=3, padding=1), + nn.GELU(), + ) + self.head = nn.Conv2d(decoder_channels, out_dim, kernel_size=1) + + def forward(self, inputs: torch.Tensor | Dict[str, Any]) -> torch.Tensor: + if isinstance(inputs, dict): + x = inputs["x"] + else: + x = inputs + features = self.backbone(inputs) + logits = self.head(self.decoder(features)) + return F.interpolate(logits, size=x.shape[-2:], mode="bilinear", align_corners=False) + +def prithvi_eo_2_tl_builder( + task: str, + image_size: int = 32, + in_channels: int = 6, + out_dim: int = 1, + patch_size: int = 4, + embed_dim: int = 128, + depth: int = 4, + num_heads: int = 4, + mlp_ratio: float = 4.0, + dropout: float = 0.1, + time_dim: int = 1, + location_dim: int = 2, + decoder_channels: int = 64, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "segmentation": + raise ValueError(f"prithvi_eo_2_tl is segmentation-only, got task={task!r}.") + 
class WeatherSequencePatchEmbed(nn.Module):
    """Patchify a (B, T, C, H, W) weather sequence into per-frame tokens.

    A Conv3d with a single-frame temporal kernel applies one shared
    spatial patch projection to each timestep, producing
    (B, D, T, H/patch, W/patch).
    """

    def __init__(self, in_channels: int, embed_dim: int, patch_size: int):
        super().__init__()
        window = (1, patch_size, patch_size)
        self.proj = nn.Conv3d(in_channels, embed_dim, kernel_size=window, stride=window)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Conv3d expects (B, C, T, H, W); callers pass (B, T, C, H, W).
        return self.proj(x.permute(0, 2, 1, 3, 4))
WeatherSequencePatchEmbed( + in_channels=self.in_channels, + embed_dim=self.embed_dim, + patch_size=self.patch_size, + ) + self.spatial_pos_embed = nn.Parameter( + torch.zeros(1, self.grid_size * self.grid_size, self.embed_dim) + ) + nn.init.trunc_normal_(self.spatial_pos_embed, std=0.02) + + self.lead_time_proj = nn.Linear(self.lead_time_dim, self.embed_dim) + self.variable_proj = nn.Linear(self.variable_summary_dim, self.embed_dim) + + encoder_layer = nn.TransformerEncoderLayer( + d_model=self.embed_dim, + nhead=int(num_heads), + dim_feedforward=int(self.embed_dim * mlp_ratio), + dropout=float(dropout), + activation="gelu", + batch_first=True, + norm_first=True, + ) + self.encoder = nn.TransformerEncoder(encoder_layer, num_layers=int(depth)) + self.norm = nn.LayerNorm(self.embed_dim) + + def _unpack_inputs( + self, + inputs: torch.Tensor | Dict[str, Any], + ) -> Tuple[torch.Tensor, torch.Tensor | None, torch.Tensor | None]: + if isinstance(inputs, dict): + x = inputs.get("x") + lead_time = inputs.get("lead_time_hours") + variable_summary = inputs.get("variable_summary") + else: + x = inputs + lead_time = None + variable_summary = None + + if not isinstance(x, torch.Tensor): + raise ValueError("PrithviWxCBackbone expects a tensor input or a dict containing key 'x'.") + if x.ndim != 5: + raise ValueError( + "PrithviWxCBackbone expects input shape (B, T, C, H, W), " + f"got {tuple(x.shape)}." + ) + if x.size(2) != self.in_channels: + raise ValueError( + f"PrithviWxCBackbone expected in_channels={self.in_channels}, got {x.size(2)}." + ) + if x.size(-1) != self.image_size or x.size(-2) != self.image_size: + raise ValueError( + f"PrithviWxCBackbone expected spatial size {self.image_size}x{self.image_size}, " + f"got {tuple(x.shape[-2:])}." 
+ ) + return x, lead_time, variable_summary + + def _build_lead_time( + self, + batch: int, + timesteps: int, + device: torch.device, + lead_time: torch.Tensor | None, + ) -> torch.Tensor: + if lead_time is None: + base = torch.linspace(0.0, 1.0, timesteps, device=device).view(1, timesteps, 1) + return base.expand(batch, -1, -1) + if lead_time.ndim == 1: + lead_time = lead_time.view(batch, 1, 1).expand(-1, timesteps, -1) + elif lead_time.ndim == 2: + lead_time = lead_time.unsqueeze(-1) + if lead_time.ndim != 3: + raise ValueError( + f"lead_time_hours must have shape (B,), (B,T), or (B,T,D), got {tuple(lead_time.shape)}" + ) + if lead_time.size(0) != batch or lead_time.size(1) != timesteps: + raise ValueError( + "lead_time_hours batch/timestep mismatch: " + f"expected ({batch},{timesteps}), got ({lead_time.size(0)},{lead_time.size(1)})" + ) + if lead_time.size(-1) == self.lead_time_dim: + return lead_time.to(device=device, dtype=torch.float32) + if lead_time.size(-1) > self.lead_time_dim: + return lead_time[..., : self.lead_time_dim].to(device=device, dtype=torch.float32) + pad = torch.zeros(batch, timesteps, self.lead_time_dim - lead_time.size(-1), device=device) + return torch.cat([lead_time.to(device=device, dtype=torch.float32), pad], dim=-1) + + def _build_variable_summary( + self, + x: torch.Tensor, + variable_summary: torch.Tensor | None, + ) -> torch.Tensor: + batch, timesteps, channels, _, _ = x.shape + if variable_summary is None: + summary = x.mean(dim=(-1, -2)) + else: + summary = variable_summary + if summary.ndim == 2: + summary = summary.unsqueeze(1).expand(-1, timesteps, -1) + if summary.ndim != 3: + raise ValueError( + "variable_summary must have shape (B,D) or (B,T,D), " + f"got {tuple(summary.shape)}" + ) + if summary.size(0) != batch or summary.size(1) != timesteps: + raise ValueError( + "variable_summary batch/timestep mismatch: " + f"expected ({batch},{timesteps}), got ({summary.size(0)},{summary.size(1)})" + ) + if summary.size(-1) > 
self.variable_summary_dim: + summary = summary[..., : self.variable_summary_dim] + elif summary.size(-1) < self.variable_summary_dim: + pad = torch.zeros( + batch, + timesteps, + self.variable_summary_dim - summary.size(-1), + device=x.device, + dtype=torch.float32, + ) + summary = torch.cat([summary.to(device=x.device, dtype=torch.float32), pad], dim=-1) + else: + summary = summary.to(device=x.device, dtype=torch.float32) + return summary + + def forward(self, inputs: torch.Tensor | Dict[str, Any]) -> torch.Tensor: + x, lead_time, variable_summary = self._unpack_inputs(inputs) + batch, timesteps, _, _, _ = x.shape + + feat = self.patch_embed(x) + _, _, _, h_tokens, w_tokens = feat.shape + tokens = feat.permute(0, 2, 3, 4, 1).reshape(batch, timesteps * h_tokens * w_tokens, self.embed_dim) + + spatial_pos = self.spatial_pos_embed.unsqueeze(1).expand(-1, timesteps, -1, -1) + spatial_pos = spatial_pos.reshape(1, timesteps * h_tokens * w_tokens, self.embed_dim) + tokens = tokens + spatial_pos + + lead = self._build_lead_time(batch, timesteps, x.device, lead_time) + lead_tokens = self.lead_time_proj(lead).unsqueeze(2).expand(-1, -1, h_tokens * w_tokens, -1) + tokens = tokens + lead_tokens.reshape(batch, timesteps * h_tokens * w_tokens, self.embed_dim) + + var_summary = self._build_variable_summary(x, variable_summary) + variable_tokens = self.variable_proj(var_summary).unsqueeze(2).expand(-1, -1, h_tokens * w_tokens, -1) + tokens = tokens + variable_tokens.reshape(batch, timesteps * h_tokens * w_tokens, self.embed_dim) + + encoded = self.norm(self.encoder(tokens)) + encoded = encoded.reshape(batch, timesteps, h_tokens, w_tokens, self.embed_dim).mean(dim=1) + return encoded.permute(0, 3, 1, 2).contiguous() + + +class PrithviWxC(nn.Module): + """Dense wildfire-risk head on top of a Prithvi-WxC-style weather backbone.""" + + def __init__( + self, + image_size: int = 32, + in_channels: int = 8, + out_dim: int = 1, + patch_size: int = 4, + embed_dim: int = 128, + depth: int 
def prithvi_wxc_builder(
    task: str,
    image_size: int = 32,
    in_channels: int = 8,
    out_dim: int = 1,
    patch_size: int = 4,
    embed_dim: int = 128,
    depth: int = 4,
    num_heads: int = 4,
    mlp_ratio: float = 4.0,
    dropout: float = 0.1,
    lead_time_dim: int = 1,
    variable_summary_dim: int = 8,
    decoder_channels: int = 64,
    **kwargs,
) -> nn.Module:
    """Registry builder for :class:`PrithviWxC`.

    Only the ``"segmentation"`` task is supported; unknown keyword
    arguments are accepted and ignored for catalog compatibility.

    Raises:
        ValueError: if *task* is not ``"segmentation"`` (case-insensitive).
    """
    del kwargs  # intentionally ignored pass-through options
    if task.lower() != "segmentation":
        raise ValueError(f"prithvi_wxc is segmentation-only, got task={task!r}.")
    model_kwargs = dict(
        image_size=image_size,
        in_channels=in_channels,
        out_dim=out_dim,
        patch_size=patch_size,
        embed_dim=embed_dim,
        depth=depth,
        num_heads=num_heads,
        mlp_ratio=mlp_ratio,
        dropout=dropout,
        lead_time_dim=lead_time_dim,
        variable_summary_dim=variable_summary_dim,
        decoder_channels=decoder_channels,
    )
    return PrithviWxC(**model_kwargs)
["PrithviWxCBackbone", "PrithviWxC", "prithvi_wxc_builder"] diff --git a/pyhazards/models/qwen25_vl_wildfire_prompted.py b/pyhazards/models/qwen25_vl_wildfire_prompted.py new file mode 100644 index 00000000..dbb16810 --- /dev/null +++ b/pyhazards/models/qwen25_vl_wildfire_prompted.py @@ -0,0 +1,169 @@ +from __future__ import annotations + +from typing import Any, Dict, Tuple + +import torch +import torch.nn as nn + + +class Qwen25VLWildfirePrompted(nn.Module): + """Prompt-conditioned wildfire segmentation model inspired by Qwen2.5-VL.""" + + def __init__( + self, + in_channels: int = 6, + out_dim: int = 1, + hidden_dim: int = 64, + prompt_dim: int = 24, + num_prompt_tokens: int = 4, + num_heads: int = 4, + dropout: float = 0.1, + ): + super().__init__() + if in_channels <= 0: + raise ValueError(f"in_channels must be positive, got {in_channels}") + if out_dim <= 0: + raise ValueError(f"out_dim must be positive, got {out_dim}") + if hidden_dim <= 0: + raise ValueError(f"hidden_dim must be positive, got {hidden_dim}") + if prompt_dim <= 0: + raise ValueError(f"prompt_dim must be positive, got {prompt_dim}") + if num_prompt_tokens <= 0: + raise ValueError(f"num_prompt_tokens must be positive, got {num_prompt_tokens}") + if hidden_dim % num_heads != 0: + raise ValueError(f"hidden_dim={hidden_dim} must be divisible by num_heads={num_heads}") + + self.in_channels = int(in_channels) + self.prompt_dim = int(prompt_dim) + self.num_prompt_tokens = int(num_prompt_tokens) + self.hidden_dim = int(hidden_dim) + + self.visual_encoder = nn.Sequential( + nn.Conv2d(in_channels, hidden_dim // 2, kernel_size=3, padding=1), + nn.GELU(), + nn.Conv2d(hidden_dim // 2, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + ) + self.prompt_proj = nn.Linear(self.prompt_dim, hidden_dim) + self.prompt_bank = nn.Parameter(torch.randn(self.num_prompt_tokens, self.prompt_dim) * 0.02) + self.image_summary = nn.Linear(hidden_dim, hidden_dim) + self.cross_attn = nn.MultiheadAttention( + 
embed_dim=hidden_dim, + num_heads=num_heads, + dropout=dropout, + batch_first=True, + ) + self.ffn = nn.Sequential( + nn.LayerNorm(hidden_dim), + nn.Linear(hidden_dim, hidden_dim * 2), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(hidden_dim * 2, hidden_dim), + ) + self.decoder = nn.Sequential( + nn.Conv2d(hidden_dim * 2, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + nn.Dropout2d(dropout) if dropout > 0 else nn.Identity(), + nn.Conv2d(hidden_dim, hidden_dim // 2, kernel_size=3, padding=1), + nn.GELU(), + ) + self.head = nn.Conv2d(hidden_dim // 2, out_dim, kernel_size=1) + + def _unpack_inputs(self, inputs: torch.Tensor | Dict[str, Any]) -> Tuple[torch.Tensor, torch.Tensor | None]: + if isinstance(inputs, dict): + x = inputs.get("x") + prompt = inputs.get("prompt_context") + else: + x = inputs + prompt = None + + if not isinstance(x, torch.Tensor): + raise ValueError("Qwen25VLWildfirePrompted expects a tensor input or a dict containing key 'x'.") + if x.ndim != 4: + raise ValueError( + "Qwen25VLWildfirePrompted expects input shape (B, C, H, W), " + f"got {tuple(x.shape)}." + ) + if x.size(1) != self.in_channels: + raise ValueError( + f"Qwen25VLWildfirePrompted expected in_channels={self.in_channels}, got {x.size(1)}." 
+ ) + return x, prompt + + def _coerce_prompt(self, prompt: torch.Tensor | None, batch: int, device: torch.device) -> torch.Tensor: + learned = self.prompt_bank.unsqueeze(0).expand(batch, -1, -1) + if prompt is None: + return self.prompt_proj(learned) + if prompt.ndim == 2: + if prompt.size(0) != batch: + raise ValueError(f"prompt_context must have shape (B,D) or (B,T,D), got {tuple(prompt.shape)}") + prompt = prompt.unsqueeze(1).expand(-1, self.num_prompt_tokens, -1) + elif prompt.ndim == 3: + if prompt.size(0) != batch: + raise ValueError(f"prompt_context must have shape (B,D) or (B,T,D), got {tuple(prompt.shape)}") + if prompt.size(1) != self.num_prompt_tokens: + if prompt.size(1) > self.num_prompt_tokens: + prompt = prompt[:, : self.num_prompt_tokens] + else: + pad = torch.zeros(batch, self.num_prompt_tokens - prompt.size(1), prompt.size(2), device=prompt.device) + prompt = torch.cat([prompt, pad], dim=1) + else: + raise ValueError(f"prompt_context must have rank 2 or 3, got {tuple(prompt.shape)}") + + prompt = prompt.to(device=device, dtype=torch.float32) + if prompt.size(-1) > self.prompt_dim: + prompt = prompt[..., : self.prompt_dim] + elif prompt.size(-1) < self.prompt_dim: + pad = torch.zeros(batch, self.num_prompt_tokens, self.prompt_dim - prompt.size(-1), device=device) + prompt = torch.cat([prompt, pad], dim=-1) + return self.prompt_proj(prompt + learned.to(device=device, dtype=torch.float32)) + + def forward(self, inputs: torch.Tensor | Dict[str, Any]) -> torch.Tensor: + x, prompt = self._unpack_inputs(inputs) + batch = x.size(0) + device = x.device + + feature_map = self.visual_encoder(x) + visual_tokens = feature_map.flatten(2).transpose(1, 2) + pooled = torch.mean(visual_tokens, dim=1, keepdim=True) + pooled = self.image_summary(pooled) + + prompt_tokens = self._coerce_prompt(prompt, batch, device) + query_tokens = torch.cat([prompt_tokens, pooled], dim=1) + attn_out, _ = self.cross_attn(query_tokens, visual_tokens, visual_tokens, 
def qwen25_vl_wildfire_prompted_builder(
    task: str,
    in_channels: int = 6,
    out_dim: int = 1,
    hidden_dim: int = 64,
    prompt_dim: int = 24,
    num_prompt_tokens: int = 4,
    num_heads: int = 4,
    dropout: float = 0.1,
    **kwargs,
) -> nn.Module:
    """Registry builder for :class:`Qwen25VLWildfirePrompted`.

    Only the ``"segmentation"`` task is supported; unknown keyword
    arguments are accepted and ignored for catalog compatibility.

    Raises:
        ValueError: if *task* is not ``"segmentation"`` (case-insensitive).
    """
    del kwargs  # intentionally ignored pass-through options
    if task.lower() != "segmentation":
        raise ValueError(
            f"qwen25_vl_wildfire_prompted is segmentation-only in PyHazards, got task={task!r}."
        )
    model_kwargs = dict(
        in_channels=in_channels,
        out_dim=out_dim,
        hidden_dim=hidden_dim,
        prompt_dim=prompt_dim,
        num_prompt_tokens=num_prompt_tokens,
        num_heads=num_heads,
        dropout=dropout,
    )
    return Qwen25VLWildfirePrompted(**model_kwargs)
class RainTemporalBlock(nn.Module):
    """Pre-norm transformer block applied along the time axis.

    Input/output shape is (B*H*W, T, D): multi-head self-attention over
    timesteps followed by a two-layer feed-forward network, each wrapped
    in a residual connection.
    """

    def __init__(self, dim: int, num_heads: int, dropout: float = 0.0):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(
            embed_dim=dim,
            num_heads=num_heads,
            dropout=dropout,
            batch_first=True,
        )
        self.norm2 = nn.LayerNorm(dim)
        self.ffn = nn.Sequential(
            nn.Linear(dim, dim * 2),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(dim * 2, dim),
            nn.Dropout(dropout),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: [BHW, T, D]
        normed = self.norm1(x)
        attended, _ = self.attn(normed, normed, normed, need_weights=False)
        residual = x + attended
        return residual + self.ffn(self.norm2(residual))
padding=1), + nn.ReLU(inplace=True), + ) + self.rain_mixer = RainMixer(hidden_channels) + self.temporal_blocks = nn.ModuleList([RainTemporalBlock(hidden_channels, num_heads) for _ in range(num_layers)]) + + self.decoder = nn.Sequential( + nn.Conv2d(hidden_channels, hidden_channels, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(hidden_channels, 1, kernel_size=1), + ) + + def forward(self, x_seq: torch.Tensor) -> torch.Tensor: + # x_seq: [B,T,C,H,W] + b, t, c, h, w = x_seq.shape + + x = x_seq.reshape(b * t, c, h, w) + x = self.encoder(x) + x = self.rain_mixer(x) + d = x.shape[1] + + x = x.reshape(b, t, d, h, w) + x = x.permute(0, 3, 4, 1, 2).contiguous() # [B,H,W,T,D] + x = x.reshape(b * h * w, t, d) + + for blk in self.temporal_blocks: + x = blk(x) + + x = x[:, -1, :].reshape(b, h, w, d).permute(0, 3, 1, 2).contiguous() + return self.decoder(x) + + +def _choose_device(device_text: str) -> torch.device: + if device_text == "cuda" and torch.cuda.is_available(): + return torch.device("cuda") + return torch.device("cpu") + + +def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader: + ds = TensorDataset( + torch.from_numpy(x.astype(np.float32)), + torch.from_numpy(y.astype(np.float32)), + ) + return DataLoader(ds, batch_size=batch_size, shuffle=shuffle) + + +def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray: + probs: List[np.ndarray] = [] + model.eval() + with torch.no_grad(): + for xb, _ in loader: + xb = xb.to(device) + logits = model(xb) + p = torch.sigmoid(logits).detach().cpu().numpy() + probs.append(p) + if not probs: + return np.zeros((0,), dtype=np.float32) + return np.concatenate(probs, axis=0).reshape(-1) + + +def train_rainformer_track_o( + x_train: np.ndarray, + y_train: np.ndarray, + x_val: np.ndarray, + y_val: np.ndarray, + cfg: RainformerTrackOConfig, +): + if x_train.ndim != 5 or x_val.ndim != 5: + raise ValueError("x_train and x_val must be 5D 
arrays [N,T,C,H,W]") + if y_train.ndim != 4 or y_val.ndim != 4: + raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]") + + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + + device = _choose_device(cfg.device) + + model = TinyRainformer( + in_channels=cfg.in_channels, + hidden_channels=cfg.hidden_channels, + num_heads=cfg.num_heads, + num_layers=cfg.num_layers, + ).to(device) + optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay) + + total_px = float(y_train.size) + pos_px = float(np.sum(y_train)) + neg_px = max(1.0, total_px - pos_px) + raw_pos_weight = neg_px / max(pos_px, 1.0) + pos_weight = float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max)) + + criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device)) + + train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True) + val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False) + + history: List[Dict[str, float]] = [] + best_epoch = 1 + best_val_loss = float("inf") + best_state: Dict[str, torch.Tensor] | None = None + wait = 0 + + for epoch in range(1, cfg.max_epochs + 1): + model.train() + train_losses: List[float] = [] + + for xb, yb in train_loader: + xb = xb.to(device) + yb = yb.to(device) + + optimizer.zero_grad(set_to_none=True) + logits = model(xb) + loss = criterion(logits, yb) + loss.backward() + optimizer.step() + train_losses.append(float(loss.item())) + + model.eval() + val_losses: List[float] = [] + with torch.no_grad(): + for xb, yb in val_loader: + xb = xb.to(device) + yb = yb.to(device) + logits = model(xb) + loss = criterion(logits, yb) + val_losses.append(float(loss.item())) + + tr_loss = float(np.mean(train_losses)) if train_losses else float("nan") + va_loss = float(np.mean(val_losses)) if val_losses else float("nan") + + history.append( + { + "epoch": float(epoch), + "train_loss": tr_loss, + "val_loss": va_loss, + "learning_rate": 
float(optimizer.param_groups[0]["lr"]), + } + ) + + if va_loss < best_val_loss - cfg.min_delta: + best_val_loss = va_loss + best_epoch = epoch + best_state = deepcopy(model.state_dict()) + wait = 0 + else: + wait += 1 + + if wait >= cfg.early_stopping_rounds: + break + + if best_state is not None: + model.load_state_dict(best_state) + + val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7) + val_true = y_val.reshape(-1).astype(np.float32) + + mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0 + metrics = { + "auprc": float(average_precision_score(val_true, val_prob)), + "auroc": float(roc_auc_score(val_true, val_prob)), + "brier": float(brier_score_loss(val_true, val_prob)), + "nll": float(log_loss(val_true, val_prob)), + "ece": float(binary_ece(val_true, val_prob, n_bins=15)), + "mean_day_to_day_change": mean_change, + "normalized_consistency_score": normalized_consistency_score(mean_change), + } + + return model, history, metrics, best_epoch, pos_weight + + +def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None: + output_dir.mkdir(parents=True, exist_ok=True) + + history_csv = output_dir / "history.csv" + with history_csv.open("w", encoding="utf-8", newline="") as f: + writer = csv.DictWriter(f, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"]) + writer.writeheader() + writer.writerows(history) + + x = [int(r["epoch"]) for r in history] + y_tr = [float(r["train_loss"]) for r in history] + y_va = [float(r["val_loss"]) for r in history] + + plt.figure(figsize=(8, 5)) + plt.plot(x, y_tr, label="train_bce", marker="o", linewidth=1.4) + plt.plot(x, y_va, label="val_bce", marker="s", linewidth=1.2) + plt.xlabel("epoch") + plt.ylabel("loss") + plt.title("Rainformer Track-O: train loss vs epoch") + plt.grid(alpha=0.3) + plt.legend() + plt.tight_layout() + plt.savefig(output_dir / "loss_curve.png", dpi=150) + plt.close() + + +def 
build_experiment_setting( + cfg: RainformerTrackOConfig, + best_epoch: int, + pos_weight: float, + metrics: Dict[str, float], +) -> Dict[str, Any]: + return { + "benchmark": { + "task": "Track-O", + "model_name": "rainformer", + "run_time": datetime.now().isoformat(), + }, + "evaluation_protocol": { + "discrimination": {"primary": "auprc", "secondary": "auroc"}, + "reliability": ["brier", "nll", "ece"], + "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"], + }, + "training": { + "train_unit": "epoch", + "max_epochs": cfg.max_epochs, + "early_stopping_rounds": cfg.early_stopping_rounds, + "best_epoch": best_epoch, + "seed": cfg.seed, + "batch_size": cfg.batch_size, + "seq_len": cfg.seq_len, + }, + "optimizer": { + "name": "AdamW", + "lr": cfg.lr, + "weight_decay": cfg.weight_decay, + }, + "learning_weight": { + "type": "pixel_pos_weight", + "value": pos_weight, + "clip_max": cfg.pos_weight_clip_max, + }, + "params": asdict(cfg), + "val_metrics": metrics, + "note": "This module supports both real data and synthetic smoke demonstration.", + } + + +def run_synthetic_demo( + output_dir: Path, + seed: int = 42, + n_samples: int = 160, + seq_len: int = 6, + image_size: int = 24, + max_epochs: int = 60, + early_stopping_rounds: int = 12, +) -> None: + x, y = make_synthetic_fire_sequences( + n_samples=n_samples, + seq_len=seq_len, + image_size=image_size, + seed=seed, + ) + x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed) + + cfg = RainformerTrackOConfig( + seed=seed, + seq_len=seq_len, + max_epochs=max_epochs, + early_stopping_rounds=early_stopping_rounds, + device="cpu", + ) + + model, history, val_metrics, best_epoch, pos_weight = train_rainformer_track_o( + x_train, + y_train, + x_val, + y_val, + cfg, + ) + + test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False) + test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 
1e-7) + test_true = y_test.reshape(-1).astype(np.float32) + + test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0 + test_metrics = { + "auprc": float(average_precision_score(test_true, test_prob)), + "auroc": float(roc_auc_score(test_true, test_prob)), + "brier": float(brier_score_loss(test_true, test_prob)), + "nll": float(log_loss(test_true, test_prob)), + "ece": float(binary_ece(test_true, test_prob, n_bins=15)), + "mean_day_to_day_change": test_mean_change, + "normalized_consistency_score": normalized_consistency_score(test_mean_change), + } + + output_dir.mkdir(parents=True, exist_ok=True) + save_history_and_plot(history, output_dir) + + torch.save( + { + "state_dict": model.state_dict(), + "config": asdict(cfg), + "best_epoch": best_epoch, + }, + output_dir / "rainformer_model.pt", + ) + + setting = build_experiment_setting(cfg, best_epoch=best_epoch, pos_weight=pos_weight, metrics=val_metrics) + setting["data"] = { + "n_samples": n_samples, + "image_size": image_size, + "seq_len": seq_len, + "split": {"train": int(x_train.shape[0]), "val": int(x_val.shape[0]), "test": int(x_test.shape[0])}, + } + setting["test_metrics"] = test_metrics + + (output_dir / "experiment_setting.json").write_text(json.dumps(setting, indent=2), encoding="utf-8") + (output_dir / "metrics.json").write_text( + json.dumps({"val": val_metrics, "test": test_metrics, "best_epoch": best_epoch}, indent=2), + encoding="utf-8", + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Run Rainformer Track-O synthetic smoke demo") + parser.add_argument("--output_dir", default=None) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--n_samples", type=int, default=160) + parser.add_argument("--seq_len", type=int, default=6) + parser.add_argument("--image_size", type=int, default=24) + parser.add_argument("--max_epochs", type=int, default=60) + 
parser.add_argument("--early_stopping_rounds", type=int, default=12) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + base = Path(__file__).resolve().parents[1] / "runs_scaffold" + out = ( + Path(args.output_dir) + if args.output_dir + else base / f"rainformer_synthetic_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + ) + + run_synthetic_demo( + output_dir=out, + seed=args.seed, + n_samples=args.n_samples, + seq_len=args.seq_len, + image_size=args.image_size, + max_epochs=args.max_epochs, + early_stopping_rounds=args.early_stopping_rounds, + ) + print(f"[done] rainformer synthetic demo saved to: {out}") + + +if __name__ == "__main__": + main() + +from ._wildfire_benchmark_utils import SegmentationPort, filter_init_kwargs, require_task + + +def rainformer_builder(task: str, in_channels: int = 1, out_dim: int = 1, **kwargs: Any) -> nn.Module: + require_task(task, {"segmentation"}, "rainformer") + init_kwargs = filter_init_kwargs(TinyRainformer, {"in_channels": int(in_channels), **kwargs}) + model = TinyRainformer(**init_kwargs) + return SegmentationPort(model=model, out_channels=int(out_dim)) + + +__all__ = ["TinyRainformer", "rainformer_builder"] diff --git a/pyhazards/models/random_forest.py b/pyhazards/models/random_forest.py new file mode 100644 index 00000000..3f2e5686 --- /dev/null +++ b/pyhazards/models/random_forest.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from typing import Any, Optional + +import numpy as np +import torch.nn as nn + +from ._wildfire_benchmark_utils import EstimatorPort, filter_init_kwargs, require_task + + +class RandomForestModel(EstimatorPort): + """A tree-ensemble baseline for wildfire occurrence probability over tabular features.""" + + def __init__(self, n_estimators: int = 500, max_depth: Optional[int] = None, class_weight: Any = "balanced_subsample"): + super().__init__() + from sklearn.ensemble import RandomForestClassifier + + self.estimator = RandomForestClassifier( + 
"""In-process registry mapping model names to builder callables.

Builders are functions taking task/keyword arguments and returning an
``nn.Module``; ``defaults`` holds keyword defaults applied when the model
is instantiated from the catalog.
"""

from typing import Any, Callable, Dict, List, Optional

import torch.nn as nn

# name -> {"builder": Callable[..., nn.Module], "defaults": dict}
_MODEL_REGISTRY: Dict[str, Dict[str, Any]] = {}


def register_model(name: str, builder: Callable[..., nn.Module], defaults: Optional[Dict[str, Any]] = None) -> None:
    """Register *builder* under *name*.

    Args:
        name: Unique model identifier.
        builder: Callable producing an ``nn.Module``.
        defaults: Optional default keyword arguments for the builder.

    Raises:
        ValueError: If *name* is already registered (duplicate registration
            is treated as a programming error rather than silently replaced).
    """
    if name in _MODEL_REGISTRY:
        raise ValueError(f"Model '{name}' already registered.")
    _MODEL_REGISTRY[name] = {"builder": builder, "defaults": defaults or {}}


def available_models() -> List[str]:
    """Return the names of all registered models, sorted alphabetically."""
    return sorted(_MODEL_REGISTRY)


def get_model_config(name: str) -> Optional[Dict[str, Any]]:
    """Return the registry entry for *name*, or ``None`` if unregistered.

    NOTE(review): the returned dict is the live registry entry, not a copy;
    mutating it mutates the registry.
    """
    return _MODEL_REGISTRY.get(name)


__all__ = ["register_model", "available_models", "get_model_config"]
+from typing import Any, Dict, List + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np +import torch +import torch.nn.functional as F +from sklearn.metrics import average_precision_score, brier_score_loss, log_loss, roc_auc_score +from torch import nn +from torch.utils.data import DataLoader, TensorDataset + +if __package__ is None or __package__ == "": + sys.path.insert(0, str(Path(__file__).resolve().parents[3])) + from pyhazards.models.unet import ( + binary_ece, + make_synthetic_fire_maps, + normalized_consistency_score, + split_train_val_test, + ) +else: + from .unet import ( + binary_ece, + make_synthetic_fire_maps, + normalized_consistency_score, + split_train_val_test, + ) + + +@dataclass +class ResNet18UNetTrackOConfig: + in_channels: int = 1 + stem_channels: int = 16 + lr: float = 8e-4 + weight_decay: float = 1e-4 + batch_size: int = 8 + max_epochs: int = 120 + early_stopping_rounds: int = 16 + min_delta: float = 1e-4 + seed: int = 42 + pos_weight_clip_max: float = 50.0 + device: str = "cpu" + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, in_channels: int, out_channels: int, stride: int = 1): + super().__init__() + self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(out_channels) + + if stride != 1 or in_channels != out_channels: + self.downsample = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(out_channels), + ) + else: + self.downsample = None + + def forward(self, x: torch.Tensor) -> torch.Tensor: + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: 
class DecoderBlock(nn.Module):
    """U-Net style decoder stage: upsample to the skip resolution, fuse, refine.

    The incoming feature map is bilinearly resized to the spatial size of the
    skip connection, concatenated with it along the channel axis, and passed
    through two 3x3 conv + ReLU layers.
    """

    def __init__(self, in_channels: int, skip_channels: int, out_channels: int):
        super().__init__()
        fused_channels = in_channels + skip_channels
        refine_layers = [
            nn.Conv2d(fused_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        ]
        self.conv = nn.Sequential(*refine_layers)

    def forward(self, x: torch.Tensor, skip: torch.Tensor) -> torch.Tensor:
        """Upsample *x* to match *skip*, concatenate, and refine."""
        upsampled = F.interpolate(x, size=skip.shape[-2:], mode="bilinear", align_corners=False)
        fused = torch.cat([upsampled, skip], dim=1)
        return self.conv(fused)
+ + def forward(self, x: torch.Tensor) -> torch.Tensor: + x0 = self.stem(x) + x1 = self.layer1(x0) + x2 = self.layer2(x1) + x3 = self.layer3(x2) + x4 = self.layer4(x3) + + y3 = self.dec3(x4, x3) + y2 = self.dec2(y3, x2) + y1 = self.dec1(y2, x1) + return self.head(y1) + + +def _choose_device(device_text: str) -> torch.device: + if device_text == "cuda" and torch.cuda.is_available(): + return torch.device("cuda") + return torch.device("cpu") + + +def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader: + ds = TensorDataset( + torch.from_numpy(x.astype(np.float32)), + torch.from_numpy(y.astype(np.float32)), + ) + return DataLoader(ds, batch_size=batch_size, shuffle=shuffle) + + +def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray: + probs: List[np.ndarray] = [] + model.eval() + with torch.no_grad(): + for xb, _ in loader: + xb = xb.to(device) + logits = model(xb) + p = torch.sigmoid(logits).detach().cpu().numpy() + probs.append(p) + if not probs: + return np.zeros((0,), dtype=np.float32) + return np.concatenate(probs, axis=0).reshape(-1) + + +def train_resnet18_unet_track_o( + x_train: np.ndarray, + y_train: np.ndarray, + x_val: np.ndarray, + y_val: np.ndarray, + cfg: ResNet18UNetTrackOConfig, +): + if x_train.ndim != 4 or x_val.ndim != 4: + raise ValueError("x_train and x_val must be 4D arrays [N,C,H,W]") + if y_train.ndim != 4 or y_val.ndim != 4: + raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]") + + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + + device = _choose_device(cfg.device) + + model = TinyResNet18UNet(in_channels=cfg.in_channels, stem_channels=cfg.stem_channels).to(device) + optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay) + + total_px = float(y_train.size) + pos_px = float(np.sum(y_train)) + neg_px = max(1.0, total_px - pos_px) + raw_pos_weight = neg_px / max(pos_px, 1.0) + pos_weight = 
float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max)) + + criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device)) + + train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True) + val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False) + + history: List[Dict[str, float]] = [] + best_epoch = 1 + best_val_loss = float("inf") + best_state: Dict[str, torch.Tensor] | None = None + wait = 0 + + for epoch in range(1, cfg.max_epochs + 1): + model.train() + train_losses: List[float] = [] + + for xb, yb in train_loader: + xb = xb.to(device) + yb = yb.to(device) + + optimizer.zero_grad(set_to_none=True) + logits = model(xb) + loss = criterion(logits, yb) + loss.backward() + optimizer.step() + train_losses.append(float(loss.item())) + + model.eval() + val_losses: List[float] = [] + with torch.no_grad(): + for xb, yb in val_loader: + xb = xb.to(device) + yb = yb.to(device) + logits = model(xb) + loss = criterion(logits, yb) + val_losses.append(float(loss.item())) + + tr_loss = float(np.mean(train_losses)) if train_losses else float("nan") + va_loss = float(np.mean(val_losses)) if val_losses else float("nan") + + history.append( + { + "epoch": float(epoch), + "train_loss": tr_loss, + "val_loss": va_loss, + "learning_rate": float(optimizer.param_groups[0]["lr"]), + } + ) + + if va_loss < best_val_loss - cfg.min_delta: + best_val_loss = va_loss + best_epoch = epoch + best_state = deepcopy(model.state_dict()) + wait = 0 + else: + wait += 1 + + if wait >= cfg.early_stopping_rounds: + break + + if best_state is not None: + model.load_state_dict(best_state) + + val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7) + val_true = y_val.reshape(-1).astype(np.float32) + + mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0 + metrics = { + "auprc": float(average_precision_score(val_true, val_prob)), + "auroc": 
float(roc_auc_score(val_true, val_prob)), + "brier": float(brier_score_loss(val_true, val_prob)), + "nll": float(log_loss(val_true, val_prob)), + "ece": float(binary_ece(val_true, val_prob, n_bins=15)), + "mean_day_to_day_change": mean_change, + "normalized_consistency_score": normalized_consistency_score(mean_change), + } + + return model, history, metrics, best_epoch, pos_weight + + +def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None: + output_dir.mkdir(parents=True, exist_ok=True) + + history_csv = output_dir / "history.csv" + with history_csv.open("w", encoding="utf-8", newline="") as f: + writer = csv.DictWriter(f, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"]) + writer.writeheader() + writer.writerows(history) + + x = [int(r["epoch"]) for r in history] + y_tr = [float(r["train_loss"]) for r in history] + y_va = [float(r["val_loss"]) for r in history] + + plt.figure(figsize=(8, 5)) + plt.plot(x, y_tr, label="train_bce", marker="o", linewidth=1.4) + plt.plot(x, y_va, label="val_bce", marker="s", linewidth=1.2) + plt.xlabel("epoch") + plt.ylabel("loss") + plt.title("ResNet18 U-Net Track-O: train loss vs epoch") + plt.grid(alpha=0.3) + plt.legend() + plt.tight_layout() + plt.savefig(output_dir / "loss_curve.png", dpi=150) + plt.close() + + +def build_experiment_setting( + cfg: ResNet18UNetTrackOConfig, + best_epoch: int, + pos_weight: float, + metrics: Dict[str, float], +) -> Dict[str, Any]: + return { + "benchmark": { + "task": "Track-O", + "model_name": "resnet18_unet", + "run_time": datetime.now().isoformat(), + }, + "evaluation_protocol": { + "discrimination": {"primary": "auprc", "secondary": "auroc"}, + "reliability": ["brier", "nll", "ece"], + "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"], + }, + "training": { + "train_unit": "epoch", + "max_epochs": cfg.max_epochs, + "early_stopping_rounds": cfg.early_stopping_rounds, + "best_epoch": best_epoch, + "seed": cfg.seed, + 
"batch_size": cfg.batch_size, + }, + "optimizer": { + "name": "AdamW", + "lr": cfg.lr, + "weight_decay": cfg.weight_decay, + }, + "learning_weight": { + "type": "pixel_pos_weight", + "value": pos_weight, + "clip_max": cfg.pos_weight_clip_max, + }, + "params": asdict(cfg), + "val_metrics": metrics, + "note": "This module supports both real data and synthetic smoke demonstration.", + } + + +def run_synthetic_demo( + output_dir: Path, + seed: int = 42, + n_samples: int = 192, + image_size: int = 24, + max_epochs: int = 60, + early_stopping_rounds: int = 12, +) -> None: + x, y = make_synthetic_fire_maps(n_samples=n_samples, image_size=image_size, seed=seed) + x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed) + + cfg = ResNet18UNetTrackOConfig( + seed=seed, + max_epochs=max_epochs, + early_stopping_rounds=early_stopping_rounds, + device="cpu", + ) + + model, history, val_metrics, best_epoch, pos_weight = train_resnet18_unet_track_o( + x_train, + y_train, + x_val, + y_val, + cfg, + ) + + test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False) + test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 1e-7) + test_true = y_test.reshape(-1).astype(np.float32) + + test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0 + test_metrics = { + "auprc": float(average_precision_score(test_true, test_prob)), + "auroc": float(roc_auc_score(test_true, test_prob)), + "brier": float(brier_score_loss(test_true, test_prob)), + "nll": float(log_loss(test_true, test_prob)), + "ece": float(binary_ece(test_true, test_prob, n_bins=15)), + "mean_day_to_day_change": test_mean_change, + "normalized_consistency_score": normalized_consistency_score(test_mean_change), + } + + output_dir.mkdir(parents=True, exist_ok=True) + save_history_and_plot(history, output_dir) + + torch.save( + { + "state_dict": model.state_dict(), + "config": 
def parse_args() -> argparse.Namespace:
    """Parse CLI options for the ResNet18 U-Net synthetic smoke demo."""
    parser = argparse.ArgumentParser(description="Run ResNet18 U-Net Track-O synthetic smoke demo")
    parser.add_argument("--output_dir", default=None)
    # Integer options share type/default handling; registration order is the
    # same as the original flag order.
    int_options = (
        ("--seed", 42),
        ("--n_samples", 192),
        ("--image_size", 24),
        ("--max_epochs", 60),
        ("--early_stopping_rounds", 12),
    )
    for flag, default in int_options:
        parser.add_argument(flag, type=int, default=default)
    return parser.parse_args()


def main() -> None:
    """CLI entry point: resolve the output directory and run the demo."""
    args = parse_args()
    if args.output_dir:
        out = Path(args.output_dir)
    else:
        # Default to a timestamped directory under the sibling runs_scaffold/.
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        out = Path(__file__).resolve().parents[1] / "runs_scaffold" / f"resnet18_unet_synthetic_{stamp}"

    run_synthetic_demo(
        output_dir=out,
        seed=args.seed,
        n_samples=args.n_samples,
        image_size=args.image_size,
        max_epochs=args.max_epochs,
        early_stopping_rounds=args.early_stopping_rounds,
    )
    print(f"[done] resnet18_unet synthetic demo saved to: {out}")


if __name__ == "__main__":
    main()
class SAFNet(nn.Module):
    """Spatiotemporal intensity-focused storm baseline.

    Encodes a (batch, history, features) sequence with two temporal 1-D
    convolutions, mean-pools over the time axis, and emits per-horizon-step
    track offsets (2 values) plus intensity (1 value), concatenated into a
    (batch, horizon, 3) tensor.
    """

    def __init__(
        self,
        input_dim: int = 8,
        hidden_dim: int = 64,
        horizon: int = 5,
        dropout: float = 0.1,
    ):
        super().__init__()
        self.horizon = int(horizon)
        # Two conv1d + ReLU stages over time; features act as conv channels.
        self.temporal = nn.Sequential(
            nn.Conv1d(input_dim, hidden_dim, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv1d(hidden_dim, hidden_dim, kernel_size=3, padding=1),
            nn.ReLU(),
        )
        # Identity keeps the forward path uniform when dropout is disabled.
        self.dropout = nn.Identity() if dropout <= 0 else nn.Dropout(dropout)
        self.track_head = nn.Linear(hidden_dim, 2 * self.horizon)
        self.intensity_head = nn.Linear(hidden_dim, self.horizon)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return a (batch, horizon, 3) tensor of [dx, dy, intensity] per step."""
        if x.ndim != 3:
            raise ValueError("SAFNet expects inputs shaped (batch, history, features).")
        batch = x.size(0)
        pooled = self.temporal(x.transpose(1, 2)).mean(dim=-1)
        pooled = self.dropout(pooled)
        track = self.track_head(pooled).view(batch, self.horizon, 2)
        intensity = self.intensity_head(pooled).view(batch, self.horizon, 1)
        return torch.cat([track, intensity], dim=-1)
hidden_dim=hidden_dim, + horizon=horizon, + dropout=dropout, + ) + + +__all__ = ["SAFNet", "saf_net_builder"] diff --git a/pyhazards/models/segformer.py b/pyhazards/models/segformer.py new file mode 100644 index 00000000..440e0a66 --- /dev/null +++ b/pyhazards/models/segformer.py @@ -0,0 +1,568 @@ +from __future__ import annotations + +import argparse +import csv +import json +import sys +from copy import deepcopy +from dataclasses import asdict, dataclass +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List, Tuple + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np +import torch +import torch.nn.functional as F +from sklearn.metrics import average_precision_score, brier_score_loss, log_loss, roc_auc_score +from torch import nn +from torch.utils.data import DataLoader, TensorDataset + +if __package__ is None or __package__ == "": + sys.path.insert(0, str(Path(__file__).resolve().parents[3])) + from pyhazards.models.mau import ( + binary_ece, + make_synthetic_fire_sequences, + normalized_consistency_score, + split_train_val_test, + ) +else: + from .mau import ( + binary_ece, + make_synthetic_fire_sequences, + normalized_consistency_score, + split_train_val_test, + ) + + +@dataclass +class SegFormerTrackOConfig: + seq_len: int = 6 + in_channels: int = 1 + embed_dims: Tuple[int, int] = (16, 32) + num_heads: Tuple[int, int] = (1, 2) + sr_ratios: Tuple[int, int] = (4, 2) + mlp_ratio: float = 2.0 + dropout: float = 0.1 + lr: float = 2e-4 + weight_decay: float = 1e-4 + batch_size: int = 8 + max_epochs: int = 120 + early_stopping_rounds: int = 16 + min_delta: float = 1e-4 + seed: int = 42 + pos_weight_clip_max: float = 50.0 + device: str = "cpu" + + +class OverlapPatchEmbed(nn.Module): + def __init__(self, in_channels: int, embed_dim: int, patch_size: int, stride: int): + super().__init__() + self.proj = nn.Conv2d( + in_channels, + embed_dim, + kernel_size=patch_size, + stride=stride, + 
padding=patch_size // 2, + ) + self.norm = nn.LayerNorm(embed_dim) + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, int, int]: + x = self.proj(x) + b, c, h, w = x.shape + x = x.flatten(2).transpose(1, 2).contiguous() # [B,N,C] + x = self.norm(x) + return x, h, w + + +class MixFFN(nn.Module): + def __init__(self, dim: int, mlp_ratio: float, dropout: float): + super().__init__() + hidden = int(dim * mlp_ratio) + self.fc1 = nn.Linear(dim, hidden) + self.dwconv = nn.Conv2d(hidden, hidden, kernel_size=3, padding=1, groups=hidden) + self.act = nn.GELU() + self.dropout = nn.Dropout(dropout) + self.fc2 = nn.Linear(hidden, dim) + + def forward(self, x: torch.Tensor, h: int, w: int) -> torch.Tensor: + x = self.fc1(x) + b, n, c = x.shape + x_img = x.transpose(1, 2).reshape(b, c, h, w) + x_img = self.dwconv(x_img) + x = x_img.flatten(2).transpose(1, 2).contiguous() + x = self.act(x) + x = self.dropout(x) + x = self.fc2(x) + x = self.dropout(x) + return x + + +class EfficientSelfAttention(nn.Module): + def __init__(self, dim: int, num_heads: int, sr_ratio: int, dropout: float): + super().__init__() + self.sr_ratio = sr_ratio + self.attn = nn.MultiheadAttention(embed_dim=dim, num_heads=num_heads, dropout=dropout, batch_first=True) + self.norm = nn.LayerNorm(dim) + if sr_ratio > 1: + self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) + else: + self.sr = None + + def forward(self, x: torch.Tensor, h: int, w: int) -> torch.Tensor: + kv = x + if self.sr is not None: + b, n, c = x.shape + x_img = x.transpose(1, 2).reshape(b, c, h, w) + x_img = self.sr(x_img) + kv = x_img.flatten(2).transpose(1, 2).contiguous() + kv = self.norm(kv) + + out, _ = self.attn(x, kv, kv, need_weights=False) + return out + + +class TransformerEncoderBlock(nn.Module): + def __init__(self, dim: int, num_heads: int, sr_ratio: int, mlp_ratio: float, dropout: float): + super().__init__() + self.norm1 = nn.LayerNorm(dim) + self.attn = EfficientSelfAttention(dim=dim, 
num_heads=num_heads, sr_ratio=sr_ratio, dropout=dropout) + self.norm2 = nn.LayerNorm(dim) + self.ffn = MixFFN(dim=dim, mlp_ratio=mlp_ratio, dropout=dropout) + + def forward(self, x: torch.Tensor, h: int, w: int) -> torch.Tensor: + x = x + self.attn(self.norm1(x), h, w) + x = x + self.ffn(self.norm2(x), h, w) + return x + + +class TinySegFormerEncoder(nn.Module): + def __init__( + self, + in_channels: int = 1, + embed_dims: Tuple[int, int] = (16, 32), + num_heads: Tuple[int, int] = (1, 2), + sr_ratios: Tuple[int, int] = (4, 2), + mlp_ratio: float = 2.0, + dropout: float = 0.1, + ): + super().__init__() + d1, d2 = embed_dims + + self.patch1 = OverlapPatchEmbed(in_channels=in_channels, embed_dim=d1, patch_size=7, stride=2) + self.block1 = TransformerEncoderBlock( + dim=d1, + num_heads=num_heads[0], + sr_ratio=sr_ratios[0], + mlp_ratio=mlp_ratio, + dropout=dropout, + ) + + self.patch2 = OverlapPatchEmbed(in_channels=d1, embed_dim=d2, patch_size=3, stride=2) + self.block2 = TransformerEncoderBlock( + dim=d2, + num_heads=num_heads[1], + sr_ratio=sr_ratios[1], + mlp_ratio=mlp_ratio, + dropout=dropout, + ) + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + x1, h1, w1 = self.patch1(x) + x1 = self.block1(x1, h1, w1) + f1 = x1.transpose(1, 2).reshape(x1.shape[0], -1, h1, w1) + + x2, h2, w2 = self.patch2(f1) + x2 = self.block2(x2, h2, w2) + f2 = x2.transpose(1, 2).reshape(x2.shape[0], -1, h2, w2) + return f1, f2 + + +class SegFormerHead(nn.Module): + def __init__(self, in_dims: Tuple[int, int], decoder_dim: int = 32, dropout: float = 0.1): + super().__init__() + self.proj1 = nn.Conv2d(in_dims[0], decoder_dim, kernel_size=1) + self.proj2 = nn.Conv2d(in_dims[1], decoder_dim, kernel_size=1) + self.fuse = nn.Sequential( + nn.Conv2d(decoder_dim * 2, decoder_dim, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Dropout2d(dropout), + nn.Conv2d(decoder_dim, 1, kernel_size=1), + ) + + def forward(self, f1: torch.Tensor, f2: torch.Tensor, out_hw: 
class TinySegFormer(nn.Module):
    """Compact SegFormer-style segmenter over short image sequences.

    A softmax-weighted mix over frames collapses the [B,T,C,H,W] input to a
    single image, which is encoded by the two-stage transformer encoder and
    decoded back to per-pixel logits at the input resolution.
    """

    def __init__(
        self,
        in_channels: int = 1,
        embed_dims: Tuple[int, int] = (16, 32),
        num_heads: Tuple[int, int] = (1, 2),
        sr_ratios: Tuple[int, int] = (4, 2),
        mlp_ratio: float = 2.0,
        dropout: float = 0.1,
    ):
        super().__init__()
        self.encoder = TinySegFormerEncoder(
            in_channels=in_channels,
            embed_dims=embed_dims,
            num_heads=num_heads,
            sr_ratios=sr_ratios,
            mlp_ratio=mlp_ratio,
            dropout=dropout,
        )
        self.decode_head = SegFormerHead(in_dims=embed_dims, decoder_dim=embed_dims[1], dropout=dropout)

    def _temporal_fusion(self, x_seq: torch.Tensor) -> torch.Tensor:
        """Collapse [B,T,C,H,W] to [B,C,H,W] via softmax-weighted frame mixing."""
        # One scalar score per frame, softmaxed over the time axis.
        frame_scores = x_seq.mean(dim=(2, 3, 4))
        weights = torch.softmax(frame_scores, dim=1)[:, :, None, None, None]
        return (x_seq * weights).sum(dim=1)

    def forward(self, x_seq: torch.Tensor) -> torch.Tensor:
        """Predict [B,1,H,W] logits from a [B,T,C,H,W] frame sequence."""
        _, _, _, h, w = x_seq.shape
        fused = self._temporal_fusion(x_seq)
        f1, f2 = self.encoder(fused)
        return self.decode_head(f1, f2, out_hw=(h, w))


def _choose_device(device_text: str) -> torch.device:
    """Resolve a device string, falling back to CPU when CUDA is unavailable."""
    use_cuda = device_text == "cuda" and torch.cuda.is_available()
    return torch.device("cuda" if use_cuda else "cpu")


def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader:
    """Wrap float32 copies of *x*/*y* in a TensorDataset-backed DataLoader."""
    features = torch.from_numpy(x.astype(np.float32))
    targets = torch.from_numpy(y.astype(np.float32))
    return DataLoader(TensorDataset(features, targets), batch_size=batch_size, shuffle=shuffle)
xb, _ in loader: + xb = xb.to(device) + logits = model(xb) + p = torch.sigmoid(logits).detach().cpu().numpy() + probs.append(p) + if not probs: + return np.zeros((0,), dtype=np.float32) + return np.concatenate(probs, axis=0).reshape(-1) + + +def train_segformer_track_o( + x_train: np.ndarray, + y_train: np.ndarray, + x_val: np.ndarray, + y_val: np.ndarray, + cfg: SegFormerTrackOConfig, +): + if x_train.ndim != 5 or x_val.ndim != 5: + raise ValueError("x_train and x_val must be 5D arrays [N,T,C,H,W]") + if y_train.ndim != 4 or y_val.ndim != 4: + raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]") + + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + + device = _choose_device(cfg.device) + + model = TinySegFormer( + in_channels=cfg.in_channels, + embed_dims=cfg.embed_dims, + num_heads=cfg.num_heads, + sr_ratios=cfg.sr_ratios, + mlp_ratio=cfg.mlp_ratio, + dropout=cfg.dropout, + ).to(device) + optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay) + + total_px = float(y_train.size) + pos_px = float(np.sum(y_train)) + neg_px = max(1.0, total_px - pos_px) + raw_pos_weight = neg_px / max(pos_px, 1.0) + pos_weight = float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max)) + + criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device)) + + train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True) + val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False) + + history: List[Dict[str, float]] = [] + best_epoch = 1 + best_val_loss = float("inf") + best_state: Dict[str, torch.Tensor] | None = None + wait = 0 + + for epoch in range(1, cfg.max_epochs + 1): + model.train() + train_losses: List[float] = [] + + for xb, yb in train_loader: + xb = xb.to(device) + yb = yb.to(device) + + optimizer.zero_grad(set_to_none=True) + logits = model(xb) + loss = criterion(logits, yb) + loss.backward() + optimizer.step() + 
train_losses.append(float(loss.item())) + + model.eval() + val_losses: List[float] = [] + with torch.no_grad(): + for xb, yb in val_loader: + xb = xb.to(device) + yb = yb.to(device) + logits = model(xb) + loss = criterion(logits, yb) + val_losses.append(float(loss.item())) + + tr_loss = float(np.mean(train_losses)) if train_losses else float("nan") + va_loss = float(np.mean(val_losses)) if val_losses else float("nan") + + history.append( + { + "epoch": float(epoch), + "train_loss": tr_loss, + "val_loss": va_loss, + "learning_rate": float(optimizer.param_groups[0]["lr"]), + } + ) + + if va_loss < best_val_loss - cfg.min_delta: + best_val_loss = va_loss + best_epoch = epoch + best_state = deepcopy(model.state_dict()) + wait = 0 + else: + wait += 1 + + if wait >= cfg.early_stopping_rounds: + break + + if best_state is not None: + model.load_state_dict(best_state) + + val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7) + val_true = y_val.reshape(-1).astype(np.float32) + + mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0 + metrics = { + "auprc": float(average_precision_score(val_true, val_prob)), + "auroc": float(roc_auc_score(val_true, val_prob)), + "brier": float(brier_score_loss(val_true, val_prob)), + "nll": float(log_loss(val_true, val_prob)), + "ece": float(binary_ece(val_true, val_prob, n_bins=15)), + "mean_day_to_day_change": mean_change, + "normalized_consistency_score": normalized_consistency_score(mean_change), + } + + return model, history, metrics, best_epoch, pos_weight + + +def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None: + output_dir.mkdir(parents=True, exist_ok=True) + + history_csv = output_dir / "history.csv" + with history_csv.open("w", encoding="utf-8", newline="") as f: + writer = csv.DictWriter(f, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"]) + writer.writeheader() + writer.writerows(history) + + x = 
[int(r["epoch"]) for r in history] + y_tr = [float(r["train_loss"]) for r in history] + y_va = [float(r["val_loss"]) for r in history] + + plt.figure(figsize=(8, 5)) + plt.plot(x, y_tr, label="train_bce", marker="o", linewidth=1.4) + plt.plot(x, y_va, label="val_bce", marker="s", linewidth=1.2) + plt.xlabel("epoch") + plt.ylabel("loss") + plt.title("SegFormer Track-O: train loss vs epoch") + plt.grid(alpha=0.3) + plt.legend() + plt.tight_layout() + plt.savefig(output_dir / "loss_curve.png", dpi=150) + plt.close() + + +def build_experiment_setting( + cfg: SegFormerTrackOConfig, + best_epoch: int, + pos_weight: float, + metrics: Dict[str, float], +) -> Dict[str, Any]: + return { + "benchmark": { + "task": "Track-O", + "model_name": "segformer", + "run_time": datetime.now().isoformat(), + }, + "evaluation_protocol": { + "discrimination": {"primary": "auprc", "secondary": "auroc"}, + "reliability": ["brier", "nll", "ece"], + "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"], + }, + "training": { + "train_unit": "epoch", + "max_epochs": cfg.max_epochs, + "early_stopping_rounds": cfg.early_stopping_rounds, + "best_epoch": best_epoch, + "seed": cfg.seed, + "batch_size": cfg.batch_size, + "seq_len": cfg.seq_len, + }, + "optimizer": { + "name": "AdamW", + "lr": cfg.lr, + "weight_decay": cfg.weight_decay, + }, + "learning_weight": { + "type": "pixel_pos_weight", + "value": pos_weight, + "clip_max": cfg.pos_weight_clip_max, + }, + "params": asdict(cfg), + "val_metrics": metrics, + "note": "This module supports both real data and synthetic smoke demonstration.", + } + + +def run_synthetic_demo( + output_dir: Path, + seed: int = 42, + n_samples: int = 160, + seq_len: int = 6, + image_size: int = 24, + max_epochs: int = 60, + early_stopping_rounds: int = 12, +) -> None: + x, y = make_synthetic_fire_sequences( + n_samples=n_samples, + seq_len=seq_len, + image_size=image_size, + seed=seed, + ) + x_train, y_train, x_val, y_val, x_test, y_test = 
split_train_val_test(x, y, seed=seed) + + cfg = SegFormerTrackOConfig( + seed=seed, + seq_len=seq_len, + max_epochs=max_epochs, + early_stopping_rounds=early_stopping_rounds, + device="cpu", + ) + + model, history, val_metrics, best_epoch, pos_weight = train_segformer_track_o( + x_train, + y_train, + x_val, + y_val, + cfg, + ) + + test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False) + test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 1e-7) + test_true = y_test.reshape(-1).astype(np.float32) + + test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0 + test_metrics = { + "auprc": float(average_precision_score(test_true, test_prob)), + "auroc": float(roc_auc_score(test_true, test_prob)), + "brier": float(brier_score_loss(test_true, test_prob)), + "nll": float(log_loss(test_true, test_prob)), + "ece": float(binary_ece(test_true, test_prob, n_bins=15)), + "mean_day_to_day_change": test_mean_change, + "normalized_consistency_score": normalized_consistency_score(test_mean_change), + } + + output_dir.mkdir(parents=True, exist_ok=True) + save_history_and_plot(history, output_dir) + + torch.save( + { + "state_dict": model.state_dict(), + "config": asdict(cfg), + "best_epoch": best_epoch, + }, + output_dir / "segformer_model.pt", + ) + + setting = build_experiment_setting(cfg, best_epoch=best_epoch, pos_weight=pos_weight, metrics=val_metrics) + setting["data"] = { + "n_samples": n_samples, + "image_size": image_size, + "seq_len": seq_len, + "split": {"train": int(x_train.shape[0]), "val": int(x_val.shape[0]), "test": int(x_test.shape[0])}, + } + setting["test_metrics"] = test_metrics + + (output_dir / "experiment_setting.json").write_text(json.dumps(setting, indent=2), encoding="utf-8") + (output_dir / "metrics.json").write_text( + json.dumps({"val": val_metrics, "test": test_metrics, "best_epoch": best_epoch}, indent=2), + encoding="utf-8", + 
) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Run SegFormer Track-O synthetic smoke demo") + parser.add_argument("--output_dir", default=None) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--n_samples", type=int, default=160) + parser.add_argument("--seq_len", type=int, default=6) + parser.add_argument("--image_size", type=int, default=24) + parser.add_argument("--max_epochs", type=int, default=60) + parser.add_argument("--early_stopping_rounds", type=int, default=12) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + base = Path(__file__).resolve().parents[1] / "runs_scaffold" + out = ( + Path(args.output_dir) + if args.output_dir + else base / f"segformer_synthetic_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + ) + + run_synthetic_demo( + output_dir=out, + seed=args.seed, + n_samples=args.n_samples, + seq_len=args.seq_len, + image_size=args.image_size, + max_epochs=args.max_epochs, + early_stopping_rounds=args.early_stopping_rounds, + ) + print(f"[done] segformer synthetic demo saved to: {out}") + + +if __name__ == "__main__": + main() + +from ._wildfire_benchmark_utils import SegmentationPort, filter_init_kwargs, require_task + + +def segformer_builder(task: str, in_channels: int = 1, out_dim: int = 1, **kwargs: Any) -> nn.Module: + require_task(task, {"segmentation"}, "segformer") + init_kwargs = filter_init_kwargs(TinySegFormer, {"in_channels": int(in_channels), **kwargs}) + model = TinySegFormer(**init_kwargs) + return SegmentationPort(model=model, out_channels=int(out_dim)) + + +__all__ = ["TinySegFormer", "segformer_builder"] diff --git a/pyhazards/models/swin_unet.py b/pyhazards/models/swin_unet.py new file mode 100644 index 00000000..dd617281 --- /dev/null +++ b/pyhazards/models/swin_unet.py @@ -0,0 +1,527 @@ +from __future__ import annotations + +import argparse +import csv +import json +import sys +from copy import deepcopy +from dataclasses import 
asdict, dataclass +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List, Tuple + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np +import torch +import torch.nn.functional as F +from sklearn.metrics import average_precision_score, brier_score_loss, log_loss, roc_auc_score +from torch import nn +from torch.utils.data import DataLoader, TensorDataset + +if __package__ is None or __package__ == "": + sys.path.insert(0, str(Path(__file__).resolve().parents[3])) + from pyhazards.models.mau import ( + binary_ece, + make_synthetic_fire_sequences, + normalized_consistency_score, + split_train_val_test, + ) +else: + from .mau import ( + binary_ece, + make_synthetic_fire_sequences, + normalized_consistency_score, + split_train_val_test, + ) + + +@dataclass +class SwinUNetTrackOConfig: + seq_len: int = 6 + in_channels: int = 1 + embed_dims: Tuple[int, int] = (16, 32) + num_heads: Tuple[int, int] = (1, 2) + window_size: int = 3 + mlp_ratio: float = 2.0 + dropout: float = 0.1 + lr: float = 2e-4 + weight_decay: float = 1e-4 + batch_size: int = 8 + max_epochs: int = 120 + early_stopping_rounds: int = 16 + min_delta: float = 1e-4 + seed: int = 42 + pos_weight_clip_max: float = 50.0 + device: str = "cpu" + + +def _window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, int, int]: + # x: [B,H,W,C] -> windows: [B*nw, ws*ws, C] + b, h, w, c = x.shape + pad_h = (window_size - h % window_size) % window_size + pad_w = (window_size - w % window_size) % window_size + + if pad_h > 0 or pad_w > 0: + x = x.permute(0, 3, 1, 2).contiguous() + x = F.pad(x, (0, pad_w, 0, pad_h)) + x = x.permute(0, 2, 3, 1).contiguous() + + hp, wp = h + pad_h, w + pad_w + x = x.view(b, hp // window_size, window_size, wp // window_size, window_size, c) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous() + windows = x.view(-1, window_size * window_size, c) + return windows, hp, wp + + +def _window_reverse(windows: 
torch.Tensor, window_size: int, hp: int, wp: int, b: int) -> torch.Tensor: + # windows: [B*nw, ws*ws, C] -> [B,Hp,Wp,C] + c = windows.shape[-1] + x = windows.view(b, hp // window_size, wp // window_size, window_size, window_size, c) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous() + return x.view(b, hp, wp, c) + + +class SwinBlock(nn.Module): + def __init__(self, dim: int, num_heads: int, window_size: int, shift_size: int, mlp_ratio: float, dropout: float): + super().__init__() + self.window_size = window_size + self.shift_size = shift_size + + self.norm1 = nn.LayerNorm(dim) + self.attn = nn.MultiheadAttention(embed_dim=dim, num_heads=num_heads, dropout=dropout, batch_first=True) + self.norm2 = nn.LayerNorm(dim) + self.mlp = nn.Sequential( + nn.Linear(dim, int(dim * mlp_ratio)), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(int(dim * mlp_ratio), dim), + nn.Dropout(dropout), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # x: [B,C,H,W] + b, c, h, w = x.shape + residual = x + x = x.permute(0, 2, 3, 1).contiguous() # [B,H,W,C] + + if self.shift_size > 0: + x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + + windows, hp, wp = _window_partition(x, self.window_size) + w_norm = self.norm1(windows) + attn_out, _ = self.attn(w_norm, w_norm, w_norm, need_weights=False) + windows = windows + attn_out + x = _window_reverse(windows, self.window_size, hp, wp, b) + + if self.shift_size > 0: + x = torch.roll(x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + x = x[:, :h, :w, :].contiguous() + x = x.permute(0, 3, 1, 2).contiguous() + x = residual + x + + tokens = x.flatten(2).transpose(1, 2).contiguous() # [B,HW,C] + tokens = tokens + self.mlp(self.norm2(tokens)) + x = tokens.transpose(1, 2).reshape(b, c, h, w).contiguous() + return x + + +class TinySwinUNet(nn.Module): + def __init__( + self, + in_channels: int = 1, + embed_dims: Tuple[int, int] = (16, 32), + num_heads: Tuple[int, int] = (1, 2), + window_size: int = 3, + 
mlp_ratio: float = 2.0, + dropout: float = 0.1, + ): + super().__init__() + c1, c2 = embed_dims + shift = max(1, window_size // 2) + + self.patch_embed = nn.Conv2d(in_channels, c1, kernel_size=3, stride=2, padding=1) + self.stage1 = nn.Sequential( + SwinBlock(c1, num_heads[0], window_size, shift_size=0, mlp_ratio=mlp_ratio, dropout=dropout), + SwinBlock(c1, num_heads[0], window_size, shift_size=shift, mlp_ratio=mlp_ratio, dropout=dropout), + ) + + self.downsample = nn.Conv2d(c1, c2, kernel_size=3, stride=2, padding=1) + self.stage2 = nn.Sequential( + SwinBlock(c2, num_heads[1], window_size, shift_size=0, mlp_ratio=mlp_ratio, dropout=dropout), + SwinBlock(c2, num_heads[1], window_size, shift_size=shift, mlp_ratio=mlp_ratio, dropout=dropout), + ) + + self.up1 = nn.ConvTranspose2d(c2, c1, kernel_size=2, stride=2) + self.fuse1 = nn.Sequential( + nn.Conv2d(c1 * 2, c1, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + ) + self.up2 = nn.ConvTranspose2d(c1, c1, kernel_size=2, stride=2) + self.head = nn.Sequential( + nn.Conv2d(c1, c1, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(c1, 1, kernel_size=1), + ) + + def _temporal_fusion(self, x_seq: torch.Tensor) -> torch.Tensor: + # x_seq: [B,T,C,H,W] + frame_scores = x_seq.mean(dim=(2, 3, 4)) + weights = torch.softmax(frame_scores, dim=1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1) + return torch.sum(x_seq * weights, dim=1) + + def forward(self, x_seq: torch.Tensor) -> torch.Tensor: + # x_seq: [B,T,C,H,W] + _, _, _, h, w = x_seq.shape + x = self._temporal_fusion(x_seq) # [B,C,H,W] + + x1 = self.patch_embed(x) # [B,C1,H/2,W/2] + x1 = self.stage1(x1) + + x2 = self.downsample(x1) # [B,C2,H/4,W/4] + x2 = self.stage2(x2) + + u1 = self.up1(x2) # [B,C1,H/2,W/2] + if u1.shape[-2:] != x1.shape[-2:]: + u1 = F.interpolate(u1, size=x1.shape[-2:], mode="bilinear", align_corners=False) + f1 = self.fuse1(torch.cat([u1, x1], dim=1)) + + u2 = self.up2(f1) # [B,C1,H,W] + logits = self.head(u2) + if logits.shape[-2:] != (h, 
w): + logits = F.interpolate(logits, size=(h, w), mode="bilinear", align_corners=False) + return logits + + +def _choose_device(device_text: str) -> torch.device: + if device_text == "cuda" and torch.cuda.is_available(): + return torch.device("cuda") + return torch.device("cpu") + + +def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader: + ds = TensorDataset( + torch.from_numpy(x.astype(np.float32)), + torch.from_numpy(y.astype(np.float32)), + ) + return DataLoader(ds, batch_size=batch_size, shuffle=shuffle) + + +def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray: + probs: List[np.ndarray] = [] + model.eval() + with torch.no_grad(): + for xb, _ in loader: + xb = xb.to(device) + logits = model(xb) + p = torch.sigmoid(logits).detach().cpu().numpy() + probs.append(p) + if not probs: + return np.zeros((0,), dtype=np.float32) + return np.concatenate(probs, axis=0).reshape(-1) + + +def train_swin_unet_track_o( + x_train: np.ndarray, + y_train: np.ndarray, + x_val: np.ndarray, + y_val: np.ndarray, + cfg: SwinUNetTrackOConfig, +): + if x_train.ndim != 5 or x_val.ndim != 5: + raise ValueError("x_train and x_val must be 5D arrays [N,T,C,H,W]") + if y_train.ndim != 4 or y_val.ndim != 4: + raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]") + + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + + device = _choose_device(cfg.device) + + model = TinySwinUNet( + in_channels=cfg.in_channels, + embed_dims=cfg.embed_dims, + num_heads=cfg.num_heads, + window_size=cfg.window_size, + mlp_ratio=cfg.mlp_ratio, + dropout=cfg.dropout, + ).to(device) + optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay) + + total_px = float(y_train.size) + pos_px = float(np.sum(y_train)) + neg_px = max(1.0, total_px - pos_px) + raw_pos_weight = neg_px / max(pos_px, 1.0) + pos_weight = float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max)) + + criterion = 
nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device)) + + train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True) + val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False) + + history: List[Dict[str, float]] = [] + best_epoch = 1 + best_val_loss = float("inf") + best_state: Dict[str, torch.Tensor] | None = None + wait = 0 + + for epoch in range(1, cfg.max_epochs + 1): + model.train() + train_losses: List[float] = [] + + for xb, yb in train_loader: + xb = xb.to(device) + yb = yb.to(device) + + optimizer.zero_grad(set_to_none=True) + logits = model(xb) + loss = criterion(logits, yb) + loss.backward() + optimizer.step() + train_losses.append(float(loss.item())) + + model.eval() + val_losses: List[float] = [] + with torch.no_grad(): + for xb, yb in val_loader: + xb = xb.to(device) + yb = yb.to(device) + logits = model(xb) + loss = criterion(logits, yb) + val_losses.append(float(loss.item())) + + tr_loss = float(np.mean(train_losses)) if train_losses else float("nan") + va_loss = float(np.mean(val_losses)) if val_losses else float("nan") + + history.append( + { + "epoch": float(epoch), + "train_loss": tr_loss, + "val_loss": va_loss, + "learning_rate": float(optimizer.param_groups[0]["lr"]), + } + ) + + if va_loss < best_val_loss - cfg.min_delta: + best_val_loss = va_loss + best_epoch = epoch + best_state = deepcopy(model.state_dict()) + wait = 0 + else: + wait += 1 + + if wait >= cfg.early_stopping_rounds: + break + + if best_state is not None: + model.load_state_dict(best_state) + + val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7) + val_true = y_val.reshape(-1).astype(np.float32) + + mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0 + metrics = { + "auprc": float(average_precision_score(val_true, val_prob)), + "auroc": float(roc_auc_score(val_true, val_prob)), + "brier": 
float(brier_score_loss(val_true, val_prob)), + "nll": float(log_loss(val_true, val_prob)), + "ece": float(binary_ece(val_true, val_prob, n_bins=15)), + "mean_day_to_day_change": mean_change, + "normalized_consistency_score": normalized_consistency_score(mean_change), + } + + return model, history, metrics, best_epoch, pos_weight + + +def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None: + output_dir.mkdir(parents=True, exist_ok=True) + + history_csv = output_dir / "history.csv" + with history_csv.open("w", encoding="utf-8", newline="") as f: + writer = csv.DictWriter(f, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"]) + writer.writeheader() + writer.writerows(history) + + x = [int(r["epoch"]) for r in history] + y_tr = [float(r["train_loss"]) for r in history] + y_va = [float(r["val_loss"]) for r in history] + + plt.figure(figsize=(8, 5)) + plt.plot(x, y_tr, label="train_bce", marker="o", linewidth=1.4) + plt.plot(x, y_va, label="val_bce", marker="s", linewidth=1.2) + plt.xlabel("epoch") + plt.ylabel("loss") + plt.title("Swin-UNet Track-O: train loss vs epoch") + plt.grid(alpha=0.3) + plt.legend() + plt.tight_layout() + plt.savefig(output_dir / "loss_curve.png", dpi=150) + plt.close() + + +def build_experiment_setting( + cfg: SwinUNetTrackOConfig, + best_epoch: int, + pos_weight: float, + metrics: Dict[str, float], +) -> Dict[str, Any]: + return { + "benchmark": { + "task": "Track-O", + "model_name": "swin_unet", + "run_time": datetime.now().isoformat(), + }, + "evaluation_protocol": { + "discrimination": {"primary": "auprc", "secondary": "auroc"}, + "reliability": ["brier", "nll", "ece"], + "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"], + }, + "training": { + "train_unit": "epoch", + "max_epochs": cfg.max_epochs, + "early_stopping_rounds": cfg.early_stopping_rounds, + "best_epoch": best_epoch, + "seed": cfg.seed, + "batch_size": cfg.batch_size, + "seq_len": cfg.seq_len, + }, + 
"optimizer": { + "name": "AdamW", + "lr": cfg.lr, + "weight_decay": cfg.weight_decay, + }, + "learning_weight": { + "type": "pixel_pos_weight", + "value": pos_weight, + "clip_max": cfg.pos_weight_clip_max, + }, + "params": asdict(cfg), + "val_metrics": metrics, + "note": "This module supports both real data and synthetic smoke demonstration.", + } + + +def run_synthetic_demo( + output_dir: Path, + seed: int = 42, + n_samples: int = 160, + seq_len: int = 6, + image_size: int = 24, + max_epochs: int = 60, + early_stopping_rounds: int = 12, +) -> None: + x, y = make_synthetic_fire_sequences( + n_samples=n_samples, + seq_len=seq_len, + image_size=image_size, + seed=seed, + ) + x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed) + + cfg = SwinUNetTrackOConfig( + seed=seed, + seq_len=seq_len, + max_epochs=max_epochs, + early_stopping_rounds=early_stopping_rounds, + device="cpu", + ) + + model, history, val_metrics, best_epoch, pos_weight = train_swin_unet_track_o( + x_train, + y_train, + x_val, + y_val, + cfg, + ) + + test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False) + test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 1e-7) + test_true = y_test.reshape(-1).astype(np.float32) + + test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0 + test_metrics = { + "auprc": float(average_precision_score(test_true, test_prob)), + "auroc": float(roc_auc_score(test_true, test_prob)), + "brier": float(brier_score_loss(test_true, test_prob)), + "nll": float(log_loss(test_true, test_prob)), + "ece": float(binary_ece(test_true, test_prob, n_bins=15)), + "mean_day_to_day_change": test_mean_change, + "normalized_consistency_score": normalized_consistency_score(test_mean_change), + } + + output_dir.mkdir(parents=True, exist_ok=True) + save_history_and_plot(history, output_dir) + + torch.save( + { + "state_dict": 
model.state_dict(), + "config": asdict(cfg), + "best_epoch": best_epoch, + }, + output_dir / "swin_unet_model.pt", + ) + + setting = build_experiment_setting(cfg, best_epoch=best_epoch, pos_weight=pos_weight, metrics=val_metrics) + setting["data"] = { + "n_samples": n_samples, + "image_size": image_size, + "seq_len": seq_len, + "split": {"train": int(x_train.shape[0]), "val": int(x_val.shape[0]), "test": int(x_test.shape[0])}, + } + setting["test_metrics"] = test_metrics + + (output_dir / "experiment_setting.json").write_text(json.dumps(setting, indent=2), encoding="utf-8") + (output_dir / "metrics.json").write_text( + json.dumps({"val": val_metrics, "test": test_metrics, "best_epoch": best_epoch}, indent=2), + encoding="utf-8", + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Run Swin-UNet Track-O synthetic smoke demo") + parser.add_argument("--output_dir", default=None) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--n_samples", type=int, default=160) + parser.add_argument("--seq_len", type=int, default=6) + parser.add_argument("--image_size", type=int, default=24) + parser.add_argument("--max_epochs", type=int, default=60) + parser.add_argument("--early_stopping_rounds", type=int, default=12) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + base = Path(__file__).resolve().parents[1] / "runs_scaffold" + out = ( + Path(args.output_dir) + if args.output_dir + else base / f"swin_unet_synthetic_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + ) + + run_synthetic_demo( + output_dir=out, + seed=args.seed, + n_samples=args.n_samples, + seq_len=args.seq_len, + image_size=args.image_size, + max_epochs=args.max_epochs, + early_stopping_rounds=args.early_stopping_rounds, + ) + print(f"[done] swin_unet synthetic demo saved to: {out}") + + +if __name__ == "__main__": + main() + +from ._wildfire_benchmark_utils import SegmentationPort, filter_init_kwargs, require_task + + 
+def swin_unet_builder(task: str, in_channels: int = 1, out_dim: int = 1, **kwargs: Any) -> nn.Module: + require_task(task, {"segmentation"}, "swin_unet") + init_kwargs = filter_init_kwargs(TinySwinUNet, {"in_channels": int(in_channels), **kwargs}) + model = TinySwinUNet(**init_kwargs) + return SegmentationPort(model=model, out_channels=int(out_dim)) + + +__all__ = ["TinySwinUNet", "swin_unet_builder"] diff --git a/pyhazards/models/swinlstm.py b/pyhazards/models/swinlstm.py new file mode 100644 index 00000000..491e3cb5 --- /dev/null +++ b/pyhazards/models/swinlstm.py @@ -0,0 +1,506 @@ +from __future__ import annotations + +import argparse +import csv +import json +import sys +from copy import deepcopy +from dataclasses import asdict, dataclass +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List, Tuple + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np +import torch +from sklearn.metrics import average_precision_score, brier_score_loss, log_loss, roc_auc_score +from torch import nn +from torch.utils.data import DataLoader, TensorDataset + +if __package__ is None or __package__ == "": + sys.path.insert(0, str(Path(__file__).resolve().parents[3])) + from pyhazards.models.mau import ( + binary_ece, + make_synthetic_fire_sequences, + normalized_consistency_score, + split_train_val_test, + ) +else: + from .mau import ( + binary_ece, + make_synthetic_fire_sequences, + normalized_consistency_score, + split_train_val_test, + ) + + +@dataclass +class SwinLSTMTrackOConfig: + seq_len: int = 6 + in_channels: int = 1 + embed_dim: int = 16 + hidden_channels: int = 16 + num_heads: int = 4 + window_size: int = 3 + lr: float = 1e-3 + weight_decay: float = 1e-4 + batch_size: int = 8 + max_epochs: int = 120 + early_stopping_rounds: int = 16 + min_delta: float = 1e-4 + seed: int = 42 + pos_weight_clip_max: float = 50.0 + device: str = "cpu" + + +def _window_partition(x: torch.Tensor, window_size: 
int) -> torch.Tensor: + # x: [B, H, W, C] + b, h, w, c = x.shape + x = x.view(b, h // window_size, window_size, w // window_size, window_size, c) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous() + windows = x.view(-1, window_size * window_size, c) + return windows + + +def _window_reverse(windows: torch.Tensor, window_size: int, h: int, w: int, b: int) -> torch.Tensor: + # windows: [B*num_windows, window_size*window_size, C] + c = windows.shape[-1] + x = windows.view(b, h // window_size, w // window_size, window_size, window_size, c) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous() + x = x.view(b, h, w, c) + return x + + +class WindowAttentionBlock(nn.Module): + def __init__(self, dim: int, num_heads: int, window_size: int, shift_size: int = 0): + super().__init__() + self.window_size = window_size + self.shift_size = shift_size + + self.norm1 = nn.LayerNorm(dim) + self.attn = nn.MultiheadAttention(embed_dim=dim, num_heads=num_heads, batch_first=True) + self.norm2 = nn.LayerNorm(dim) + self.mlp = nn.Sequential( + nn.Linear(dim, dim * 2), + nn.GELU(), + nn.Linear(dim * 2, dim), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # x: [B, C, H, W] + b, c, h, w = x.shape + ws = self.window_size + + x = x.permute(0, 2, 3, 1).contiguous() # [B,H,W,C] + if self.shift_size > 0: + x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + + windows = _window_partition(x, ws) # [B*nw, ws*ws, C] + w_norm = self.norm1(windows) + attn_out, _ = self.attn(w_norm, w_norm, w_norm, need_weights=False) + windows = windows + attn_out + windows = windows + self.mlp(self.norm2(windows)) + + x = _window_reverse(windows, ws, h, w, b) + if self.shift_size > 0: + x = torch.roll(x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + + return x.permute(0, 3, 1, 2).contiguous() # [B,C,H,W] + + +class ConvLSTMCell(nn.Module): + def __init__(self, input_channels: int, hidden_channels: int): + super().__init__() + self.hidden_channels = hidden_channels + self.conv 
= nn.Conv2d(input_channels + hidden_channels, hidden_channels * 4, kernel_size=3, padding=1) + + def forward(self, x_t: torch.Tensor, h_prev: torch.Tensor, c_prev: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + fused = torch.cat([x_t, h_prev], dim=1) + gates = self.conv(fused) + i, f, o, g = torch.chunk(gates, 4, dim=1) + i = torch.sigmoid(i) + f = torch.sigmoid(f) + o = torch.sigmoid(o) + g = torch.tanh(g) + + c = f * c_prev + i * g + h = o * torch.tanh(c) + return h, c + + +class TinySwinLSTM(nn.Module): + def __init__( + self, + in_channels: int = 1, + embed_dim: int = 16, + hidden_channels: int = 16, + num_heads: int = 4, + window_size: int = 3, + ): + super().__init__() + self.embed_dim = embed_dim + self.hidden_channels = hidden_channels + + self.patch_embed = nn.Conv2d(in_channels, embed_dim, kernel_size=3, stride=2, padding=1) + self.block1 = WindowAttentionBlock(dim=embed_dim, num_heads=num_heads, window_size=window_size, shift_size=0) + self.block2 = WindowAttentionBlock( + dim=embed_dim, + num_heads=num_heads, + window_size=window_size, + shift_size=max(1, window_size // 2), + ) + + self.rnn = ConvLSTMCell(input_channels=embed_dim, hidden_channels=hidden_channels) + + self.decoder = nn.Sequential( + nn.ConvTranspose2d(hidden_channels, hidden_channels, kernel_size=2, stride=2), + nn.ReLU(inplace=True), + nn.Conv2d(hidden_channels, 1, kernel_size=1), + ) + + def forward(self, x_seq: torch.Tensor) -> torch.Tensor: + # x_seq: [B,T,C,H,W] + b, _, _, h, w = x_seq.shape + h2, w2 = (h + 1) // 2, (w + 1) // 2 + device = x_seq.device + + h_state = torch.zeros((b, self.hidden_channels, h2, w2), device=device) + c_state = torch.zeros((b, self.hidden_channels, h2, w2), device=device) + + for t in range(x_seq.shape[1]): + x_t = self.patch_embed(x_seq[:, t]) + x_t = self.block1(x_t) + x_t = self.block2(x_t) + h_state, c_state = self.rnn(x_t, h_state, c_state) + + return self.decoder(h_state) + + +def _choose_device(device_text: str) -> torch.device: + if 
device_text == "cuda" and torch.cuda.is_available(): + return torch.device("cuda") + return torch.device("cpu") + + +def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader: + ds = TensorDataset( + torch.from_numpy(x.astype(np.float32)), + torch.from_numpy(y.astype(np.float32)), + ) + return DataLoader(ds, batch_size=batch_size, shuffle=shuffle) + + +def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray: + probs: List[np.ndarray] = [] + model.eval() + with torch.no_grad(): + for xb, _ in loader: + xb = xb.to(device) + logits = model(xb) + p = torch.sigmoid(logits).detach().cpu().numpy() + probs.append(p) + if not probs: + return np.zeros((0,), dtype=np.float32) + return np.concatenate(probs, axis=0).reshape(-1) + + +def train_swinlstm_track_o( + x_train: np.ndarray, + y_train: np.ndarray, + x_val: np.ndarray, + y_val: np.ndarray, + cfg: SwinLSTMTrackOConfig, +): + if x_train.ndim != 5 or x_val.ndim != 5: + raise ValueError("x_train and x_val must be 5D arrays [N,T,C,H,W]") + if y_train.ndim != 4 or y_val.ndim != 4: + raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]") + + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + + device = _choose_device(cfg.device) + + model = TinySwinLSTM( + in_channels=cfg.in_channels, + embed_dim=cfg.embed_dim, + hidden_channels=cfg.hidden_channels, + num_heads=cfg.num_heads, + window_size=cfg.window_size, + ).to(device) + optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay) + + total_px = float(y_train.size) + pos_px = float(np.sum(y_train)) + neg_px = max(1.0, total_px - pos_px) + raw_pos_weight = neg_px / max(pos_px, 1.0) + pos_weight = float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max)) + + criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device)) + + train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True) + val_loader = 
_build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False) + + history: List[Dict[str, float]] = [] + best_epoch = 1 + best_val_loss = float("inf") + best_state: Dict[str, torch.Tensor] | None = None + wait = 0 + + for epoch in range(1, cfg.max_epochs + 1): + model.train() + train_losses: List[float] = [] + + for xb, yb in train_loader: + xb = xb.to(device) + yb = yb.to(device) + + optimizer.zero_grad(set_to_none=True) + logits = model(xb) + loss = criterion(logits, yb) + loss.backward() + optimizer.step() + train_losses.append(float(loss.item())) + + model.eval() + val_losses: List[float] = [] + with torch.no_grad(): + for xb, yb in val_loader: + xb = xb.to(device) + yb = yb.to(device) + logits = model(xb) + loss = criterion(logits, yb) + val_losses.append(float(loss.item())) + + tr_loss = float(np.mean(train_losses)) if train_losses else float("nan") + va_loss = float(np.mean(val_losses)) if val_losses else float("nan") + + history.append( + { + "epoch": float(epoch), + "train_loss": tr_loss, + "val_loss": va_loss, + "learning_rate": float(optimizer.param_groups[0]["lr"]), + } + ) + + if va_loss < best_val_loss - cfg.min_delta: + best_val_loss = va_loss + best_epoch = epoch + best_state = deepcopy(model.state_dict()) + wait = 0 + else: + wait += 1 + + if wait >= cfg.early_stopping_rounds: + break + + if best_state is not None: + model.load_state_dict(best_state) + + val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7) + val_true = y_val.reshape(-1).astype(np.float32) + + mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0 + metrics = { + "auprc": float(average_precision_score(val_true, val_prob)), + "auroc": float(roc_auc_score(val_true, val_prob)), + "brier": float(brier_score_loss(val_true, val_prob)), + "nll": float(log_loss(val_true, val_prob)), + "ece": float(binary_ece(val_true, val_prob, n_bins=15)), + "mean_day_to_day_change": mean_change, + 
"normalized_consistency_score": normalized_consistency_score(mean_change), + } + + return model, history, metrics, best_epoch, pos_weight + + +def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None: + output_dir.mkdir(parents=True, exist_ok=True) + + history_csv = output_dir / "history.csv" + with history_csv.open("w", encoding="utf-8", newline="") as f: + writer = csv.DictWriter(f, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"]) + writer.writeheader() + writer.writerows(history) + + x = [int(r["epoch"]) for r in history] + y_tr = [float(r["train_loss"]) for r in history] + y_va = [float(r["val_loss"]) for r in history] + + plt.figure(figsize=(8, 5)) + plt.plot(x, y_tr, label="train_bce", marker="o", linewidth=1.4) + plt.plot(x, y_va, label="val_bce", marker="s", linewidth=1.2) + plt.xlabel("epoch") + plt.ylabel("loss") + plt.title("SwinLSTM Track-O: train loss vs epoch") + plt.grid(alpha=0.3) + plt.legend() + plt.tight_layout() + plt.savefig(output_dir / "loss_curve.png", dpi=150) + plt.close() + + +def build_experiment_setting( + cfg: SwinLSTMTrackOConfig, + best_epoch: int, + pos_weight: float, + metrics: Dict[str, float], +) -> Dict[str, Any]: + return { + "benchmark": { + "task": "Track-O", + "model_name": "swinlstm", + "run_time": datetime.now().isoformat(), + }, + "evaluation_protocol": { + "discrimination": {"primary": "auprc", "secondary": "auroc"}, + "reliability": ["brier", "nll", "ece"], + "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"], + }, + "training": { + "train_unit": "epoch", + "max_epochs": cfg.max_epochs, + "early_stopping_rounds": cfg.early_stopping_rounds, + "best_epoch": best_epoch, + "seed": cfg.seed, + "batch_size": cfg.batch_size, + "seq_len": cfg.seq_len, + }, + "optimizer": { + "name": "AdamW", + "lr": cfg.lr, + "weight_decay": cfg.weight_decay, + }, + "learning_weight": { + "type": "pixel_pos_weight", + "value": pos_weight, + "clip_max": 
cfg.pos_weight_clip_max, + }, + "params": asdict(cfg), + "val_metrics": metrics, + "note": "This module supports both real data and synthetic smoke demonstration.", + } + + +def run_synthetic_demo( + output_dir: Path, + seed: int = 42, + n_samples: int = 160, + seq_len: int = 6, + image_size: int = 24, + max_epochs: int = 60, + early_stopping_rounds: int = 12, +) -> None: + x, y = make_synthetic_fire_sequences( + n_samples=n_samples, + seq_len=seq_len, + image_size=image_size, + seed=seed, + ) + x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed) + + cfg = SwinLSTMTrackOConfig( + seed=seed, + seq_len=seq_len, + max_epochs=max_epochs, + early_stopping_rounds=early_stopping_rounds, + device="cpu", + ) + + model, history, val_metrics, best_epoch, pos_weight = train_swinlstm_track_o( + x_train, + y_train, + x_val, + y_val, + cfg, + ) + + test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False) + test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 1e-7) + test_true = y_test.reshape(-1).astype(np.float32) + + test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0 + test_metrics = { + "auprc": float(average_precision_score(test_true, test_prob)), + "auroc": float(roc_auc_score(test_true, test_prob)), + "brier": float(brier_score_loss(test_true, test_prob)), + "nll": float(log_loss(test_true, test_prob)), + "ece": float(binary_ece(test_true, test_prob, n_bins=15)), + "mean_day_to_day_change": test_mean_change, + "normalized_consistency_score": normalized_consistency_score(test_mean_change), + } + + output_dir.mkdir(parents=True, exist_ok=True) + save_history_and_plot(history, output_dir) + + torch.save( + { + "state_dict": model.state_dict(), + "config": asdict(cfg), + "best_epoch": best_epoch, + }, + output_dir / "swinlstm_model.pt", + ) + + setting = build_experiment_setting(cfg, best_epoch=best_epoch, 
pos_weight=pos_weight, metrics=val_metrics) + setting["data"] = { + "n_samples": n_samples, + "image_size": image_size, + "seq_len": seq_len, + "split": {"train": int(x_train.shape[0]), "val": int(x_val.shape[0]), "test": int(x_test.shape[0])}, + } + setting["test_metrics"] = test_metrics + + (output_dir / "experiment_setting.json").write_text(json.dumps(setting, indent=2), encoding="utf-8") + (output_dir / "metrics.json").write_text( + json.dumps({"val": val_metrics, "test": test_metrics, "best_epoch": best_epoch}, indent=2), + encoding="utf-8", + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Run SwinLSTM Track-O synthetic smoke demo") + parser.add_argument("--output_dir", default=None) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--n_samples", type=int, default=160) + parser.add_argument("--seq_len", type=int, default=6) + parser.add_argument("--image_size", type=int, default=24) + parser.add_argument("--max_epochs", type=int, default=60) + parser.add_argument("--early_stopping_rounds", type=int, default=12) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + base = Path(__file__).resolve().parents[1] / "runs_scaffold" + out = Path(args.output_dir) if args.output_dir else base / f"swinlstm_synthetic_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + + run_synthetic_demo( + output_dir=out, + seed=args.seed, + n_samples=args.n_samples, + seq_len=args.seq_len, + image_size=args.image_size, + max_epochs=args.max_epochs, + early_stopping_rounds=args.early_stopping_rounds, + ) + print(f"[done] swinlstm synthetic demo saved to: {out}") + + +if __name__ == "__main__": + main() + +from ._wildfire_benchmark_utils import SegmentationPort, filter_init_kwargs, require_task + + +def swinlstm_builder(task: str, in_channels: int = 1, out_dim: int = 1, **kwargs: Any) -> nn.Module: + require_task(task, {"segmentation"}, "swinlstm") + init_kwargs = filter_init_kwargs(TinySwinLSTM, 
{"in_channels": int(in_channels), **kwargs}) + model = TinySwinLSTM(**init_kwargs) + return SegmentationPort(model=model, out_channels=int(out_dim)) + + +__all__ = ["TinySwinLSTM", "swinlstm_builder"] diff --git a/pyhazards/models/tcif_fusion.py b/pyhazards/models/tcif_fusion.py new file mode 100644 index 00000000..a9d03a48 --- /dev/null +++ b/pyhazards/models/tcif_fusion.py @@ -0,0 +1,68 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class TCIFFusion(nn.Module): + """Knowledge-guided fusion baseline for tropical cyclone forecasting.""" + + def __init__( + self, + input_dim: int = 8, + hidden_dim: int = 64, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + ): + super().__init__() + self.horizon = int(horizon) + self.output_dim = int(output_dim) + left_dim = max(1, input_dim // 2) + right_dim = input_dim - left_dim + self.left_dim = left_dim + self.left_encoder = nn.GRU(left_dim, hidden_dim, batch_first=True) + self.right_encoder = nn.GRU(max(1, right_dim), hidden_dim, batch_first=True) + self.fusion = nn.Sequential( + nn.Linear(2 * hidden_dim, hidden_dim), + nn.ReLU(), + nn.Dropout(dropout) if dropout > 0 else nn.Identity(), + nn.Linear(hidden_dim, self.horizon * self.output_dim), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("TCIFFusion expects inputs shaped (batch, history, features).") + left = x[:, :, : self.left_dim] + right = x[:, :, self.left_dim :] + if right.size(-1) == 0: + right = x[:, :, :1] + _, left_hidden = self.left_encoder(left) + _, right_hidden = self.right_encoder(right) + fused = torch.cat([left_hidden[-1], right_hidden[-1]], dim=-1) + preds = self.fusion(fused) + return preds.view(x.size(0), self.horizon, self.output_dim) + + +def tcif_fusion_builder( + task: str, + input_dim: int = 8, + hidden_dim: int = 64, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != 
"regression": + raise ValueError("TCIFFusion only supports regression for track/intensity forecasting.") + return TCIFFusion( + input_dim=input_dim, + hidden_dim=hidden_dim, + horizon=horizon, + output_dim=output_dim, + dropout=dropout, + ) + + +__all__ = ["TCIFFusion", "tcif_fusion_builder"] diff --git a/pyhazards/models/tcn.py b/pyhazards/models/tcn.py new file mode 100644 index 00000000..f56b8654 --- /dev/null +++ b/pyhazards/models/tcn.py @@ -0,0 +1,489 @@ +from __future__ import annotations + +import argparse +import csv +import json +import sys +from copy import deepcopy +from dataclasses import asdict, dataclass +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np +import torch +from sklearn.metrics import average_precision_score, brier_score_loss, log_loss, roc_auc_score +from torch import nn +from torch.utils.data import DataLoader, TensorDataset + +if __package__ is None or __package__ == "": + sys.path.insert(0, str(Path(__file__).resolve().parents[3])) + from pyhazards.models.mau import ( + binary_ece, + make_synthetic_fire_sequences, + normalized_consistency_score, + split_train_val_test, + ) +else: + from .mau import ( + binary_ece, + make_synthetic_fire_sequences, + normalized_consistency_score, + split_train_val_test, + ) + + +@dataclass +class TCNTrackOConfig: + seq_len: int = 6 + in_channels: int = 1 + embed_dim: int = 16 + hidden_channels: int = 16 + kernel_size: int = 3 + num_levels: int = 3 + dropout: float = 0.1 + lr: float = 1e-3 + weight_decay: float = 1e-4 + batch_size: int = 8 + max_epochs: int = 120 + early_stopping_rounds: int = 16 + min_delta: float = 1e-4 + seed: int = 42 + pos_weight_clip_max: float = 50.0 + device: str = "cpu" + + +class Chomp1d(nn.Module): + def __init__(self, chomp_size: int): + super().__init__() + self.chomp_size = chomp_size + + def forward(self, x: torch.Tensor) -> 
torch.Tensor: + if self.chomp_size <= 0: + return x + return x[:, :, :-self.chomp_size] + + +class TemporalBlock(nn.Module): + def __init__(self, in_channels: int, out_channels: int, kernel_size: int, dilation: int, dropout: float): + super().__init__() + padding = (kernel_size - 1) * dilation + + self.net = nn.Sequential( + nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding, dilation=dilation), + Chomp1d(padding), + nn.ReLU(inplace=True), + nn.Dropout(dropout), + nn.Conv1d(out_channels, out_channels, kernel_size, padding=padding, dilation=dilation), + Chomp1d(padding), + nn.ReLU(inplace=True), + nn.Dropout(dropout), + ) + self.downsample = nn.Conv1d(in_channels, out_channels, kernel_size=1) if in_channels != out_channels else None + self.act = nn.ReLU(inplace=True) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + out = self.net(x) + residual = x if self.downsample is None else self.downsample(x) + return self.act(out + residual) + + +class TemporalConvNet(nn.Module): + def __init__(self, in_channels: int, hidden_channels: int, kernel_size: int, num_levels: int, dropout: float): + super().__init__() + layers: List[nn.Module] = [] + for i in range(num_levels): + dilation = 2 ** i + cin = in_channels if i == 0 else hidden_channels + cout = hidden_channels + layers.append(TemporalBlock(cin, cout, kernel_size=kernel_size, dilation=dilation, dropout=dropout)) + self.net = nn.Sequential(*layers) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.net(x) + + +class TinyTCN(nn.Module): + def __init__( + self, + in_channels: int = 1, + embed_dim: int = 16, + hidden_channels: int = 16, + kernel_size: int = 3, + num_levels: int = 3, + dropout: float = 0.1, + ): + super().__init__() + self.embed_dim = embed_dim + self.hidden_channels = hidden_channels + + self.frame_encoder = nn.Sequential( + nn.Conv2d(in_channels, embed_dim, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(embed_dim, embed_dim, kernel_size=3, 
padding=1), + nn.ReLU(inplace=True), + ) + + self.temporal = TemporalConvNet( + in_channels=embed_dim, + hidden_channels=hidden_channels, + kernel_size=kernel_size, + num_levels=num_levels, + dropout=dropout, + ) + + self.decoder = nn.Sequential( + nn.Conv2d(hidden_channels, hidden_channels, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(hidden_channels, 1, kernel_size=1), + ) + + def forward(self, x_seq: torch.Tensor) -> torch.Tensor: + # x_seq: [B,T,C,H,W] + b, t, c, h, w = x_seq.shape + + x = x_seq.reshape(b * t, c, h, w) + x = self.frame_encoder(x) + d = x.shape[1] + + x = x.reshape(b, t, d, h, w) + x = x.permute(0, 3, 4, 2, 1).contiguous() # [B,H,W,D,T] + x = x.reshape(b * h * w, d, t) # [BHW,D,T] + + x = self.temporal(x) + x_last = x[:, :, -1] # [BHW,HID] + + x_last = x_last.reshape(b, h, w, self.hidden_channels).permute(0, 3, 1, 2).contiguous() + return self.decoder(x_last) + + +def _choose_device(device_text: str) -> torch.device: + if device_text == "cuda" and torch.cuda.is_available(): + return torch.device("cuda") + return torch.device("cpu") + + +def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader: + ds = TensorDataset( + torch.from_numpy(x.astype(np.float32)), + torch.from_numpy(y.astype(np.float32)), + ) + return DataLoader(ds, batch_size=batch_size, shuffle=shuffle) + + +def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray: + probs: List[np.ndarray] = [] + model.eval() + with torch.no_grad(): + for xb, _ in loader: + xb = xb.to(device) + logits = model(xb) + p = torch.sigmoid(logits).detach().cpu().numpy() + probs.append(p) + if not probs: + return np.zeros((0,), dtype=np.float32) + return np.concatenate(probs, axis=0).reshape(-1) + + +def train_tcn_track_o( + x_train: np.ndarray, + y_train: np.ndarray, + x_val: np.ndarray, + y_val: np.ndarray, + cfg: TCNTrackOConfig, +): + if x_train.ndim != 5 or x_val.ndim != 5: + raise ValueError("x_train and 
x_val must be 5D arrays [N,T,C,H,W]") + if y_train.ndim != 4 or y_val.ndim != 4: + raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]") + + torch.manual_seed(cfg.seed) + np.random.seed(cfg.seed) + + device = _choose_device(cfg.device) + + model = TinyTCN( + in_channels=cfg.in_channels, + embed_dim=cfg.embed_dim, + hidden_channels=cfg.hidden_channels, + kernel_size=cfg.kernel_size, + num_levels=cfg.num_levels, + dropout=cfg.dropout, + ).to(device) + optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay) + + total_px = float(y_train.size) + pos_px = float(np.sum(y_train)) + neg_px = max(1.0, total_px - pos_px) + raw_pos_weight = neg_px / max(pos_px, 1.0) + pos_weight = float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max)) + + criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device)) + + train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True) + val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False) + + history: List[Dict[str, float]] = [] + best_epoch = 1 + best_val_loss = float("inf") + best_state: Dict[str, torch.Tensor] | None = None + wait = 0 + + for epoch in range(1, cfg.max_epochs + 1): + model.train() + train_losses: List[float] = [] + + for xb, yb in train_loader: + xb = xb.to(device) + yb = yb.to(device) + + optimizer.zero_grad(set_to_none=True) + logits = model(xb) + loss = criterion(logits, yb) + loss.backward() + optimizer.step() + train_losses.append(float(loss.item())) + + model.eval() + val_losses: List[float] = [] + with torch.no_grad(): + for xb, yb in val_loader: + xb = xb.to(device) + yb = yb.to(device) + logits = model(xb) + loss = criterion(logits, yb) + val_losses.append(float(loss.item())) + + tr_loss = float(np.mean(train_losses)) if train_losses else float("nan") + va_loss = float(np.mean(val_losses)) if val_losses else float("nan") + + history.append( + { + "epoch": float(epoch), + "train_loss": 
tr_loss, + "val_loss": va_loss, + "learning_rate": float(optimizer.param_groups[0]["lr"]), + } + ) + + if va_loss < best_val_loss - cfg.min_delta: + best_val_loss = va_loss + best_epoch = epoch + best_state = deepcopy(model.state_dict()) + wait = 0 + else: + wait += 1 + + if wait >= cfg.early_stopping_rounds: + break + + if best_state is not None: + model.load_state_dict(best_state) + + val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7) + val_true = y_val.reshape(-1).astype(np.float32) + + mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0 + metrics = { + "auprc": float(average_precision_score(val_true, val_prob)), + "auroc": float(roc_auc_score(val_true, val_prob)), + "brier": float(brier_score_loss(val_true, val_prob)), + "nll": float(log_loss(val_true, val_prob)), + "ece": float(binary_ece(val_true, val_prob, n_bins=15)), + "mean_day_to_day_change": mean_change, + "normalized_consistency_score": normalized_consistency_score(mean_change), + } + + return model, history, metrics, best_epoch, pos_weight + + +def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None: + output_dir.mkdir(parents=True, exist_ok=True) + + history_csv = output_dir / "history.csv" + with history_csv.open("w", encoding="utf-8", newline="") as f: + writer = csv.DictWriter(f, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"]) + writer.writeheader() + writer.writerows(history) + + x = [int(r["epoch"]) for r in history] + y_tr = [float(r["train_loss"]) for r in history] + y_va = [float(r["val_loss"]) for r in history] + + plt.figure(figsize=(8, 5)) + plt.plot(x, y_tr, label="train_bce", marker="o", linewidth=1.4) + plt.plot(x, y_va, label="val_bce", marker="s", linewidth=1.2) + plt.xlabel("epoch") + plt.ylabel("loss") + plt.title("TCN Track-O: train loss vs epoch") + plt.grid(alpha=0.3) + plt.legend() + plt.tight_layout() + plt.savefig(output_dir / "loss_curve.png", 
dpi=150) + plt.close() + + +def build_experiment_setting( + cfg: TCNTrackOConfig, + best_epoch: int, + pos_weight: float, + metrics: Dict[str, float], +) -> Dict[str, Any]: + return { + "benchmark": { + "task": "Track-O", + "model_name": "tcn", + "run_time": datetime.now().isoformat(), + }, + "evaluation_protocol": { + "discrimination": {"primary": "auprc", "secondary": "auroc"}, + "reliability": ["brier", "nll", "ece"], + "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"], + }, + "training": { + "train_unit": "epoch", + "max_epochs": cfg.max_epochs, + "early_stopping_rounds": cfg.early_stopping_rounds, + "best_epoch": best_epoch, + "seed": cfg.seed, + "batch_size": cfg.batch_size, + "seq_len": cfg.seq_len, + }, + "optimizer": { + "name": "AdamW", + "lr": cfg.lr, + "weight_decay": cfg.weight_decay, + }, + "learning_weight": { + "type": "pixel_pos_weight", + "value": pos_weight, + "clip_max": cfg.pos_weight_clip_max, + }, + "params": asdict(cfg), + "val_metrics": metrics, + "note": "This module supports both real data and synthetic smoke demonstration.", + } + + +def run_synthetic_demo( + output_dir: Path, + seed: int = 42, + n_samples: int = 160, + seq_len: int = 6, + image_size: int = 24, + max_epochs: int = 60, + early_stopping_rounds: int = 12, +) -> None: + x, y = make_synthetic_fire_sequences( + n_samples=n_samples, + seq_len=seq_len, + image_size=image_size, + seed=seed, + ) + x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed) + + cfg = TCNTrackOConfig( + seed=seed, + seq_len=seq_len, + max_epochs=max_epochs, + early_stopping_rounds=early_stopping_rounds, + device="cpu", + ) + + model, history, val_metrics, best_epoch, pos_weight = train_tcn_track_o( + x_train, + y_train, + x_val, + y_val, + cfg, + ) + + test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False) + test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 
1e-7) + test_true = y_test.reshape(-1).astype(np.float32) + + test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0 + test_metrics = { + "auprc": float(average_precision_score(test_true, test_prob)), + "auroc": float(roc_auc_score(test_true, test_prob)), + "brier": float(brier_score_loss(test_true, test_prob)), + "nll": float(log_loss(test_true, test_prob)), + "ece": float(binary_ece(test_true, test_prob, n_bins=15)), + "mean_day_to_day_change": test_mean_change, + "normalized_consistency_score": normalized_consistency_score(test_mean_change), + } + + output_dir.mkdir(parents=True, exist_ok=True) + save_history_and_plot(history, output_dir) + + torch.save( + { + "state_dict": model.state_dict(), + "config": asdict(cfg), + "best_epoch": best_epoch, + }, + output_dir / "tcn_model.pt", + ) + + setting = build_experiment_setting(cfg, best_epoch=best_epoch, pos_weight=pos_weight, metrics=val_metrics) + setting["data"] = { + "n_samples": n_samples, + "image_size": image_size, + "seq_len": seq_len, + "split": {"train": int(x_train.shape[0]), "val": int(x_val.shape[0]), "test": int(x_test.shape[0])}, + } + setting["test_metrics"] = test_metrics + + (output_dir / "experiment_setting.json").write_text(json.dumps(setting, indent=2), encoding="utf-8") + (output_dir / "metrics.json").write_text( + json.dumps({"val": val_metrics, "test": test_metrics, "best_epoch": best_epoch}, indent=2), + encoding="utf-8", + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Run TCN Track-O synthetic smoke demo") + parser.add_argument("--output_dir", default=None) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--n_samples", type=int, default=160) + parser.add_argument("--seq_len", type=int, default=6) + parser.add_argument("--image_size", type=int, default=24) + parser.add_argument("--max_epochs", type=int, default=60) + parser.add_argument("--early_stopping_rounds", 
type=int, default=12) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + base = Path(__file__).resolve().parents[1] / "runs_scaffold" + out = Path(args.output_dir) if args.output_dir else base / f"tcn_synthetic_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + + run_synthetic_demo( + output_dir=out, + seed=args.seed, + n_samples=args.n_samples, + seq_len=args.seq_len, + image_size=args.image_size, + max_epochs=args.max_epochs, + early_stopping_rounds=args.early_stopping_rounds, + ) + print(f"[done] tcn synthetic demo saved to: {out}") + + +if __name__ == "__main__": + main() + +from ._wildfire_benchmark_utils import SegmentationPort, filter_init_kwargs, require_task + + +def tcn_builder(task: str, in_channels: int = 1, out_dim: int = 1, **kwargs: Any) -> nn.Module: + require_task(task, {"segmentation"}, "tcn") + init_kwargs = filter_init_kwargs(TinyTCN, {"in_channels": int(in_channels), **kwargs}) + model = TinyTCN(**init_kwargs) + return SegmentationPort(model=model, out_channels=int(out_dim)) + + +__all__ = ["TinyTCN", "tcn_builder"] diff --git a/pyhazards/models/tropicalcyclone_mlp.py b/pyhazards/models/tropicalcyclone_mlp.py new file mode 100644 index 00000000..5e76c5ab --- /dev/null +++ b/pyhazards/models/tropicalcyclone_mlp.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class TropicalCycloneMLP(nn.Module): + """Compact MLP baseline for storm track and intensity forecasting.""" + + def __init__( + self, + input_dim: int = 8, + history: int = 6, + hidden_dim: int = 64, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + ): + super().__init__() + self.history = int(history) + self.horizon = int(horizon) + self.output_dim = int(output_dim) + self.net = nn.Sequential( + nn.Linear(self.history * input_dim, hidden_dim), + nn.ReLU(), + nn.Dropout(dropout) if dropout > 0 else nn.Identity(), + nn.Linear(hidden_dim, hidden_dim), + nn.ReLU(), + nn.Linear(hidden_dim, self.horizon 
* self.output_dim), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("TropicalCycloneMLP expects inputs shaped (batch, history, features).") + if x.size(1) != self.history: + raise ValueError(f"TropicalCycloneMLP expected history={self.history}, got {x.size(1)}.") + preds = self.net(x.reshape(x.size(0), -1)) + return preds.view(x.size(0), self.horizon, self.output_dim) + + +def tropicalcyclone_mlp_builder( + task: str, + input_dim: int = 8, + history: int = 6, + hidden_dim: int = 64, + horizon: int = 5, + output_dim: int = 3, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("TropicalCycloneMLP only supports regression for track/intensity forecasting.") + return TropicalCycloneMLP( + input_dim=input_dim, + history=history, + hidden_dim=hidden_dim, + horizon=horizon, + output_dim=output_dim, + dropout=dropout, + ) + + +__all__ = ["TropicalCycloneMLP", "tropicalcyclone_mlp_builder"] diff --git a/pyhazards/models/tropicyclonenet.py b/pyhazards/models/tropicyclonenet.py new file mode 100644 index 00000000..69b3220f --- /dev/null +++ b/pyhazards/models/tropicyclonenet.py @@ -0,0 +1,70 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class TropiCycloneNet(nn.Module): + """GRU + attention baseline for all-basin tropical cyclone forecasting.""" + + def __init__( + self, + input_dim: int = 8, + hidden_dim: int = 64, + horizon: int = 5, + output_dim: int = 3, + num_layers: int = 2, + dropout: float = 0.1, + ): + super().__init__() + self.horizon = int(horizon) + self.output_dim = int(output_dim) + self.encoder = nn.GRU( + input_size=input_dim, + hidden_size=hidden_dim, + num_layers=num_layers, + batch_first=True, + dropout=dropout if num_layers > 1 else 0.0, + bidirectional=True, + ) + self.attention = nn.Linear(2 * hidden_dim, 1) + self.head = nn.Sequential( + nn.Linear(2 * hidden_dim, hidden_dim), + nn.ReLU(), + 
nn.Linear(hidden_dim, self.horizon * self.output_dim), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError("TropiCycloneNet expects inputs shaped (batch, history, features).") + encoded, _ = self.encoder(x) + weights = torch.softmax(self.attention(encoded), dim=1) + pooled = torch.sum(weights * encoded, dim=1) + preds = self.head(pooled) + return preds.view(x.size(0), self.horizon, self.output_dim) + + +def tropicyclonenet_builder( + task: str, + input_dim: int = 8, + hidden_dim: int = 64, + horizon: int = 5, + output_dim: int = 3, + num_layers: int = 2, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "regression": + raise ValueError("TropiCycloneNet only supports regression for track/intensity forecasting.") + return TropiCycloneNet( + input_dim=input_dim, + hidden_dim=hidden_dim, + horizon=horizon, + output_dim=output_dim, + num_layers=num_layers, + dropout=dropout, + ) + + +__all__ = ["TropiCycloneNet", "tropicyclonenet_builder"] diff --git a/pyhazards/models/ts_satfire.py b/pyhazards/models/ts_satfire.py new file mode 100644 index 00000000..9e9c28f6 --- /dev/null +++ b/pyhazards/models/ts_satfire.py @@ -0,0 +1,84 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class TSSatFire(nn.Module): + """Spatio-temporal wildfire prediction model inspired by TS-SatFire.""" + + def __init__( + self, + history: int = 5, + in_channels: int = 8, + hidden_dim: int = 32, + out_channels: int = 1, + dropout: float = 0.1, + ): + super().__init__() + if history <= 0: + raise ValueError(f"history must be positive, got {history}") + if in_channels <= 0: + raise ValueError(f"in_channels must be positive, got {in_channels}") + if hidden_dim <= 0: + raise ValueError(f"hidden_dim must be positive, got {hidden_dim}") + if out_channels <= 0: + raise ValueError(f"out_channels must be positive, got {out_channels}") + if not 0.0 <= dropout < 1.0: + raise ValueError(f"dropout 
must be in [0, 1), got {dropout}") + + self.history = int(history) + self.in_channels = int(in_channels) + self.temporal_encoder = nn.Sequential( + nn.Conv3d(in_channels, hidden_dim, kernel_size=(3, 3, 3), padding=1), + nn.GELU(), + nn.Conv3d(hidden_dim, hidden_dim, kernel_size=(3, 3, 3), padding=1), + nn.GELU(), + ) + self.time_attention = nn.Conv3d(hidden_dim, 1, kernel_size=1) + self.decoder = nn.Sequential( + nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + nn.Dropout2d(dropout) if dropout > 0 else nn.Identity(), + nn.Conv2d(hidden_dim, out_channels, kernel_size=1), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 5: + raise ValueError( + "TSSatFire expects input shape (batch, history, channels, height, width), " + f"got {tuple(x.shape)}." + ) + if x.size(1) != self.history: + raise ValueError(f"TSSatFire expected history={self.history}, got {x.size(1)}.") + if x.size(2) != self.in_channels: + raise ValueError(f"TSSatFire expected in_channels={self.in_channels}, got {x.size(2)}.") + + feat = self.temporal_encoder(x.permute(0, 2, 1, 3, 4)) + attn = torch.softmax(self.time_attention(feat), dim=2) + pooled = torch.sum(attn * feat, dim=2) + return self.decoder(pooled) + + +def ts_satfire_builder( + task: str, + history: int = 5, + in_channels: int = 8, + hidden_dim: int = 32, + out_channels: int = 1, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() not in {"segmentation", "regression"}: + raise ValueError(f"ts_satfire supports task='segmentation' or 'regression', got {task!r}.") + return TSSatFire( + history=history, + in_channels=in_channels, + hidden_dim=hidden_dim, + out_channels=out_channels, + dropout=dropout, + ) + + +__all__ = ["TSSatFire", "ts_satfire_builder"] diff --git a/pyhazards/models/unet.py b/pyhazards/models/unet.py new file mode 100644 index 00000000..8eb11a42 --- /dev/null +++ b/pyhazards/models/unet.py @@ -0,0 +1,482 @@ +from __future__ import annotations + 
+import argparse +import csv +import json +from copy import deepcopy +from dataclasses import asdict, dataclass +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List, Tuple + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np +import torch +from sklearn.metrics import average_precision_score, brier_score_loss, log_loss, roc_auc_score +from torch import nn +from torch.utils.data import DataLoader, TensorDataset + + +@dataclass +class UNetTrackOConfig: + in_channels: int = 1 + base_channels: int = 8 + lr: float = 1e-3 + weight_decay: float = 1e-4 + batch_size: int = 8 + max_epochs: int = 120 + early_stopping_rounds: int = 16 + min_delta: float = 1e-4 + seed: int = 42 + pos_weight_clip_max: float = 50.0 + device: str = "cpu" + + +class ConvBlock(nn.Module): + def __init__(self, in_channels: int, out_channels: int): + super().__init__() + self.block = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.block(x) + + +class TinyUNet(nn.Module): + def __init__(self, in_channels: int = 1, base_channels: int = 16): + super().__init__() + c1, c2, c3 = base_channels, base_channels * 2, base_channels * 4 + + self.enc1 = ConvBlock(in_channels, c1) + self.pool1 = nn.MaxPool2d(kernel_size=2) + + self.enc2 = ConvBlock(c1, c2) + self.pool2 = nn.MaxPool2d(kernel_size=2) + + self.bottleneck = ConvBlock(c2, c3) + + self.up2 = nn.ConvTranspose2d(c3, c2, kernel_size=2, stride=2) + self.dec2 = ConvBlock(c2 + c2, c2) + + self.up1 = nn.ConvTranspose2d(c2, c1, kernel_size=2, stride=2) + self.dec1 = ConvBlock(c1 + c1, c1) + + self.head = nn.Conv2d(c1, 1, kernel_size=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x1 = self.enc1(x) + x2 = self.enc2(self.pool1(x1)) + xb = 
self.bottleneck(self.pool2(x2)) + + y2 = self.up2(xb) + y2 = torch.cat([y2, x2], dim=1) + y2 = self.dec2(y2) + + y1 = self.up1(y2) + y1 = torch.cat([y1, x1], dim=1) + y1 = self.dec1(y1) + + return self.head(y1) + + +def binary_ece(y_true: np.ndarray, y_prob: np.ndarray, n_bins: int = 15) -> float: + bins = np.linspace(0.0, 1.0, n_bins + 1) + ece = 0.0 + n = float(len(y_true)) + for i in range(n_bins): + lo, hi = bins[i], bins[i + 1] + if i == n_bins - 1: + mask = (y_prob >= lo) & (y_prob <= hi) + else: + mask = (y_prob >= lo) & (y_prob < hi) + if not np.any(mask): + continue + acc = float(np.mean(y_true[mask])) + conf = float(np.mean(y_prob[mask])) + ece += (float(np.sum(mask)) / n) * abs(acc - conf) + return float(ece) + + +def normalized_consistency_score(mean_day_to_day_change: float) -> float: + return float(np.clip(1.0 - float(mean_day_to_day_change), 0.0, 1.0)) + + +def make_synthetic_fire_maps( + n_samples: int, + image_size: int, + seed: int, +) -> Tuple[np.ndarray, np.ndarray]: + rng = np.random.default_rng(seed) + yy, xx = np.meshgrid(np.arange(image_size), np.arange(image_size), indexing="ij") + + x = np.zeros((n_samples, 1, image_size, image_size), dtype=np.float32) + y = np.zeros((n_samples, 1, image_size, image_size), dtype=np.float32) + + for i in range(n_samples): + field = rng.normal(0.0, 0.15, size=(image_size, image_size)) + n_sources = int(rng.integers(1, 4)) + + for _ in range(n_sources): + cx = float(rng.uniform(0, image_size - 1)) + cy = float(rng.uniform(0, image_size - 1)) + sigma = float(rng.uniform(1.8, 4.8)) + amp = float(rng.uniform(0.8, 2.2)) + dist2 = (xx - cx) ** 2 + (yy - cy) ** 2 + field += amp * np.exp(-dist2 / (2.0 * sigma * sigma)) + + terrain = (yy / max(1, image_size - 1)) * rng.uniform(-0.15, 0.15) + wind = (xx / max(1, image_size - 1)) * rng.uniform(-0.25, 0.25) + + signal = field + terrain + wind + rng.normal(0.0, 0.08, size=(image_size, image_size)) + threshold = float(np.quantile(field, 0.90)) + mask = (field > 
threshold).astype(np.float32) + + x[i, 0] = signal.astype(np.float32) + y[i, 0] = mask + + x_mean = float(np.mean(x)) + x_std = float(np.std(x) + 1e-6) + x = (x - x_mean) / x_std + return x, y + + +def split_train_val_test( + x: np.ndarray, + y: np.ndarray, + seed: int, + train_ratio: float = 0.7, + val_ratio: float = 0.15, +) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: + n = x.shape[0] + rng = np.random.default_rng(seed) + idx = rng.permutation(n) + + n_train = max(1, int(n * train_ratio)) + n_val = max(1, int(n * val_ratio)) + n_train = min(n_train, n - 2) + n_val = min(n_val, n - n_train - 1) + + train_idx = idx[:n_train] + val_idx = idx[n_train : n_train + n_val] + test_idx = idx[n_train + n_val :] + + return ( + x[train_idx], + y[train_idx], + x[val_idx], + y[val_idx], + x[test_idx], + y[test_idx], + ) + + +def _choose_device(device_text: str) -> torch.device: + normalized = str(device_text).strip().lower() + if normalized.startswith("cuda") and torch.cuda.is_available(): + return torch.device(device_text) + return torch.device("cpu") + + +def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader: + ds = TensorDataset( + torch.from_numpy(x.astype(np.float32)), + torch.from_numpy(y.astype(np.float32)), + ) + return DataLoader(ds, batch_size=batch_size, shuffle=shuffle) + + +def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray: + probs: List[np.ndarray] = [] + model.eval() + with torch.no_grad(): + for xb, _ in loader: + xb = xb.to(device) + logits = model(xb) + p = torch.sigmoid(logits).detach().cpu().numpy() + probs.append(p) + if not probs: + return np.zeros((0,), dtype=np.float32) + return np.concatenate(probs, axis=0).reshape(-1) + + +def train_unet_track_o( + x_train: np.ndarray, + y_train: np.ndarray, + x_val: np.ndarray, + y_val: np.ndarray, + cfg: UNetTrackOConfig, +): + if x_train.ndim != 4 or x_val.ndim != 4: + raise 
def train_unet_track_o(
    x_train: np.ndarray,
    y_train: np.ndarray,
    x_val: np.ndarray,
    y_val: np.ndarray,
    cfg: UNetTrackOConfig,
):
    """Train TinyUNet with early stopping.

    Returns (model, history, val_metrics, best_epoch, pos_weight).
    """
    if x_train.ndim != 4 or x_val.ndim != 4:
        raise ValueError("x_train and x_val must be 4D arrays [N,C,H,W]")
    if y_train.ndim != 4 or y_val.ndim != 4:
        raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]")

    # Seed both RNGs before any model construction / data shuffling.
    torch.manual_seed(cfg.seed)
    np.random.seed(cfg.seed)

    device = _choose_device(cfg.device)
    model = TinyUNet(in_channels=cfg.in_channels, base_channels=cfg.base_channels).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay)

    # Rebalance the pixel-wise BCE by the (clipped) negative/positive pixel ratio.
    total_px = float(y_train.size)
    pos_px = float(np.sum(y_train))
    neg_px = max(1.0, total_px - pos_px)
    pos_weight = float(np.clip(neg_px / max(pos_px, 1.0), 1.0, cfg.pos_weight_clip_max))
    criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device))

    train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True)
    val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False)

    history: List[Dict[str, float]] = []
    best_epoch = 1
    best_val_loss = float("inf")
    best_state: Dict[str, torch.Tensor] | None = None
    patience_used = 0

    for epoch in range(1, cfg.max_epochs + 1):
        model.train()
        epoch_train: List[float] = []
        for batch_x, batch_y in train_loader:
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)
            optimizer.zero_grad(set_to_none=True)
            loss = criterion(model(batch_x), batch_y)
            loss.backward()
            optimizer.step()
            epoch_train.append(float(loss.item()))

        model.eval()
        epoch_val: List[float] = []
        with torch.no_grad():
            for batch_x, batch_y in val_loader:
                batch_x, batch_y = batch_x.to(device), batch_y.to(device)
                epoch_val.append(float(criterion(model(batch_x), batch_y).item()))

        tr_loss = float(np.mean(epoch_train)) if epoch_train else float("nan")
        va_loss = float(np.mean(epoch_val)) if epoch_val else float("nan")
        history.append(
            {
                "epoch": float(epoch),
                "train_loss": tr_loss,
                "val_loss": va_loss,
                "learning_rate": float(optimizer.param_groups[0]["lr"]),
            }
        )

        if va_loss < best_val_loss - cfg.min_delta:
            best_val_loss = va_loss
            best_epoch = epoch
            best_state = deepcopy(model.state_dict())
            patience_used = 0
        else:
            patience_used += 1
        if patience_used >= cfg.early_stopping_rounds:
            break

    # Restore the best validation checkpoint before scoring.
    if best_state is not None:
        model.load_state_dict(best_state)

    val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7)
    val_true = y_val.reshape(-1).astype(np.float32)

    # NOTE(review): "day-to-day change" is computed on *sorted* probabilities here — confirm intent.
    mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0
    metrics = {
        "auprc": float(average_precision_score(val_true, val_prob)),
        "auroc": float(roc_auc_score(val_true, val_prob)),
        "brier": float(brier_score_loss(val_true, val_prob)),
        "nll": float(log_loss(val_true, val_prob)),
        "ece": float(binary_ece(val_true, val_prob, n_bins=15)),
        "mean_day_to_day_change": mean_change,
        "normalized_consistency_score": normalized_consistency_score(mean_change),
    }

    return model, history, metrics, best_epoch, pos_weight


def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None:
    """Persist the per-epoch history as CSV plus a loss-curve PNG under output_dir."""
    output_dir.mkdir(parents=True, exist_ok=True)

    with (output_dir / "history.csv").open("w", encoding="utf-8", newline="") as fh:
        writer = csv.DictWriter(fh, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"])
        writer.writeheader()
        writer.writerows(history)

    epochs = [int(row["epoch"]) for row in history]
    train_curve = [float(row["train_loss"]) for row in history]
    val_curve = [float(row["val_loss"]) for row in history]

    plt.figure(figsize=(8, 5))
    plt.plot(epochs, train_curve, label="train_bce", marker="o", linewidth=1.4)
    plt.plot(epochs, val_curve, label="val_bce", marker="s", linewidth=1.2)
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.title("U-Net Track-O: train loss vs epoch")
    plt.grid(alpha=0.3)
    plt.legend()
    plt.tight_layout()
    plt.savefig(output_dir / "loss_curve.png", dpi=150)
    plt.close()


def build_experiment_setting(
    cfg: UNetTrackOConfig,
    best_epoch: int,
    pos_weight: float,
    metrics: Dict[str, float],
) -> Dict[str, Any]:
    """Assemble the JSON-serializable experiment record for a finished run."""
    return {
        "benchmark": {
            "task": "Track-O",
            "model_name": "unet",
            "run_time": datetime.now().isoformat(),
        },
        "evaluation_protocol": {
            "discrimination": {"primary": "auprc", "secondary": "auroc"},
            "reliability": ["brier", "nll", "ece"],
            "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"],
        },
        "training": {
            "train_unit": "epoch",
            "max_epochs": cfg.max_epochs,
            "early_stopping_rounds": cfg.early_stopping_rounds,
            "best_epoch": best_epoch,
            "seed": cfg.seed,
            "batch_size": cfg.batch_size,
        },
        "optimizer": {
            "name": "AdamW",
            "lr": cfg.lr,
            "weight_decay": cfg.weight_decay,
        },
        "learning_weight": {
            "type": "pixel_pos_weight",
            "value": pos_weight,
            "clip_max": cfg.pos_weight_clip_max,
        },
        "params": asdict(cfg),
        "val_metrics": metrics,
        "note": "This module supports both real data and synthetic smoke demonstration.",
    }
+ pos_weight: float, + metrics: Dict[str, float], +) -> Dict[str, Any]: + return { + "benchmark": { + "task": "Track-O", + "model_name": "unet", + "run_time": datetime.now().isoformat(), + }, + "evaluation_protocol": { + "discrimination": {"primary": "auprc", "secondary": "auroc"}, + "reliability": ["brier", "nll", "ece"], + "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"], + }, + "training": { + "train_unit": "epoch", + "max_epochs": cfg.max_epochs, + "early_stopping_rounds": cfg.early_stopping_rounds, + "best_epoch": best_epoch, + "seed": cfg.seed, + "batch_size": cfg.batch_size, + }, + "optimizer": { + "name": "AdamW", + "lr": cfg.lr, + "weight_decay": cfg.weight_decay, + }, + "learning_weight": { + "type": "pixel_pos_weight", + "value": pos_weight, + "clip_max": cfg.pos_weight_clip_max, + }, + "params": asdict(cfg), + "val_metrics": metrics, + "note": "This module supports both real data and synthetic smoke demonstration.", + } + + +def run_synthetic_demo( + output_dir: Path, + seed: int = 42, + n_samples: int = 192, + image_size: int = 24, + max_epochs: int = 60, + early_stopping_rounds: int = 12, +) -> None: + x, y = make_synthetic_fire_maps(n_samples=n_samples, image_size=image_size, seed=seed) + x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed) + + cfg = UNetTrackOConfig( + seed=seed, + max_epochs=max_epochs, + early_stopping_rounds=early_stopping_rounds, + device="cuda" if torch.cuda.is_available() else "cpu", + ) + + model, history, val_metrics, best_epoch, pos_weight = train_unet_track_o(x_train, y_train, x_val, y_val, cfg) + + test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False) + test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 1e-7) + test_true = y_test.reshape(-1).astype(np.float32) + + test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0 + 
test_metrics = { + "auprc": float(average_precision_score(test_true, test_prob)), + "auroc": float(roc_auc_score(test_true, test_prob)), + "brier": float(brier_score_loss(test_true, test_prob)), + "nll": float(log_loss(test_true, test_prob)), + "ece": float(binary_ece(test_true, test_prob, n_bins=15)), + "mean_day_to_day_change": test_mean_change, + "normalized_consistency_score": normalized_consistency_score(test_mean_change), + } + + output_dir.mkdir(parents=True, exist_ok=True) + save_history_and_plot(history, output_dir) + + torch.save( + { + "state_dict": model.state_dict(), + "config": asdict(cfg), + "best_epoch": best_epoch, + }, + output_dir / "unet_model.pt", + ) + + setting = build_experiment_setting(cfg, best_epoch=best_epoch, pos_weight=pos_weight, metrics=val_metrics) + setting["data"] = { + "n_samples": n_samples, + "image_size": image_size, + "split": {"train": int(x_train.shape[0]), "val": int(x_val.shape[0]), "test": int(x_test.shape[0])}, + } + setting["test_metrics"] = test_metrics + + (output_dir / "experiment_setting.json").write_text(json.dumps(setting, indent=2), encoding="utf-8") + (output_dir / "metrics.json").write_text( + json.dumps({"val": val_metrics, "test": test_metrics, "best_epoch": best_epoch}, indent=2), + encoding="utf-8", + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Run U-Net Track-O synthetic smoke demo") + parser.add_argument("--output_dir", default=None) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--n_samples", type=int, default=192) + parser.add_argument("--image_size", type=int, default=24) + parser.add_argument("--max_epochs", type=int, default=60) + parser.add_argument("--early_stopping_rounds", type=int, default=12) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + base = Path(__file__).resolve().parents[1] / "runs_scaffold" + out = Path(args.output_dir) if args.output_dir else base / 
f"unet_synthetic_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + + run_synthetic_demo( + output_dir=out, + seed=args.seed, + n_samples=args.n_samples, + image_size=args.image_size, + max_epochs=args.max_epochs, + early_stopping_rounds=args.early_stopping_rounds, + ) + print(f"[done] unet synthetic demo saved to: {out}") + + +if __name__ == "__main__": + main() + +from ._wildfire_benchmark_utils import SegmentationPort, filter_init_kwargs, require_task + + +def unet_builder(task: str, in_channels: int = 1, out_dim: int = 1, **kwargs: Any) -> nn.Module: + require_task(task, {"segmentation"}, "unet") + init_kwargs = filter_init_kwargs(TinyUNet, {"in_channels": int(in_channels), **kwargs}) + model = TinyUNet(**init_kwargs) + return SegmentationPort(model=model, out_channels=int(out_dim)) + + +__all__ = ["TinyUNet", "unet_builder"] diff --git a/pyhazards/models/urbanfloodcast.py b/pyhazards/models/urbanfloodcast.py new file mode 100644 index 00000000..cfeb2e9f --- /dev/null +++ b/pyhazards/models/urbanfloodcast.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class UrbanFloodCast(nn.Module): + """U-Net style urban inundation baseline.""" + + def __init__( + self, + in_channels: int = 3, + history: int = 4, + base_channels: int = 32, + out_channels: int = 1, + ): + super().__init__() + self.history = int(history) + merged_channels = in_channels * history + self.encoder = nn.Sequential( + nn.Conv2d(merged_channels, base_channels, kernel_size=3, padding=1), + nn.ReLU(), + nn.Conv2d(base_channels, base_channels, kernel_size=3, padding=1), + nn.ReLU(), + ) + self.decoder = nn.Sequential( + nn.Conv2d(base_channels, base_channels, kernel_size=3, padding=1), + nn.ReLU(), + nn.Conv2d(base_channels, out_channels, kernel_size=1), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 5: + raise ValueError("UrbanFloodCast expects inputs shaped (batch, history, channels, height, width).") + if x.size(1) != 
self.history: + raise ValueError(f"UrbanFloodCast expected history={self.history}, got {x.size(1)}.") + bsz, history, channels, height, width = x.shape + merged = x.reshape(bsz, history * channels, height, width) + features = self.encoder(merged) + return self.decoder(features) + + +def urbanfloodcast_builder( + task: str, + in_channels: int = 3, + history: int = 4, + base_channels: int = 32, + out_channels: int = 1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() not in {"regression", "segmentation"}: + raise ValueError("UrbanFloodCast only supports regression or segmentation-style inundation outputs.") + return UrbanFloodCast( + in_channels=in_channels, + history=history, + base_channels=base_channels, + out_channels=out_channels, + ) + + +__all__ = ["UrbanFloodCast", "urbanfloodcast_builder"] diff --git a/pyhazards/models/utae.py b/pyhazards/models/utae.py new file mode 100644 index 00000000..2b5ea2ef --- /dev/null +++ b/pyhazards/models/utae.py @@ -0,0 +1,446 @@ +from __future__ import annotations + +import argparse +import csv +import json +import sys +from copy import deepcopy +from dataclasses import asdict, dataclass +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np +import torch +from sklearn.metrics import average_precision_score, brier_score_loss, log_loss, roc_auc_score +from torch import nn +from torch.utils.data import DataLoader, TensorDataset + +if __package__ is None or __package__ == "": + sys.path.insert(0, str(Path(__file__).resolve().parents[3])) + from pyhazards.models.mau import ( + binary_ece, + make_synthetic_fire_sequences, + normalized_consistency_score, + split_train_val_test, + ) +else: + from .mau import ( + binary_ece, + make_synthetic_fire_sequences, + normalized_consistency_score, + split_train_val_test, + ) + + +@dataclass +class UTAETrackOConfig: + seq_len: int = 6 + in_channels: 
@dataclass
class UTAETrackOConfig:
    """Hyper-parameters for the UTAE Track-O training scaffold."""

    seq_len: int = 6
    in_channels: int = 1
    hidden_channels: int = 16
    num_heads: int = 4
    lr: float = 3e-4
    weight_decay: float = 1e-4
    batch_size: int = 8
    max_epochs: int = 120
    early_stopping_rounds: int = 16
    min_delta: float = 1e-4
    seed: int = 42
    pos_weight_clip_max: float = 50.0
    device: str = "cpu"


class TemporalSelfBlock(nn.Module):
    """Pre-norm self-attention + feed-forward block over a token sequence."""

    def __init__(self, dim: int, num_heads: int):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(embed_dim=dim, num_heads=num_heads, batch_first=True)
        self.norm2 = nn.LayerNorm(dim)
        self.ffn = nn.Sequential(
            nn.Linear(dim, dim * 2),
            nn.GELU(),
            nn.Linear(dim * 2, dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        attended = self.norm1(x)
        attended, _ = self.attn(attended, attended, attended, need_weights=False)
        x = x + attended
        return x + self.ffn(self.norm2(x))


class TinyUTAE(nn.Module):
    """Small UTAE-style model: per-frame CNN, per-pixel temporal attention, CNN decoder."""

    def __init__(self, in_channels: int = 1, hidden_channels: int = 16, num_heads: int = 4):
        super().__init__()
        self.hidden_channels = hidden_channels

        self.frame_encoder = nn.Sequential(
            nn.Conv2d(in_channels, hidden_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden_channels, hidden_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )
        self.temporal_block = TemporalSelfBlock(hidden_channels, num_heads)

        # Learned query that pools each pixel's time series via cross-attention.
        self.query_token = nn.Parameter(torch.zeros(1, 1, hidden_channels))
        self.cross_attn = nn.MultiheadAttention(embed_dim=hidden_channels, num_heads=num_heads, batch_first=True)
        self.decoder = nn.Sequential(
            nn.Conv2d(hidden_channels, hidden_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden_channels, 1, kernel_size=1),
        )

        nn.init.normal_(self.query_token, mean=0.0, std=0.02)

    def forward(self, x_seq: torch.Tensor) -> torch.Tensor:
        """Map [B,T,C,H,W] sequences to [B,1,H,W] logits."""
        b, t, c, h, w = x_seq.shape
        feats = self.frame_encoder(x_seq.reshape(b * t, c, h, w))
        d = feats.shape[1]

        # One token sequence of length T per spatial location: [BHW,T,D].
        tokens = feats.reshape(b, t, d, h, w).permute(0, 3, 4, 1, 2).contiguous().reshape(b * h * w, t, d)
        tokens = self.temporal_block(tokens)

        query = self.query_token.expand(tokens.shape[0], -1, -1)
        pooled, _ = self.cross_attn(query, tokens, tokens, need_weights=False)  # [BHW,1,D]
        pooled = pooled[:, 0, :].reshape(b, h, w, d).permute(0, 3, 1, 2).contiguous()
        return self.decoder(pooled)


def _choose_device(device_text: str) -> torch.device:
    """Use CUDA only when it is both requested and available."""
    if device_text == "cuda" and torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")


def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader:
    """Wrap numpy arrays as a float32 TensorDataset loader."""
    dataset = TensorDataset(
        torch.from_numpy(x.astype(np.float32)),
        torch.from_numpy(y.astype(np.float32)),
    )
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)


def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray:
    """Run the model over a loader and return flattened sigmoid probabilities."""
    model.eval()
    collected: List[np.ndarray] = []
    with torch.no_grad():
        for batch_x, _ in loader:
            logits = model(batch_x.to(device))
            collected.append(torch.sigmoid(logits).detach().cpu().numpy())
    if not collected:
        return np.zeros((0,), dtype=np.float32)
    return np.concatenate(collected, axis=0).reshape(-1)
def train_utae_track_o(
    x_train: np.ndarray,
    y_train: np.ndarray,
    x_val: np.ndarray,
    y_val: np.ndarray,
    cfg: UTAETrackOConfig,
):
    """Train TinyUTAE with early stopping.

    Returns (model, history, val_metrics, best_epoch, pos_weight).
    """
    if x_train.ndim != 5 or x_val.ndim != 5:
        raise ValueError("x_train and x_val must be 5D arrays [N,T,C,H,W]")
    if y_train.ndim != 4 or y_val.ndim != 4:
        raise ValueError("y_train and y_val must be 4D arrays [N,1,H,W]")

    torch.manual_seed(cfg.seed)
    np.random.seed(cfg.seed)

    device = _choose_device(cfg.device)
    model = TinyUTAE(
        in_channels=cfg.in_channels,
        hidden_channels=cfg.hidden_channels,
        num_heads=cfg.num_heads,
    ).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr, weight_decay=cfg.weight_decay)

    # Rebalance the pixel-wise BCE by the (clipped) negative/positive pixel ratio.
    total_px = float(y_train.size)
    pos_px = float(np.sum(y_train))
    neg_px = max(1.0, total_px - pos_px)
    pos_weight = float(np.clip(neg_px / max(pos_px, 1.0), 1.0, cfg.pos_weight_clip_max))
    criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device))

    train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True)
    val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False)

    history: List[Dict[str, float]] = []
    best_epoch = 1
    best_val_loss = float("inf")
    best_state: Dict[str, torch.Tensor] | None = None
    patience_used = 0

    for epoch in range(1, cfg.max_epochs + 1):
        model.train()
        epoch_train: List[float] = []
        for batch_x, batch_y in train_loader:
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)
            optimizer.zero_grad(set_to_none=True)
            loss = criterion(model(batch_x), batch_y)
            loss.backward()
            optimizer.step()
            epoch_train.append(float(loss.item()))

        model.eval()
        epoch_val: List[float] = []
        with torch.no_grad():
            for batch_x, batch_y in val_loader:
                batch_x, batch_y = batch_x.to(device), batch_y.to(device)
                epoch_val.append(float(criterion(model(batch_x), batch_y).item()))

        tr_loss = float(np.mean(epoch_train)) if epoch_train else float("nan")
        va_loss = float(np.mean(epoch_val)) if epoch_val else float("nan")
        history.append(
            {
                "epoch": float(epoch),
                "train_loss": tr_loss,
                "val_loss": va_loss,
                "learning_rate": float(optimizer.param_groups[0]["lr"]),
            }
        )

        if va_loss < best_val_loss - cfg.min_delta:
            best_val_loss = va_loss
            best_epoch = epoch
            best_state = deepcopy(model.state_dict())
            patience_used = 0
        else:
            patience_used += 1
        if patience_used >= cfg.early_stopping_rounds:
            break

    if best_state is not None:
        model.load_state_dict(best_state)

    val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7)
    val_true = y_val.reshape(-1).astype(np.float32)

    # NOTE(review): "day-to-day change" is computed on *sorted* probabilities — confirm intent.
    mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0
    metrics = {
        "auprc": float(average_precision_score(val_true, val_prob)),
        "auroc": float(roc_auc_score(val_true, val_prob)),
        "brier": float(brier_score_loss(val_true, val_prob)),
        "nll": float(log_loss(val_true, val_prob)),
        "ece": float(binary_ece(val_true, val_prob, n_bins=15)),
        "mean_day_to_day_change": mean_change,
        "normalized_consistency_score": normalized_consistency_score(mean_change),
    }

    return model, history, metrics, best_epoch, pos_weight


def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None:
    """Persist the per-epoch history as CSV plus a loss-curve PNG under output_dir."""
    output_dir.mkdir(parents=True, exist_ok=True)

    with (output_dir / "history.csv").open("w", encoding="utf-8", newline="") as fh:
        writer = csv.DictWriter(fh, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"])
        writer.writeheader()
        writer.writerows(history)

    epochs = [int(row["epoch"]) for row in history]
    train_curve = [float(row["train_loss"]) for row in history]
    val_curve = [float(row["val_loss"]) for row in history]

    plt.figure(figsize=(8, 5))
    plt.plot(epochs, train_curve, label="train_bce", marker="o", linewidth=1.4)
    plt.plot(epochs, val_curve, label="val_bce", marker="s", linewidth=1.2)
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.title("UTAE Track-O: train loss vs epoch")
    plt.grid(alpha=0.3)
    plt.legend()
    plt.tight_layout()
    plt.savefig(output_dir / "loss_curve.png", dpi=150)
    plt.close()


def build_experiment_setting(
    cfg: UTAETrackOConfig,
    best_epoch: int,
    pos_weight: float,
    metrics: Dict[str, float],
) -> Dict[str, Any]:
    """Assemble the JSON-serializable experiment record for a finished run."""
    return {
        "benchmark": {
            "task": "Track-O",
            "model_name": "utae",
            "run_time": datetime.now().isoformat(),
        },
        "evaluation_protocol": {
            "discrimination": {"primary": "auprc", "secondary": "auroc"},
            "reliability": ["brier", "nll", "ece"],
            "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"],
        },
        "training": {
            "train_unit": "epoch",
            "max_epochs": cfg.max_epochs,
            "early_stopping_rounds": cfg.early_stopping_rounds,
            "best_epoch": best_epoch,
            "seed": cfg.seed,
            "batch_size": cfg.batch_size,
            "seq_len": cfg.seq_len,
        },
        "optimizer": {
            "name": "AdamW",
            "lr": cfg.lr,
            "weight_decay": cfg.weight_decay,
        },
        "learning_weight": {
            "type": "pixel_pos_weight",
            "value": pos_weight,
            "clip_max": cfg.pos_weight_clip_max,
        },
        "params": asdict(cfg),
        "val_metrics": metrics,
        "note": "This module supports both real data and synthetic smoke demonstration.",
    }


def run_synthetic_demo(
    output_dir: Path,
    seed: int = 42,
    n_samples: int = 160,
    seq_len: int = 6,
    image_size: int = 24,
    max_epochs: int = 60,
    early_stopping_rounds: int = 12,
) -> None:
    """End-to-end smoke run on synthetic sequences: train, evaluate, dump artifacts."""
    x, y = make_synthetic_fire_sequences(
        n_samples=n_samples,
        seq_len=seq_len,
        image_size=image_size,
        seed=seed,
    )
    x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed)

    cfg = UTAETrackOConfig(
        seed=seed,
        seq_len=seq_len,
        max_epochs=max_epochs,
        early_stopping_rounds=early_stopping_rounds,
        device="cpu",
    )

    model, history, val_metrics, best_epoch, pos_weight = train_utae_track_o(
        x_train,
        y_train,
        x_val,
        y_val,
        cfg,
    )

    test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False)
    test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 1e-7)
    test_true = y_test.reshape(-1).astype(np.float32)

    test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0
    test_metrics = {
        "auprc": float(average_precision_score(test_true, test_prob)),
        "auroc": float(roc_auc_score(test_true, test_prob)),
        "brier": float(brier_score_loss(test_true, test_prob)),
        "nll": float(log_loss(test_true, test_prob)),
        "ece": float(binary_ece(test_true, test_prob, n_bins=15)),
        "mean_day_to_day_change": test_mean_change,
        "normalized_consistency_score": normalized_consistency_score(test_mean_change),
    }

    output_dir.mkdir(parents=True, exist_ok=True)
    save_history_and_plot(history, output_dir)

    torch.save(
        {
            "state_dict": model.state_dict(),
            "config": asdict(cfg),
            "best_epoch": best_epoch,
        },
        output_dir / "utae_model.pt",
    )

    setting = build_experiment_setting(cfg, best_epoch=best_epoch, pos_weight=pos_weight, metrics=val_metrics)
    setting["data"] = {
        "n_samples": n_samples,
        "image_size": image_size,
        "seq_len": seq_len,
        "split": {"train": int(x_train.shape[0]), "val": int(x_val.shape[0]), "test": int(x_test.shape[0])},
    }
    setting["test_metrics"] = test_metrics

    (output_dir / "experiment_setting.json").write_text(json.dumps(setting, indent=2), encoding="utf-8")
    (output_dir / "metrics.json").write_text(
        json.dumps({"val": val_metrics, "test": test_metrics, "best_epoch": best_epoch}, indent=2),
        encoding="utf-8",
    )


def parse_args() -> argparse.Namespace:
    """CLI arguments for the UTAE synthetic smoke demo."""
    parser = argparse.ArgumentParser(description="Run UTAE Track-O synthetic smoke demo")
    parser.add_argument("--output_dir", default=None)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--n_samples", type=int, default=160)
    parser.add_argument("--seq_len", type=int, default=6)
    parser.add_argument("--image_size", type=int, default=24)
    parser.add_argument("--max_epochs", type=int, default=60)
    parser.add_argument("--early_stopping_rounds", type=int, default=12)
    return parser.parse_args()


def main() -> None:
    """Entry point: resolve the output directory and launch the demo."""
    args = parse_args()
    default_root = Path(__file__).resolve().parents[1] / "runs_scaffold"
    out = (
        Path(args.output_dir)
        if args.output_dir
        else default_root / f"utae_synthetic_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
    )

    run_synthetic_demo(
        output_dir=out,
        seed=args.seed,
        n_samples=args.n_samples,
        seq_len=args.seq_len,
        image_size=args.image_size,
        max_epochs=args.max_epochs,
        early_stopping_rounds=args.early_stopping_rounds,
    )
    print(f"[done] utae synthetic demo saved to: {out}")


if __name__ == "__main__":
    main()
def utae_builder(task: str, in_channels: int = 1, out_dim: int = 1, **kwargs: Any) -> nn.Module:
    """Catalog builder: wrap TinyUTAE in a SegmentationPort for the 'segmentation' task."""
    require_task(task, {"segmentation"}, "utae")
    init_kwargs = filter_init_kwargs(TinyUTAE, {"in_channels": int(in_channels), **kwargs})
    return SegmentationPort(model=TinyUTAE(**init_kwargs), out_channels=int(out_dim))


__all__ = ["TinyUTAE", "utae_builder"]


class VIIRS375mActiveFire(nn.Module):
    """Algorithm-inspired VIIRS 375 m active-fire detector with a learnable calibration head."""

    def __init__(
        self,
        in_channels: int = 5,
        hidden_dim: int = 24,
        out_dim: int = 1,
        context_kernel: int = 7,
        dropout: float = 0.1,
    ):
        super().__init__()
        if in_channels < 5:
            raise ValueError(
                "VIIRS375mActiveFire expects at least 5 channels: "
                "mid_ir, long_ir, frp_proxy, clear_sky, dryness."
            )
        if hidden_dim <= 0:
            raise ValueError(f"hidden_dim must be positive, got {hidden_dim}")
        if out_dim <= 0:
            raise ValueError(f"out_dim must be positive, got {out_dim}")
        if context_kernel <= 1 or context_kernel % 2 == 0:
            raise ValueError(f"context_kernel must be an odd integer > 1, got {context_kernel}")
        if not 0.0 <= dropout < 1.0:
            raise ValueError(f"dropout must be in [0,1), got {dropout}")

        self.in_channels = int(in_channels)
        self.context_kernel = int(context_kernel)

        # Raw channels plus 4 derived evidence maps (see forward()).
        evidence_channels = self.in_channels + 4
        self.context_pool = nn.AvgPool2d(kernel_size=context_kernel, stride=1, padding=context_kernel // 2)
        self.evidence_encoder = nn.Sequential(
            nn.Conv2d(evidence_channels, hidden_dim, kernel_size=1),
            nn.GELU(),
            nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, padding=1),
            nn.GELU(),
        )
        self.calibration_head = nn.Sequential(
            nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, padding=1),
            nn.GELU(),
            nn.Dropout2d(dropout) if dropout > 0 else nn.Identity(),
            nn.Conv2d(hidden_dim, out_dim, kernel_size=1),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map (batch, channels, H, W) inputs to (batch, out_dim, H, W) logits."""
        if x.ndim != 4:
            raise ValueError(
                "VIIRS375mActiveFire expects input shape (batch, channels, height, width), "
                f"got {tuple(x.shape)}."
            )
        if x.size(1) < 5:
            raise ValueError(f"VIIRS375mActiveFire expected at least 5 channels, got {x.size(1)}.")

        x = x[:, : self.in_channels]
        mid_ir = x[:, 0:1]
        long_ir = x[:, 1:2]
        frp_proxy = x[:, 2:3]
        clear_sky = x[:, 3:4]
        dryness = x[:, 4:5]

        # Hand-crafted evidence: local thermal anomaly, split-window difference,
        # rectified fire evidence, and an observation-confidence gate.
        local_background = self.context_pool(mid_ir)
        thermal_excess = mid_ir - local_background
        split_window = mid_ir - long_ir
        fire_evidence = F.relu(thermal_excess) + 0.5 * F.relu(split_window)
        confidence_gate = torch.sigmoid(clear_sky) * torch.sigmoid(dryness)

        evidence = torch.cat(
            [
                x,
                thermal_excess,
                split_window,
                fire_evidence,
                confidence_gate + frp_proxy,
            ],
            dim=1,
        )
        return self.calibration_head(self.evidence_encoder(evidence))


def viirs_375m_active_fire_builder(
    task: str,
    in_channels: int = 5,
    hidden_dim: int = 24,
    out_dim: int = 1,
    context_kernel: int = 7,
    dropout: float = 0.1,
    **kwargs,
) -> nn.Module:
    """Catalog builder for VIIRS375mActiveFire (segmentation only)."""
    _ = kwargs
    if task.lower() != "segmentation":
        raise ValueError(
            f"viirs_375m_active_fire is segmentation-only in PyHazards, got task={task!r}."
        )
    return VIIRS375mActiveFire(
        in_channels=in_channels,
        hidden_dim=hidden_dim,
        out_dim=out_dim,
        context_kernel=context_kernel,
        dropout=dropout,
    )


__all__ = ["VIIRS375mActiveFire", "viirs_375m_active_fire_builder"]


@dataclass
class ViTSegmenterTrackOConfig:
    """Hyper-parameters for the ViT-segmenter Track-O training scaffold."""

    seq_len: int = 6
    in_channels: int = 1
    patch_size: int = 4
    embed_dim: int = 64
    depth: int = 4
    num_heads: int = 4
    mlp_ratio: float = 2.0
    dropout: float = 0.1
    lr: float = 2e-4
    weight_decay: float = 1e-4
    batch_size: int = 8
    max_epochs: int = 120
    early_stopping_rounds: int = 16
    min_delta: float = 1e-4
    seed: int = 42
    pos_weight_clip_max: float = 50.0
    device: str = "cpu"
class TransformerBlock(nn.Module):
    """Pre-norm transformer encoder block (self-attention + MLP) over [B,N,D] tokens."""

    def __init__(self, dim: int, num_heads: int, mlp_ratio: float, dropout: float):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(embed_dim=dim, num_heads=num_heads, dropout=dropout, batch_first=True)
        self.norm2 = nn.LayerNorm(dim)
        self.mlp = nn.Sequential(
            nn.Linear(dim, int(dim * mlp_ratio)),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(int(dim * mlp_ratio), dim),
            nn.Dropout(dropout),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        attended = self.norm1(x)
        attended, _ = self.attn(attended, attended, attended, need_weights=False)
        x = x + attended
        return x + self.mlp(self.norm2(x))


class TinyViTSegmenter(nn.Module):
    """Patch-embedding ViT over a softmax-weighted temporal fusion of input frames."""

    def __init__(
        self,
        in_channels: int = 1,
        patch_size: int = 4,
        embed_dim: int = 64,
        depth: int = 4,
        num_heads: int = 4,
        mlp_ratio: float = 2.0,
        dropout: float = 0.1,
    ):
        super().__init__()
        self.patch_size = patch_size

        self.patch_embed = nn.Conv2d(
            in_channels,
            embed_dim,
            kernel_size=patch_size,
            stride=patch_size,
            padding=0,
        )
        self.blocks = nn.ModuleList(
            [TransformerBlock(embed_dim, num_heads, mlp_ratio=mlp_ratio, dropout=dropout) for _ in range(depth)]
        )
        self.norm = nn.LayerNorm(embed_dim)

        self.seg_head = nn.Sequential(
            nn.Conv2d(embed_dim, embed_dim // 2, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(embed_dim // 2, 1, kernel_size=1),
        )

    def _temporal_fusion(self, x_seq: torch.Tensor) -> torch.Tensor:
        """Collapse [B,T,C,H,W] to [B,C,H,W] with softmax weights over per-frame means."""
        frame_scores = x_seq.mean(dim=(2, 3, 4))
        weights = torch.softmax(frame_scores, dim=1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
        return torch.sum(x_seq * weights, dim=1)

    def forward(self, x_seq: torch.Tensor) -> torch.Tensor:
        """Map [B,T,C,H,W] sequences to [B,1,H,W] logits at the original resolution."""
        _, _, _, h, w = x_seq.shape
        fused = self._temporal_fusion(x_seq)  # [B,C,H,W]

        feat = self.patch_embed(fused)  # [B,D,Hp,Wp]
        b, d, hp, wp = feat.shape

        tokens = feat.flatten(2).transpose(1, 2).contiguous()  # [B,N,D]
        for block in self.blocks:
            tokens = block(tokens)
        tokens = self.norm(tokens)

        feat = tokens.transpose(1, 2).reshape(b, d, hp, wp).contiguous()
        logits_small = self.seg_head(feat)
        # Upsample patch-level logits back to the input resolution.
        return F.interpolate(logits_small, size=(h, w), mode="bilinear", align_corners=False)


def _choose_device(device_text: str) -> torch.device:
    """Use CUDA only when it is both requested and available."""
    if device_text == "cuda" and torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")


def _build_loader(x: np.ndarray, y: np.ndarray, batch_size: int, shuffle: bool) -> DataLoader:
    """Wrap numpy arrays as a float32 TensorDataset loader."""
    dataset = TensorDataset(
        torch.from_numpy(x.astype(np.float32)),
        torch.from_numpy(y.astype(np.float32)),
    )
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)


def _predict_probabilities(model: nn.Module, loader: DataLoader, device: torch.device) -> np.ndarray:
    """Run the model over a loader and return flattened sigmoid probabilities."""
    model.eval()
    collected: List[np.ndarray] = []
    with torch.no_grad():
        for batch_x, _ in loader:
            logits = model(batch_x.to(device))
            collected.append(torch.sigmoid(logits).detach().cpu().numpy())
    if not collected:
        return np.zeros((0,), dtype=np.float32)
    return np.concatenate(collected, axis=0).reshape(-1)
float(np.clip(raw_pos_weight, 1.0, cfg.pos_weight_clip_max)) + + criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight], device=device)) + + train_loader = _build_loader(x_train, y_train, batch_size=cfg.batch_size, shuffle=True) + val_loader = _build_loader(x_val, y_val, batch_size=cfg.batch_size, shuffle=False) + + history: List[Dict[str, float]] = [] + best_epoch = 1 + best_val_loss = float("inf") + best_state: Dict[str, torch.Tensor] | None = None + wait = 0 + + for epoch in range(1, cfg.max_epochs + 1): + model.train() + train_losses: List[float] = [] + + for xb, yb in train_loader: + xb = xb.to(device) + yb = yb.to(device) + + optimizer.zero_grad(set_to_none=True) + logits = model(xb) + loss = criterion(logits, yb) + loss.backward() + optimizer.step() + train_losses.append(float(loss.item())) + + model.eval() + val_losses: List[float] = [] + with torch.no_grad(): + for xb, yb in val_loader: + xb = xb.to(device) + yb = yb.to(device) + logits = model(xb) + loss = criterion(logits, yb) + val_losses.append(float(loss.item())) + + tr_loss = float(np.mean(train_losses)) if train_losses else float("nan") + va_loss = float(np.mean(val_losses)) if val_losses else float("nan") + + history.append( + { + "epoch": float(epoch), + "train_loss": tr_loss, + "val_loss": va_loss, + "learning_rate": float(optimizer.param_groups[0]["lr"]), + } + ) + + if va_loss < best_val_loss - cfg.min_delta: + best_val_loss = va_loss + best_epoch = epoch + best_state = deepcopy(model.state_dict()) + wait = 0 + else: + wait += 1 + + if wait >= cfg.early_stopping_rounds: + break + + if best_state is not None: + model.load_state_dict(best_state) + + val_prob = np.clip(_predict_probabilities(model, val_loader, device=device), 1e-7, 1.0 - 1e-7) + val_true = y_val.reshape(-1).astype(np.float32) + + mean_change = float(np.mean(np.abs(np.diff(np.sort(val_prob))))) if len(val_prob) > 1 else 0.0 + metrics = { + "auprc": float(average_precision_score(val_true, val_prob)), + "auroc": 
float(roc_auc_score(val_true, val_prob)), + "brier": float(brier_score_loss(val_true, val_prob)), + "nll": float(log_loss(val_true, val_prob)), + "ece": float(binary_ece(val_true, val_prob, n_bins=15)), + "mean_day_to_day_change": mean_change, + "normalized_consistency_score": normalized_consistency_score(mean_change), + } + + return model, history, metrics, best_epoch, pos_weight + + +def save_history_and_plot(history: List[Dict[str, float]], output_dir: Path) -> None: + output_dir.mkdir(parents=True, exist_ok=True) + + history_csv = output_dir / "history.csv" + with history_csv.open("w", encoding="utf-8", newline="") as f: + writer = csv.DictWriter(f, fieldnames=["epoch", "train_loss", "val_loss", "learning_rate"]) + writer.writeheader() + writer.writerows(history) + + x = [int(r["epoch"]) for r in history] + y_tr = [float(r["train_loss"]) for r in history] + y_va = [float(r["val_loss"]) for r in history] + + plt.figure(figsize=(8, 5)) + plt.plot(x, y_tr, label="train_bce", marker="o", linewidth=1.4) + plt.plot(x, y_va, label="val_bce", marker="s", linewidth=1.2) + plt.xlabel("epoch") + plt.ylabel("loss") + plt.title("ViT Segmenter Track-O: train loss vs epoch") + plt.grid(alpha=0.3) + plt.legend() + plt.tight_layout() + plt.savefig(output_dir / "loss_curve.png", dpi=150) + plt.close() + + +def build_experiment_setting( + cfg: ViTSegmenterTrackOConfig, + best_epoch: int, + pos_weight: float, + metrics: Dict[str, float], +) -> Dict[str, Any]: + return { + "benchmark": { + "task": "Track-O", + "model_name": "vit_segmenter", + "run_time": datetime.now().isoformat(), + }, + "evaluation_protocol": { + "discrimination": {"primary": "auprc", "secondary": "auroc"}, + "reliability": ["brier", "nll", "ece"], + "temporal_consistency": ["mean_day_to_day_change", "normalized_consistency_score"], + }, + "training": { + "train_unit": "epoch", + "max_epochs": cfg.max_epochs, + "early_stopping_rounds": cfg.early_stopping_rounds, + "best_epoch": best_epoch, + "seed": cfg.seed, + 
"batch_size": cfg.batch_size, + "seq_len": cfg.seq_len, + }, + "optimizer": { + "name": "AdamW", + "lr": cfg.lr, + "weight_decay": cfg.weight_decay, + }, + "learning_weight": { + "type": "pixel_pos_weight", + "value": pos_weight, + "clip_max": cfg.pos_weight_clip_max, + }, + "params": asdict(cfg), + "val_metrics": metrics, + "note": "This module supports both real data and synthetic smoke demonstration.", + } + + +def run_synthetic_demo( + output_dir: Path, + seed: int = 42, + n_samples: int = 160, + seq_len: int = 6, + image_size: int = 24, + max_epochs: int = 60, + early_stopping_rounds: int = 12, +) -> None: + x, y = make_synthetic_fire_sequences( + n_samples=n_samples, + seq_len=seq_len, + image_size=image_size, + seed=seed, + ) + x_train, y_train, x_val, y_val, x_test, y_test = split_train_val_test(x, y, seed=seed) + + cfg = ViTSegmenterTrackOConfig( + seed=seed, + seq_len=seq_len, + max_epochs=max_epochs, + early_stopping_rounds=early_stopping_rounds, + device="cpu", + ) + + model, history, val_metrics, best_epoch, pos_weight = train_vit_segmenter_track_o( + x_train, + y_train, + x_val, + y_val, + cfg, + ) + + test_loader = _build_loader(x_test, y_test, batch_size=cfg.batch_size, shuffle=False) + test_prob = np.clip(_predict_probabilities(model, test_loader, _choose_device(cfg.device)), 1e-7, 1.0 - 1e-7) + test_true = y_test.reshape(-1).astype(np.float32) + + test_mean_change = float(np.mean(np.abs(np.diff(np.sort(test_prob))))) if len(test_prob) > 1 else 0.0 + test_metrics = { + "auprc": float(average_precision_score(test_true, test_prob)), + "auroc": float(roc_auc_score(test_true, test_prob)), + "brier": float(brier_score_loss(test_true, test_prob)), + "nll": float(log_loss(test_true, test_prob)), + "ece": float(binary_ece(test_true, test_prob, n_bins=15)), + "mean_day_to_day_change": test_mean_change, + "normalized_consistency_score": normalized_consistency_score(test_mean_change), + } + + output_dir.mkdir(parents=True, exist_ok=True) + 
save_history_and_plot(history, output_dir) + + torch.save( + { + "state_dict": model.state_dict(), + "config": asdict(cfg), + "best_epoch": best_epoch, + }, + output_dir / "vit_segmenter_model.pt", + ) + + setting = build_experiment_setting(cfg, best_epoch=best_epoch, pos_weight=pos_weight, metrics=val_metrics) + setting["data"] = { + "n_samples": n_samples, + "image_size": image_size, + "seq_len": seq_len, + "split": {"train": int(x_train.shape[0]), "val": int(x_val.shape[0]), "test": int(x_test.shape[0])}, + } + setting["test_metrics"] = test_metrics + + (output_dir / "experiment_setting.json").write_text(json.dumps(setting, indent=2), encoding="utf-8") + (output_dir / "metrics.json").write_text( + json.dumps({"val": val_metrics, "test": test_metrics, "best_epoch": best_epoch}, indent=2), + encoding="utf-8", + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Run ViT Segmenter Track-O synthetic smoke demo") + parser.add_argument("--output_dir", default=None) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--n_samples", type=int, default=160) + parser.add_argument("--seq_len", type=int, default=6) + parser.add_argument("--image_size", type=int, default=24) + parser.add_argument("--max_epochs", type=int, default=60) + parser.add_argument("--early_stopping_rounds", type=int, default=12) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + base = Path(__file__).resolve().parents[1] / "runs_scaffold" + out = ( + Path(args.output_dir) + if args.output_dir + else base / f"vit_segmenter_synthetic_{datetime.now().strftime('%Y%m%d_%H%M%S')}" + ) + + run_synthetic_demo( + output_dir=out, + seed=args.seed, + n_samples=args.n_samples, + seq_len=args.seq_len, + image_size=args.image_size, + max_epochs=args.max_epochs, + early_stopping_rounds=args.early_stopping_rounds, + ) + print(f"[done] vit_segmenter synthetic demo saved to: {out}") + + +if __name__ == "__main__": + main() + 
+from ._wildfire_benchmark_utils import SegmentationPort, filter_init_kwargs, require_task + + +def vit_segmenter_builder(task: str, in_channels: int = 1, out_dim: int = 1, **kwargs: Any) -> nn.Module: + require_task(task, {"segmentation"}, "vit_segmenter") + init_kwargs = filter_init_kwargs(TinyViTSegmenter, {"in_channels": int(in_channels), **kwargs}) + model = TinyViTSegmenter(**init_kwargs) + return SegmentationPort(model=model, out_channels=int(out_dim)) + + +__all__ = ["TinyViTSegmenter", "vit_segmenter_builder"] diff --git a/pyhazards/models/wavecastnet.py b/pyhazards/models/wavecastnet.py new file mode 100644 index 00000000..18c738ef --- /dev/null +++ b/pyhazards/models/wavecastnet.py @@ -0,0 +1,352 @@ +from __future__ import annotations + +import torch +import torch.nn as nn +import torch.nn.init as init + + +class ConvLEMCell(nn.Module): + """ + Convolutional Long Expressive Memory (ConvLEM) cell used by WaveCastNet. + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int = 3, + dt: float = 1.0, + activation: str = "tanh", + use_reset_gate: bool = False, + ): + super().__init__() + + if activation == "tanh": + self.activation = torch.tanh + elif activation == "relu": + self.activation = torch.relu + else: + raise ValueError( + "Unsupported activation: {activation}. 
Use 'tanh' or 'relu'.".format( + activation=activation + ) + ) + + self.dt = float(dt) + self.use_reset_gate = bool(use_reset_gate) + self.out_channels = int(out_channels) + + padding = (kernel_size - 1) // 2 + if self.use_reset_gate: + self.conv_x = nn.Conv2d( + in_channels, + 5 * out_channels, + kernel_size, + padding=padding, + ) + self.conv_h = nn.Conv2d( + out_channels, + 4 * out_channels, + kernel_size, + padding=padding, + ) + else: + self.conv_x = nn.Conv2d( + in_channels, + 4 * out_channels, + kernel_size, + padding=padding, + ) + self.conv_h = nn.Conv2d( + out_channels, + 3 * out_channels, + kernel_size, + padding=padding, + ) + + self.conv_c = nn.Conv2d(out_channels, out_channels, kernel_size, padding=padding) + self.W_c1 = nn.Parameter(torch.empty(out_channels, 1, 1)) + self.W_c2 = nn.Parameter(torch.empty(out_channels, 1, 1)) + if self.use_reset_gate: + self.W_c4 = nn.Parameter(torch.empty(out_channels, 1, 1)) + + self.reset_parameters() + + def reset_parameters(self) -> None: + for name, param in self.named_parameters(): + if "W_c" in name: + nn.init.constant_(param, 0.0) + elif param.ndim > 1: + init.xavier_uniform_(param) + else: + nn.init.constant_(param, 0.0) + + def forward( + self, + x: torch.Tensor, + h: torch.Tensor, + c: torch.Tensor, + ) -> tuple[torch.Tensor, torch.Tensor]: + if x.ndim != 4 or h.ndim != 4 or c.ndim != 4: + raise ValueError("ConvLEMCell expects x, h, c shaped (B, C, H, W).") + + conv_x_out = self.conv_x(x) + conv_h_out = self.conv_h(h) + + if self.use_reset_gate: + i_dt1, i_dt2, g_dx2, i_c, i_h = torch.chunk(conv_x_out, chunks=5, dim=1) + h_dt1, h_dt2, h_h, g_dh2 = torch.chunk(conv_h_out, chunks=4, dim=1) + + ms_dt = self.dt * torch.sigmoid(i_dt2 + h_dt2 + self.W_c2 * c) + c = (1.0 - ms_dt) * c + ms_dt * self.activation(i_h + h_h) + + gate2 = self.dt * torch.sigmoid(g_dx2 + g_dh2 + self.W_c4 * c) + conv_c_out = gate2 * self.conv_c(c) + + ms_dt_bar = self.dt * torch.sigmoid(i_dt1 + h_dt1 + self.W_c1 * c) + h = (1.0 - 
ms_dt_bar) * h + ms_dt_bar * self.activation(conv_c_out + i_c) + else: + i_dt1, i_dt2, i_c, i_h = torch.chunk(conv_x_out, chunks=4, dim=1) + h_dt1, h_dt2, h_h = torch.chunk(conv_h_out, chunks=3, dim=1) + + ms_dt = self.dt * torch.sigmoid(i_dt2 + h_dt2 + self.W_c2 * c) + c = (1.0 - ms_dt) * c + ms_dt * self.activation(i_h + h_h) + + conv_c_out = self.conv_c(c) + ms_dt_bar = self.dt * torch.sigmoid(i_dt1 + h_dt1 + self.W_c1 * c) + h = (1.0 - ms_dt_bar) * h + ms_dt_bar * self.activation(conv_c_out + i_c) + + return h, c + + +class WaveCastNet(nn.Module): + """ + Sequence-to-sequence wavefield forecasting model based on ConvLEM cells. + + Input shape: (B, C, T_in, H, W) + Output shape: (B, C, T_out, H, W) + """ + + def __init__( + self, + in_channels: int, + height: int, + width: int, + temporal_in: int, + temporal_out: int, + hidden_dim: int = 144, + num_layers: int = 2, + kernel_size: int = 3, + dt: float = 1.0, + activation: str = "tanh", + dropout: float = 0.1, + ): + super().__init__() + + self.in_channels = int(in_channels) + self.height = int(height) + self.width = int(width) + self.temporal_in = int(temporal_in) + self.temporal_out = int(temporal_out) + self.hidden_dim = int(hidden_dim) + self.num_layers = int(num_layers) + + padding = (kernel_size - 1) // 2 + proj_dim = max(1, self.hidden_dim // 2) + + self.input_embed = nn.Sequential( + nn.Conv2d(self.in_channels, self.hidden_dim, kernel_size, padding=padding), + nn.BatchNorm2d(self.hidden_dim), + nn.ReLU(), + nn.Dropout2d(dropout), + ) + + self.encoder_layers = nn.ModuleList( + [ + ConvLEMCell( + in_channels=self.hidden_dim, + out_channels=self.hidden_dim, + kernel_size=kernel_size, + dt=dt, + activation=activation, + use_reset_gate=False, + ) + for _ in range(self.num_layers) + ] + ) + self.decoder_layers = nn.ModuleList( + [ + ConvLEMCell( + in_channels=self.hidden_dim, + out_channels=self.hidden_dim, + kernel_size=kernel_size, + dt=dt, + activation=activation, + use_reset_gate=False, + ) + for _ in 
range(self.num_layers) + ] + ) + + self.output_proj = nn.Sequential( + nn.Conv2d(self.hidden_dim, proj_dim, kernel_size, padding=padding), + nn.ReLU(), + nn.Dropout2d(dropout), + nn.Conv2d(proj_dim, self.in_channels, kernel_size, padding=padding), + ) + self.dropout = nn.Dropout2d(dropout) + + def _init_states(self, x: torch.Tensor) -> tuple[list[torch.Tensor], list[torch.Tensor]]: + hidden = [ + x.new_zeros(x.size(0), self.hidden_dim, self.height, self.width) + for _ in range(self.num_layers) + ] + memory = [ + x.new_zeros(x.size(0), self.hidden_dim, self.height, self.width) + for _ in range(self.num_layers) + ] + return hidden, memory + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 5: + raise ValueError( + "WaveCastNet expects x shaped (B, C, T, H, W), got {shape}".format( + shape=tuple(x.shape) + ) + ) + + batch_size, channels, temporal_in, height, width = x.shape + if channels != self.in_channels: + raise ValueError( + "Expected in_channels={expected}, got {actual}".format( + expected=self.in_channels, + actual=channels, + ) + ) + if temporal_in != self.temporal_in: + raise ValueError( + "Expected temporal_in={expected}, got {actual}".format( + expected=self.temporal_in, + actual=temporal_in, + ) + ) + if height != self.height or width != self.width: + raise ValueError( + "Expected spatial size ({h}, {w}), got ({actual_h}, {actual_w})".format( + h=self.height, + w=self.width, + actual_h=height, + actual_w=width, + ) + ) + + encoder_h, encoder_c = self._init_states(x) + for t in range(self.temporal_in): + encoded = self.input_embed(x[:, :, t, :, :]) + for i, layer in enumerate(self.encoder_layers): + layer_input = encoded if i == 0 else encoder_h[i - 1] + encoder_h[i], encoder_c[i] = layer(layer_input, encoder_h[i], encoder_c[i]) + + decoder_h = [state.clone() for state in encoder_h] + decoder_c = [state.clone() for state in encoder_c] + + outputs = [] + for t in range(self.temporal_out): + decoder_input = encoder_h[-1] if t == 0 else 
decoder_h[-1] + for i, layer in enumerate(self.decoder_layers): + layer_input = decoder_input if i == 0 else decoder_h[i - 1] + decoder_h[i], decoder_c[i] = layer(layer_input, decoder_h[i], decoder_c[i]) + output_t = self.output_proj(self.dropout(decoder_h[-1])) + outputs.append(output_t) + + if len(outputs) != self.temporal_out: + raise RuntimeError( + "Decoder generated {actual} steps, expected {expected}".format( + actual=len(outputs), + expected=self.temporal_out, + ) + ) + return torch.stack(outputs, dim=2) + + +def wavecastnet_builder( + task: str, + in_channels: int, + height: int, + width: int, + temporal_in: int, + temporal_out: int, + **kwargs, +) -> WaveCastNet: + if task.lower() != "regression": + raise ValueError("WaveCastNet only supports regression tasks.") + + return WaveCastNet( + in_channels=in_channels, + height=height, + width=width, + temporal_in=temporal_in, + temporal_out=temporal_out, + hidden_dim=kwargs.get("hidden_dim", 144), + num_layers=kwargs.get("num_layers", 2), + kernel_size=kwargs.get("kernel_size", 3), + dt=kwargs.get("dt", 1.0), + activation=kwargs.get("activation", "tanh"), + dropout=kwargs.get("dropout", 0.1), + ) + + +class WaveCastNetLoss(nn.Module): + """ + Huber loss used in the WaveCastNet paper. + """ + + def __init__(self, delta: float = 1.0): + super().__init__() + self.delta = float(delta) + + def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + diff = pred - target + abs_diff = diff.abs() + quadratic = 0.5 * diff.square() + linear = self.delta * abs_diff - 0.5 * self.delta**2 + return torch.where(abs_diff <= self.delta, quadratic, linear).mean() + + +class WavefieldMetrics: + """ + ACC and RFNE metrics reported in the WaveCastNet paper. 
+ """ + + @staticmethod + def accuracy(pred: torch.Tensor, target: torch.Tensor) -> float: + pred_flat = pred.reshape(pred.size(0), -1) + target_flat = target.reshape(target.size(0), -1) + numerator = (pred_flat * target_flat).sum(dim=1) + pred_norm = pred_flat.square().sum(dim=1).sqrt() + target_norm = target_flat.square().sum(dim=1).sqrt() + acc = numerator / (pred_norm * target_norm).clamp(min=1e-8) + return float(acc.mean().detach().cpu()) + + @staticmethod + def rfne(pred: torch.Tensor, target: torch.Tensor) -> float: + error_norm = (pred - target).reshape(pred.size(0), -1).square().sum(dim=1).sqrt() + target_norm = target.reshape(target.size(0), -1).square().sum(dim=1).sqrt() + rfne = error_norm / target_norm.clamp(min=1e-8) + return float(rfne.mean().detach().cpu()) + + @staticmethod + def compute_all(pred: torch.Tensor, target: torch.Tensor) -> dict[str, float]: + return { + "ACC": WavefieldMetrics.accuracy(pred, target), + "RFNE": WavefieldMetrics.rfne(pred, target), + } + + +__all__ = [ + "ConvLEMCell", + "WaveCastNet", + "WaveCastNetLoss", + "WavefieldMetrics", + "wavecastnet_builder", +] diff --git a/pyhazards/models/wildfire_aspp.py b/pyhazards/models/wildfire_aspp.py new file mode 100644 index 00000000..d99ad13f --- /dev/null +++ b/pyhazards/models/wildfire_aspp.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + +from .cnn_aspp import WildfireCNNASPP, cnn_aspp_builder + + +class WildfireASPP(WildfireCNNASPP): + """ + Backward-compatible name for the CNN + ASPP wildfire model. + """ + + +def wildfire_aspp_builder(*args, **kwargs) -> nn.Module: + return cnn_aspp_builder(*args, **kwargs) + + +class TverskyLoss(nn.Module): + """ + Tversky loss for binary segmentation. 
+ """ + + def __init__( + self, + alpha: float = 0.5, + beta: float = 0.5, + smooth: float = 1e-6, + from_logits: bool = True, + ): + super().__init__() + self.alpha = float(alpha) + self.beta = float(beta) + self.smooth = float(smooth) + self.from_logits = bool(from_logits) + + def forward(self, logits: torch.Tensor, targets: torch.Tensor) -> torch.Tensor: + if self.from_logits: + probs = torch.sigmoid(logits) + else: + probs = logits + + targets = targets.float() + + probs = probs.view(probs.size(0), -1) + targets = targets.view(targets.size(0), -1) + + tp = (probs * targets).sum(dim=1) + fp = (probs * (1 - targets)).sum(dim=1) + fn = ((1 - probs) * targets).sum(dim=1) + + tversky = (tp + self.smooth) / ( + tp + self.alpha * fp + self.beta * fn + self.smooth + ) + loss = 1.0 - tversky + return loss.mean() diff --git a/pyhazards/models/wildfire_fpa.py b/pyhazards/models/wildfire_fpa.py new file mode 100644 index 00000000..16be944c --- /dev/null +++ b/pyhazards/models/wildfire_fpa.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +from typing import Callable, Union + +import torch +import torch.nn as nn + +from .wildfire_fpa_dnn import WildfireFPADNN +from .wildfire_fpa_forecast import WildfireFPAForecast + + +class WildfireFPA(nn.Module): + """Paper-facing wrapper for the two-stage FPA-FOD wildfire framework.""" + + def __init__(self, stage: str, component: nn.Module): + super().__init__() + normalized_stage = stage.lower() + if normalized_stage not in {"classification", "forecasting", "regression"}: + raise ValueError(f"Unsupported wildfire_fpa stage: {stage!r}") + + self.stage = "forecasting" if normalized_stage == "regression" else normalized_stage + self.component = component + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.component(x) + + def forward_with_reconstruction(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: + if not hasattr(self.component, "forward_with_reconstruction"): + raise AttributeError( + 
"forward_with_reconstruction is only available for the forecasting stage " + "of wildfire_fpa." + ) + return self.component.forward_with_reconstruction(x) + + +def wildfire_fpa_builder( + task: str, + in_dim: int | None = None, + input_dim: int | None = None, + out_dim: int | None = None, + output_dim: int | None = None, + depth: int = 2, + hidden_dim: int = 64, + activation: Union[str, Callable[[], nn.Module]] = "relu", + dropout: float | None = None, + latent_dim: int = 32, + num_layers: int = 1, + ae_hidden_dim: int | None = None, + ae_num_layers: int | None = None, + lookback: int = 50, + **kwargs, +) -> nn.Module: + _ = kwargs + normalized_task = task.lower() + + if normalized_task == "classification": + feature_dim = in_dim if in_dim is not None else input_dim + if feature_dim is None: + raise TypeError("wildfire_fpa classification requires in_dim (or input_dim).") + + component = WildfireFPADNN( + in_dim=feature_dim, + out_dim=out_dim if out_dim is not None else (output_dim if output_dim is not None else 5), + depth=depth, + hidden_dim=hidden_dim, + activation=activation, + dropout=0.0 if dropout is None else dropout, + ) + return WildfireFPA(stage="classification", component=component) + + if normalized_task in {"forecasting", "regression"}: + sequence_dim = input_dim if input_dim is not None else in_dim + if sequence_dim is None: + raise TypeError("wildfire_fpa forecasting requires input_dim (or in_dim).") + + component = WildfireFPAForecast( + input_dim=sequence_dim, + hidden_dim=hidden_dim, + output_dim=output_dim if output_dim is not None else (out_dim if out_dim is not None else 5), + latent_dim=latent_dim, + num_layers=num_layers, + ae_hidden_dim=ae_hidden_dim, + ae_num_layers=ae_num_layers, + dropout=0.2 if dropout is None else dropout, + lookback=lookback, + ) + return WildfireFPA(stage=normalized_task, component=component) + + raise ValueError( + "wildfire_fpa supports task='classification' for the DNN stage and " + "task in {'forecasting', 
'regression'} for the LSTM + autoencoder stage." + ) + + +__all__ = ["WildfireFPA", "wildfire_fpa_builder"] diff --git a/pyhazards/models/wildfire_fpa_autoencoder.py b/pyhazards/models/wildfire_fpa_autoencoder.py new file mode 100644 index 00000000..69bc5766 --- /dev/null +++ b/pyhazards/models/wildfire_fpa_autoencoder.py @@ -0,0 +1,117 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class WildfireFPAAutoencoder(nn.Module): + """Autoencoder block used in the FPA-FOD forecasting stack.""" + + def __init__( + self, + input_dim: int, + hidden_dim: int = 64, + latent_dim: int = 32, + num_layers: int = 1, + dropout: float = 0.2, + lookback: int = 50, + ): + super().__init__() + if input_dim <= 0: + raise ValueError(f"input_dim must be positive, got {input_dim}") + if hidden_dim <= 0: + raise ValueError(f"hidden_dim must be positive, got {hidden_dim}") + if latent_dim <= 0: + raise ValueError(f"latent_dim must be positive, got {latent_dim}") + if num_layers <= 0: + raise ValueError(f"num_layers must be positive, got {num_layers}") + if lookback <= 0: + raise ValueError(f"lookback must be positive, got {lookback}") + if not 0.0 <= dropout < 1.0: + raise ValueError(f"dropout must be in [0, 1), got {dropout}") + + self.lookback = lookback + lstm_dropout = dropout if num_layers > 1 else 0.0 + self.encoder = nn.LSTM( + input_size=input_dim, + hidden_size=hidden_dim, + num_layers=num_layers, + batch_first=True, + dropout=lstm_dropout, + ) + self.to_latent = nn.Sequential( + nn.Dropout(dropout), + nn.Linear(hidden_dim, latent_dim), + ) + self.decoder = nn.LSTM( + input_size=latent_dim, + hidden_size=hidden_dim, + num_layers=num_layers, + batch_first=True, + dropout=lstm_dropout, + ) + self.to_reconstruction = nn.Linear(hidden_dim, input_dim) + + def encode(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError(f"Expected input shape (B, T, D), got {tuple(x.shape)}") + _, sequence_length, _ = x.shape + if sequence_length 
!= self.lookback: + raise ValueError(f"Expected lookback={self.lookback}, got sequence length {sequence_length}") + _, (hidden, _) = self.encoder(x) + return self.to_latent(hidden[-1]) + + def decode(self, latent: torch.Tensor, sequence_length: int | None = None) -> torch.Tensor: + if latent.ndim != 2: + raise ValueError(f"Expected latent shape (B, Z), got {tuple(latent.shape)}") + sequence_length = sequence_length or self.lookback + batch_size = latent.size(0) + repeated = latent.unsqueeze(1).expand(batch_size, sequence_length, latent.size(-1)) + decoded, _ = self.decoder(repeated) + return self.to_reconstruction(decoded) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + latent = self.encode(x) + return self.decode(latent, sequence_length=x.size(1)) + + @torch.no_grad() + def reconstruction_error(self, x: torch.Tensor, reduction: str = "mean") -> torch.Tensor: + residual = (self.forward(x) - x) ** 2 + if reduction == "none": + return residual + if reduction == "mean": + return residual.mean(dim=(1, 2)) + if reduction == "sum": + return residual.sum(dim=(1, 2)) + raise ValueError(f"Unknown reduction: {reduction!r}") + + +def wildfire_fpa_autoencoder_builder( + task: str, + input_dim: int, + hidden_dim: int = 64, + latent_dim: int = 32, + num_layers: int = 1, + dropout: float = 0.2, + lookback: int = 50, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() not in {"reconstruction", "autoencoder", "regression", "forecasting"}: + raise ValueError( + "wildfire_fpa_autoencoder supports task in " + "{'reconstruction', 'autoencoder', 'regression', 'forecasting'}." 
+ ) + return WildfireFPAAutoencoder( + input_dim=input_dim, + hidden_dim=hidden_dim, + latent_dim=latent_dim, + num_layers=num_layers, + dropout=dropout, + lookback=lookback, + ) + +__all__ = [ + "WildfireFPAAutoencoder", + "wildfire_fpa_autoencoder_builder", +] diff --git a/pyhazards/models/wildfire_fpa_dnn.py b/pyhazards/models/wildfire_fpa_dnn.py new file mode 100644 index 00000000..6dde75dc --- /dev/null +++ b/pyhazards/models/wildfire_fpa_dnn.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +from typing import Callable, Union + +import torch +import torch.nn as nn + + +def _activation_from_name(name: Union[str, Callable[[], nn.Module]]) -> nn.Module: + if callable(name): + return name() + key = str(name).strip().lower() + if key == "relu": + return nn.ReLU() + if key == "gelu": + return nn.GELU() + if key == "tanh": + return nn.Tanh() + if key in {"silu", "swish"}: + return nn.SiLU() + raise ValueError(f"Unsupported activation: {name!r}") + + +class WildfireFPADNN(nn.Module): + """DNN classifier for incident-level FPA-FOD features.""" + + def __init__( + self, + in_dim: int, + out_dim: int = 5, + depth: int = 2, + hidden_dim: int = 64, + activation: Union[str, Callable[[], nn.Module]] = "relu", + dropout: float = 0.0, + ): + super().__init__() + if in_dim <= 0: + raise ValueError(f"in_dim must be positive, got {in_dim}") + if out_dim <= 0: + raise ValueError(f"out_dim must be positive, got {out_dim}") + if depth < 1: + raise ValueError(f"depth must be >= 1, got {depth}") + if hidden_dim <= 0: + raise ValueError(f"hidden_dim must be positive, got {hidden_dim}") + if not 0.0 <= dropout < 1.0: + raise ValueError(f"dropout must be in [0, 1), got {dropout}") + + layers = [] + current_dim = in_dim + for _ in range(depth): + layers.append(nn.Linear(current_dim, hidden_dim)) + layers.append(_activation_from_name(activation)) + if dropout > 0: + layers.append(nn.Dropout(dropout)) + current_dim = hidden_dim + layers.append(nn.Linear(current_dim, out_dim)) + 
self.net = nn.Sequential(*layers) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 2: + raise ValueError(f"Expected input shape (B, D), got {tuple(x.shape)}") + return self.net(x) + + +def wildfire_fpa_dnn_builder( + task: str, + in_dim: int, + out_dim: int = 5, + depth: int = 2, + hidden_dim: int = 64, + activation: Union[str, Callable[[], nn.Module]] = "relu", + dropout: float = 0.0, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "classification": + raise ValueError(f"wildfire_fpa_dnn supports task='classification', got {task!r}") + return WildfireFPADNN( + in_dim=in_dim, + out_dim=out_dim, + depth=depth, + hidden_dim=hidden_dim, + activation=activation, + dropout=dropout, + ) + + +__all__ = ["WildfireFPADNN", "wildfire_fpa_dnn_builder"] diff --git a/pyhazards/models/wildfire_fpa_forecast.py b/pyhazards/models/wildfire_fpa_forecast.py new file mode 100644 index 00000000..8d4d575d --- /dev/null +++ b/pyhazards/models/wildfire_fpa_forecast.py @@ -0,0 +1,109 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + +from .wildfire_fpa_autoencoder import WildfireFPAAutoencoder + + +class WildfireFPAForecast(nn.Module): + """Forecast model that combines an LSTM temporal encoder with an autoencoder latent summary.""" + + def __init__( + self, + input_dim: int, + hidden_dim: int = 64, + output_dim: int = 5, + latent_dim: int = 32, + num_layers: int = 1, + ae_hidden_dim: int | None = None, + ae_num_layers: int | None = None, + dropout: float = 0.2, + lookback: int = 50, + ): + super().__init__() + if input_dim <= 0: + raise ValueError(f"input_dim must be positive, got {input_dim}") + if hidden_dim <= 0: + raise ValueError(f"hidden_dim must be positive, got {hidden_dim}") + if output_dim <= 0: + raise ValueError(f"output_dim must be positive, got {output_dim}") + if latent_dim <= 0: + raise ValueError(f"latent_dim must be positive, got {latent_dim}") + if num_layers <= 0: + raise ValueError(f"num_layers must be 
positive, got {num_layers}") + if lookback <= 0: + raise ValueError(f"lookback must be positive, got {lookback}") + if not 0.0 <= dropout < 1.0: + raise ValueError(f"dropout must be in [0, 1), got {dropout}") + + self.lookback = lookback + lstm_dropout = dropout if num_layers > 1 else 0.0 + self.temporal = nn.LSTM( + input_size=input_dim, + hidden_size=hidden_dim, + num_layers=num_layers, + batch_first=True, + dropout=lstm_dropout, + ) + self.autoencoder = WildfireFPAAutoencoder( + input_dim=input_dim, + hidden_dim=ae_hidden_dim or hidden_dim, + latent_dim=latent_dim, + num_layers=ae_num_layers or num_layers, + dropout=dropout, + lookback=lookback, + ) + self.head = nn.Sequential( + nn.Dropout(dropout), + nn.Linear(hidden_dim + latent_dim, output_dim), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError(f"Expected input shape (B, T, D), got {tuple(x.shape)}") + if x.size(1) != self.lookback: + raise ValueError(f"Expected lookback={self.lookback}, got sequence length {x.size(1)}") + + _, (hidden, _) = self.temporal(x) + latent = self.autoencoder.encode(x) + return self.head(torch.cat([hidden[-1], latent], dim=-1)) + + def forward_with_reconstruction(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: + preds = self.forward(x) + recon = self.autoencoder(x) + return preds, recon + + +def wildfire_fpa_forecast_builder( + task: str, + input_dim: int, + hidden_dim: int = 64, + output_dim: int = 5, + latent_dim: int = 32, + num_layers: int = 1, + ae_hidden_dim: int | None = None, + ae_num_layers: int | None = None, + dropout: float = 0.2, + lookback: int = 50, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() not in {"regression", "forecasting"}: + raise ValueError( + f"wildfire_fpa_forecast supports task='regression' or 'forecasting', got {task!r}" + ) + return WildfireFPAForecast( + input_dim=input_dim, + hidden_dim=hidden_dim, + output_dim=output_dim, + latent_dim=latent_dim, + num_layers=num_layers, + 
ae_hidden_dim=ae_hidden_dim, + ae_num_layers=ae_num_layers, + dropout=dropout, + lookback=lookback, + ) + + +__all__ = ["WildfireFPAForecast", "wildfire_fpa_forecast_builder"] diff --git a/pyhazards/models/wildfire_fpa_lstm.py b/pyhazards/models/wildfire_fpa_lstm.py new file mode 100644 index 00000000..64c0afe8 --- /dev/null +++ b/pyhazards/models/wildfire_fpa_lstm.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class WildfireFPALSTM(nn.Module): + """Sequence model for next-week FPA-FOD count forecasting.""" + + def __init__( + self, + input_dim: int, + hidden_dim: int = 64, + output_dim: int = 5, + num_layers: int = 1, + dropout: float = 0.2, + lookback: int = 50, + ): + super().__init__() + if input_dim <= 0: + raise ValueError(f"input_dim must be positive, got {input_dim}") + if hidden_dim <= 0: + raise ValueError(f"hidden_dim must be positive, got {hidden_dim}") + if output_dim <= 0: + raise ValueError(f"output_dim must be positive, got {output_dim}") + if num_layers <= 0: + raise ValueError(f"num_layers must be positive, got {num_layers}") + if lookback <= 0: + raise ValueError(f"lookback must be positive, got {lookback}") + if not 0.0 <= dropout < 1.0: + raise ValueError(f"dropout must be in [0, 1), got {dropout}") + + self.lookback = lookback + lstm_dropout = dropout if num_layers > 1 else 0.0 + self.encoder = nn.LSTM( + input_size=input_dim, + hidden_size=hidden_dim, + num_layers=num_layers, + batch_first=True, + dropout=lstm_dropout, + ) + self.head = nn.Sequential( + nn.Dropout(dropout), + nn.Linear(hidden_dim, output_dim), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 3: + raise ValueError(f"Expected input shape (B, T, D), got {tuple(x.shape)}") + if x.size(1) != self.lookback: + raise ValueError(f"Expected lookback={self.lookback}, got sequence length {x.size(1)}") + _, (hidden, _) = self.encoder(x) + return self.head(hidden[-1]) + + +def wildfire_fpa_lstm_builder( + task: 
str, + input_dim: int, + hidden_dim: int = 64, + output_dim: int = 5, + num_layers: int = 1, + dropout: float = 0.2, + lookback: int = 50, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() not in {"regression", "forecasting"}: + raise ValueError( + f"wildfire_fpa_lstm supports task='regression' or 'forecasting', got {task!r}" + ) + return WildfireFPALSTM( + input_dim=input_dim, + hidden_dim=hidden_dim, + output_dim=output_dim, + num_layers=num_layers, + dropout=dropout, + lookback=lookback, + ) + + +__all__ = ["WildfireFPALSTM", "wildfire_fpa_lstm_builder"] diff --git a/pyhazards/models/wildfire_mamba.py b/pyhazards/models/wildfire_mamba.py new file mode 100644 index 00000000..5ce9bd8b --- /dev/null +++ b/pyhazards/models/wildfire_mamba.py @@ -0,0 +1,236 @@ +from __future__ import annotations + +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def _normalize_adjacency(adj: torch.Tensor) -> torch.Tensor: + """ + Row-normalize an adjacency matrix and ensure self-loops. + Accepts (N, N) or (B, N, N) and returns the same rank. + """ + if adj.dim() == 2: + adj = adj.unsqueeze(0) + eye = torch.eye(adj.size(-1), device=adj.device, dtype=adj.dtype) + adj = adj.float() + eye.unsqueeze(0) + return adj / adj.sum(-1, keepdim=True).clamp(min=1e-6) + + +class SelectiveSSMBlock(nn.Module): + """ + Lightweight selective state-space block inspired by Mamba. + + Operates over a single temporal stream: (batch, time, features) -> (batch, time, hidden_dim). 
+ """ + + def __init__(self, in_dim: int, hidden_dim: int, state_dim: int = 64, conv_kernel: int = 5, dropout: float = 0.1): + super().__init__() + self.in_proj = nn.Linear(in_dim, hidden_dim) + self.dwconv = nn.Conv1d(hidden_dim, hidden_dim, kernel_size=conv_kernel, padding=conv_kernel // 2, groups=hidden_dim) + self.gate = nn.Linear(hidden_dim, hidden_dim) + self.A = nn.Parameter(torch.randn(hidden_dim, state_dim) * 0.02) + self.B = nn.Parameter(torch.randn(state_dim, hidden_dim) * 0.02) + self.out_proj = nn.Linear(hidden_dim, hidden_dim) + self.norm = nn.LayerNorm(hidden_dim) + self.drop = nn.Dropout(dropout) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + # x: (B, T, F) + h = self.in_proj(x) # (B, T, H) + h_conv = self.dwconv(h.transpose(1, 2)).transpose(1, 2) + g = torch.sigmoid(self.gate(h_conv)) + B, T, H = h_conv.shape + state = torch.zeros(B, H, device=h_conv.device, dtype=h_conv.dtype) + outputs = [] + for t in range(T): + # selective update: gates decide how much new signal to mix into the running state + state = g[:, t, :] * (state @ self.A @ self.B + h_conv[:, t, :]) + (1 - g[:, t, :]) * state + outputs.append(state) + y = torch.stack(outputs, dim=1) + y = self.out_proj(self.drop(y)) + h_conv + return self.norm(y) + + +class MambaTemporalEncoder(nn.Module): + """Stack of selective SSM blocks; returns the last hidden state.""" + + def __init__(self, in_dim: int, hidden_dim: int = 128, num_layers: int = 2, state_dim: int = 64, conv_kernel: int = 5, dropout: float = 0.1): + super().__init__() + self.blocks = nn.ModuleList( + [ + SelectiveSSMBlock( + in_dim=in_dim if i == 0 else hidden_dim, + hidden_dim=hidden_dim, + state_dim=state_dim, + conv_kernel=conv_kernel, + dropout=dropout, + ) + for i in range(num_layers) + ] + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + h = x + for block in self.blocks: + h = block(h) + return h[:, -1, :] + + +class SimpleGCN(nn.Module): + """Two-layer GCN that mixes counties with a fixed adjacency.""" 
+ + def __init__(self, in_dim: int, hidden_dim: int = 64, out_dim: int = 64, dropout: float = 0.1): + super().__init__() + self.lin1 = nn.Linear(in_dim, hidden_dim) + self.lin2 = nn.Linear(hidden_dim, out_dim) + self.drop = nn.Dropout(dropout) + + def forward(self, H: torch.Tensor, adj: torch.Tensor) -> torch.Tensor: + # H: (B, N, D); adj: (B, N, N) + z = torch.matmul(adj, H) + z = F.relu(self.lin1(z)) + z = self.drop(z) + z = torch.matmul(adj, z) + return F.relu(self.lin2(z)) + + +class WildfireMamba(nn.Module): + """ + Mamba-based spatio-temporal wildfire model for county-day ERA5 features. + + Input shape: (batch, past_days, num_counties, num_features) + Output: logits per county for the next day (use sigmoid for probabilities) + """ + + def __init__( + self, + in_dim: int, + num_counties: int, + past_days: int, + hidden_dim: int = 128, + gcn_hidden: int = 64, + mamba_layers: int = 2, + state_dim: int = 64, + conv_kernel: int = 5, + dropout: float = 0.1, + adjacency: Optional[torch.Tensor] = None, + with_count_head: bool = False, + ): + super().__init__() + self.num_counties = num_counties + self.past_days = past_days + self.with_count_head = with_count_head + self.temporal = MambaTemporalEncoder( + in_dim=in_dim, + hidden_dim=hidden_dim, + num_layers=mamba_layers, + state_dim=state_dim, + conv_kernel=conv_kernel, + dropout=dropout, + ) + # differential branch is shallower and gates how much change to inject + self.delta_temporal = MambaTemporalEncoder( + in_dim=in_dim, + hidden_dim=hidden_dim, + num_layers=max(1, mamba_layers - 1), + state_dim=state_dim, + conv_kernel=conv_kernel, + dropout=dropout, + ) + self.delta_gate = nn.Linear(hidden_dim, hidden_dim) + self.gcn = SimpleGCN(hidden_dim, hidden_dim=gcn_hidden, out_dim=gcn_hidden, dropout=dropout) + self.cls_head = nn.Linear(gcn_hidden, 1) + if self.with_count_head: + self.count_head = nn.Linear(gcn_hidden, 1) + self.dropout = nn.Dropout(dropout) + self.register_buffer("_adjacency", None) + if adjacency is 
not None: + self.set_adjacency(adjacency) + + def set_adjacency(self, adj: torch.Tensor) -> None: + """Set/override the spatial adjacency.""" + adj = _normalize_adjacency(adj.detach()) + self._adjacency = adj + + def _get_adjacency(self, batch_size: int) -> torch.Tensor: + if self._adjacency is None: + eye = torch.eye(self.num_counties, device=self.cls_head.weight.device) + adj = _normalize_adjacency(eye) + else: + adj = self._adjacency + if adj.dim() == 2: + adj = adj.unsqueeze(0) + if adj.size(0) == 1 and batch_size > 1: + adj = adj.expand(batch_size, -1, -1) + return adj + + @staticmethod + def _temporal_delta(x: torch.Tensor) -> torch.Tensor: + # prepend zeros so delta has the same length as the input sequence + zeros = torch.zeros(x.size(0), 1, x.size(2), device=x.device, dtype=x.dtype) + return torch.cat([zeros, x[:, 1:] - x[:, :-1]], dim=1) + + def forward(self, x: torch.Tensor, adjacency: Optional[torch.Tensor] = None): + """ + Args: + x: Tensor shaped (batch, past_days, num_counties, in_dim) + adjacency: Optional (N, N) or (B, N, N) adjacency override. + Returns: + - logits: (batch, num_counties) + - optional counts: (batch, num_counties) if with_count_head is enabled. 
+ """ + B, T, N, F = x.shape + if T != self.past_days: + raise ValueError(f"Expected past_days={self.past_days}, got {T}.") + if N != self.num_counties: + raise ValueError(f"Expected num_counties={self.num_counties}, got {N}.") + + # flatten counties into the batch for temporal encoding + x_flat = x.permute(0, 2, 1, 3).reshape(B * N, T, F) + base = self.temporal(x_flat) + delta = self.delta_temporal(self._temporal_delta(x_flat)) + gate = torch.sigmoid(self.delta_gate(delta)) + fused = base * gate + delta + fused = fused.view(B, N, -1) + + adj = _normalize_adjacency(adjacency) if adjacency is not None else self._get_adjacency(B) + spatial = self.gcn(fused, adj) + spatial = self.dropout(spatial) + logits = self.cls_head(spatial).squeeze(-1) + if self.with_count_head: + counts = F.relu(self.count_head(spatial)).squeeze(-1) + return logits, counts + return logits + + +def wildfire_mamba_builder( + task: str, + in_dim: int, + num_counties: int, + past_days: int, + **kwargs, +) -> WildfireMamba: + """ + Builder used by the model registry. 
+ """ + if task.lower() not in {"classification", "binary_classification"}: + raise ValueError("WildfireMamba is designed for binary per-county classification.") + return WildfireMamba( + in_dim=in_dim, + num_counties=num_counties, + past_days=past_days, + hidden_dim=kwargs.get("hidden_dim", 128), + gcn_hidden=kwargs.get("gcn_hidden", 64), + mamba_layers=kwargs.get("mamba_layers", 2), + state_dim=kwargs.get("state_dim", 64), + conv_kernel=kwargs.get("conv_kernel", 5), + dropout=kwargs.get("dropout", 0.1), + adjacency=kwargs.get("adjacency"), + with_count_head=kwargs.get("with_count_head", False), + ) + + +__all__ = ["WildfireMamba", "wildfire_mamba_builder"] diff --git a/pyhazards/models/wildfiregpt.py b/pyhazards/models/wildfiregpt.py new file mode 100644 index 00000000..7deb48dc --- /dev/null +++ b/pyhazards/models/wildfiregpt.py @@ -0,0 +1,168 @@ +from __future__ import annotations + +from typing import Any, Dict, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class WildfireGPTReasoner(nn.Module): + """Retrieval-conditioned wildfire risk model inspired by the WildfireGPT multi-agent RAG system.""" + + def __init__( + self, + in_channels: int = 12, + out_dim: int = 1, + base_channels: int = 32, + hidden_dim: int = 64, + profile_dim: int = 8, + retrieved_dim: int = 16, + num_heads: int = 4, + dropout: float = 0.1, + ): + super().__init__() + if in_channels <= 0: + raise ValueError(f"in_channels must be positive, got {in_channels}") + if out_dim <= 0: + raise ValueError(f"out_dim must be positive, got {out_dim}") + if hidden_dim % num_heads != 0: + raise ValueError(f"hidden_dim={hidden_dim} must be divisible by num_heads={num_heads}") + + self.in_channels = int(in_channels) + self.profile_dim = int(profile_dim) + self.retrieved_dim = int(retrieved_dim) + self.hidden_dim = int(hidden_dim) + + self.raster_encoder = nn.Sequential( + nn.Conv2d(in_channels, base_channels, kernel_size=3, padding=1), + nn.GELU(), + 
nn.Conv2d(base_channels, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + ) + self.profile_proj = nn.Linear(self.profile_dim, hidden_dim) + self.retrieved_proj = nn.Linear(self.retrieved_dim, hidden_dim) + self.raster_proj = nn.Linear(hidden_dim, hidden_dim) + + # Learned system-role tokens: user-profile, planner, analyst. + self.agent_tokens = nn.Parameter(torch.randn(3, hidden_dim) * 0.02) + self.attn = nn.MultiheadAttention( + embed_dim=hidden_dim, + num_heads=num_heads, + dropout=dropout, + batch_first=True, + ) + self.ffn = nn.Sequential( + nn.LayerNorm(hidden_dim), + nn.Linear(hidden_dim, hidden_dim * 2), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(hidden_dim * 2, hidden_dim), + ) + self.decoder = nn.Sequential( + nn.Conv2d(hidden_dim * 2, hidden_dim, kernel_size=3, padding=1), + nn.GELU(), + nn.Conv2d(hidden_dim, hidden_dim // 2, kernel_size=3, padding=1), + nn.GELU(), + ) + self.head = nn.Conv2d(hidden_dim // 2, out_dim, kernel_size=1) + + def _unpack_inputs(self, inputs: torch.Tensor | Dict[str, Any]) -> Tuple[torch.Tensor, torch.Tensor | None, torch.Tensor | None]: + if isinstance(inputs, dict): + x = inputs.get("x") + profile = inputs.get("user_profile") + retrieved = inputs.get("retrieved_context") + else: + x = inputs + profile = None + retrieved = None + + if not isinstance(x, torch.Tensor): + raise ValueError("WildfireGPTReasoner expects a tensor input or a dict containing key 'x'.") + if x.ndim != 4: + raise ValueError( + "WildfireGPTReasoner expects input shape (B, C, H, W), " + f"got {tuple(x.shape)}." + ) + if x.size(1) != self.in_channels: + raise ValueError( + f"WildfireGPTReasoner expected in_channels={self.in_channels}, got {x.size(1)}." 
+ ) + return x, profile, retrieved + + def _coerce_profile(self, profile: torch.Tensor | None, batch: int, device: torch.device) -> torch.Tensor: + if profile is None: + return torch.zeros(batch, self.profile_dim, device=device) + if profile.ndim != 2 or profile.size(0) != batch: + raise ValueError(f"user_profile must have shape (B,D), got {tuple(profile.shape)}") + if profile.size(1) == self.profile_dim: + return profile.to(device=device, dtype=torch.float32) + if profile.size(1) > self.profile_dim: + return profile[:, : self.profile_dim].to(device=device, dtype=torch.float32) + pad = torch.zeros(batch, self.profile_dim - profile.size(1), device=device) + return torch.cat([profile.to(device=device, dtype=torch.float32), pad], dim=1) + + def _coerce_retrieved(self, retrieved: torch.Tensor | None, batch: int, device: torch.device) -> torch.Tensor: + if retrieved is None: + return torch.zeros(batch, self.retrieved_dim, device=device) + if retrieved.ndim != 2 or retrieved.size(0) != batch: + raise ValueError(f"retrieved_context must have shape (B,D), got {tuple(retrieved.shape)}") + if retrieved.size(1) == self.retrieved_dim: + return retrieved.to(device=device, dtype=torch.float32) + if retrieved.size(1) > self.retrieved_dim: + return retrieved[:, : self.retrieved_dim].to(device=device, dtype=torch.float32) + pad = torch.zeros(batch, self.retrieved_dim - retrieved.size(1), device=device) + return torch.cat([retrieved.to(device=device, dtype=torch.float32), pad], dim=1) + + def forward(self, inputs: torch.Tensor | Dict[str, Any]) -> torch.Tensor: + x, profile, retrieved = self._unpack_inputs(inputs) + batch = x.size(0) + device = x.device + + feature_map = self.raster_encoder(x) + pooled_raster = F.adaptive_avg_pool2d(feature_map, 1).flatten(1) + + profile_token = self.profile_proj(self._coerce_profile(profile, batch, device)).unsqueeze(1) + retrieved_token = self.retrieved_proj(self._coerce_retrieved(retrieved, batch, device)).unsqueeze(1) + raster_token = 
self.raster_proj(pooled_raster).unsqueeze(1) + agent_tokens = self.agent_tokens.unsqueeze(0).expand(batch, -1, -1) + + tokens = torch.cat([agent_tokens, profile_token, retrieved_token, raster_token], dim=1) + attn_out, _ = self.attn(tokens, tokens, tokens, need_weights=False) + fused_tokens = attn_out + self.ffn(attn_out) + fused_global = fused_tokens.mean(dim=1) + + fused_map = fused_global.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, feature_map.size(-2), feature_map.size(-1)) + decoded = self.decoder(torch.cat([feature_map, fused_map], dim=1)) + return self.head(decoded) + + + +def wildfiregpt_builder( + task: str, + in_channels: int = 12, + out_dim: int = 1, + base_channels: int = 32, + hidden_dim: int = 64, + profile_dim: int = 8, + retrieved_dim: int = 16, + num_heads: int = 4, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() != "segmentation": + raise ValueError(f"wildfiregpt is segmentation-only in PyHazards, got task={task!r}.") + return WildfireGPTReasoner( + in_channels=in_channels, + out_dim=out_dim, + base_channels=base_channels, + hidden_dim=hidden_dim, + profile_dim=profile_dim, + retrieved_dim=retrieved_dim, + num_heads=num_heads, + dropout=dropout, + ) + + +__all__ = ["WildfireGPTReasoner", "wildfiregpt_builder"] diff --git a/pyhazards/models/wildfirespreadts.py b/pyhazards/models/wildfirespreadts.py new file mode 100644 index 00000000..1de85416 --- /dev/null +++ b/pyhazards/models/wildfirespreadts.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +import torch +import torch.nn as nn + + +class WildfireSpreadTS(nn.Module): + """Temporal convolution baseline for wildfire spread masks.""" + + def __init__( + self, + history: int = 4, + in_channels: int = 6, + hidden_dim: int = 32, + out_channels: int = 1, + dropout: float = 0.1, + ): + super().__init__() + if history <= 0: + raise ValueError(f"history must be positive, got {history}") + if in_channels <= 0: + raise ValueError(f"in_channels must be positive, 
got {in_channels}") + if hidden_dim <= 0: + raise ValueError(f"hidden_dim must be positive, got {hidden_dim}") + if out_channels <= 0: + raise ValueError(f"out_channels must be positive, got {out_channels}") + if not 0.0 <= dropout < 1.0: + raise ValueError(f"dropout must be in [0, 1), got {dropout}") + + self.history = int(history) + self.in_channels = int(in_channels) + self.encoder = nn.Sequential( + nn.Conv3d(in_channels, hidden_dim, kernel_size=(3, 3, 3), padding=1), + nn.GELU(), + nn.Dropout3d(dropout) if dropout > 0 else nn.Identity(), + nn.Conv3d(hidden_dim, hidden_dim, kernel_size=(3, 3, 3), padding=1), + nn.GELU(), + ) + self.head = nn.Conv2d(hidden_dim, out_channels, kernel_size=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 5: + raise ValueError( + "WildfireSpreadTS expects input shape (batch, history, channels, height, width), " + f"got {tuple(x.shape)}." + ) + if x.size(1) != self.history: + raise ValueError(f"WildfireSpreadTS expected history={self.history}, got {x.size(1)}.") + if x.size(2) != self.in_channels: + raise ValueError(f"WildfireSpreadTS expected in_channels={self.in_channels}, got {x.size(2)}.") + encoded = self.encoder(x.permute(0, 2, 1, 3, 4)) + return self.head(torch.mean(encoded, dim=2)) + + +def wildfirespreadts_builder( + task: str, + history: int = 4, + in_channels: int = 6, + hidden_dim: int = 32, + out_channels: int = 1, + dropout: float = 0.1, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() not in {"segmentation", "regression"}: + raise ValueError( + "wildfirespreadts supports task='segmentation' or 'regression', " + f"got {task!r}." 
+ ) + return WildfireSpreadTS( + history=history, + in_channels=in_channels, + hidden_dim=hidden_dim, + out_channels=out_channels, + dropout=dropout, + ) + + +__all__ = ["WildfireSpreadTS", "wildfirespreadts_builder"] diff --git a/pyhazards/models/wrf_sfire.py b/pyhazards/models/wrf_sfire.py new file mode 100644 index 00000000..08ac5035 --- /dev/null +++ b/pyhazards/models/wrf_sfire.py @@ -0,0 +1,77 @@ +from __future__ import annotations + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class WRFSFireAdapter(nn.Module): + """Lightweight raster adapter inspired by WRF-SFIRE spread transport.""" + + def __init__( + self, + in_channels: int = 12, + out_channels: int = 1, + diffusion_steps: int = 3, + ): + super().__init__() + if in_channels <= 0: + raise ValueError(f"in_channels must be positive, got {in_channels}") + if out_channels != 1: + raise ValueError(f"WRFSFireAdapter only supports out_channels=1, got {out_channels}") + if diffusion_steps <= 0: + raise ValueError(f"diffusion_steps must be positive, got {diffusion_steps}") + + self.in_channels = int(in_channels) + self.diffusion_steps = int(diffusion_steps) + + kernel = torch.tensor( + [[0.02, 0.08, 0.02], [0.08, 0.60, 0.08], [0.02, 0.08, 0.02]], + dtype=torch.float32, + ).view(1, 1, 3, 3) + self.register_buffer("transport_kernel", kernel) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if x.ndim != 4: + raise ValueError( + "WRFSFireAdapter expects input shape (batch, channels, height, width), " + f"got {tuple(x.shape)}." + ) + if x.size(1) != self.in_channels: + raise ValueError( + f"WRFSFireAdapter expected in_channels={self.in_channels}, got {x.size(1)}." + ) + + # The first three channels act as fireline, terrain, and moisture proxies. 
+ fireline = torch.sigmoid(x[:, :1]) + terrain = torch.sigmoid(x[:, 1:2]) + moisture = torch.sigmoid(x[:, 2:3]) + + for _ in range(self.diffusion_steps): + fireline = F.conv2d(fireline, self.transport_kernel, padding=1) + fireline = torch.clamp( + fireline * (0.9 + 0.1 * terrain) * (1.0 - 0.15 * moisture), + 0.0, + 1.0, + ) + return fireline + + +def wrf_sfire_builder( + task: str, + in_channels: int = 12, + out_channels: int = 1, + diffusion_steps: int = 3, + **kwargs, +) -> nn.Module: + _ = kwargs + if task.lower() not in {"segmentation", "regression"}: + raise ValueError(f"wrf_sfire supports task='segmentation' or 'regression', got {task!r}.") + return WRFSFireAdapter( + in_channels=in_channels, + out_channels=out_channels, + diffusion_steps=diffusion_steps, + ) + + +__all__ = ["WRFSFireAdapter", "wrf_sfire_builder"] diff --git a/pyhazards/models/xgboost.py b/pyhazards/models/xgboost.py new file mode 100644 index 00000000..39ebf252 --- /dev/null +++ b/pyhazards/models/xgboost.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +from typing import Any, Optional + +import numpy as np +import torch.nn as nn + +from ._wildfire_benchmark_utils import EstimatorPort, filter_init_kwargs, require_task + + +class XGBoostModel(EstimatorPort): + """A boosted-tree wildfire occurrence baseline using a binary logistic objective.""" + + def __init__(self, max_depth: int = 8, eta: float = 0.05, subsample: float = 0.8, colsample_bytree: float = 0.8, num_boost_round: int = 800): + super().__init__() + self.params = { + "objective": "binary:logistic", + "eval_metric": "logloss", + "max_depth": int(max_depth), + "eta": float(eta), + "subsample": float(subsample), + "colsample_bytree": float(colsample_bytree), + } + self.num_boost_round = int(num_boost_round) + self.booster = None + + def _fit_numpy( + self, + x_train: np.ndarray, + y_train: np.ndarray, + x_val: Optional[np.ndarray], + y_val: Optional[np.ndarray], + ) -> None: + import xgboost as xgb + + dtrain = 
xgb.DMatrix(x_train, label=y_train) + evals = [(dtrain, "train")] + if x_val is not None and y_val is not None: + dval = xgb.DMatrix(x_val, label=y_val) + evals.append((dval, "val")) + self.booster = xgb.train( + params=self.params, + dtrain=dtrain, + num_boost_round=self.num_boost_round, + evals=evals, + verbose_eval=False, + ) + + def _predict_positive_proba(self, x: np.ndarray) -> np.ndarray: + if self.booster is None: + raise RuntimeError("XGBoost booster is not fitted.") + import xgboost as xgb + return np.asarray(self.booster.predict(xgb.DMatrix(x)), dtype=np.float32) + + +def xgboost_builder(task: str, **kwargs: Any) -> nn.Module: + require_task(task, {"classification"}, "xgboost") + build_kwargs = filter_init_kwargs(XGBoostModel, kwargs) + return XGBoostModel(**build_kwargs) + + +__all__ = ["XGBoostModel", "xgboost_builder"] diff --git a/pyhazards/reports/__init__.py b/pyhazards/reports/__init__.py new file mode 100644 index 00000000..cce320bb --- /dev/null +++ b/pyhazards/reports/__init__.py @@ -0,0 +1,3 @@ +from .base import BenchmarkReport, export_report_bundle + +__all__ = ["BenchmarkReport", "export_report_bundle"] diff --git a/pyhazards/reports/base.py b/pyhazards/reports/base.py new file mode 100644 index 00000000..0201cf94 --- /dev/null +++ b/pyhazards/reports/base.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +import csv +import json +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, Dict, Mapping, Sequence + + +@dataclass +class BenchmarkReport: + benchmark_name: str + hazard_task: str + metrics: Dict[str, float] + metadata: Dict[str, Any] = field(default_factory=dict) + artifacts: Dict[str, str] = field(default_factory=dict) + + def to_dict(self) -> Dict[str, Any]: + return { + "benchmark_name": self.benchmark_name, + "hazard_task": self.hazard_task, + "metrics": self.metrics, + "metadata": self.metadata, + "artifacts": self.artifacts, + } + + +def export_report_bundle( + report: 
BenchmarkReport, + output_dir: str | Path, + formats: Sequence[str], +) -> Dict[str, str]: + target = Path(output_dir) + target.mkdir(parents=True, exist_ok=True) + paths: Dict[str, str] = {} + for fmt in formats: + fmt = fmt.lower() + path = target / "{name}.{fmt}".format(name=report.benchmark_name, fmt=fmt) + if fmt == "json": + path.write_text(json.dumps(report.to_dict(), indent=2, sort_keys=True), encoding="utf-8") + elif fmt == "md": + path.write_text(_markdown_report(report), encoding="utf-8") + elif fmt == "csv": + _write_csv(path, report.metrics, report.metadata) + else: + raise ValueError("Unsupported report format: {fmt}".format(fmt=fmt)) + paths[fmt] = str(path) + return paths + + +def _markdown_report(report: BenchmarkReport) -> str: + lines = [ + "# {name}".format(name=report.benchmark_name), + "", + "- Hazard task: `{task}`".format(task=report.hazard_task), + "", + "## Metrics", + "", + ] + if report.metrics: + for key, value in sorted(report.metrics.items()): + lines.append("- `{key}`: {value}".format(key=key, value=value)) + else: + lines.append("- No metrics recorded.") + if report.metadata: + lines.extend(["", "## Metadata", ""]) + for key, value in sorted(report.metadata.items()): + lines.append("- `{key}`: {value}".format(key=key, value=value)) + if report.artifacts: + lines.extend(["", "## Artifacts", ""]) + for key, value in sorted(report.artifacts.items()): + lines.append("- `{key}`: {value}".format(key=key, value=value)) + lines.append("") + return "\n".join(lines) + + +def _write_csv(path: Path, metrics: Mapping[str, float], metadata: Mapping[str, Any]) -> None: + row: Dict[str, Any] = {} + row.update(metrics) + row.update({"metadata.{key}".format(key=key): value for key, value in metadata.items()}) + fieldnames = list(row.keys()) or ["status"] + if not row: + row = {"status": "empty"} + with path.open("w", encoding="utf-8", newline="") as handle: + writer = csv.DictWriter(handle, fieldnames=fieldnames) + writer.writeheader() + 
@dataclass(frozen=True)
class HazardTask:
    """Canonical hazard task label used by benchmark and config layers."""

    # Fully-qualified name, always "<hazard>.<target>".
    name: str
    hazard: str
    target: str
    description: str


_HAZARD_TASKS: Dict[str, HazardTask] = {
    task.name: task
    for task in (
        HazardTask(
            name="earthquake.picking",
            hazard="earthquake",
            target="picking",
            description="Waveform-based earthquake phase detection and P/S picking.",
        ),
        HazardTask(
            name="earthquake.forecasting",
            hazard="earthquake",
            target="forecasting",
            description="Earthquake forecasting over spatial or temporal forecast windows.",
        ),
        HazardTask(
            name="wildfire.danger",
            hazard="wildfire",
            target="danger",
            description="Wildfire danger or risk prediction over a region and horizon.",
        ),
        HazardTask(
            name="wildfire.spread",
            hazard="wildfire",
            target="spread",
            description="Wildfire spread forecasting over raster masks or burned-area grids.",
        ),
        HazardTask(
            name="flood.streamflow",
            hazard="flood",
            target="streamflow",
            description="Riverine discharge or streamflow forecasting.",
        ),
        HazardTask(
            name="flood.inundation",
            hazard="flood",
            target="inundation",
            description="Flood inundation and water-extent forecasting over spatial grids.",
        ),
        HazardTask(
            name="tc.track_intensity",
            hazard="tc",
            target="track_intensity",
            description="Storm-track and intensity forecasting over lead-time horizons.",
        ),
    )
}


def available_hazard_tasks() -> List[str]:
    """Return all registered task names, sorted alphabetically."""
    return sorted(_HAZARD_TASKS)


def get_hazard_task(name: str) -> HazardTask:
    """Look up a task by name (whitespace- and case-insensitive).

    Raises:
        KeyError: If the normalized name is not registered.
    """
    key = name.strip().lower()
    task = _HAZARD_TASKS.get(key)
    if task is None:
        known = ", ".join(available_hazard_tasks())
        raise KeyError(f"Unknown hazard task '{name}'. Known: {known}")
    return task


def has_hazard_task(name: str) -> bool:
    """Return True when the normalized name is registered."""
    return name.strip().lower() in _HAZARD_TASKS


__all__ = [
    "HazardTask",
    "available_hazard_tasks",
    "get_hazard_task",
    "has_hazard_task",
]
def seed_all(seed: int = 42, deterministic: bool = False) -> None:
    """Seed the Python, NumPy, and PyTorch RNGs (including CUDA when present).

    Args:
        seed: Seed applied to every RNG.
        deterministic: When True, force deterministic cuDNN kernels and
            disable autotuning (may slow training).
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger, defaulting to the package-level ``pyhazards`` logger.

    The root logging level is initialized from the PYHAZARD_LOGLEVEL
    environment variable (default "INFO").
    """
    # NOTE(review): env var is PYHAZARD_* while the device var uses
    # PYHAZARDS_* -- possibly a legacy name; confirm before renaming.
    logging.basicConfig(level=os.getenv("PYHAZARD_LOGLEVEL", "INFO"))
    return logging.getLogger(name or "pyhazards")
torch.device(_DEFAULT_DEVICE_STR) + + +def auto_device(prefer: str | None = None) -> torch.device: + """ + Choose a device automatically. Respects PYHAZARDS_DEVICE and prefer flag. + """ + if prefer: + return torch.device(prefer) + return _default_device + + +def num_devices() -> int: + if torch.cuda.is_available(): + return torch.cuda.device_count() + return 0 + + +def get_device() -> torch.device: + return _default_device + + +def set_device(device_str: str | torch.device) -> None: + global _default_device + _default_device = torch.device(device_str) diff --git a/pyproject.toml b/pyproject.toml index d4a5de88..b25a40c1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,24 +1,26 @@ [build-system] -requires = ["setuptools>=61", "wheel"] +requires = ["setuptools>=77", "wheel"] build-backend = "setuptools.build_meta" [project] -name = "PyGIP" -version = "1.1.0" -description = "A Python package for Graph Intellectual Property Protection" +name = "pyhazards" +version = "1.0.5" +description = "A Python framework for AI-powered hazard prediction and risk assessment" readme = "README.md" -authors = [{ name = "Bolin Shen", email = "blshen@fsu.edu" }] -license = { text = "BSD-2-Clause" } +authors = [{ name = "Xueqi Cheng", email = "xc25@fsu.edu" }] +license = "MIT" requires-python = ">=3.8, <3.13" dependencies = [ - "torch-geometric>=2.0.0", + "torch>=2.3.0,<3.0", "numpy>=1.19.0", "scipy>=1.6.0", - "networkx>=2.5", "scikit-learn>=0.24.0", + "h5py>=3.8.0", + "requests>=2.28.0", + "xarray>=2023.1.0", + "netCDF4>=1.6.0", "tqdm>=4.50.0", - "torchdata>=0.7.0,<0.8.0", "pyyaml>=5.3.0", "pydantic>=2.0.0" ] @@ -26,8 +28,9 @@ dependencies = [ classifiers = [ "Development Status :: 3 - Alpha", "Intended Audience :: Science/Research", + "Intended Audience :: Developers", "Topic :: Scientific/Engineering :: Artificial Intelligence", - "License :: OSI Approved :: BSD License", + "Topic :: Scientific/Engineering :: Information Analysis", "Programming Language :: Python :: 3.8", 
"Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", @@ -37,18 +40,11 @@ classifiers = [ ] [project.urls] -Homepage = "https://labrai.github.io/PyGIP" -Repository = "https://github.com/LabRAI/PyGIP" - -[project.optional-dependencies] -# torch 2.3 cannot be installed directly from PyPI, users must add -f -torch = ["torch==2.3.0"] -# DGL cannot be installed directly from PyPI, users must add -f -dgl = ["dgl==2.2.1"] - +Homepage = "https://labrai.github.io/PyHazard" +Repository = "https://github.com/LabRAI/PyHazard" [tool.setuptools] include-package-data = true [tool.setuptools.packages.find] -include = ["pygip*"] +include = ["pyhazards*"] diff --git a/requirements.txt b/requirements.txt index 351816a1..19482438 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,6 @@ -torch==2.3.0 -torch-geometric>=2.0.0 numpy scipy -networkx scikit-learn tqdm pyyaml pydantic -torchdata>=0.7.0,<0.8.0 \ No newline at end of file diff --git a/scripts/align_wildfire_2024_fuel.py b/scripts/align_wildfire_2024_fuel.py new file mode 100644 index 00000000..52442120 --- /dev/null +++ b/scripts/align_wildfire_2024_fuel.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +import argparse +from pathlib import Path + +from pyhazards.benchmarks.wildfire_benchmark.cache_builder import align_static_fuel_to_cache + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Align LANDFIRE fuel to the wildfire 2024 benchmark cache grid.") + parser.add_argument("--cache_dir", type=str, default="/home/runyang/my-copy/data_cache/wildfire_2024_v1") + parser.add_argument( + "--landfire_tif", + type=str, + default="/home/runyang/ryang/landfire_fbfm40/LF2024_FBFM13_250_CONUS/Tif/LC24_F13_250.tif", + ) + parser.add_argument("--overwrite", action="store_true") + return parser.parse_args() + + +def main() -> None: + args = parse_args() + payload = align_static_fuel_to_cache( + cache_root=args.cache_dir, + 
landfire_tif=args.landfire_tif, + overwrite=bool(args.overwrite), + ) + print(f"[done] aligned fuel written under {Path(args.cache_dir) / 'static'}") + print(f"[summary] valid_cells={payload.get('valid_cells')} valid_fraction={payload.get('valid_fraction')}") + + +if __name__ == "__main__": + main() diff --git a/scripts/build_wildfire_2024_cache.py b/scripts/build_wildfire_2024_cache.py new file mode 100644 index 00000000..044b8bf7 --- /dev/null +++ b/scripts/build_wildfire_2024_cache.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +import argparse +import sys +from pathlib import Path + +REPO_ROOT = Path(__file__).resolve().parent.parent +if str(REPO_ROOT) not in sys.path: + sys.path.insert(0, str(REPO_ROOT)) + +from pyhazards.benchmarks.wildfire_benchmark.cache_builder import build_cache + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Build the wildfire 2024 real-data cache for benchmark runs.") + parser.add_argument( + "--config", + default=str(REPO_ROOT / "pyhazards" / "configs" / "wildfire_benchmark" / "cache_2024_v1.yaml"), + ) + parser.add_argument("--limit_days", type=int, default=0, help="Only materialize the first N shared dates for smoke-like validation.") + return parser.parse_args() + + +def main() -> None: + args = parse_args() + summary = build_cache(args.config, limit_days=int(args.limit_days)) + print("[done] wildfire cache built") + print(f"cache_root={summary.cache_root}") + print(f"label_days={summary.n_label_days} met_days={summary.n_met_days} shared_days={summary.n_shared_days}") + print(f"train={summary.train_days} val={summary.val_days} test={summary.test_days}") + print(f"weather_vars={','.join(summary.weather_vars)}") + + +if __name__ == "__main__": + main() diff --git a/scripts/render_appendix_a_docs.py b/scripts/render_appendix_a_docs.py new file mode 100644 index 00000000..7aa67cb0 --- /dev/null +++ b/scripts/render_appendix_a_docs.py @@ -0,0 +1,33 @@ +from __future__ import 
annotations + +import argparse +from pathlib import Path + +from pyhazards.appendix_a_catalog import sync_generated_appendix_a_docs + + +def main() -> int: + parser = argparse.ArgumentParser( + description="Render coverage-audit docs from the checked-in audit catalog." + ) + parser.add_argument( + "--check", + action="store_true", + help="Fail if generated files are out of date instead of writing them.", + ) + args = parser.parse_args() + + changes = sync_generated_appendix_a_docs(check=args.check) + if changes: + action = "would update" if args.check else "updated" + print("Coverage audit docs {action}:".format(action=action)) + for path in changes: + print(" - {path}".format(path=Path(path))) + return 1 if args.check else 0 + + print("Coverage audit docs are in sync.") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/render_benchmark_docs.py b/scripts/render_benchmark_docs.py new file mode 100644 index 00000000..32410706 --- /dev/null +++ b/scripts/render_benchmark_docs.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +import argparse +from pathlib import Path + +from pyhazards.benchmark_catalog import sync_generated_benchmark_docs + + +def main() -> int: + parser = argparse.ArgumentParser( + description=( + "Render generated benchmark catalog docs, including family and ecosystem " + "tables plus per-benchmark detail pages." 
+ ) + ) + parser.add_argument( + "--check", + action="store_true", + help="Fail if generated files are out of date instead of writing them.", + ) + args = parser.parse_args() + + changes = sync_generated_benchmark_docs(check=args.check) + if changes: + action = "would update" if args.check else "updated" + print("Benchmark docs {action}:".format(action=action)) + for path in changes: + print(" - {path}".format(path=Path(path))) + return 1 if args.check else 0 + + print("Benchmark docs are in sync.") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/render_dataset_docs.py b/scripts/render_dataset_docs.py new file mode 100644 index 00000000..9beb7825 --- /dev/null +++ b/scripts/render_dataset_docs.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +import argparse +from pathlib import Path + +from pyhazards.dataset_catalog import load_dataset_cards, sync_generated_dataset_docs + + +def main() -> int: + parser = argparse.ArgumentParser( + description="Render generated dataset catalog pages from YAML dataset cards." 
+ ) + parser.add_argument( + "--check", + action="store_true", + help="Fail if generated files are out of date instead of writing them.", + ) + args = parser.parse_args() + + cards = load_dataset_cards() + changes = sync_generated_dataset_docs(cards, check=args.check) + + if changes: + action = "would update" if args.check else "updated" + print(f"Dataset docs {action}:") + for path in changes: + print(f" - {Path(path)}") + return 1 if args.check else 0 + + print("Dataset docs are in sync.") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/render_model_docs.py b/scripts/render_model_docs.py new file mode 100644 index 00000000..cf0712dc --- /dev/null +++ b/scripts/render_model_docs.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +import argparse +from pathlib import Path + +from pyhazards.model_catalog import load_model_cards, sync_generated_docs + + +def main() -> int: + parser = argparse.ArgumentParser( + description="Render generated model catalog pages from YAML model cards." 
+ ) + parser.add_argument( + "--check", + action="store_true", + help="Fail if generated files are out of date instead of writing them.", + ) + args = parser.parse_args() + + cards = load_model_cards() + changes = sync_generated_docs(cards, check=args.check) + + if changes: + action = "would update" if args.check else "updated" + print("Model docs {action}:".format(action=action)) + for path in changes: + print(" - {path}".format(path=Path(path))) + return 1 if args.check else 0 + + print("Model docs are in sync.") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/review_model_pr.py b/scripts/review_model_pr.py new file mode 100644 index 00000000..76b682d1 --- /dev/null +++ b/scripts/review_model_pr.py @@ -0,0 +1,308 @@ +from __future__ import annotations + +import argparse +import json +import subprocess +from pathlib import Path +from typing import Dict, Iterable, List, Optional, Sequence + +from pyhazards.model_catalog import ( + MODEL_PR_MARKER, + MODEL_REVIEW_MARKER, + NON_CATALOG_MODELS, + builder_contract_issues, + card_by_registry_name, + load_model_cards, + model_catalog_alignment_issues, + run_smoke_test, + source_contract_issues, + touched_card_names, +) + + +REQUIRED_PR_SECTIONS = [ + "Model Summary", + "Hazard Scenario", + "Registry Name", + "Paper / Source", + "Smoke Test", + "Parity Notes", +] + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Review a model contribution PR using the PyHazards model catalog." 
+ ) + parser.add_argument("--base-sha", help="Base commit SHA for git diff.") + parser.add_argument("--event", help="Path to the GitHub event JSON payload.") + parser.add_argument("--report-json", required=True, help="Output path for JSON report.") + parser.add_argument("--report-md", required=True, help="Output path for Markdown report.") + return parser.parse_args() + + +def git_changed_files(base_sha: Optional[str]) -> List[str]: + revision = "{base}...HEAD".format(base=base_sha) if base_sha else "HEAD~1...HEAD" + result = subprocess.run( + ["git", "diff", "--name-only", revision], + check=True, + capture_output=True, + text=True, + ) + return [line.strip() for line in result.stdout.splitlines() if line.strip()] + + +def load_event(path: Optional[str]) -> Dict[str, object]: + if not path: + return {} + return json.loads(Path(path).read_text(encoding="utf-8")) + + +def extract_sections(body: str) -> Dict[str, str]: + sections: Dict[str, str] = {} + if not body: + return sections + + current_title: Optional[str] = None + current_lines: List[str] = [] + for line in body.splitlines(): + if line.startswith("## "): + if current_title is not None: + sections[current_title] = "\n".join(current_lines).strip() + current_title = line[3:].strip() + current_lines = [] + continue + if current_title is not None: + current_lines.append(line) + + if current_title is not None: + sections[current_title] = "\n".join(current_lines).strip() + return sections + + +def is_model_pr(changed_files: Sequence[str], body: str) -> bool: + if any(path.startswith("pyhazards/model_cards/") for path in changed_files): + return True + return "- [x] Model contribution" in body + + +def markdown_report( + *, + status: str, + summary: str, + changed_files: Sequence[str], + touched_models: Sequence[str], + blockers: Sequence[str], + warnings: Sequence[str], +) -> str: + lines: List[str] = [ + MODEL_REVIEW_MARKER, + "", + "## PyHazards Model PR Review", + "", + "Status: 
**{status}**".format(status=status.upper()), + "", + summary, + "", + ] + + if touched_models: + lines.extend( + [ + "Touched models:", + "", + ] + ) + for name in touched_models: + lines.append("- ``{name}``".format(name=name)) + lines.append("") + + if blockers: + lines.extend(["Blocking issues:", ""]) + for issue in blockers: + lines.append("- {issue}".format(issue=issue)) + lines.append("") + + if warnings: + lines.extend(["Warnings:", ""]) + for issue in warnings: + lines.append("- {issue}".format(issue=issue)) + lines.append("") + + if changed_files: + lines.extend(["Changed files reviewed:", ""]) + for path in changed_files: + lines.append("- ``{path}``".format(path=path)) + lines.append("") + + lines.extend( + [ + "Automation notes:", + "", + "- Passing model PRs are merged automatically by the PR bot workflow.", + "- The public model tables and module pages are generated from ``pyhazards/model_cards/*.yaml``.", + "- Use ``python scripts/smoke_test_models.py --models {models}`` locally before pushing.".format( + models=" ".join(touched_models) if touched_models else "" + ), + "", + ] + ) + return "\n".join(lines) + + +def write_reports(report_json: Path, report_md: Path, payload: Dict[str, object], markdown: str) -> None: + report_json.parent.mkdir(parents=True, exist_ok=True) + report_md.parent.mkdir(parents=True, exist_ok=True) + report_json.write_text(json.dumps(payload, indent=2, sort_keys=True), encoding="utf-8") + report_md.write_text(markdown, encoding="utf-8") + + +def main() -> int: + args = parse_args() + event = load_event(args.event) + pull_request = event.get("pull_request", {}) if isinstance(event, dict) else {} + body = "" + draft = False + if isinstance(pull_request, dict): + body = str(pull_request.get("body") or "") + draft = bool(pull_request.get("draft")) + + changed_files = git_changed_files(args.base_sha) + model_pr = is_model_pr(changed_files, body) + + if not model_pr: + payload = { + "status": "skip", + "is_model_pr": False, + 
"summary": "No catalog-backed model contribution detected in this PR.", + "models": [], + "blockers": [], + "warnings": [], + } + markdown = markdown_report( + status="skip", + summary=payload["summary"], + changed_files=changed_files, + touched_models=[], + blockers=[], + warnings=[], + ) + write_reports(Path(args.report_json), Path(args.report_md), payload, markdown) + return 0 + + blockers: List[str] = [] + warnings: List[str] = [] + + if MODEL_PR_MARKER not in body: + warnings.append( + "The PR template marker is missing. Use `.github/PULL_REQUEST_TEMPLATE.md` so the bot can parse the model metadata reliably." + ) + + sections = extract_sections(body) + for section in REQUIRED_PR_SECTIONS: + if not sections.get(section): + blockers.append( + "Fill in the PR template section `## {section}` with project-specific details.".format( + section=section + ) + ) + + try: + cards = load_model_cards() + except Exception as exc: # pragma: no cover - exercised via CLI path + blockers.append("Unable to load model cards: {error}".format(error=exc)) + cards = [] + + touched_names = touched_card_names(cards, changed_files) if cards else [] + mapping = card_by_registry_name(cards) if cards else {} + + if not touched_names: + blockers.append( + "Model PRs must add or update at least one YAML card under `pyhazards/model_cards/`." + ) + + if sections.get("Registry Name"): + registry_text = sections["Registry Name"] + for name in touched_names: + if name not in registry_text: + blockers.append( + "The `Registry Name` section should mention `{name}` so the described model matches the implementation under review.".format( + name=name + ) + ) + + if cards: + blockers.extend(model_catalog_alignment_issues(cards)) + for name in touched_names: + if name not in mapping: + blockers.append( + "No valid model card was loaded for `{name}`. 
Check the YAML filename and schema.".format( + name=name + ) + ) + continue + + card = mapping[name] + if card.model_name in NON_CATALOG_MODELS: + continue + + blockers.extend(builder_contract_issues(card)) + warnings.extend(source_contract_issues(card)) + + try: + smoke = run_smoke_test(card) + except Exception as exc: # pragma: no cover - exercised via CLI path + blockers.append( + "Smoke test failed for `{name}`: {error}".format(name=name, error=exc) + ) + else: + if not smoke["ok"]: + blockers.append( + "Smoke test shape mismatch for `{name}`: expected {expected}, got {actual}.".format( + name=name, + expected=smoke["expected_shape"], + actual=smoke["actual_shape"], + ) + ) + + hazard_section = sections.get("Hazard Scenario", "") + if card.hazard not in hazard_section: + warnings.append( + "The `Hazard Scenario` section should mention `{hazard}` so the generated model tables land in the intended section.".format( + hazard=card.hazard + ) + ) + + if draft: + warnings.append("This PR is still marked as draft, so the bot will not merge it yet.") + + status = "block" if blockers else "pass" + summary = ( + "The PR satisfies the current PyHazards model contract and the synthetic smoke test." + if status == "pass" + else "The PR is missing one or more blocking model-contract requirements." 
+ ) + + payload = { + "status": status, + "is_model_pr": True, + "summary": summary, + "models": touched_names, + "blockers": blockers, + "warnings": warnings, + "draft": draft, + } + markdown = markdown_report( + status=status, + summary=summary, + changed_files=changed_files, + touched_models=touched_names, + blockers=blockers, + warnings=warnings, + ) + write_reports(Path(args.report_json), Path(args.report_md), payload, markdown) + return 0 if status == "pass" else 1 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/run_benchmark.py b/scripts/run_benchmark.py new file mode 100644 index 00000000..5f04e328 --- /dev/null +++ b/scripts/run_benchmark.py @@ -0,0 +1,61 @@ +from __future__ import annotations + +import argparse + +from pyhazards.benchmarks import available_benchmarks +from pyhazards.configs import load_experiment_config +from pyhazards.engine.runner import BenchmarkRunner +from pyhazards.tasks import available_hazard_tasks + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Run a PyHazards benchmark from an experiment config.", + ) + parser.add_argument("--config", help="Path to an experiment YAML config.") + parser.add_argument( + "--list-benchmarks", + action="store_true", + help="Print registered benchmark names and exit.", + ) + parser.add_argument( + "--list-hazard-tasks", + action="store_true", + help="Print canonical hazard-task names and exit.", + ) + parser.add_argument( + "--output-dir", + help="Override the output directory declared in the config.", + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + + if args.list_benchmarks: + for name in available_benchmarks(): + print(name) + return 0 + + if args.list_hazard_tasks: + for name in available_hazard_tasks(): + print(name) + return 0 + + if not args.config: + raise SystemExit("--config is required unless a list flag is used.") + + experiment = load_experiment_config(args.config) + summary = 
BenchmarkRunner().run(experiment, output_dir=args.output_dir) + print("benchmark:", summary.benchmark_name) + print("hazard_task:", summary.hazard_task) + for key, value in sorted(summary.metrics.items()): + print("metric.{key}={value}".format(key=key, value=value)) + for fmt, path in sorted(summary.report_paths.items()): + print("report.{fmt}={path}".format(fmt=fmt, path=path)) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/run_wildfire_2024_real_baselines.py b/scripts/run_wildfire_2024_real_baselines.py new file mode 100644 index 00000000..7962fb95 --- /dev/null +++ b/scripts/run_wildfire_2024_real_baselines.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +import argparse +from pathlib import Path + +from pyhazards.benchmarks.wildfire_benchmark.real_runner import run_real_baselines + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Run first real-data wildfire baselines on the 2024 cache.") + parser.add_argument("--cache_dir", type=str, default="/home/runyang/my-copy/data_cache/wildfire_2024_v1") + parser.add_argument("--run_name", type=str, default="track_o_2024_real_v1_first4_dryrun") + parser.add_argument("--models", type=str, default="logistic_regression,xgboost,unet,convlstm") + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--train_limit_days", type=int, default=0) + parser.add_argument("--val_limit_days", type=int, default=0) + parser.add_argument("--test_limit_days", type=int, default=0) + parser.add_argument("--tabular_downsample", type=int, default=8) + parser.add_argument("--raster_downsample", type=int, default=4) + parser.add_argument("--temporal_downsample", type=int, default=8) + parser.add_argument("--temporal_history", type=int, default=6) + parser.add_argument("--xgboost_rounds", type=int, default=120) + parser.add_argument("--lightgbm_rounds", type=int, default=120) + parser.add_argument("--unet_epochs", type=int, 
default=12) + parser.add_argument("--convlstm_epochs", type=int, default=12) + parser.add_argument("--deep_patience", type=int, default=4) + parser.add_argument("--device", type=str, default=None) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + selected_models = [item.strip() for item in str(args.models).split(",") if item.strip()] + run_root = run_real_baselines( + cache_dir=args.cache_dir, + run_name=args.run_name, + models=selected_models, + seed=args.seed, + train_limit_days=args.train_limit_days or None, + val_limit_days=args.val_limit_days or None, + test_limit_days=args.test_limit_days or None, + tabular_downsample=args.tabular_downsample, + raster_downsample=args.raster_downsample, + temporal_downsample=args.temporal_downsample, + temporal_history=args.temporal_history, + xgboost_rounds=args.xgboost_rounds, + lightgbm_rounds=args.lightgbm_rounds, + unet_epochs=args.unet_epochs, + convlstm_epochs=args.convlstm_epochs, + deep_patience=args.deep_patience, + device=args.device, + ) + print(f"[done] real wildfire benchmark run written to {Path(run_root)}") + + +if __name__ == "__main__": + main() diff --git a/scripts/run_wildfire_smoke_batch.py b/scripts/run_wildfire_smoke_batch.py new file mode 100644 index 00000000..5f810cfe --- /dev/null +++ b/scripts/run_wildfire_smoke_batch.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +import argparse +import sys +from pathlib import Path + +REPO_ROOT = Path(__file__).resolve().parent.parent +if str(REPO_ROOT) not in sys.path: + sys.path.insert(0, str(REPO_ROOT)) + +from pyhazards.benchmarks.wildfire_benchmark import run_smoke_batch +from pyhazards.benchmarks.wildfire_benchmark.adapters import create_adapter + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Run wildfire benchmark smoke batches inside my-copy.") + parser.add_argument("--track", default="smoke", choices=["smoke", "real", "archive"]) + parser.add_argument("--run_name", 
default=None) + parser.add_argument("--catalog_kind", default="main", choices=["main", "extensions"]) + parser.add_argument("--catalog_path", default=None) + parser.add_argument("--contract_path", default=None) + parser.add_argument("--source_tier", default="all") + parser.add_argument("--models", default="") + parser.add_argument("--seeds", default="42") + parser.add_argument("--limit_models", type=int, default=0) + parser.add_argument("--max_epoch_steps", type=int, default=12) + parser.add_argument("--max_round_steps", type=int, default=30) + parser.add_argument("--max_iter_steps", type=int, default=20) + parser.add_argument("--max_tree_steps", type=int, default=20) + return parser.parse_args() + + +def main() -> None: + args = parse_args() + selected_models = [item.strip() for item in args.models.split(",") if item.strip()] or None + run_root = run_smoke_batch( + adapter_factory=create_adapter, + run_name=args.run_name, + track=args.track, + catalog_kind=args.catalog_kind, + catalog_path=args.catalog_path, + contract_path=args.contract_path, + source_tier=args.source_tier, + models=selected_models, + seeds=args.seeds, + limit_models=args.limit_models, + step_limits={ + "epoch": int(args.max_epoch_steps), + "round": int(args.max_round_steps), + "iteration": int(args.max_iter_steps), + "tree": int(args.max_tree_steps), + }, + ) + print(f"[done] wildfire benchmark smoke batch saved to: {run_root}") + + +if __name__ == "__main__": + main() diff --git a/scripts/smoke_test_models.py b/scripts/smoke_test_models.py new file mode 100644 index 00000000..01dea5ff --- /dev/null +++ b/scripts/smoke_test_models.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +import argparse +from typing import List + +from pyhazards.model_catalog import card_by_registry_name, load_model_cards, run_smoke_test + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Run synthetic smoke tests for cataloged PyHazards models." 
+ ) + parser.add_argument( + "--models", + nargs="*", + default=[], + help="Specific registry names to test. Defaults to all cataloged models.", + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + cards = load_model_cards() + mapping = card_by_registry_name(cards) + + selected: List[str] + if args.models: + selected = [] + for name in args.models: + if name not in mapping: + raise SystemExit("Unknown cataloged model: {name}".format(name=name)) + card = mapping[name] + if card.model_name not in selected: + selected.append(card.model_name) + else: + selected = [card.model_name for card in cards] + + ok = True + for name in selected: + card = mapping[name] + result = run_smoke_test(card) + status = "PASS" if result["ok"] else "FAIL" + print( + "[{status}] {name}: expected {expected}, got {actual}".format( + status=status, + name=card.model_name, + expected=result["expected_shape"], + actual=result["actual_shape"], + ) + ) + ok = ok and result["ok"] + + return 0 if ok else 1 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/test_dataset.py b/scripts/test_dataset.py new file mode 100644 index 00000000..1b683bf3 --- /dev/null +++ b/scripts/test_dataset.py @@ -0,0 +1,16 @@ +print("TEST SCRIPT STARTED") + +import torch +from pyhazards.data.load_hydrograph_data import load_hydrograph_data + +print("Imports OK") + +bundle = load_hydrograph_data( + era5_path="pyhazards/data/era5_subset", + mesh_coords=mesh_coords, +) + +print("Bundle built") +print(bundle) +print(bundle.feature_spec) +print("DONE") diff --git a/scripts/train_hydrographnet.py b/scripts/train_hydrographnet.py new file mode 100644 index 00000000..387b86e6 --- /dev/null +++ b/scripts/train_hydrographnet.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python3 + +import torch +import torch.nn.functional as F + +from pyhazards.engine import Trainer +from pyhazards.models import build_model +from pyhazards.datasets import graph_collate + +from 
pyhazards.data.load_hydrograph_data import load_hydrograph_data + + +# ----------------------------- +# Simple regression metrics +# ----------------------------- +def mse(pred, target): + return F.mse_loss(pred, target).item() + +def rmse(pred, target): + return torch.sqrt(F.mse_loss(pred, target)).item() + + +def main(): + torch.manual_seed(0) + + bundle = load_hydrograph_data( + era5_path="pyhazards/data/era5_subset", + ) + + train_split = "train" + + # Infer dimensions from dataset + sample_x, sample_y = bundle.splits[train_split].inputs[0] + + x_tensor = sample_x["x"] + + past_days = x_tensor.shape[0] + num_nodes = x_tensor.shape[1] + node_feats = x_tensor.shape[2] + out_dim = 1 + + print("Dataset shapes:") + print(" x:", x_tensor.shape) + print(" y:", sample_y.shape) + + # Model + model = build_model( + name="hydrographnet", + task="regression", + node_in_dim=node_feats, + edge_in_dim=3, + out_dim=out_dim, + ) + # Optimizer + loss + optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) + + def loss_fn(pred, target): + return F.mse_loss(pred, target) + # Trainer + trainer = Trainer(model=model) + + trainer.fit( + bundle, + train_split=train_split, + val_split=None, + optimizer=optimizer, + loss_fn=loss_fn, + batch_size=1, + max_epochs=5, + collate_fn=graph_collate, + ) + # Manual evaluation + model.eval() + with torch.no_grad(): + dataset = bundle.splits[train_split].inputs + batch, target = graph_collate([dataset[i] for i in range(len(dataset))]) + + device = next(model.parameters()).device + batch = { + k: (v.to(device) if torch.is_tensor(v) else v) + for k, v in batch.items() + } + target = target.to(device) + + pred = model(batch) + + print("Train MSE :", mse(pred, target)) + print("Train RMSE:", rmse(pred, target)) + + +if __name__ == "__main__": + main() diff --git a/scripts/verify_table_entries.py b/scripts/verify_table_entries.py new file mode 100644 index 00000000..74b65e8c --- /dev/null +++ b/scripts/verify_table_entries.py @@ -0,0 +1,87 @@ +from 
__future__ import annotations + +import importlib.util +import subprocess +import sys + +from pyhazards.dataset_catalog import ( + dataset_catalog_alignment_issues, + load_dataset_cards, +) +from pyhazards.model_catalog import load_model_cards, run_smoke_test + + +def verify_datasets() -> bool: + ok = True + print("=== Dataset Table Verification ===") + cards = load_dataset_cards() + issues = dataset_catalog_alignment_issues(cards) + for issue in issues: + print(f"[catalog] {issue}") + ok = False + + dataset_modules = sorted( + { + card.inspection.module + for card in cards + if card.inspection is not None and card.inspection.module + } + ) + for mod in dataset_modules: + spec = importlib.util.find_spec(mod) + print(f"[import] {mod}: {'OK' if spec else 'MISSING'}") + if spec is None: + ok = False + continue + + cmd = [sys.executable, "-m", mod, "--help"] + res = subprocess.run(cmd, capture_output=True, text=True) + print(f"[cli] {mod} --help: exit={res.returncode}") + if res.returncode != 0: + ok = False + + era5_cmd = [ + sys.executable, + "-m", + "pyhazards.datasets.era5.inspection", + "--path", + "pyhazards/data/era5_subset", + "--max-vars", + "5", + ] + era5_res = subprocess.run(era5_cmd, capture_output=True, text=True) + print( + "[run] pyhazards.datasets.era5.inspection " + f"--path pyhazards/data/era5_subset: exit={era5_res.returncode}" + ) + if era5_res.returncode != 0: + ok = False + + return ok + + +def verify_models() -> bool: + ok = True + print("\n=== Model Table Verification ===") + for card in load_model_cards(): + result = run_smoke_test(card) + print( + f"[model] {card.model_name}: expected={result['expected_shape']} " + f"actual={result['actual_shape']}" + ) + if not result["ok"]: + ok = False + + return ok + + +def main() -> int: + datasets_ok = verify_datasets() + models_ok = verify_models() + ok = datasets_ok and models_ok + print(f"\nRESULT: {'PASS' if ok else 'FAIL'}") + return 0 if ok else 1 + + +if __name__ == "__main__": + raise 
SystemExit(main()) diff --git a/scripts/wildfire_aspp_NDWS.py b/scripts/wildfire_aspp_NDWS.py new file mode 100644 index 00000000..ef6d3b6f --- /dev/null +++ b/scripts/wildfire_aspp_NDWS.py @@ -0,0 +1,668 @@ +#!/usr/bin/env python3 +""" +train_wildfire_aspp_micro.py + +End-to-end trainer for wildfire next-day spread segmentation on NPZ tiles. +- PyHazards-registered CNN + ASPP model (wildfire_cnn_aspp via build_model) +- Lightning training loop +- Fast epoch metrics during training (TP/FP/FN/TN -> Precision/Recall/F1/IoU/PixelAcc) +- Threshold sweep after training (optionally with small-component removal) + +Expected NPZ keys: +- inputs: (C,H,W) float32 +- targets: (1,H,W) or (H,W) {0,1} +Also supports x/y, image/mask, label/target/targets. +""" + +from __future__ import annotations + +import argparse +import json +from dataclasses import dataclass +from pathlib import Path +from typing import Dict, List, Tuple + +import numpy as np +import torch +import torch.nn as nn +import lightning.pytorch as pl +from torch.utils.data import DataLoader, Dataset, WeightedRandomSampler +from pyhazards.models.wildfire_aspp import WildfireASPP, TverskyLoss + +# ✅ Use the PyHazards model registry +from pyhazards.models import build_model +# keep your loss +from pyhazards.models.wildfire_aspp import TverskyLoss + + +# ----------------------------- +# Utilities +# ----------------------------- + +def seed_everything(seed: int) -> None: + pl.seed_everything(seed, workers=True) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def list_npz_files(root: str) -> List[str]: + root_p = Path(root) + if root_p.is_file() and root_p.suffix == ".npz": + return [str(root_p)] + if not root_p.exists(): + raise FileNotFoundError(f"root not found: {root}") + files = sorted([str(p) for p in root_p.rglob("*.npz")]) + if not files: + raise FileNotFoundError(f"No .npz files found under: {root}") + return files + + +def load_npz_xy(path: str) -> Tuple[np.ndarray, 
def load_npz_xy(path: str) -> Tuple[np.ndarray, np.ndarray]:
    """Load one (inputs, targets) pair from an NPZ tile.

    Accepts several key aliases (x/X/inputs/image/inp and
    y/Y/label/mask/target/targets). Returns x as float32 (C,H,W) and y as
    float32 (1,H,W) binarized at 0.5.

    Raises:
        KeyError: when no recognized input/target keys are present.
        ValueError: when the stored arrays have unsupported shapes.
    """
    x_keys = ["x", "X", "inputs", "image", "inp"]
    y_keys = ["y", "Y", "label", "mask", "target", "targets"]

    # NpzFile keeps the underlying zip handle open; use it as a context
    # manager so the file is always closed. The arrays are materialized
    # (astype copies) before the handle goes away.
    with np.load(path) as d:
        x = next((d[k] for k in x_keys if k in d), None)
        y = next((d[k] for k in y_keys if k in d), None)

        if x is None or y is None:
            raise KeyError(
                f"{path} missing required keys. Found keys={list(d.keys())}. "
                f"Expected one of x={x_keys} and y={y_keys}."
            )

        x = x.astype(np.float32)
        y = y.astype(np.float32)

    if x.ndim == 2:
        x = x[None, ...]
    if x.ndim != 3:
        raise ValueError(f"{path}: x must be (C,H,W) got {x.shape}")

    if y.ndim == 2:
        y = y[None, ...]
    # Channels-last single-channel masks (H,W,1) are moved to channel-first.
    if y.ndim == 3 and y.shape[0] != 1 and y.shape[-1] == 1:
        y = np.transpose(y, (2, 0, 1))
    if y.ndim != 3 or y.shape[0] != 1:
        raise ValueError(f"{path}: y must be (1,H,W) or (H,W) got {y.shape}")

    y = (y > 0.5).astype(np.float32)
    return x, y


def save_json(obj: dict, path: str) -> None:
    """Write *obj* as pretty-printed JSON, creating parent directories."""
    Path(path).parent.mkdir(parents=True, exist_ok=True)
    with open(path, "w") as f:
        json.dump(obj, f, indent=2)


def fbeta(precision: float, recall: float, beta: float = 0.5) -> float:
    """F-beta score; beta < 1 weights precision more heavily than recall."""
    if precision <= 0 and recall <= 0:
        return 0.0
    b2 = beta * beta
    denom = b2 * precision + recall
    return 0.0 if denom == 0 else (1 + b2) * precision * recall / denom
def remove_small_components(mask: np.ndarray, min_component_size: int = 0, **kwargs) -> np.ndarray:
    """Drop 4-connected components smaller than ``min_component_size`` pixels.

    ``min_size=...`` is accepted via **kwargs for backward compatibility with
    older call-sites. Uses scipy.ndimage.label when available and falls back
    to a pure-Python flood fill otherwise. Returns the input unchanged when
    no size filtering is requested.
    """
    # Legacy keyword takes effect only when the positional arg was left at 0.
    if "min_size" in kwargs and min_component_size == 0:
        try:
            min_component_size = int(kwargs["min_size"])
        except Exception:
            pass

    if min_component_size <= 0:
        return mask

    try:
        from scipy.ndimage import label  # type: ignore

        labels, count = label(mask.astype(np.uint8))
        if count == 0:
            return mask
        sizes = np.bincount(labels.ravel())
        keep = sizes >= min_component_size
        keep[0] = False  # label 0 is background
        return keep[labels]
    except Exception:
        # SciPy unavailable (or labeling failed): iterative flood fill.
        H, W = mask.shape
        seen = np.zeros((H, W), dtype=np.uint8)
        out = mask.copy()

        def four_neighbors(r, c):
            for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                rr, cc = r + dr, c + dc
                if 0 <= rr < H and 0 <= cc < W:
                    yield rr, cc

        for r in range(H):
            for c in range(W):
                if not out[r, c] or seen[r, c]:
                    continue
                seen[r, c] = 1
                stack = [(r, c)]
                component = []
                while stack:
                    rr, cc = stack.pop()
                    component.append((rr, cc))
                    for r2, c2 in four_neighbors(rr, cc):
                        if out[r2, c2] and not seen[r2, c2]:
                            seen[r2, c2] = 1
                            stack.append((r2, c2))
                if len(component) < min_component_size:
                    for rr, cc in component:
                        out[rr, cc] = False
        return out


# -----------------------------
# Dataset / Splits
# -----------------------------

class NPZTileDataset(Dataset):
    """Dataset over a fixed list of NPZ tile paths; items are (x, y) tensors."""

    def __init__(self, files: List[str]):
        self.files = files

    def __len__(self) -> int:
        return len(self.files)

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
        inputs, targets = load_npz_xy(self.files[idx])
        return torch.from_numpy(inputs), torch.from_numpy(targets)


def make_split(files: List[str], seed: int, val_ratio: float, test_ratio: float) -> Dict[str, List[str]]:
    """Deterministically shuffle *files* and partition into train/val/test."""
    shuffled = list(files)
    np.random.default_rng(seed).shuffle(shuffled)
    total = len(shuffled)
    n_test = int(round(total * test_ratio))
    n_val = int(round(total * val_ratio))
    n_train = total - n_val - n_test
    return {
        "train": shuffled[:n_train],
        "val": shuffled[n_train:n_train + n_val],
        "test": shuffled[n_train + n_val:],
    }


def tile_has_positive(npz_path: str) -> bool:
    """True when the tile's target mask contains at least one positive pixel."""
    _, targets = load_npz_xy(npz_path)
    return bool((targets > 0.5).any())


# -----------------------------
# Metrics dataclass (offline eval / sweep)
# -----------------------------

@dataclass
class Metrics:
    # Pixel-level segmentation metrics at a fixed probability threshold.
    PixelAcc: float
    Precision: float
    Recall: float
    F1: float
    IoU: float
    Threshold: float


@torch.no_grad()
def compute_metrics_from_logits(
    logits: torch.Tensor,
    targets: torch.Tensor,
    threshold: float,
    min_component_size: int = 0,
) -> Metrics:
    """Aggregate TP/FP/FN/TN over a batch and derive the summary metrics.

    Applies sigmoid + thresholding per sample and (optionally) removes small
    connected components before counting pixels.
    """
    probs = torch.sigmoid(logits).detach().cpu().numpy()
    labels = targets.detach().cpu().numpy().astype(np.uint8)

    tp = fp = fn = tn = 0
    for idx in range(probs.shape[0]):
        truth = labels[idx, 0].astype(bool)
        pred = probs[idx, 0] >= threshold
        pred = remove_small_components(pred, min_component_size=min_component_size)

        tp += int(np.logical_and(pred, truth).sum())
        fp += int(np.logical_and(pred, np.logical_not(truth)).sum())
        fn += int(np.logical_and(np.logical_not(pred), truth).sum())
        tn += int(np.logical_and(np.logical_not(pred), np.logical_not(truth)).sum())

    pixel_acc = (tp + tn) / max(tp + tn + fp + fn, 1)
    precision = tp / max(tp + fp, 1)
    recall = tp / max(tp + fn, 1)
    f1 = 0.0 if (precision + recall) == 0 else 2 * precision * recall / (precision + recall)
    iou = tp / max(tp + fp + fn, 1)

    return Metrics(
        PixelAcc=float(pixel_acc),
        Precision=float(precision),
        Recall=float(recall),
        F1=float(f1),
        IoU=float(iou),
        Threshold=float(threshold),
    )
class WildfireLitModule(pl.LightningModule):
    """Lightning wrapper around the registry-built CNN+ASPP segmentation model.

    Training uses BCE and/or Tversky loss. Validation/test epochs accumulate
    a pixel confusion matrix and log Precision/Recall/F1/IoU/PixelAcc. Fast
    epoch metrics use plain thresholding only; connected-component filtering
    is reserved for the offline evaluation path.
    """

    def __init__(
        self,
        in_channels: int,
        lr: float,
        loss: str,
        pos_weight_value: float,
        use_pos_weight: bool,
        tversky_alpha: float,
        tversky_beta: float,
        bce_weight: float,
        tversky_weight: float,
        threshold: float,
        # Kept for offline eval only; fast epoch metrics never filter components.
        min_component_size: int = 0,
    ):
        super().__init__()
        self.save_hyperparameters()

        # Backbone built from the PyHazards registry (CNN + ASPP head).
        self.model = build_model(
            name="wildfire_cnn_aspp",
            task="segmentation",
            in_channels=int(in_channels),
            base_channels=32,
            aspp_channels=32,
            dilations=(1, 3, 6, 12),
            dropout=0.0,
        )

        # Cap the positive-class weight; extreme values destabilize training.
        capped = min(float(pos_weight_value), 25.0)
        self.register_buffer("pos_weight", torch.tensor([capped], dtype=torch.float32))

        self.loss_name = loss
        self.thr = float(threshold)

        self.tversky = TverskyLoss(alpha=float(tversky_alpha), beta=float(tversky_beta))
        self.bce = (
            nn.BCEWithLogitsLoss(pos_weight=self.pos_weight)
            if bool(use_pos_weight)
            else nn.BCEWithLogitsLoss()
        )

        self.bce_weight = float(bce_weight)
        self.tversky_weight = float(tversky_weight)

        # Running pixel confusion counts, reset at each epoch start.
        self._reset_running("val")
        self._reset_running("test")

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)

    def _reset_running(self, stage: str) -> None:
        """Zero the TP/FP/FN/TN accumulators for *stage*."""
        for part in ("tp", "fp", "fn", "tn"):
            setattr(self, f"{stage}_{part}", 0.0)

    @torch.no_grad()
    def _update_running(self, stage: str, logits: torch.Tensor, y: torch.Tensor) -> None:
        # Plain thresholding only — connected-component removal is too slow
        # to run per batch.
        pred = (torch.sigmoid(logits) >= self.thr).to(torch.int32)
        true = (y >= 0.5).to(torch.int32)

        deltas = {
            "tp": (pred & true).sum().item(),
            "fp": (pred & (1 - true)).sum().item(),
            "fn": ((1 - pred) & true).sum().item(),
            "tn": ((1 - pred) & (1 - true)).sum().item(),
        }
        for part, value in deltas.items():
            key = f"{stage}_{part}"
            setattr(self, key, getattr(self, key) + value)

    def _log_running(self, stage: str) -> None:
        """Derive epoch metrics from the accumulated confusion counts and log them."""
        tp, fp, fn, tn = (
            getattr(self, f"{stage}_{part}") for part in ("tp", "fp", "fn", "tn")
        )

        pixel_acc = (tp + tn) / max(tp + tn + fp + fn, 1.0)
        precision = tp / max(tp + fp, 1.0)
        recall = tp / max(tp + fn, 1.0)
        f1 = 0.0 if (precision + recall) == 0 else 2 * precision * recall / (precision + recall)
        iou = tp / max(tp + fp + fn, 1.0)

        self.log(f"{stage}/PixelAcc", pixel_acc, prog_bar=False, on_epoch=True)
        self.log(f"{stage}/Precision", precision, prog_bar=False, on_epoch=True)
        self.log(f"{stage}/Recall", recall, prog_bar=False, on_epoch=True)
        self.log(f"{stage}/F1", f1, prog_bar=True, on_epoch=True)
        self.log(f"{stage}/IoU", iou, prog_bar=True, on_epoch=True)

    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self(x)

        if self.loss_name == "bce":
            loss = self.bce(logits, y)
        elif self.loss_name == "tversky":
            loss = self.tversky(logits, y)
        elif self.loss_name == "bce_tversky":
            loss = (
                self.bce_weight * self.bce(logits, y)
                + self.tversky_weight * self.tversky(logits, y)
            )
        else:
            raise ValueError(f"Unknown loss: {self.loss_name}")

        self.log("train/loss", loss, prog_bar=True, on_step=True, on_epoch=True)
        return loss

    # Explicit validation hooks so Lightning runs the val loop.
    def on_validation_epoch_start(self):
        self._reset_running("val")

    def validation_step(self, batch, batch_idx):
        x, y = batch
        self._update_running("val", self(x), y)

    def on_validation_epoch_end(self):
        self._log_running("val")

    def on_test_epoch_start(self):
        self._reset_running("test")

    def test_step(self, batch, batch_idx):
        x, y = batch
        self._update_running("test", self(x), y)

    def on_test_epoch_end(self):
        self._log_running("test")

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=float(self.hparams.lr))


# -----------------------------
# Eval helpers (offline collect + sweep)
# -----------------------------

@torch.no_grad()
def gather_logits_targets(model: nn.Module, loader: DataLoader, device: torch.device):
    """Run *model* over *loader*; return concatenated (logits, targets) on CPU."""
    model.eval()
    logit_chunks = []
    target_chunks = []
    for inputs, labels in loader:
        outputs = model(inputs.to(device))
        logit_chunks.append(outputs.detach().cpu())
        target_chunks.append(labels.detach().cpu())
    return torch.cat(logit_chunks, dim=0), torch.cat(target_chunks, dim=0)


@torch.no_grad()
def sweep_thresholds(
    logits: torch.Tensor,
    targets: torch.Tensor,
    thresholds: np.ndarray,
    pick: str,
    beta: float,
    min_recall: float,
    min_component_size: int,
) -> Metrics:
    """Pick the best threshold on (logits, targets) under the given criterion.

    ``pick="f05"`` maximizes F-beta; ``pick="precision_at_recall"`` maximizes
    precision among thresholds whose recall meets *min_recall*.

    Raises:
        RuntimeError: when no threshold satisfies the recall constraint.
        ValueError: on an unknown *pick* mode.
    """
    best_metrics: Metrics | None = None
    best_score = -1e9

    for thr in thresholds:
        candidate = compute_metrics_from_logits(
            logits,
            targets,
            threshold=float(thr),
            min_component_size=int(min_component_size),
        )

        if pick == "f05":
            score = fbeta(candidate.Precision, candidate.Recall, beta=float(beta))
        elif pick == "precision_at_recall":
            # Hard constraint: ignore thresholds that miss the recall target.
            if candidate.Recall < float(min_recall):
                continue
            score = candidate.Precision
        else:
            raise ValueError(f"Unknown pick mode: {pick}")

        if score > best_score:
            best_score = score
            best_metrics = candidate

    if best_metrics is None:
        raise RuntimeError(
            f"No threshold achieved recall >= {float(min_recall):.4f}. "
            f"Try lowering --min_recall, adjusting thresholds range, or retraining."
        )

    return best_metrics
# -----------------------------
# Main
# -----------------------------

def _resolve_accelerator(device_flag: str) -> Tuple[torch.device, str, int]:
    """Map the --device flag to (torch device, Lightning accelerator, device count).

    Falls back to CPU when the requested backend is unavailable.
    """
    if device_flag == "cuda" and torch.cuda.is_available():
        return torch.device("cuda"), "cuda", 1
    if device_flag == "mps" and torch.backends.mps.is_available():
        return torch.device("mps"), "mps", 1
    return torch.device("cpu"), "cpu", 1


def _build_loader(
    dataset: Dataset,
    batch_size: int,
    num_workers: int,
    pin_memory: bool,
    shuffle: bool = False,
    sampler=None,
) -> DataLoader:
    """DataLoader with the settings shared by the train/val/test loaders."""
    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        sampler=sampler,
        num_workers=num_workers,
        pin_memory=pin_memory,
        persistent_workers=bool(num_workers and num_workers > 0),
    )


def main():
    """CLI entry point: split data, train (or load) the model, evaluate, sweep."""
    ap = argparse.ArgumentParser()

    ap.add_argument("--root", type=str, required=True, help="Path to NPZ files or directory containing NPZs")
    ap.add_argument("--seed", type=int, default=1337)

    ap.add_argument("--epochs", type=int, default=50)
    ap.add_argument("--batch_size", type=int, default=8)
    ap.add_argument("--num_workers", type=int, default=0)
    ap.add_argument("--lr", type=float, default=4e-4)

    ap.add_argument("--loss", type=str, default="bce_tversky", choices=["bce", "tversky", "bce_tversky"])
    ap.add_argument("--tversky_alpha", type=float, default=0.8)
    ap.add_argument("--tversky_beta", type=float, default=0.2)

    ap.add_argument("--use_pos_weight", action="store_true", help="Use BCE pos_weight (often hurts precision)")
    ap.add_argument("--bce_weight", type=float, default=0.5)
    ap.add_argument("--tversky_weight", type=float, default=0.5)

    ap.add_argument("--threshold", type=float, default=0.5)
    ap.add_argument("--min_component_size", type=int, default=0)

    ap.add_argument("--val_ratio", type=float, default=0.1)
    ap.add_argument("--test_ratio", type=float, default=0.1)

    ap.add_argument("--device", type=str, default="cpu", help="cpu|cuda|mps")
    ap.add_argument("--log_every_steps", type=int, default=50)

    # Oversampling positives is the biggest lever for learning rare fire pixels.
    ap.add_argument("--pos_oversample", type=float, default=5.0, help=">1 oversamples tiles that contain positives")
    ap.add_argument("--no_oversample", action="store_true", help="Disable positive-tile oversampling")

    # Post-training evaluation / threshold sweep.
    ap.add_argument("--sweep_after", action="store_true")
    ap.add_argument("--sweep_pick", type=str, default="f05", choices=["f05", "precision_at_recall"])
    ap.add_argument("--sweep_beta", type=float, default=0.5)
    ap.add_argument("--min_recall", type=float, default=0.30)
    ap.add_argument("--sweep_n", type=int, default=40)

    ap.add_argument("--ckpt_out", type=str, default="aspp_micro.ckpt")
    ap.add_argument("--eval_only", action="store_true", help="Skip training; just load ckpt and eval/sweep")
    ap.add_argument("--ckpt_in", type=str, default="", help="Path to ckpt for --eval_only")

    args = ap.parse_args()
    seed_everything(args.seed)

    # Split the tile files and persist the manifest for reproducibility.
    files = list_npz_files(args.root)
    split = make_split(files, seed=args.seed, val_ratio=args.val_ratio, test_ratio=args.test_ratio)
    print(f"[files] train={len(split['train'])} val={len(split['val'])} test={len(split['test'])}")
    save_json(split, "split_manifest.json")
    print("[manifest] saved: split_manifest.json")

    # Fail early with a clear message instead of an IndexError at train_ds[0].
    if not split["train"]:
        raise ValueError(
            "Empty train split: provide more NPZ tiles or lower --val_ratio/--test_ratio."
        )

    train_ds = NPZTileDataset(split["train"])
    val_ds = NPZTileDataset(split["val"])
    test_ds = NPZTileDataset(split["test"])

    # Sanity-check one sample and take the channel count from it.
    x0, y0 = train_ds[0]
    print(f"[sanity] x0: {tuple(x0.shape)} {x0.dtype}")
    print(f"[sanity] y0: {tuple(y0.shape)} {y0.dtype}")

    # Estimate the pixel-level pos_weight from a subset of train tiles.
    sample_k = min(len(train_ds), 256)
    pos = 0
    neg = 0
    for i in range(sample_k):
        _, y = train_ds[i]
        yy = y.numpy().astype(np.uint8)
        pos += int(yy.sum())
        neg += int(yy.size - yy.sum())
    pos_weight = neg / max(pos, 1)
    print(f"[pos_weight] approx {pos_weight:.3f} (enable with --use_pos_weight; often hurts precision)")

    dev, accelerator, devices = _resolve_accelerator(args.device)
    print(f"[trainer] accelerator={accelerator} devices={devices}")

    # Optional weighted sampler that oversamples tiles containing positives.
    sampler = None
    if not args.no_oversample and split["train"] and args.pos_oversample and args.pos_oversample > 1.0:
        print("[oversample] scanning train tiles for positives (one-time)...")
        has_pos = np.array([tile_has_positive(p) for p in split["train"]], dtype=np.bool_)
        npos = int(has_pos.sum())
        nneg = int((~has_pos).sum())
        print(f"[oversample] train tiles: pos={npos} neg={nneg} pos%={npos/max(npos+nneg,1):.4f}")

        w = np.ones(len(has_pos), dtype=np.float32)
        w[has_pos] = float(args.pos_oversample)
        sampler = WeightedRandomSampler(weights=w.tolist(), num_samples=len(w), replacement=True)

    pin = accelerator == "cuda"
    train_loader = _build_loader(
        train_ds, args.batch_size, args.num_workers, pin,
        shuffle=(sampler is None), sampler=sampler,
    )
    val_loader = _build_loader(val_ds, args.batch_size, args.num_workers, pin)
    test_loader = _build_loader(test_ds, args.batch_size, args.num_workers, pin)

    lit = WildfireLitModule(
        in_channels=int(x0.shape[0]),
        lr=args.lr,
        loss=args.loss,
        pos_weight_value=float(pos_weight),
        use_pos_weight=bool(args.use_pos_weight),
        tversky_alpha=args.tversky_alpha,
        tversky_beta=args.tversky_beta,
        bce_weight=args.bce_weight,
        tversky_weight=args.tversky_weight,
        threshold=args.threshold,
        min_component_size=args.min_component_size,
    )

    if args.eval_only:
        if not args.ckpt_in:
            raise ValueError("--eval_only requires --ckpt_in path")
        ck = torch.load(args.ckpt_in, map_location="cpu")
        lit.load_state_dict(ck["state_dict"])
        lit = lit.to(dev)
        print(f"[eval_only] loaded ckpt: {args.ckpt_in}")
    else:
        trainer = pl.Trainer(
            max_epochs=args.epochs,
            accelerator=accelerator,
            devices=devices,
            log_every_n_steps=args.log_every_steps,
            enable_checkpointing=False,
            enable_progress_bar=True,
        )
        trainer.fit(lit, train_dataloaders=train_loader, val_dataloaders=val_loader)

        torch.save({"state_dict": lit.state_dict(), "hparams": dict(lit.hparams)}, args.ckpt_out)
        print(f"[ckpt] saved: {args.ckpt_out}")

    # Offline eval + optional sweep (with small-component removal, if requested).
    lit = lit.to(dev)
    logits_val, targets_val = gather_logits_targets(lit.model.to(dev), val_loader, dev)
    logits_test, targets_test = gather_logits_targets(lit.model.to(dev), test_loader, dev)

    val_m = compute_metrics_from_logits(
        logits_val, targets_val,
        threshold=args.threshold,
        min_component_size=args.min_component_size,
    )
    test_m = compute_metrics_from_logits(
        logits_test, targets_test,
        threshold=args.threshold,
        min_component_size=args.min_component_size,
    )

    print("val:", val_m.__dict__)
    print("test:", test_m.__dict__)

    if args.sweep_after:
        # Include tiny thresholds — critical for recall >= 0.30 constraints.
        thresholds = np.linspace(5e-4, 2e-2, args.sweep_n, dtype=np.float32)
        best = sweep_thresholds(
            logits=logits_val,
            targets=targets_val,
            thresholds=thresholds,
            pick=args.sweep_pick,
            beta=args.sweep_beta,
            min_recall=args.min_recall,
            min_component_size=args.min_component_size,
        )
        print(f"\n[SWEEP] best on val (by {args.sweep_pick}): {best.__dict__}")


if __name__ == "__main__":
    main()
pyhazards.models import build_model + + +def _to_device(obj, device): + if obj is None: + return None + if isinstance(obj, torch.Tensor): + return obj.to(device) + if isinstance(obj, (list, tuple)): + return type(obj)(_to_device(x, device) for x in obj) + if isinstance(obj, dict): + return {k: _to_device(v, device) for k, v in obj.items()} + return obj + + +def main() -> None: + if not torch.cuda.is_available(): + raise RuntimeError( + "CUDA is not available. Please install a CUDA-enabled PyTorch build " + "compatible with your GPU and ensure NVIDIA driver/CUDA runtime are working." + ) + + device = torch.device("cuda:0") + print("== PyHazards GPU smoke test (ERA5 + HydroGraphNet) ==") + print(f"PyTorch version: {torch.__version__}") + print(f"CUDA runtime in torch: {torch.version.cuda}") + print(f"Using device: {device} ({torch.cuda.get_device_name(0)})") + + data = load_hydrograph_data("pyhazards/data/era5_subset", max_nodes=50) + assert "train" in data.splits, "Expected 'train' split in loaded data." + assert data.feature_spec.input_dim == 2, f"Unexpected input_dim: {data.feature_spec.input_dim}" + assert data.label_spec.task_type == "regression", f"Unexpected task type: {data.label_spec.task_type}" + + train_inputs = data.get_split("train").inputs + print(f"Loaded split keys: {list(data.splits.keys())}") + print(f"Dataset samples: {len(train_inputs)}") + print(f"Feature spec: {data.feature_spec}") + print(f"Label spec: {data.label_spec}") + + model = build_model( + name="hydrographnet", + task="regression", + node_in_dim=2, + edge_in_dim=3, + out_dim=1, + ) + model = model.to(device) + print(f"Model: {type(model).__name__}") + + # Forward-pass sanity check on one real batch on GPU. 
+ sample_loader = DataLoader(train_inputs, batch_size=1, shuffle=False, collate_fn=graph_collate) + batch_x, batch_y = next(iter(sample_loader)) + batch_x = _to_device(batch_x, device) + batch_y = _to_device(batch_y, device) + with torch.no_grad(): + pred = model(batch_x) + assert pred.shape == batch_y.shape, f"Prediction shape {pred.shape} != target shape {batch_y.shape}" + print(f"Forward pass OK: pred shape {tuple(pred.shape)}") + + trainer = Trainer(model=model, device="cuda:0", metrics=[RegressionMetrics()], mixed_precision=False) + optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) + loss_fn = torch.nn.MSELoss() + + trainer.fit( + data, + optimizer=optimizer, + loss_fn=loss_fn, + max_epochs=1, + batch_size=1, + collate_fn=graph_collate, + ) + + metrics = trainer.evaluate( + data, + split="train", + batch_size=1, + collate_fn=graph_collate, + ) + + assert "MAE" in metrics and "RMSE" in metrics, f"Missing expected metrics: {metrics}" + assert math.isfinite(metrics["MAE"]) and math.isfinite(metrics["RMSE"]), f"Non-finite metrics: {metrics}" + + print(f"Evaluation metrics: {metrics}") + print("Note: MAE/RMSE are error metrics (lower is better). 
This smoke test is for pipeline validity, not benchmark quality.") + print("PASS: end-to-end implementation is working.") + + +if __name__ == "__main__": + main() diff --git a/tests/test_adapter_datasets.py b/tests/test_adapter_datasets.py new file mode 100644 index 00000000..55f18c62 --- /dev/null +++ b/tests/test_adapter_datasets.py @@ -0,0 +1,23 @@ +from pyhazards.datasets import available_datasets, load_dataset + + +def test_named_adapter_datasets_are_registered_and_loadable(): + expected = { + "seisbench_waveforms", + "pick_benchmark_waveforms", + "aefa_forecast", + "caravan_streamflow", + "waterbench_streamflow", + "hydrobench_streamflow", + "floodcastbench_inundation", + "ibtracs_tracks", + "tcbench_alpha", + "tropicyclonenet_dataset", + "wildfire_spread_temporal_synthetic", + } + assert expected.issubset(set(available_datasets())) + + for name in sorted(expected): + bundle = load_dataset(name, micro=True).load() + assert bundle.splits["test"].inputs is not None + assert bundle.metadata.get("source_dataset", name) == name or bundle.metadata.get("dataset") == name diff --git a/tests/test_appendix_a_catalog.py b/tests/test_appendix_a_catalog.py new file mode 100644 index 00000000..197372cd --- /dev/null +++ b/tests/test_appendix_a_catalog.py @@ -0,0 +1,32 @@ +from pathlib import Path + +from pyhazards.appendix_a_catalog import ( + appendix_a_alignment_issues, + render_appendix_a_page, +) +from pyhazards.model_catalog import load_model_cards + + +def test_appendix_a_catalog_aligns_with_model_card_statuses() -> None: + cards = load_model_cards() + assert not appendix_a_alignment_issues(cards) + + +def test_appendix_a_page_lists_missing_and_non_core_entries() -> None: + cards = load_model_cards() + page = render_appendix_a_page(cards) + assert "Coverage Audit" in page + assert "`wildfire_forecasting `_" in page + assert "``Implemented``" in page + assert "``Experimental``" in page + assert "GraphCast / GenCast" in page + assert ":doc:`ForeFire Adapter `" in page 
+ assert ":doc:`WRF-SFIRE Adapter `" in page + assert ":doc:`FireCastNet `" in page + assert ":doc:`WaveCastNet `" in page + + +def test_appendix_a_page_is_linked_from_docs_index() -> None: + repo_root = Path(__file__).resolve().parents[1] + index_text = (repo_root / "docs" / "source" / "index.rst").read_text(encoding="utf-8") + assert "appendix_a_coverage" in index_text diff --git a/tests/test_benchmark_catalog.py b/tests/test_benchmark_catalog.py new file mode 100644 index 00000000..a9907aed --- /dev/null +++ b/tests/test_benchmark_catalog.py @@ -0,0 +1,110 @@ +from pathlib import Path + +from pyhazards.benchmark_catalog import ( + BENCHMARK_PAGE_PATH, + benchmark_catalog_alignment_issues, + render_benchmark_page, + rendered_benchmark_docs, +) + + +def test_benchmark_catalog_aligns_with_registry_and_configs() -> None: + assert not benchmark_catalog_alignment_issues() + + +def test_benchmark_page_lists_family_and_ecosystem_tables() -> None: + page = render_benchmark_page() + + assert "At a Glance" in page + assert "Benchmark Families" in page + assert "Coverage Matrix" in page + assert "Benchmark Ecosystems" in page + assert "Programmatic Use" in page + assert page.index("Wildfire") < page.index("Earthquake") + assert ".. tab-item:: Tropical Cyclone" in page + assert "Tropical Cyclone / Hurricane" not in page + + family_cards = [ + ".. grid-item-card:: Wildfire Benchmark", + ".. grid-item-card:: Earthquake Benchmark", + ".. grid-item-card:: Flood Benchmark", + ".. grid-item-card:: Tropical Cyclone Benchmark", + ] + for card in family_cards: + assert page.count(card) == 1 + + ecosystem_cards = [ + ".. grid-item-card:: WildfireSpreadTS", + ".. grid-item-card:: SeisBench", + ".. grid-item-card:: pick-benchmark", + ".. grid-item-card:: pyCSEP", + ".. grid-item-card:: AEFA", + ".. grid-item-card:: Caravan", + ".. grid-item-card:: WaterBench", + ".. grid-item-card:: FloodCastBench", + ".. grid-item-card:: HydroBench", + ".. grid-item-card:: TCBench Alpha", + ".. 
grid-item-card:: IBTrACS", + ".. grid-item-card:: TropiCycloneNet-Dataset", + ] + for card in ecosystem_cards: + assert page.count(card) == 1 + + assert "WildfireSpreadTS: A Dataset of Multi-Modal Time Series for Wildfire Spread Prediction" in page + assert "8 smoke configs | 8 models | 1 ecosystem" in page + assert "5 smoke configs | 5 models | 4 ecosystems" in page + assert "6 smoke configs | 6 models | 4 ecosystems" in page + assert "8 smoke configs | 8 models | 3 ecosystems" in page + + +def test_rendered_docs_include_detail_pages_with_absolute_cross_links() -> None: + docs = rendered_benchmark_docs() + benchmark_docs_dir = BENCHMARK_PAGE_PATH.parent / "benchmarks" + + assert BENCHMARK_PAGE_PATH in docs + earthquake_detail = docs[benchmark_docs_dir / "earthquake_benchmark.rst"] + ecosystem_detail = docs[benchmark_docs_dir / "seisbench.rst"] + + assert ":doc:`SeisBench
`" in earthquake_detail + assert ":doc:`WaveCastNet
`" in earthquake_detail + assert ":doc:`Earthquake Benchmark
`" in ecosystem_detail + assert ":doc:`PhaseNet
`" in ecosystem_detail + assert ".. dropdown:: Supported Tasks" in ecosystem_detail + assert ".. dropdown:: Linked Models" in ecosystem_detail + + +def test_api_reference_order_is_curated() -> None: + repo_root = Path(__file__).resolve().parents[1] + index_text = (repo_root / "docs" / "source" / "index.rst").read_text(encoding="utf-8") + api_toc_start = index_text.index(":caption: API Reference") + additional_info_start = index_text.index(":caption: Additional Information") + api_toc_text = index_text[api_toc_start:additional_info_start] + api_order = [ + "pyhazards_datasets", + "pyhazards_models", + "pyhazards_benchmarks", + "pyhazards_configs", + "pyhazards_reports", + "pyhazards_engine", + "pyhazards_metrics", + "pyhazards_utils", + "interactive_map", + ] + positions = [api_toc_text.index(name) for name in api_order] + assert positions == sorted(positions) + + package_api_text = ( + repo_root / "docs" / "source" / "api" / "pyhazards.rst" + ).read_text(encoding="utf-8") + package_order = [ + "pyhazards.datasets", + "pyhazards.models", + "pyhazards.benchmarks", + "pyhazards.configs", + "pyhazards.reports", + "pyhazards.engine", + "pyhazards.metrics", + "pyhazards.utils", + ] + package_positions = [package_api_text.index(name) for name in package_order] + assert package_positions == sorted(package_positions) diff --git a/tests/test_benchmark_registry.py b/tests/test_benchmark_registry.py new file mode 100644 index 00000000..b1c43436 --- /dev/null +++ b/tests/test_benchmark_registry.py @@ -0,0 +1,47 @@ +import pytest +import torch.nn as nn + +from pyhazards.benchmarks import available_benchmarks, build_benchmark, register_benchmark +from pyhazards.benchmarks.base import Benchmark +from pyhazards.benchmarks.registry import _BENCHMARK_REGISTRY +from pyhazards.benchmarks.schemas import BenchmarkResult + + +class DummyBenchmark(Benchmark): + name = "dummy_benchmark" + hazard_task = "wildfire.danger" + + def evaluate(self, model: nn.Module, data, config): + return 
BenchmarkResult( + benchmark_name=self.name, + hazard_task=self.hazard_task, + metrics={"score": 1.0}, + ) + + +def test_register_and_build_benchmark(monkeypatch): + monkeypatch.setattr("pyhazards.benchmarks.registry._BENCHMARK_REGISTRY", {}) + register_benchmark("dummy_benchmark", DummyBenchmark) + + assert available_benchmarks() == ["dummy_benchmark"] + benchmark = build_benchmark("dummy_benchmark") + assert isinstance(benchmark, DummyBenchmark) + + +def test_duplicate_registration_raises(monkeypatch): + monkeypatch.setattr("pyhazards.benchmarks.registry._BENCHMARK_REGISTRY", {}) + register_benchmark("dummy_benchmark", DummyBenchmark) + + with pytest.raises(ValueError): + register_benchmark("dummy_benchmark", DummyBenchmark) + + +def test_aggregate_metrics_averages_results(): + benchmark = DummyBenchmark() + metrics = benchmark.aggregate_metrics( + [ + BenchmarkResult("dummy", "wildfire.danger", {"score": 1.0, "loss": 4.0}), + BenchmarkResult("dummy", "wildfire.danger", {"score": 3.0, "loss": 2.0}), + ] + ) + assert metrics == {"loss": 3.0, "score": 2.0} diff --git a/tests/test_benchmark_runner.py b/tests/test_benchmark_runner.py new file mode 100644 index 00000000..841af664 --- /dev/null +++ b/tests/test_benchmark_runner.py @@ -0,0 +1,98 @@ +import torch +from torch import nn + +from pyhazards.benchmarks import register_benchmark +from pyhazards.benchmarks.base import Benchmark +from pyhazards.benchmarks.schemas import BenchmarkResult +from pyhazards.configs import ( + BenchmarkConfig, + DatasetRef, + ExperimentConfig, + ModelRef, + ReportConfig, + dump_experiment_config, + load_experiment_config, +) +from pyhazards.datasets import DataBundle, DataSplit, FeatureSpec, LabelSpec +from pyhazards.engine.runner import BenchmarkRunner + + +class DummyRegressionBenchmark(Benchmark): + name = "dummy_regression" + hazard_task = "flood.streamflow" + + def evaluate(self, model: nn.Module, data: DataBundle, config: ExperimentConfig) -> BenchmarkResult: + split = 
data.get_split(config.benchmark.eval_split) + preds = model(split.inputs) + mae = float(torch.mean(torch.abs(preds - split.targets)).item()) + return BenchmarkResult( + benchmark_name=self.name, + hazard_task=config.benchmark.hazard_task, + metrics={"mae": mae}, + metadata={"split": config.benchmark.eval_split}, + ) + + +def _dummy_bundle() -> DataBundle: + x = torch.randn(16, 4) + y = torch.randn(16, 1) + splits = { + "train": DataSplit(x[:8], y[:8]), + "val": DataSplit(x[8:12], y[8:12]), + "test": DataSplit(x[12:], y[12:]), + } + return DataBundle( + splits=splits, + feature_spec=FeatureSpec(input_dim=4), + label_spec=LabelSpec(num_targets=1, task_type="regression"), + ) + + +def test_benchmark_runner_executes_dummy_pipeline(monkeypatch, tmp_path): + monkeypatch.setattr("pyhazards.benchmarks.registry._BENCHMARK_REGISTRY", {}) + register_benchmark("dummy_regression", DummyRegressionBenchmark) + + experiment = ExperimentConfig( + benchmark=BenchmarkConfig( + name="dummy_regression", + hazard_task="flood.streamflow", + metrics=["mae"], + eval_split="test", + ), + dataset=DatasetRef(name="unused"), + model=ModelRef(name="mlp", task="regression", params={"in_dim": 4, "out_dim": 1}), + report=ReportConfig(output_dir=str(tmp_path), formats=["json", "md"]), + ) + + summary = BenchmarkRunner().run( + experiment, + data=_dummy_bundle(), + output_dir=str(tmp_path), + ) + + assert summary.benchmark_name == "dummy_regression" + assert summary.hazard_task == "flood.streamflow" + assert "mae" in summary.metrics + assert summary.report_paths["json"].endswith("dummy_regression.json") + assert (tmp_path / "dummy_regression.md").exists() + + +def test_experiment_config_roundtrip(tmp_path): + path = tmp_path / "experiment.yaml" + experiment = ExperimentConfig( + benchmark=BenchmarkConfig(name="dummy_regression", hazard_task="flood.streamflow"), + dataset=DatasetRef(name="fpa_fod_tabular", params={"micro": True, "task": "cause"}), + model=ModelRef(name="wildfire_fpa", 
task="classification", params={"in_dim": 8, "out_dim": 5}), + report=ReportConfig(output_dir="reports", formats=["json", "csv"]), + seed=7, + metadata={"owner": "wave1"}, + ) + + dump_experiment_config(experiment, path) + loaded = load_experiment_config(path) + + assert loaded.benchmark.hazard_task == "flood.streamflow" + assert loaded.dataset.params["micro"] is True + assert loaded.model.params["out_dim"] == 5 + assert loaded.report.formats == ["json", "csv"] + assert loaded.seed == 7 diff --git a/tests/test_dataset_catalog.py b/tests/test_dataset_catalog.py new file mode 100644 index 00000000..5b39474a --- /dev/null +++ b/tests/test_dataset_catalog.py @@ -0,0 +1,80 @@ +from pyhazards.dataset_catalog import ( + API_PAGE_PATH, + DATASET_PAGE_PATH, + dataset_catalog_alignment_issues, + load_dataset_cards, + render_dataset_api_page, + render_dataset_page, + rendered_dataset_docs, +) + + +def test_dataset_catalog_aligns_with_registry_and_links() -> None: + cards = load_dataset_cards() + assert not dataset_catalog_alignment_issues(cards) + + +def test_dataset_page_lists_curated_hazard_tabs() -> None: + cards = load_dataset_cards() + page = render_dataset_page(cards) + + assert "At a Glance" in page + assert "Catalog by Hazard" in page + assert "Recommended Entry Points" in page + assert "Programmatic Use" in page + assert ".. tab-set::" in page + + tabs = [ + ".. tab-item:: Shared Forcing", + ".. tab-item:: Wildfire", + ".. tab-item:: Flood", + ".. tab-item:: Earthquake", + ".. 
tab-item:: Tropical Cyclone", + ] + for tab in tabs: + assert tab in page + + positions = [page.index(tab) for tab in tabs] + assert positions == sorted(positions) + + assert ":doc:`ERA5 `" in page + assert ":doc:`WFIGS `" in page + assert ":doc:`Caravan `" in page + assert ":doc:`SeisBench `" in page + assert ":doc:`IBTrACS `" in page + assert "Registry-loadable Datasets" in page + assert "Inspection Entry Points" in page + + +def test_dataset_api_and_detail_pages_use_generated_structure() -> None: + cards = load_dataset_cards() + docs = rendered_dataset_docs(cards) + + assert DATASET_PAGE_PATH in docs + assert API_PAGE_PATH in docs + + api_page = render_dataset_api_page(cards) + assert "Catalog Summary" in api_page + assert "Developer Dataset Workflow" in api_page + assert "Inspect an External Dataset Source" in api_page + assert "Register a Custom Dataset" in api_page + assert "pyhazards/dataset_cards" in api_page + + dataset_docs_dir = DATASET_PAGE_PATH.parent / "datasets" + era5_detail = docs[dataset_docs_dir / "era5.rst"] + seisbench_detail = docs[dataset_docs_dir / "seisbench_waveforms.rst"] + ibtracs_detail = docs[dataset_docs_dir / "ibtracs_tracks.rst"] + + for detail in (era5_detail, seisbench_detail, ibtracs_detail): + assert "Overview" in detail + assert "At a Glance" in detail + assert "Data Characteristics" in detail + assert "Access" in detail + assert "PyHazards Usage" in detail + assert "Inspection Workflow" in detail + assert "Reference" in detail + + assert "This dataset is currently documented as an external or inspection-first" in era5_detail + assert ":doc:`Earthquake Benchmark
`" in seisbench_detail + assert "there is no standalone inspection cli documented for it." in seisbench_detail.lower() + assert ":doc:`Tropical Cyclone Benchmark
`" in ibtracs_detail diff --git a/tests/test_earthquake_benchmarks.py b/tests/test_earthquake_benchmarks.py new file mode 100644 index 00000000..ca4abca1 --- /dev/null +++ b/tests/test_earthquake_benchmarks.py @@ -0,0 +1,20 @@ +from pyhazards.configs import load_experiment_config +from pyhazards.engine.runner import BenchmarkRunner + + +def test_earthquake_vertical_slice(tmp_path): + config = load_experiment_config("pyhazards/configs/earthquake/phasenet_smoke.yaml") + summary = BenchmarkRunner().run(config, output_dir=str(tmp_path)) + + assert summary.benchmark_name == "earthquake" + assert summary.hazard_task == "earthquake.picking" + assert "p_pick_mae" in summary.metrics + assert "json" in summary.report_paths + + +def test_earthquake_forecasting_exports_pycsep_style_report(tmp_path): + config = load_experiment_config("pyhazards/configs/earthquake/wavecastnet_benchmark_smoke.yaml") + summary = BenchmarkRunner().run(config, output_dir=str(tmp_path)) + + assert summary.hazard_task == "earthquake.forecasting" + assert "pycsep" in summary.report_paths diff --git a/tests/test_earthquake_breadth.py b/tests/test_earthquake_breadth.py new file mode 100644 index 00000000..96487397 --- /dev/null +++ b/tests/test_earthquake_breadth.py @@ -0,0 +1,37 @@ +import torch + +from pyhazards.configs import load_experiment_config +from pyhazards.engine.runner import BenchmarkRunner +from pyhazards.models import build_model + + +def test_additional_earthquake_picking_models_forward(): + x = torch.randn(3, 3, 256) + for name in ["eqtransformer", "gpd", "eqnet"]: + model = build_model(name=name, task="regression", in_channels=3) + preds = model(x) + assert preds.shape == (3, 2) + + +def test_earthquake_breadth_configs(tmp_path): + for config_name in [ + "pyhazards/configs/earthquake/eqtransformer_smoke.yaml", + "pyhazards/configs/earthquake/gpd_smoke.yaml", + "pyhazards/configs/earthquake/eqnet_smoke.yaml", + ]: + summary = BenchmarkRunner().run( + load_experiment_config(config_name), 
+ output_dir=str(tmp_path), + ) + assert summary.hazard_task == "earthquake.picking" + assert "f1" in summary.metrics + + +def test_wavecastnet_forecasting_benchmark(tmp_path): + config = load_experiment_config("pyhazards/configs/earthquake/wavecastnet_benchmark_smoke.yaml") + summary = BenchmarkRunner().run(config, output_dir=str(tmp_path)) + + assert summary.benchmark_name == "earthquake" + assert summary.hazard_task == "earthquake.forecasting" + assert "mae" in summary.metrics + assert "mse" in summary.metrics diff --git a/tests/test_flood_benchmarks.py b/tests/test_flood_benchmarks.py new file mode 100644 index 00000000..5167e1ae --- /dev/null +++ b/tests/test_flood_benchmarks.py @@ -0,0 +1,14 @@ +from pyhazards.configs import load_experiment_config +from pyhazards.engine.runner import BenchmarkRunner + + +def test_flood_streamflow_vertical_slice(tmp_path): + config = load_experiment_config("pyhazards/configs/flood/hydrographnet_smoke.yaml") + summary = BenchmarkRunner().run(config, output_dir=str(tmp_path)) + + assert summary.benchmark_name == "flood" + assert summary.hazard_task == "flood.streamflow" + assert "mae" in summary.metrics + assert "rmse" in summary.metrics + assert "nse" in summary.metrics + assert "kge" in summary.metrics diff --git a/tests/test_flood_breadth.py b/tests/test_flood_breadth.py new file mode 100644 index 00000000..f93ddc4b --- /dev/null +++ b/tests/test_flood_breadth.py @@ -0,0 +1,50 @@ +import torch + +from pyhazards.configs import load_experiment_config +from pyhazards.engine.runner import BenchmarkRunner +from pyhazards.models import build_model + + +def test_neuralhydrology_baselines_forward(): + batch = {"x": torch.randn(2, 4, 6, 2)} + for name in ["neuralhydrology_lstm", "neuralhydrology_ealstm", "google_flood_forecasting"]: + model = build_model(name=name, task="regression", input_dim=2, out_dim=1) + preds = model(batch) + assert preds.shape == (2, 6, 1) + + +def test_inundation_baselines_forward(): + x = torch.randn(2, 4, 3, 
16, 16) + for name in ["floodcast", "urbanfloodcast"]: + model = build_model(name=name, task="regression", in_channels=3, history=4) + preds = model(x) + assert preds.shape == (2, 1, 16, 16) + + +def test_flood_streamflow_breadth_configs(tmp_path): + for config_name in [ + "pyhazards/configs/flood/neuralhydrology_lstm_smoke.yaml", + "pyhazards/configs/flood/neuralhydrology_ealstm_smoke.yaml", + "pyhazards/configs/flood/google_flood_forecasting_smoke.yaml", + ]: + summary = BenchmarkRunner().run( + load_experiment_config(config_name), + output_dir=str(tmp_path), + ) + assert summary.hazard_task == "flood.streamflow" + assert "rmse" in summary.metrics + assert "nse" in summary.metrics + + +def test_flood_inundation_breadth_configs(tmp_path): + for config_name in [ + "pyhazards/configs/flood/floodcast_smoke.yaml", + "pyhazards/configs/flood/urbanfloodcast_smoke.yaml", + ]: + summary = BenchmarkRunner().run( + load_experiment_config(config_name), + output_dir=str(tmp_path), + ) + assert summary.hazard_task == "flood.inundation" + assert "iou" in summary.metrics + assert "pixel_mae" in summary.metrics diff --git a/tests/test_fpa_fod_datasets.py b/tests/test_fpa_fod_datasets.py new file mode 100644 index 00000000..bd2ff060 --- /dev/null +++ b/tests/test_fpa_fod_datasets.py @@ -0,0 +1,33 @@ +import torch + +from pyhazards.datasets import load_dataset + + +def test_fpa_fod_tabular_micro_shapes(): + bundle = load_dataset("fpa_fod_tabular", micro=True, task="cause").load() + train = bundle.get_split("train") + + assert train.inputs.ndim == 2 + assert train.targets.ndim == 1 + assert train.inputs.dtype == torch.float32 + assert train.targets.dtype == torch.long + assert train.inputs.shape[0] == train.targets.shape[0] + assert bundle.feature_spec.input_dim == train.inputs.shape[1] + + +def test_fpa_fod_weekly_micro_shapes(): + bundle = load_dataset( + "fpa_fod_weekly", + micro=True, + lookback_weeks=12, + features="counts+time", + ).load() + train = bundle.get_split("train") + 
+ assert train.inputs.ndim == 3 + assert train.targets.ndim == 2 + assert train.inputs.dtype == torch.float32 + assert train.targets.dtype == torch.float32 + assert train.inputs.shape[0] == train.targets.shape[0] + assert train.inputs.shape[1] == 12 + assert bundle.feature_spec.input_dim == train.inputs.shape[2] diff --git a/tests/test_fpa_fod_models.py b/tests/test_fpa_fod_models.py new file mode 100644 index 00000000..410967cd --- /dev/null +++ b/tests/test_fpa_fod_models.py @@ -0,0 +1,95 @@ +import torch + +from pyhazards.models import available_models, build_model +from pyhazards.models.wildfire_fpa_autoencoder import WildfireFPAAutoencoder +from pyhazards.models.wildfire_fpa_lstm import WildfireFPALSTM + + +def test_wildfire_fpa_public_registry_name(): + assert [name for name in available_models() if "wildfire_fpa" in name] == ["wildfire_fpa"] + + +def test_wildfire_fpa_classification_forward(): + model = build_model( + name="wildfire_fpa", + task="classification", + in_dim=8, + out_dim=5, + ) + + logits = model(torch.randn(4, 8)) + assert model.stage == "classification" + assert logits.shape == (4, 5) + + +def test_wildfire_fpa_forecast_forward(): + model = build_model( + name="wildfire_fpa", + task="forecasting", + input_dim=7, + output_dim=5, + lookback=12, + latent_dim=16, + ) + + preds = model(torch.randn(3, 12, 7)) + assert model.stage == "forecasting" + assert preds.shape == (3, 5) + + +def test_wildfire_fpa_forecast_reconstruction_output(): + model = build_model(name="wildfire_fpa", task="forecasting", input_dim=7, output_dim=5, lookback=12) + + preds, recon = model.forward_with_reconstruction(torch.randn(2, 12, 7)) + assert preds.shape == (2, 5) + assert recon.shape == (2, 12, 7) + +def test_wildfire_fpa_internal_lstm_forward(): + model = WildfireFPALSTM(input_dim=7, output_dim=5, lookback=12) + + preds = model(torch.randn(3, 12, 7)) + assert preds.shape == (3, 5) + + +def test_wildfire_fpa_autoencoder_forward(): + model = 
WildfireFPAAutoencoder(input_dim=7, lookback=12, latent_dim=16) + + recon = model(torch.randn(2, 12, 7)) + assert recon.shape == (2, 12, 7) + + +def test_added_wildfire_public_methods_forward(): + weekly_x = torch.randn(2, 12, 7) + spread_x = torch.randn(2, 12, 16, 16) + temporal_spread_x = torch.randn(2, 4, 6, 16, 16) + + forecasting = build_model( + name="wildfire_forecasting", + task="forecasting", + input_dim=7, + output_dim=5, + lookback=12, + ) + asufm = build_model( + name="asufm", + task="forecasting", + input_dim=7, + output_dim=5, + lookback=12, + ) + spread_ts = build_model( + name="wildfirespreadts", + task="segmentation", + history=4, + in_channels=6, + ) + forefire = build_model(name="forefire", task="segmentation", in_channels=12) + wrf_sfire = build_model(name="wrf_sfire", task="segmentation", in_channels=12) + firecastnet = build_model(name="firecastnet", task="segmentation", in_channels=12) + + assert forecasting(weekly_x).shape == (2, 5) + assert asufm(weekly_x).shape == (2, 5) + assert spread_ts(temporal_spread_x).shape == (2, 1, 16, 16) + assert forefire(spread_x).shape == (2, 1, 16, 16) + assert wrf_sfire(spread_x).shape == (2, 1, 16, 16) + assert firecastnet(spread_x).shape == (2, 1, 16, 16) diff --git a/tests/test_fpa_fod_trainer_smoke.py b/tests/test_fpa_fod_trainer_smoke.py new file mode 100644 index 00000000..c69971e8 --- /dev/null +++ b/tests/test_fpa_fod_trainer_smoke.py @@ -0,0 +1,29 @@ +import torch + +from pyhazards.datasets import load_dataset +from pyhazards.engine import Trainer +from pyhazards.models import build_model + + +def test_fpa_fod_trainer_smoke(): + bundle = load_dataset("fpa_fod_tabular", micro=True, task="cause").load() + train = bundle.get_split("train") + + model = build_model( + name="wildfire_fpa", + task="classification", + in_dim=train.inputs.shape[1], + out_dim=int(train.targets.max().item() + 1), + ) + trainer = Trainer(model=model, mixed_precision=False, device="cpu") + optimizer = 
torch.optim.Adam(model.parameters(), lr=1e-3) + loss_fn = torch.nn.CrossEntropyLoss() + + trainer.fit( + bundle, + optimizer=optimizer, + loss_fn=loss_fn, + max_epochs=1, + batch_size=32, + num_workers=0, + ) diff --git a/tests/test_interactive_map.py b/tests/test_interactive_map.py new file mode 100644 index 00000000..8a00fd30 --- /dev/null +++ b/tests/test_interactive_map.py @@ -0,0 +1,61 @@ +from pyhazards import RAI_FIRE_URL, open_interactive_map +from pyhazards.__main__ import main + + +def test_open_interactive_map_returns_url_without_browser(): + assert open_interactive_map(open_browser=False) == RAI_FIRE_URL + + +def test_open_interactive_map_attempts_browser(monkeypatch): + calls = [] + + def fake_open(url, new=0): + calls.append((url, new)) + return True + + monkeypatch.setattr("pyhazards.interactive_map._can_launch_browser", lambda: True) + monkeypatch.setattr("pyhazards.interactive_map.webbrowser.open", fake_open) + + assert open_interactive_map() == RAI_FIRE_URL + assert calls == [(RAI_FIRE_URL, 2)] + + +def test_open_interactive_map_swallows_browser_errors(monkeypatch): + def fake_open(url, new=0): + raise RuntimeError("browser unavailable") + + monkeypatch.setattr("pyhazards.interactive_map._can_launch_browser", lambda: True) + monkeypatch.setattr("pyhazards.interactive_map.webbrowser.open", fake_open) + + assert open_interactive_map() == RAI_FIRE_URL + + +def test_open_interactive_map_skips_browser_when_headless(monkeypatch): + calls = [] + + def fake_open(url, new=0): + calls.append((url, new)) + return True + + monkeypatch.setattr("pyhazards.interactive_map._can_launch_browser", lambda: False) + monkeypatch.setattr("pyhazards.interactive_map.webbrowser.open", fake_open) + + assert open_interactive_map() == RAI_FIRE_URL + assert calls == [] + + +def test_cli_map_prints_url(monkeypatch, capsys): + calls = [] + + def fake_open_map(open_browser=True): + calls.append(open_browser) + return RAI_FIRE_URL + + 
monkeypatch.setattr("pyhazards.__main__.open_interactive_map", fake_open_map) + + assert main(["map"]) == 0 + out = capsys.readouterr().out + + assert "RAI Fire interactive map" in out + assert RAI_FIRE_URL in out + assert calls == [True] diff --git a/tests/test_model_catalog.py b/tests/test_model_catalog.py new file mode 100644 index 00000000..a1f4b13b --- /dev/null +++ b/tests/test_model_catalog.py @@ -0,0 +1,60 @@ +from pyhazards.model_catalog import ( + load_model_cards, + model_catalog_alignment_issues, + render_api_page, + render_model_page, +) + + +def test_model_catalog_aligns_with_registry() -> None: + cards = load_model_cards() + assert not model_catalog_alignment_issues(cards) + + +def test_model_page_lists_generated_hazard_sections() -> None: + cards = load_model_cards() + page = render_model_page(cards) + assert "At a Glance" in page + assert "Catalog by Hazard" in page + assert "Recommended Entry Points" in page + assert "Programmatic Use" in page + assert ".. tab-set::" in page + assert page.index(".. tab-item:: Wildfire") < page.index(".. tab-item:: Earthquake") + assert ".. tab-item:: Flood" in page + assert ".. tab-item:: Tropical Cyclone" in page + assert ".. 
tab-item:: Hurricane" not in page + assert ":doc:`DNN-LSTM-AutoEncoder `" in page + assert ":doc:`Wildfire Forecasting `" in page + assert ":doc:`WildfireSpreadTS `" in page + assert ":doc:`ASUFM `" in page + assert ":doc:`ForeFire Adapter `" in page + assert ":doc:`WRF-SFIRE Adapter `" in page + assert ":doc:`FireCastNet `" in page + assert ":doc:`WaveCastNet `" in page + assert ":doc:`GraphCast TC Adapter `" in page + assert "Wildfire Danger Prediction and Understanding with Deep Learning" in page + assert "`Repository `_" in page + assert page.count("Implemented Models") == 5 + assert page.count("Experimental Adapters") == 2 + assert "Core Baselines" not in page + assert "Variants and Additional Implementations" not in page + assert page.count(":doc:`DNN-LSTM-AutoEncoder `") == 1 + assert page.count(":doc:`WaveCastNet `") == 1 + assert ":doc:`Wildfire Mamba `" not in page + assert ":doc:`DNN `" not in page + assert ":doc:`LSTM-AutoEncoder `" not in page + assert ":doc:`LSTM `" not in page + + +def test_hidden_models_are_omitted_from_public_catalog_pages() -> None: + cards = load_model_cards() + api_page = render_api_page(cards) + assert ":doc:`Wildfire Mamba `" not in api_page + assert api_page.count("Implemented Models") == 4 + assert api_page.count("Experimental Adapters") == 1 + assert "Core Baselines" not in api_page + assert "Variants and Additional Implementations" not in api_page + assert ":doc:`GraphCast TC Adapter `" in api_page + assert "Developer Registry Workflow" in api_page + assert "Catalog Summary" in api_page + assert "Hurricane" not in api_page diff --git a/tests/test_report_exports.py b/tests/test_report_exports.py new file mode 100644 index 00000000..69377ae5 --- /dev/null +++ b/tests/test_report_exports.py @@ -0,0 +1,28 @@ +import csv +import json + +from pyhazards.reports import BenchmarkReport, export_report_bundle + + +def test_export_report_bundle_writes_requested_formats(tmp_path): + report = BenchmarkReport( + 
benchmark_name="dummy_benchmark", + hazard_task="earthquake.picking", + metrics={"mae": 0.5, "f1": 0.9}, + metadata={"split": "test"}, + ) + paths = export_report_bundle(report, tmp_path, formats=["json", "md", "csv"]) + + json_payload = json.loads((tmp_path / "dummy_benchmark.json").read_text(encoding="utf-8")) + assert json_payload["hazard_task"] == "earthquake.picking" + assert json_payload["metrics"]["mae"] == 0.5 + + markdown = (tmp_path / "dummy_benchmark.md").read_text(encoding="utf-8") + assert "# dummy_benchmark" in markdown + assert "`earthquake.picking`" in markdown + + with (tmp_path / "dummy_benchmark.csv").open("r", encoding="utf-8", newline="") as handle: + rows = list(csv.DictReader(handle)) + assert rows[0]["mae"] == "0.5" + assert rows[0]["metadata.split"] == "test" + assert paths["json"].endswith("dummy_benchmark.json") diff --git a/tests/test_tasks.py b/tests/test_tasks.py new file mode 100644 index 00000000..1833eda4 --- /dev/null +++ b/tests/test_tasks.py @@ -0,0 +1,24 @@ +import pytest + +from pyhazards.tasks import available_hazard_tasks, get_hazard_task, has_hazard_task + + +def test_available_hazard_tasks_contains_wave_one_targets(): + names = available_hazard_tasks() + assert "earthquake.picking" in names + assert "wildfire.spread" in names + assert "flood.streamflow" in names + assert "tc.track_intensity" in names + + +def test_get_hazard_task_returns_structured_record(): + task = get_hazard_task("wildfire.spread") + assert task.hazard == "wildfire" + assert task.target == "spread" + assert "burned-area" in task.description + + +def test_unknown_hazard_task_raises(): + assert not has_hazard_task("unknown.task") + with pytest.raises(KeyError): + get_hazard_task("unknown.task") diff --git a/tests/test_tc_benchmarks.py b/tests/test_tc_benchmarks.py new file mode 100644 index 00000000..76bce908 --- /dev/null +++ b/tests/test_tc_benchmarks.py @@ -0,0 +1,13 @@ +from pyhazards.configs import load_experiment_config +from pyhazards.engine.runner 
import BenchmarkRunner + + +def test_tc_vertical_slice(tmp_path): + config = load_experiment_config("pyhazards/configs/tc/hurricast_smoke.yaml") + summary = BenchmarkRunner().run(config, output_dir=str(tmp_path)) + + assert summary.benchmark_name == "tc" + assert summary.hazard_task == "tc.track_intensity" + assert "track_error" in summary.metrics + assert "intensity_mae" in summary.metrics + assert summary.metadata["source_dataset"] == "ibtracs_tracks" diff --git a/tests/test_tc_models.py b/tests/test_tc_models.py new file mode 100644 index 00000000..f62e41e5 --- /dev/null +++ b/tests/test_tc_models.py @@ -0,0 +1,48 @@ +import torch + +from pyhazards.configs import load_experiment_config +from pyhazards.engine.runner import BenchmarkRunner +from pyhazards.models import build_model + + +def test_storm_breadth_models_forward(): + x = torch.randn(2, 6, 8) + model_names = [ + "tropicalcyclone_mlp", + "tropicyclonenet", + "saf_net", + "tcif_fusion", + "graphcast_tc", + "pangu_tc", + "fourcastnet_tc", + ] + for name in model_names: + kwargs = { + "input_dim": 8, + "horizon": 5, + "output_dim": 3, + } + if name in {"tropicalcyclone_mlp", "fourcastnet_tc"}: + kwargs["history"] = 6 + model = build_model(name=name, task="regression", **kwargs) + preds = model(x) + assert preds.shape == (2, 5, 3) + + +def test_tc_breadth_configs(tmp_path): + for config_name in [ + "pyhazards/configs/tc/tropicalcyclone_mlp_smoke.yaml", + "pyhazards/configs/tc/tropicyclonenet_smoke.yaml", + "pyhazards/configs/tc/saf_net_smoke.yaml", + "pyhazards/configs/tc/tcif_fusion_smoke.yaml", + "pyhazards/configs/tc/graphcast_tc_smoke.yaml", + "pyhazards/configs/tc/pangu_tc_smoke.yaml", + "pyhazards/configs/tc/fourcastnet_tc_smoke.yaml", + ]: + summary = BenchmarkRunner().run( + load_experiment_config(config_name), + output_dir=str(tmp_path), + ) + assert summary.hazard_task == "tc.track_intensity" + assert "track_error" in summary.metrics + assert "intensity_mae" in summary.metrics diff --git 
a/tests/test_wildfire_benchmarks.py b/tests/test_wildfire_benchmarks.py new file mode 100644 index 00000000..0a1dc681 --- /dev/null +++ b/tests/test_wildfire_benchmarks.py @@ -0,0 +1,35 @@ +from pyhazards.configs import load_experiment_config +from pyhazards.engine.runner import BenchmarkRunner + + +def test_wildfire_danger_vertical_slice(tmp_path): + config = load_experiment_config("pyhazards/configs/wildfire/wildfire_danger_smoke.yaml") + summary = BenchmarkRunner().run(config, output_dir=str(tmp_path)) + + assert summary.benchmark_name == "wildfire" + assert summary.hazard_task == "wildfire.danger" + assert "accuracy" in summary.metrics + assert "auc" in summary.metrics + + +def test_wildfire_spread_vertical_slice(tmp_path): + config = load_experiment_config("pyhazards/configs/wildfire/wildfire_spread_smoke.yaml") + summary = BenchmarkRunner().run(config, output_dir=str(tmp_path)) + + assert summary.hazard_task == "wildfire.spread" + assert "iou" in summary.metrics + assert "burned_area_mae" in summary.metrics + + +def test_added_wildfire_breadth_configs(tmp_path): + expectations = { + "pyhazards/configs/wildfire/wildfire_forecasting_smoke.yaml": "mae", + "pyhazards/configs/wildfire/asufm_smoke.yaml": "mae", + "pyhazards/configs/wildfire/wildfirespreadts_smoke.yaml": "burned_area_mae", + "pyhazards/configs/wildfire/forefire_smoke.yaml": "burned_area_mae", + "pyhazards/configs/wildfire/wrf_sfire_smoke.yaml": "burned_area_mae", + "pyhazards/configs/wildfire/firecastnet_smoke.yaml": "burned_area_mae", + } + for path, metric_name in expectations.items(): + summary = BenchmarkRunner().run(load_experiment_config(path), output_dir=str(tmp_path)) + assert metric_name in summary.metrics