diff --git a/cli/cli.py b/cli/cli.py index f003dae4..6be0e16e 100644 --- a/cli/cli.py +++ b/cli/cli.py @@ -13,8 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== +import os +import re +from collections import Counter, defaultdict from enum import Enum from pathlib import Path +from textwrap import shorten, wrap import typer from dotenv import load_dotenv @@ -32,8 +36,56 @@ class OutputFormat(str, Enum): json = "json" +class ThemeMode(str, Enum): + auto = "auto" + light = "light" + dark = "dark" + + +def _resolve_theme(theme: ThemeMode) -> ThemeMode: + if theme != ThemeMode.auto: + return theme + + env_theme = os.environ.get("OCO_THEME", "").strip().lower() + if env_theme in (ThemeMode.light.value, ThemeMode.dark.value): + return ThemeMode(env_theme) + + colorfgbg = os.environ.get("COLORFGBG", "") + if colorfgbg: + try: + bg = int(colorfgbg.split(";")[-1]) + return ThemeMode.light if bg >= 8 else ThemeMode.dark + except (TypeError, ValueError): + pass + + return ThemeMode.dark + + +def _palette(theme: ThemeMode) -> dict[str, str]: + mode = _resolve_theme(theme) + if mode == ThemeMode.light: + return { + "ok": typer.colors.GREEN, + "issue": typer.colors.RED, + "accent": typer.colors.BLUE, + "muted": typer.colors.BLACK, + "field": typer.colors.RED, + } + return { + "ok": typer.colors.GREEN, + "issue": typer.colors.MAGENTA, + "accent": typer.colors.BRIGHT_BLUE, + "muted": typer.colors.BRIGHT_BLACK, + "field": typer.colors.BRIGHT_YELLOW, + } + + @cli.command("initialize-lexicon") -def initialize_lexicon(): +def initialize_lexicon( + theme: ThemeMode = typer.Option( + ThemeMode.auto, "--theme", help="Color theme: auto, light, dark." 
+ ), +): from core.initializers import init_lexicon init_lexicon() @@ -47,7 +99,10 @@ def associate_assets_command( file_okay=False, dir_okay=True, readable=True, - ) + ), + theme: ThemeMode = typer.Option( + ThemeMode.auto, "--theme", help="Color theme: auto, light, dark." + ), ): from cli.service_adapter import associate_assets @@ -62,7 +117,10 @@ def well_inventory_csv( file_okay=True, dir_okay=False, readable=True, - ) + ), + theme: ThemeMode = typer.Option( + ThemeMode.auto, "--theme", help="Color theme: auto, light, dark." + ), ): """ parse and upload a csv to database @@ -70,7 +128,164 @@ def well_inventory_csv( # TODO: use the same helper function used by api to parse and upload a WI csv from cli.service_adapter import well_inventory_csv - well_inventory_csv(file_path) + result = well_inventory_csv(file_path) + payload = result.payload if isinstance(result.payload, dict) else {} + summary = payload.get("summary", {}) + validation_errors = payload.get("validation_errors", []) + detail = payload.get("detail") + colors = _palette(theme) + + if result.exit_code == 0: + typer.secho("[WELL INVENTORY IMPORT] SUCCESS", fg=colors["ok"], bold=True) + else: + typer.secho( + "[WELL INVENTORY IMPORT] COMPLETED WITH ISSUES", + fg=colors["issue"], + bold=True, + ) + typer.secho("=" * 72, fg=colors["accent"]) + + if summary: + processed = summary.get("total_rows_processed", 0) + imported = summary.get("total_rows_imported", 0) + rows_with_issues = summary.get("validation_errors_or_warnings", 0) + typer.secho("SUMMARY", fg=colors["accent"], bold=True) + label_width = 16 + value_width = 8 + typer.secho(" " + "-" * (label_width + 3 + value_width), fg=colors["muted"]) + typer.secho( + f" {'processed':<{label_width}} | {processed:>{value_width}}", + fg=colors["accent"], + ) + typer.secho( + f" {'imported':<{label_width}} | {imported:>{value_width}}", + fg=colors["ok"], + ) + issue_color = colors["issue"] if rows_with_issues else colors["ok"] + typer.secho( + f" 
{'rows_with_issues':<{label_width}} | {rows_with_issues:>{value_width}}", + fg=issue_color, + ) + typer.echo() + + if validation_errors: + typer.secho("VALIDATION", fg=colors["accent"], bold=True) + typer.secho( + f"Validation errors: {len(validation_errors)}", + fg=colors["issue"], + bold=True, + ) + common_errors = Counter() + for err in validation_errors: + field = err.get("field", "unknown") + message = err.get("error") or err.get("msg") or "validation error" + common_errors[(field, message)] += 1 + + if common_errors: + typer.secho( + "Most common validation errors:", fg=colors["accent"], bold=True + ) + field_width = 28 + count_width = 5 + error_width = 100 + typer.secho( + f" {'#':>2} | {'field':<{field_width}} | {'count':>{count_width}} | error", + fg=colors["muted"], + bold=True, + ) + typer.secho( + " " + "-" * (2 + 3 + field_width + 3 + count_width + 3 + error_width), + fg=colors["muted"], + ) + for idx, ((field, message), count) in enumerate( + common_errors.most_common(5), start=1 + ): + error_one_line = shorten( + str(message).replace("\n", " "), + width=error_width, + placeholder="...", + ) + field_text = shorten(str(field), width=field_width, placeholder="...") + field_part = typer.style( + f"{field_text:<{field_width}}", fg=colors["field"], bold=True + ) + count_part = f"{int(count):>{count_width}}" + idx_part = typer.style(f"{idx:>2}", fg=colors["issue"]) + error_part = typer.style(error_one_line, fg=colors["issue"]) + typer.echo(f" {idx_part} | {field_part} | {count_part} | {error_part}") + typer.echo() + + grouped_errors = defaultdict(list) + for err in validation_errors: + row = err.get("row", "?") + grouped_errors[row].append(err) + + def _row_sort_key(row_value): + try: + return (0, int(row_value)) + except (TypeError, ValueError): + return (1, str(row_value)) + + max_errors_to_show = 10 + shown = 0 + first_group = True + for row in sorted(grouped_errors.keys(), key=_row_sort_key): + if shown >= max_errors_to_show: + break + + row_errors = 
grouped_errors[row] + if not first_group: + typer.secho(" " + "-" * 56, fg=colors["muted"]) + first_group = False + typer.secho( + f" Row {row} ({len(row_errors)} issue{'s' if len(row_errors) != 1 else ''})", + fg=colors["accent"], + bold=True, + ) + + for idx, err in enumerate(row_errors, start=1): + if shown >= max_errors_to_show: + break + field = err.get("field", "unknown") + message = err.get("error") or err.get("msg") or "validation error" + input_value = err.get("value") + prefix_raw = f" {idx}. " + field_raw = f"{field}:" + msg_chunks = wrap( + str(message), + width=max(20, 200 - len(prefix_raw) - len(field_raw) - 1), + ) or [""] + prefix = typer.style(prefix_raw, fg=colors["issue"]) + field_part = typer.style(field_raw, fg=colors["field"], bold=True) + first_msg_part = typer.style(msg_chunks[0], fg=colors["issue"]) + typer.echo(f"{prefix}{field_part} {first_msg_part}") + msg_indent = " " * (len(prefix_raw) + len(field_raw) + 1) + for chunk in msg_chunks[1:]: + typer.secho(f"{msg_indent}{chunk}", fg=colors["issue"]) + if input_value is not None: + input_prefix = " input: " + input_chunks = wrap( + str(input_value), width=max(20, 200 - len(input_prefix)) + ) or [""] + typer.echo(f"{input_prefix}{input_chunks[0]}") + input_indent = " " * len(input_prefix) + for chunk in input_chunks[1:]: + typer.echo(f"{input_indent}{chunk}") + shown += 1 + typer.echo() + + if len(validation_errors) > shown: + typer.secho( + f"... and {len(validation_errors) - shown} more validation errors", + fg=colors["issue"], + ) + if detail: + typer.secho("ERRORS", fg=colors["accent"], bold=True) + typer.secho(f"Error: {detail}", fg=colors["issue"], bold=True) + + typer.secho("=" * 72, fg=colors["accent"]) + + raise typer.Exit(result.exit_code) @water_levels.command("bulk-upload") @@ -89,6 +304,9 @@ def water_levels_bulk_upload( "--output", help="Optional output format", ), + theme: ThemeMode = typer.Option( + ThemeMode.auto, "--theme", help="Color theme: auto, light, dark." 
+ ), ): """ parse and upload a csv @@ -96,12 +314,210 @@ def water_levels_bulk_upload( # TODO: use the same helper function used by api to parse and upload a WL csv from cli.service_adapter import water_levels_csv + colors = _palette(theme) + source = Path(file_path) + if not source.exists() or not source.is_file(): + typer.secho( + f"File not found: {source}", + fg=colors["issue"], + bold=True, + err=True, + ) + raise typer.Exit(1) + pretty_json = output_format == OutputFormat.json - water_levels_csv(file_path, pretty_json=pretty_json) + try: + result = water_levels_csv(file_path, pretty_json=pretty_json) + except (FileNotFoundError, PermissionError, IsADirectoryError) as exc: + typer.secho(str(exc), fg=colors["issue"], bold=True, err=True) + raise typer.Exit(1) + + # Backward compatibility for tests/mocks that return only an int. + if isinstance(result, int): + raise typer.Exit(result) + + if output_format == OutputFormat.json: + typer.echo(result.stdout) + raise typer.Exit(result.exit_code) + + payload = result.payload if isinstance(result.payload, dict) else {} + summary = payload.get("summary", {}) + validation_errors = payload.get("validation_errors", []) + + if result.exit_code == 0: + typer.secho("[WATER LEVEL IMPORT] SUCCESS", fg=colors["ok"], bold=True) + else: + typer.secho( + "[WATER LEVEL IMPORT] COMPLETED WITH ISSUES", + fg=colors["issue"], + bold=True, + ) + typer.secho("=" * 72, fg=colors["accent"]) + + parsed_validation: list[tuple[str | None, str, str]] = [] + for entry in validation_errors: + if isinstance(entry, dict): + row_value = entry.get("row") + row = str(row_value) if row_value is not None else None + field = str(entry.get("field") or "error").strip() + message = str( + entry.get("error") or entry.get("msg") or "validation error" + ).strip() + parsed_validation.append((row, field, message)) + continue + + text = str(entry).strip() + m = re.match(r"^Row\s+(\d+):\s*(.+)$", text) + if not m: + parsed_validation.append((None, "error", text)) 
+ continue + + row = m.group(1) + detail = m.group(2).strip() + if " - " in detail: + field, message = detail.split(" - ", 1) + elif req := re.match(r"^Missing required field '([^']+)'$", detail): + field = req.group(1).strip() + message = "Missing required field" + else: + field, message = "error", detail + parsed_validation.append((row, field.strip(), message.strip())) + + if summary: + processed = summary.get("total_rows_processed", 0) + imported = summary.get("total_rows_imported", 0) + rows_with_issues = summary.get("validation_errors_or_warnings", 0) + typer.secho("SUMMARY", fg=colors["accent"], bold=True) + label_width = 16 + value_width = 8 + typer.secho(" " + "-" * (label_width + 3 + value_width), fg=colors["muted"]) + typer.secho( + f" {'processed':<{label_width}} | {processed:>{value_width}}", + fg=colors["accent"], + ) + typer.secho( + f" {'imported':<{label_width}} | {imported:>{value_width}}", + fg=colors["ok"], + ) + issue_color = colors["issue"] if rows_with_issues else colors["ok"] + typer.secho( + f" {'rows_with_issues':<{label_width}} | {rows_with_issues:>{value_width}}", + fg=issue_color, + ) + typer.echo() + + if parsed_validation: + summary_counts: Counter[tuple[str, str]] = Counter( + (field, message) for _row, field, message in parsed_validation + ) + + if summary_counts: + typer.secho("VALIDATION SUMMARY", fg=colors["accent"], bold=True) + field_width = 28 + count_width = 5 + error_width = 100 + typer.secho( + f" {'#':>2} | {'field':<{field_width}} | {'count':>{count_width}} | error", + fg=colors["muted"], + bold=True, + ) + typer.secho( + " " + "-" * (2 + 3 + field_width + 3 + count_width + 3 + error_width), + fg=colors["muted"], + ) + for idx, ((field, message), count) in enumerate( + summary_counts.most_common(5), start=1 + ): + field_text = shorten(str(field), width=field_width, placeholder="...") + error_one_line = shorten( + str(message).replace("\n", " "), + width=error_width, + placeholder="...", + ) + idx_part = 
typer.style(f"{idx:>2}", fg=colors["issue"]) + field_part = typer.style( + f"{field_text:<{field_width}}", fg=colors["field"], bold=True + ) + count_part = f"{int(count):>{count_width}}" + error_part = typer.style(error_one_line, fg=colors["issue"]) + typer.echo(f" {idx_part} | {field_part} | {count_part} | {error_part}") + typer.echo() + + if validation_errors: + typer.secho("VALIDATION", fg=colors["accent"], bold=True) + typer.secho( + f"Validation errors: {len(validation_errors)}", + fg=colors["issue"], + bold=True, + ) + + row_grouped: dict[str, list[tuple[str, str]]] = defaultdict(list) + generic_errors: list[str] = [] + for row, field, message in parsed_validation: + if row is None: + if field and field != "error": + generic_errors.append(f"{field}: {message}") + else: + generic_errors.append(message) + continue + row_grouped[row].append((field, message)) + + max_errors_to_show = 10 + shown = 0 + first_group = True + for row in sorted( + row_grouped.keys(), key=lambda r: int(r) if str(r).isdigit() else 10**9 + ): + if shown >= max_errors_to_show: + break + if not first_group: + typer.secho(" " + "-" * 56, fg=colors["muted"]) + first_group = False + errors = row_grouped[row] + typer.secho( + f" Row {row} ({len(errors)} issue{'s' if len(errors) != 1 else ''})", + fg=colors["accent"], + bold=True, + ) + for idx, (field, message) in enumerate(errors, start=1): + if shown >= max_errors_to_show: + break + prefix_raw = f" {idx}. 
" + field_raw = f"{field}:" + msg_chunks = wrap( + str(message), + width=max(20, 200 - len(prefix_raw) - len(field_raw) - 1), + ) or [""] + prefix = typer.style(prefix_raw, fg=colors["issue"]) + field_part = typer.style(field_raw, fg=colors["field"], bold=True) + first_msg_part = typer.style(msg_chunks[0], fg=colors["issue"]) + typer.echo(f"{prefix}{field_part} {first_msg_part}") + msg_indent = " " * (len(prefix_raw) + len(field_raw) + 1) + for chunk in msg_chunks[1:]: + typer.secho(f"{msg_indent}{chunk}", fg=colors["issue"]) + shown += 1 + typer.echo() + + for entry in generic_errors[: max(0, max_errors_to_show - shown)]: + typer.secho(f" - {entry}", fg=colors["issue"]) + shown += 1 + + if len(validation_errors) > shown: + typer.secho( + f"... and {len(validation_errors) - shown} more validation errors", + fg=colors["issue"], + ) + + typer.secho("=" * 72, fg=colors["accent"]) + raise typer.Exit(result.exit_code) @data_migrations.command("list") -def data_migrations_list(): +def data_migrations_list( + theme: ThemeMode = typer.Option( + ThemeMode.auto, "--theme", help="Color theme: auto, light, dark." + ), +): from data_migrations.registry import list_migrations migrations = list_migrations() @@ -114,7 +530,11 @@ def data_migrations_list(): @data_migrations.command("status") -def data_migrations_status(): +def data_migrations_status( + theme: ThemeMode = typer.Option( + ThemeMode.auto, "--theme", help="Color theme: auto, light, dark." + ), +): from db.engine import session_ctx from data_migrations.runner import get_status @@ -138,6 +558,9 @@ def data_migrations_run( force: bool = typer.Option( False, "--force", help="Re-run even if already applied." ), + theme: ThemeMode = typer.Option( + ThemeMode.auto, "--theme", help="Color theme: auto, light, dark." 
+ ), ): from db.engine import session_ctx from data_migrations.runner import run_migration_by_id @@ -157,6 +580,9 @@ def data_migrations_run_all( force: bool = typer.Option( False, "--force", help="Re-run non-repeatable migrations." ), + theme: ThemeMode = typer.Option( + ThemeMode.auto, "--theme", help="Color theme: auto, light, dark." + ), ): from db.engine import session_ctx from data_migrations.runner import run_all @@ -177,6 +603,9 @@ def alembic_upgrade_and_data( force: bool = typer.Option( False, "--force", help="Re-run non-repeatable migrations." ), + theme: ThemeMode = typer.Option( + ThemeMode.auto, "--theme", help="Color theme: auto, light, dark." + ), ): from alembic import command from alembic.config import Config diff --git a/cli/service_adapter.py b/cli/service_adapter.py index 4ab13f88..3e7eb770 100644 --- a/cli/service_adapter.py +++ b/cli/service_adapter.py @@ -21,15 +21,14 @@ from dataclasses import dataclass from pathlib import Path -from fastapi import UploadFile -from sqlalchemy import select - from db import Thing, Asset from db.engine import session_ctx +from fastapi import UploadFile from services.asset_helper import upload_and_associate from services.gcs_helper import get_storage_bucket, make_blob_name_and_uri from services.water_level_csv import bulk_upload_water_levels from services.well_inventory_csv import import_well_inventory_csv +from sqlalchemy import select @dataclass @@ -73,7 +72,7 @@ def water_levels_csv(source_file: Path | str, *, pretty_json: bool = False): result = bulk_upload_water_levels(source_file, pretty_json=pretty_json) if result.stderr: print(result.stderr, file=sys.stderr) - return result.exit_code + return result def associate_assets(source_directory: Path | str) -> list[str]: diff --git a/pyproject.toml b/pyproject.toml index fd4bbe3b..70d4bae8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,8 +69,8 @@ dependencies = [ "pyasn1==0.6.2", "pyasn1-modules==0.4.2", "pycparser==2.23", - "pydantic==2.11.7", - 
"pydantic-core==2.33.2", + "pydantic==2.12.5", + "pydantic-core==2.41.5", "pygments==2.19.2", "pyjwt==2.11.0", "pyproj==3.7.2", diff --git a/requirements.txt b/requirements.txt index 8a57ce8d..6f9bed2f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1152,32 +1152,57 @@ pycparser==2.23 \ # via # cffi # ocotilloapi -pydantic==2.11.7 \ - --hash=sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db \ - --hash=sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b +pydantic==2.12.5 \ + --hash=sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49 \ + --hash=sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d # via # fastapi # fastapi-pagination # ocotilloapi -pydantic-core==2.33.2 \ - --hash=sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56 \ - --hash=sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef \ - --hash=sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a \ - --hash=sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f \ - --hash=sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916 \ - --hash=sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a \ - --hash=sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849 \ - --hash=sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e \ - --hash=sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac \ - --hash=sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162 \ - --hash=sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc \ - --hash=sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5 \ - --hash=sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d \ - --hash=sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9 \ - 
--hash=sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9 \ - --hash=sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5 \ - --hash=sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9 \ - --hash=sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6 +pydantic-core==2.41.5 \ + --hash=sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90 \ + --hash=sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740 \ + --hash=sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33 \ + --hash=sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e \ + --hash=sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0 \ + --hash=sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34 \ + --hash=sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14 \ + --hash=sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375 \ + --hash=sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf \ + --hash=sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1 \ + --hash=sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553 \ + --hash=sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470 \ + --hash=sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2 \ + --hash=sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660 \ + --hash=sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c \ + --hash=sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008 \ + --hash=sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a \ + --hash=sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd \ + --hash=sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586 \ + 
--hash=sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869 \ + --hash=sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66 \ + --hash=sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d \ + --hash=sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07 \ + --hash=sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36 \ + --hash=sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e \ + --hash=sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612 \ + --hash=sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11 \ + --hash=sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c \ + --hash=sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a \ + --hash=sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf \ + --hash=sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858 \ + --hash=sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9 \ + --hash=sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2 \ + --hash=sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3 \ + --hash=sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23 \ + --hash=sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa \ + --hash=sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3 \ + --hash=sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d \ + --hash=sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9 \ + --hash=sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9 \ + --hash=sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e \ + --hash=sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb \ + --hash=sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0 # via # 
ocotilloapi # pydantic diff --git a/schemas/__init__.py b/schemas/__init__.py index 25a71d07..5a1d85af 100644 --- a/schemas/__init__.py +++ b/schemas/__init__.py @@ -16,6 +16,7 @@ from datetime import datetime, timezone, date from typing import Annotated +from core.enums import ReleaseStatus from pydantic import ( BaseModel, ConfigDict, @@ -26,8 +27,6 @@ from pydantic.json_schema import JsonSchemaValue from pydantic_core import core_schema -from core.enums import ReleaseStatus - DT_FMT = "%Y-%m-%dT%H:%M:%SZ" @@ -53,7 +52,12 @@ class BaseUpdateModel(BaseCreateModel): release_status: ReleaseStatus | None = None -def past_or_today_validator(value: date | datetime) -> date | datetime: +def past_or_today_validator( + value: date | datetime | None, +) -> date | datetime | None: + if value is None: + return None + if isinstance(value, datetime): if value.tzinfo is None: if value > datetime.now(): diff --git a/schemas/well_inventory.py b/schemas/well_inventory.py index 6b87c3f7..dd547725 100644 --- a/schemas/well_inventory.py +++ b/schemas/well_inventory.py @@ -19,15 +19,6 @@ import phonenumbers import utm -from pydantic import ( - BaseModel, - model_validator, - BeforeValidator, - validate_email, - AfterValidator, - field_validator, -) - from core.constants import STATE_CODES from core.enums import ( ElevationMethod, @@ -39,6 +30,15 @@ WellPurpose as WellPurposeEnum, MonitoringFrequency, ) +from phonenumbers import NumberParseException +from pydantic import ( + BaseModel, + model_validator, + BeforeValidator, + validate_email, + AfterValidator, + field_validator, +) from schemas import past_or_today_validator, PastOrTodayDatetime from services.util import convert_dt_tz_naive_to_tz_aware @@ -96,14 +96,21 @@ def phone_validator(phone_number_str): phone_number_str = phone_number_str.strip() if phone_number_str: - parsed_number = phonenumbers.parse(phone_number_str, "US") + try: + parsed_number = phonenumbers.parse(phone_number_str, "US") + except NumberParseException as e: + 
raise ValueError(f"Invalid phone number. {phone_number_str}") from e + if phonenumbers.is_valid_number(parsed_number): formatted_number = phonenumbers.format_number( parsed_number, phonenumbers.PhoneNumberFormat.E164 ) return formatted_number - else: - raise ValueError(f"Invalid phone number. {phone_number_str}") + + raise ValueError(f"Invalid phone number. {phone_number_str}") + + # Explicitly return None for empty strings after stripping. + return None def email_validator_function(email_str): diff --git a/services/water_level_csv.py b/services/water_level_csv.py index ff49fe12..f695fcd1 100644 --- a/services/water_level_csv.py +++ b/services/water_level_csv.py @@ -18,19 +18,19 @@ import csv import io import json +import re import uuid from dataclasses import dataclass from datetime import datetime from pathlib import Path from typing import Any, BinaryIO, Iterable, List +from db import Thing, FieldEvent, FieldActivity, Sample, Observation, Parameter +from db.engine import session_ctx from pydantic import BaseModel, ConfigDict, ValidationError, field_validator from sqlalchemy import select from sqlalchemy.orm import Session -from db import Thing, FieldEvent, FieldActivity, Sample, Observation, Parameter -from db.engine import session_ctx - # Required CSV columns for the bulk upload REQUIRED_FIELDS: List[str] = [ "field_staff", @@ -45,6 +45,11 @@ "data_quality", ] +HEADER_ALIASES: dict[str, str] = { + "measuring_person": "sampler", + "water_level_date_time": "measurement_date_time", +} + # Allow-list values for validation. These represent early MVP lexicon values. 
VALID_LEVEL_STATUSES = {"stable", "rising", "falling"} VALID_DATA_QUALITIES = {"approved", "provisional"} @@ -173,7 +178,7 @@ def bulk_upload_water_levels( headers, csv_rows = _read_csv(source_file) except FileNotFoundError: msg = f"File not found: {source_file}" - payload = _build_payload([], [], 0, 0, [msg]) + payload = _build_payload([], [], 0, 0, 1, errors=[msg]) stdout = _serialize_payload(payload, pretty_json) return BulkUploadResult(exit_code=1, stdout=stdout, stderr=msg, payload=payload) @@ -205,7 +210,7 @@ def bulk_upload_water_levels( summary = { "total_rows_processed": len(csv_rows), "total_rows_imported": len(created_rows) if not validation_errors else 0, - "validation_errors_or_warnings": len(validation_errors), + "validation_errors_or_warnings": _count_rows_with_issues(validation_errors), } payload = _build_payload( csv_rows, created_rows, **summary, errors=validation_errors @@ -222,6 +227,22 @@ def _serialize_payload(payload: dict[str, Any], pretty: bool) -> str: return json.dumps(payload, indent=2 if pretty else None) +def _count_rows_with_issues(errors: list[str]) -> int: + """ + Count unique row numbers represented in validation errors. + Falls back to total error count when row numbers are unavailable. 
+ """ + row_ids: set[int] = set() + for err in errors: + match = re.match(r"^Row\s+(\d+):", str(err)) + if match: + row_ids.add(int(match.group(1))) + + if row_ids: + return len(row_ids) + return len(errors) + + def _build_payload( csv_rows: Iterable[dict[str, Any]], created_rows: list[dict[str, Any]], @@ -261,14 +282,23 @@ def _read_csv( stream = io.StringIO(text) reader = csv.DictReader(stream) - rows = [ - { - k.strip(): (v.strip() if isinstance(v, str) else v or "") - for k, v in row.items() - } - for row in reader + rows: list[dict[str, str]] = [] + for row in reader: + normalized_row: dict[str, str] = {} + for k, v in row.items(): + if k is None: + continue + key = HEADER_ALIASES.get(k.strip(), k.strip()) + value = v.strip() if isinstance(v, str) else v or "" + # If both alias and canonical header are present, preserve first non-empty value. + if key in normalized_row and normalized_row[key] and not value: + continue + normalized_row[key] = value + rows.append(normalized_row) + + headers = [ + HEADER_ALIASES.get(h.strip(), h.strip()) for h in (reader.fieldnames or []) ] - headers = [h.strip() for h in reader.fieldnames or []] return headers, rows diff --git a/services/well_inventory_csv.py b/services/well_inventory_csv.py index 8f214319..247091a2 100644 --- a/services/well_inventory_csv.py +++ b/services/well_inventory_csv.py @@ -23,7 +23,6 @@ from itertools import groupby from typing import Set -from pydantic import ValidationError from shapely import Point from sqlalchemy import select, and_ from sqlalchemy.exc import DatabaseError @@ -41,8 +40,10 @@ Contact, PermissionHistory, Thing, + ThingContactAssociation, ) from db.engine import session_ctx +from pydantic import ValidationError from schemas.thing import CreateWell from schemas.well_inventory import WellInventoryRow from services.contact_helper import add_contact @@ -50,7 +51,44 @@ from services.thing_helper import add_thing from services.util import transform_srid, convert_ft_to_m -AUTOGEN_REGEX = 
re.compile(r"^[A-Za-z]{2}-$") +AUTOGEN_DEFAULT_PREFIX = "NM-" +AUTOGEN_PREFIX_REGEX = re.compile(r"^[A-Z]{2,3}-$") +AUTOGEN_TOKEN_REGEX = re.compile(r"^(?P<prefix>[A-Z]{2,3})\s*-\s*(?:x{4}|X{4})$") + + +def _extract_autogen_prefix(well_id: str | None) -> str | None: + """ + Return normalized auto-generation prefix when a placeholder token is provided. + + Supported forms: + - ``XY-`` (existing behavior) + - ``WL-XXXX`` / ``SAC-XXXX`` / ``ABC-XXXX`` (2-3 uppercase letter prefixes) + - blank value (uses default ``NM-`` prefix) + """ + # Normalize input + value = (well_id or "").strip() + + # Blank / missing value -> use default prefix + if not value: + return AUTOGEN_DEFAULT_PREFIX + + # Direct prefix form, e.g. "XY-" or "ABC-" + if AUTOGEN_PREFIX_REGEX.match(value): + # Ensure normalized trailing dash and uppercase + prefix = value[:-1].upper() + return f"{prefix}-" + + # Token form, e.g. "WL-XXXX", "SAC-xxxx", with optional spaces around "-" + m = AUTOGEN_TOKEN_REGEX.match(value) + if m: + prefix = m.group("prefix").upper() + return f"{prefix}-" + + token_match = AUTOGEN_TOKEN_REGEX.match(value) + if token_match: + return f"{token_match.group('prefix')}-" + + return None def import_well_inventory_csv(*args, **kw) -> dict: @@ -127,6 +165,7 @@ def _import_well_inventory_csv(session: Session, text: str, user: str): "row": 0, "field": f"{duplicates}", "error": "Duplicate columns found", + "value": duplicates, } ] @@ -353,12 +392,18 @@ def _make_row_models(rows, session): if all(key == row.get(key) for key in row.keys()): raise ValueError("Duplicate header row") - well_id = row.get("well_name_point_id") - if not well_id: + if "well_name_point_id" not in row: raise ValueError("Field required") - if AUTOGEN_REGEX.match(well_id): - well_id, offset = _generate_autogen_well_id(session, well_id, offset) + + well_id = row.get("well_name_point_id") + autogen_prefix = _extract_autogen_prefix(well_id) + if autogen_prefix: + well_id, offset = _generate_autogen_well_id( + session, 
autogen_prefix, offset + ) row["well_name_point_id"] = well_id + elif not well_id: + raise ValueError("Field required") if well_id in seen_ids: raise ValueError("Duplicate value for well_name_point_id") @@ -394,8 +439,13 @@ def _make_row_models(rows, session): else: error_msg = "Invalid value" + if field == "header": + value = ",".join(row.keys()) + else: + value = row.get(field) + validation_errors.append( - {"row": idx + 1, "field": field, "error": error_msg} + {"row": idx + 1, "field": field, "error": error_msg, "value": value} ) return models, validation_errors @@ -609,7 +659,37 @@ def _add_csv_row(session: Session, group: Group, model: WellInventoryRow, user) for idx in (1, 2): contact_dict = _make_contact(model, well, idx) if contact_dict: - contact = add_contact(session, contact_dict, user=user, commit=False) + existing_contact = session.scalars( + select(Contact) + .where( + and_( + Contact.name == contact_dict.get("name"), + Contact.organization == contact_dict.get("organization"), + ) + ) + .order_by(Contact.id.asc()) + ).first() + + if existing_contact: + association = session.scalars( + select(ThingContactAssociation) + .where( + and_( + ThingContactAssociation.thing_id == well.id, + ThingContactAssociation.contact_id == existing_contact.id, + ) + ) + .order_by(ThingContactAssociation.id.asc()) + ).first() + if not association: + session.add( + ThingContactAssociation( + thing_id=well.id, contact_id=existing_contact.id + ) + ) + contact = existing_contact + else: + contact = add_contact(session, contact_dict, user=user, commit=False) # Use the first created contact for permissions if available if contact_for_permissions is None: diff --git a/tests/features/data/water-levels-real-user-entered-data.csv b/tests/features/data/water-levels-real-user-entered-data.csv new file mode 100644 index 00000000..a41a1cf4 --- /dev/null +++ b/tests/features/data/water-levels-real-user-entered-data.csv @@ -0,0 +1,68 @@ 
+well_name_point_id,field_event_date_time,field_staff,field_staff_2,field_staff_3,water_level_date_time,measuring_person,sample_method,mp_height,level_status,hold(not saved),cut(not saved),depth_to_water_ft,data_quality,water_level_notes +OG-0079,2025-12-19T00:00:00,Geoff Rawling,,,2025-12-19T00:00:00,Geoff Rawling,Electric tape measurement (E-probe),4,,,,375.75,Water level accurate to within two hundreths of a foot, +OG-0081,2025-12-19T00:00:00,Geoff Rawling,,,2025-12-19T00:00:00,Geoff Rawling,Electric tape measurement (E-probe),3.55,,,,377.33,Water level accurate to within two hundreths of a foot, +OG-0082,2025-12-19T00:00:00,Geoff Rawling,,,2025-12-19T00:00:00,Geoff Rawling,Electric tape measurement (E-probe),3.65,,,,383.6,Water level accurate to within two hundreths of a foot, +OG-0084,2025-12-19T00:00:00,Geoff Rawling,,,2025-12-19T00:00:00,Geoff Rawling,Electric tape measurement (E-probe),3.9,,,,387.53,Water level accurate to within two hundreths of a foot, +OG-0086,2025-12-19T00:00:00,Geoff Rawling,,,2025-12-19T00:00:00,Geoff Rawling,Electric tape measurement (E-probe),2.8,,,,389.43,Water level accurate to within two hundreths of a foot, +OG-0087,2025-12-19T00:00:00,Geoff Rawling,,,2025-12-19T00:00:00,Geoff Rawling,Electric tape measurement (E-probe),2.7,,,,339.58,Water level accurate to within two hundreths of a foot, +OG-0094,2025-12-19T00:00:00,Geoff Rawling,,,2025-12-19T00:00:00,Geoff Rawling,Electric tape measurement (E-probe),2.4,,,,359.3,Water level accurate to within two hundreths of a foot, +OG-0093,2025-12-19T00:00:00,Geoff Rawling,,,2025-12-19T00:00:00,Geoff Rawling,Electric tape measurement (E-probe),2.6,,,,356.95,Water level accurate to within two hundreths of a foot, +OG-0092,2025-12-19T00:00:00,Geoff Rawling,,,2025-12-19T00:00:00,Geoff Rawling,Electric tape measurement (E-probe),2.55,,,,348.95,Water level accurate to within two hundreths of a foot, +OG-0002,2025-12-18T00:00:00,Geoff Rawling,,,2025-12-18T00:00:00,Geoff Rawling,Steel-tape 
measurement,0.17,,,,431.18,Water level accurate to nearest tenth of a foot (USGS accuracy level), +OG-0010,2025-12-18T00:00:00,Geoff Rawling,,,2025-12-18T00:00:00,Geoff Rawling,Steel-tape measurement,0.14,,,,368.69,Water level accurate to nearest tenth of a foot (USGS accuracy level), +OG-0016,2025-12-18T00:00:00,Geoff Rawling,,,2025-12-18T00:00:00,Geoff Rawling,Electric tape measurement (E-probe),0.4,,,,427.55,Water level accurate to within two hundreths of a foot,MP height changed in 2024 when pump was removed +OG-0027,2025-12-18T00:00:00,Geoff Rawling,,,2025-12-18T00:00:00,Geoff Rawling,Steel-tape measurement,1.15,,,,409.44,Water level accurate to nearest tenth of a foot (USGS accuracy level),"Difficult well, did not repeat measurement - tape got stuck in well!" +OG-0031,2025-12-18T00:00:00,Geoff Rawling,,,2025-12-18T00:00:00,Geoff Rawling,Electric tape measurement (E-probe),0,,,,418.55,Water level accurate to within two hundreths of a foot, +OG-0042,2025-12-18T00:00:00,Geoff Rawling,,,2025-12-18T00:00:00,Geoff Rawling,Electric tape measurement (E-probe),-0.1,,,,410.72,Water level accurate to within two hundreths of a foot, +OG-0067,2025-12-18T00:00:00,Geoff Rawling,,,2025-12-18T00:00:00,Geoff Rawling,Electric tape measurement (E-probe),0,,,,360.95,Water level accurate to within two hundreths of a foot, +OG-0072,2025-12-18T00:00:00,Geoff Rawling,,,2025-12-18T00:00:00,Geoff Rawling,Steel-tape measurement,0,,,,339.15,Water level accurate to within one foot, +CP-0019,2025-12-19T00:00:00,Geoff Rawling,,,2025-12-19T00:00:00,Geoff Rawling,Steel-tape measurement,1,,,,349.92,Water level accurate to nearest tenth of a foot (USGS accuracy level),"Difficult well, did not repeat measurement" +WL-0213,2025-09-18T12:33:00,Joe Beman,,,2025-09-18T12:33:00,Joe Beman,Steel-tape measurement,,,,,102.03,Water level accurate to within two hundreths of a foot,"Good cut. Storage reservoir appears to be full, possibly pumped recently. Gate code = 2020. 
WellIntel downloaded @ 12:17, new battery voltage = 12.8." +WL-0247,2025-09-18T09:15:00,Joe Beman,,,2025-09-18T09:15:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,30.98,Water level accurate to within two hundreths of a foot,"WellIntel downloaded, new battery voltage = 12.8." +RA-025,2025-09-18T08:10:00,Joe Beman,,,2025-09-18T08:10:00,Joe Beman,Electric tape measurement (E-probe),,Water level not affected,,,12.98,,"Diver 93% battery, downloaded @ 09:08, restarted 9/18 12 pm. Baro 86% battery." +RA-022,2025-09-17T14:50:00,Joe Beman,,,2025-09-17T14:50:00,Joe Beman,Electric tape measurement (E-probe),,Water level not affected,,,5.26,,"Diver 93% battery, downloaded @ 08:15, smart start 9/18 at 12 PM. No baro." +WL-0028,2025-09-17T11:50:00,Joe Beman,,,2025-09-17T11:50:00,Joe Beman,Electric tape measurement (E-probe),,Water level not affected,,,3.18,,"Diver 87% battery, smart start 9/17 at 12 PM. Baro 93% battery, smart start 9/17 at 12 PM. Baro has no nose cone, not sure if this is new." +AR-0209,2025-09-17T10:30:00,Joe Beman,,,2025-09-17T10:30:00,Joe Beman,Electric tape measurement (E-probe),,Water level not affected,,,6.78,,"Diver 93% battery, restart 9/17 at 12 PM. Baro 93% battery, restart 9/17 at 12 PM." +TV-196,2025-10-23T00:00:00,Joe Beman,,,2025-10-23T00:00:00,Joe Beman,null placeholder,,Obstruction was encountered in the well (no level recorded),,,,None,"No measurement taken. Pump installed since last visit, no place to measure and no way to remove transducer." +WL-0063,2025-10-28T09:00:00,Joe Beman,,,2025-10-28T09:00:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,22.25,Water level accurate to within two hundreths of a foot,"WellIntel downloaded at 08:45 AM, battery voltage = 12.6. Gateway was unplugged on 10/7, replaced + reset and got running again." 
+TV-157,2025-10-23T11:25:00,Joe Beman,,,2025-10-23T11:25:00,Joe Beman,Electric tape measurement (E-probe),,Water level not affected,,,159.99,,"Diver 93% battery, downloaded @ 11:35, smart start at 12 PM. Baro 86% battery, smart start at 12 PM." +WL-0005,2025-10-22T14:00:00,Joe Beman,,,2025-10-22T14:00:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,440.77,Water level accurate to within two hundreths of a foot,Spotty tape +WL-044,2025-10-22T14:35:00,Joe Beman,,,2025-10-22T14:35:00,Joe Beman,Sonic water level meter (acoustic pulse),,Water level not affected,,,487.5,,"Temperature setting 47 deg F. WellIntel downloaded @ 14:25, uploaded at home 10/24/25. New battery voltage = 12.9, forced read @ 14:38. Only sonic measurements at this location." +TC-316,2025-10-22T11:00:00,Joe Beman,,,2025-10-22T11:00:00,Joe Beman,Electric tape measurement (E-probe),,Water level not affected,,,271.8,,"Diver 86% battery, downloaded at 11:10, smart start at 12 PM. Baro 86% battery, smart start at 12 PM. " +QU-004,2025-10-22T10:05:00,Joe Beman,,,2025-10-22T10:05:00,Joe Beman,Electric tape measurement (E-probe),,Water level not affected,,,53.45,,"Diver 86% battery, downloaded @ 10:10, smart start at 12 PM. Baro 86% battery, smart start at 12 PM." +TV-121,2025-10-22T15:50:00,Joe Beman,,,2025-10-22T15:50:00,Joe Beman,Electric tape measurement (E-probe),,Water level not affected,,,121.02,,"Diver 84% battery, smart start 10/23 at 12 AM. No baro." +WL-0016,2025-01-22T09:25:00,Joe Beman,,,2025-01-22T09:25:00,Joe Beman,null placeholder,,Site was being pumped,,,,None,"No measurement because pump was running on arrival. Operator had to ""turn pump on by hand"" because tank was low due to something freezing. WellIntel downloaded at 09:25, new battery voltage = 12.7." +WL-0093,2025-01-23T07:55:00,Joe Beman,,,2025-01-23T07:55:00,Joe Beman,null placeholder,,Site was being pumped,,,,None,"No measurement because pump was running on arrival. 
WellIntel downloaded at 08:00, new battery voltage = 12.74." +WL-0152,2025-07-10T10:30:00,Joe Beman,RH,,2025-07-10T10:30:00,Joe Beman,Electric tape measurement (E-probe),,Water level not affected,,,312.07,, +WL-0153,2025-07-10T09:00:00,Joe Beman,RH,,2025-07-10T09:00:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,241.27,Water level accurate to nearest tenth of a foot (USGS accuracy level),WL accurate to 0.03 ft. Steel tape hit obstructions when attempting to use outside of sounding tube. Sounding tube was very damp so tape was spotty. E-probe couldn't get a good reading down sounding tube because too damp. +WL-0062,2025-07-10T12:40:00,Joe Beman,RH,,2025-07-10T12:40:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,29.45,Water level accurate to within two hundreths of a foot,"Obstructions in well, had difficult time settling on good measurement." +WL-0007,2025-07-17T09:45:00,Joe Beman,Henrion,,2025-07-17T09:45:00,Joe Beman,Electric tape measurement (E-probe),,Water level not affected,,,643.02,,Downloaded Eno file. In Joe's files as WSLOG000_2025_07_17. +WL-0016,2025-07-17T11:50:00,Joe Beman,Henrion,,2025-07-17T11:50:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,115.67,Water level accurate to within two hundreths of a foot,Tape gets caught on something below water surface past 124'. WellIntel downloaded at 11:45. +WL-0260,2025-07-17T12:30:00,Joe Beman,Henrion,,2025-07-17T12:30:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,143.02,Water level accurate to within two hundreths of a foot,Neighbor to south's well just went dry - owner says marijuana growers using more than their fair share of water. +WL-0357,2025-07-17T13:50:00,Joe Beman,Henrion,,2025-07-17T13:50:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,162.8,Water level accurate to within two hundreths of a foot,Obstruction below water level below 171'; had to try several attempts at measuring. 
+WL-0150,2025-07-24T08:25:00,Joe Beman,Henrion,,2025-07-24T08:25:00,Joe Beman,Steel-tape measurement,,Site was pumped recently,,,420,Water level accurate to nearest tenth of a foot (USGS accuracy level),Well was recently pumped and was recovering. Measurement accuracy of 0.05 ft. WellIntel read @ 08:04 and battery voltage at 12.4. +WL-0021,2025-07-24T09:50:00,Joe Beman,Henrion,,2025-07-24T09:50:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,40.93,Water level accurate to within two hundreths of a foot,WellIntel downloaded @ 09:39 and battery voltage = 12.6. +WL-0080,2025-07-24T10:50:00,Joe Beman,Henrion,,2025-07-24T10:50:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,220.5,Water level accurate to nearest tenth of a foot (USGS accuracy level),"Tape was wet and spotty. WellIntel downloaded - took several attempts to download, had to empty disk on laptop and power down logger to download full dataset. Battery voltage = 12.47." +WL-0330,2025-07-25T10:20:00,Joe Beman,Henrion,,2025-07-25T10:20:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,158.95,Water level accurate to within two hundreths of a foot, +PC-121,2025-08-25T09:25:00,Joe Beman,,,2025-08-25T09:25:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,167.09,Water level accurate to within two hundreths of a foot, +WL-0063,2025-08-14T11:20:00,Joe Beman,,,2025-08-14T11:20:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,22.08,Water level accurate to within two hundreths of a foot,"WellIntel downloaded at 11:05, new battery voltage = 12.5." +WL-0036,2025-08-14T08:20:00,Joe Beman,,,2025-08-14T08:20:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,78.16,Water level accurate to within two hundreths of a foot,"WellIntel downloaded @ 08:07, new battery voltage = 12.7." 
+BC-0166,2025-08-15T10:00:00,Joe Beman,,,2025-08-15T10:00:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,243.4,Water level accurate to within two hundreths of a foot,"WellIntel downloaded @ 09:30, battery voltage = 12.4." +SV-0122,2025-08-15T08:55:00,Joe Beman,,,2025-08-15T08:55:00,Joe Beman,Electric tape measurement (E-probe),,Water level not affected,,,135.6,,"Diver 83% battery, smart start 8/15 at 12 PM." +NM-23292,2025-08-15T08:10:00,Joe Beman,,,2025-08-15T08:10:00,Joe Beman,Electric tape measurement (E-probe),,Water level not affected,,,82.43,,"Baro 85% battery, downloaded @ 08:12, smart start at 12 PM. Diver 83% battery." +WL-0231,2025-09-03T11:45:00,Joe Beman,,,2025-09-03T11:45:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,83.36,Water level accurate to within two hundreths of a foot, +PB-0012,2025-09-03T09:40:00,Joe Beman,,,2025-09-03T09:40:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,11.09,Water level accurate to within two hundreths of a foot, +WL-0237,2025-09-03T14:25:00,Joe Beman,,,2025-09-03T14:25:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,14.45,Water level accurate to within two hundreths of a foot,"WellIntel downloaded at 14:21, battery voltage = 12.5." +WL-0232,2025-09-03T12:20:00,Joe Beman,,,2025-09-03T12:20:00,Joe Beman,Electric tape measurement (E-probe),,Water level not affected,,,70.78,,"Diver 91% battery, downloaded at 12:09, smart start at 12 PM. Baro 87% battery, downloaded at 12:09, smart start at 12 PM." +PB-0020,2025-09-03T08:15:00,Joe Beman,,,2025-09-03T08:15:00,Joe Beman,null placeholder,,Site was being pumped,,,,None,"Pump is running so no measurement taken. In future, can shut pump off @ breaker if well is running and tanks are not empty. WellIntel downloaded at 08:20, new battery voltage = 12.7." 
+RA-102,2025-09-04T12:10:00,Joe Beman,,,2025-09-04T12:10:00,Joe Beman,Electric tape measurement (E-probe),,Water level not affected,,,119.01,,"Randy Quintana (ranqnt@gmail.com) is an alternate contact. Craig and Randy are on the board and live near the well, no key needed to access well but is needed to access building if pump needs to be turned off." +WL-0356,2025-08-14T09:55:00,Joe Beman,,,2025-08-14T09:55:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,95.49,Water level accurate to within two hundreths of a foot, +WL-0121,2025-08-21T09:20:00,Joe Beman,,,2025-08-21T09:20:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,174.42,Water level accurate to within two hundreths of a foot, +WL-0123,2025-08-21T12:15:00,Joe Beman,,,2025-08-21T12:15:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,113.6,Water level accurate to within two hundreths of a foot, +WL-0179,2025-08-21T11:30:00,Joe Beman,,,2025-08-21T11:30:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,29.05,Water level accurate to within two hundreths of a foot, +WL-0183,2025-08-21T09:50:00,Joe Beman,,,2025-08-21T09:50:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,29.78,Water level accurate to within two hundreths of a foot, +WL-0206,2025-08-22T09:15:00,Joe Beman,,,2025-08-22T09:15:00,Joe Beman,Electric tape measurement (E-probe),,Water level not affected,,,41.57,,"Diver - new transducer YZ480, future start 8/22 at 12 PM. Baro smart start 8/22 at 12 PM." 
+WL-0207,2025-08-22T10:15:00,Joe Beman,,,2025-08-22T10:15:00,Joe Beman,Steel-tape measurement,,Water level not affected,,,32.59,Water level accurate to within two hundreths of a foot, +RA-140,2025-09-04T09:12:00,Joe Beman,,,,,Steel-tape measurement,,Site was pumped recently,,,48.88,Water level accurate to within two hundreths of a foot,Seemed to be recovering +RA-143,2025-09-04T10:40:00,Joe Beman,,,,,Steel-tape measurement,,Water level not affected,,,174.2,Water level accurate to within two hundreths of a foot, +RA-149,2025-09-04T,,,,,,null placeholder,,Site was pumped recently,,,,None,Unable to measure - DTW over 200' despite being 86' in June. Cut power to pump and waited but did not get above 200'. Tape was also wet and spotty. \ No newline at end of file diff --git a/tests/features/data/well-inventory-real-user-entered-data.csv b/tests/features/data/well-inventory-real-user-entered-data.csv new file mode 100644 index 00000000..b2a65a5e --- /dev/null +++ b/tests/features/data/well-inventory-real-user-entered-data.csv @@ -0,0 +1,130 @@ 
+project,well_name_point_id,site_name,date_time,field_staff,field_staff_2,field_staff_3,contact_1_name,contact_1_organization,contact_1_role,contact_1_type,contact_1_phone_1,contact_1_phone_1_type,contact_1_phone_2,contact_1_phone_2_type,contact_1_email_1,contact_1_email_1_type,contact_1_email_2,contact_1_email_2_type,contact_1_address_1_line_1,contact_1_address_1_line_2,contact_1_address_1_type,contact_1_address_1_state,contact_1_address_1_city,contact_1_address_1_postal_code,contact_1_address_2_line_1,contact_1_address_2_line_2,contact_1_address_2_type,contact_1_address_2_state,contact_1_address_2_city,contact_1_address_2_postal_code,contact_2_name,contact_2_role,contact_2_type,contact_2_phone_1,contact_2_phone_1_type,contact_2_phone_2,contact_2_phone_2_type,contact_2_email_1,contact_2_email_1_type,contact_2_email_2,contact_2_email_2_type,contact_2_address_1_line_1,contact_2_address_1_line_2,contact_2_address_1_type,contact_2_address_1_state,contact_2_address_1_city,contact_2_address_1_postal_code,contact_2_address_2_line_1,contact_2_address_2_line_2,contact_2_address_2_type,contact_2_address_2_state,contact_2_address_2_city,contact_2_address_2_postal_code,directions_to_site,specific_location_of_well,repeat_measurement_permission,sampling_permission,datalogger_installation_permission,public_availability_acknowledgement,result_communication_preference,contact_special_requests_notes,utm_easting,utm_northing,utm_zone,elevation_ft,elevation_method,ose_well_record_id,date_drilled,completion_source,total_well_depth_ft,historic_depth_to_water_ft,depth_source,well_pump_type,well_pump_depth_ft,is_open,datalogger_possible,casing_diameter_ft,measuring_point_height_ft,measuring_point_description,well_purpose,well_hole_status,monitoring_frequency,sampling_scenario_notes,well_notes,well_measuring_notes,water_notes,sample_possible,water_level_date_time,measuring_person,sample_method,mp_height,level_status,depth_to_water_ft,data_quality,water_level_notes,sample_collection_notes 
+Rio Arriba,RA-027,,2025-06-11T14:15:00,Person 001,Person 002,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Monitoring complete,Redacted note 001,,,,TRUE,,,,,,,,,Redacted note 001 +Rio Arriba,RA-092,,2025-06-09,Person 001,Person 002,,Person 003,,Owner,,505-555-0001,Mobile,,,,,,,Address Line 002,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 002,Redacted note 002,TRUE,TRUE,,TRUE,,,362254,4072390,,,,,,,,,,Submersible pump,,,,0.5,1.24,TOC,Domestic,"Active, pumping well",Monitoring complete,Redacted note 002,,,,TRUE,T08:55:00,,,,,92.15,,,Redacted note 002 +Rio Arriba,RA-093,,2025-06-09,Person 001,Person 002,,Person 004,,Owner,Primary,505-555-0002,Mobile,,,user001@example.com,Primary,,,Address Line 003,Address Line 003,Physical,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 003,Redacted note 003,TRUE,TRUE,TRUE,,,,361995,4072135,,,,,,,300,,,Submersible pump,,,,0.55,1.75,TOC,Domestic,"Active, pumping well",Monitoring complete,Redacted note 003,,,,TRUE,,,,,Site was pumped recently,185.7,,Redacted note 003,Redacted note 003 +Rio Arriba,RA-102,Redacted note 004,2025-06-12T13:00:00,Person 005,Person 006,,Person 007,Organization 001,Owner,Primary,505-555-0003,Mobile,,,user002@example.com,Primary,,,Address Line 004,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 004,Redacted note 004,TRUE,TRUE,,TRUE,,,405318,4013168,,,,,,,340,110,Drinking water watch,,,,,0.5,2.27,Top of sounding tube,Public supply,"Active, pumping well",Monitoring complete,,Redacted note 004,,,TRUE,,,,,,,,, +Rio Arriba,RA-103,Redacted note 005,2025-06-12T14:53:00,Person 005,,,Person 007,Organization 001,Owner,Primary,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,"20.98 (""Depth to Water"")",,,,,,,,,,,Monitoring complete,,,,,TRUE,,,,,Site was pumped recently,,,Redacted note 005,Redacted note 005 +Rio Arriba,RA-106,Redacted note 006,2025-06-12,Person 005,Person 006,,Person 008,,Owner,Primary,505-555-0004,Mobile,,,user003@example.com,Primary,,,Address Line 
006,,Physical,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 006,TRUE,TRUE,TRUE,TRUE,,,397891,3996992,,,,,,,,"12.66 (""Depth to Water"")",,Submersible pump,,,,0.6,1.9,TOC,Domestic,,Monitoring complete,Redacted note 006,,,,TRUE,,,,,Site was pumped recently,13.5,,Redacted note 006,Redacted note 006 +Rio Arriba,RA-107,Redacted note 007,2025-06-13T09:13:00,Person 005,Person 006,,Person 009,,Owner,Primary,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,"154.9 (""Depth to Water"")",,,,,,,,,,,Monitoring complete,Redacted note 007,,,,TRUE,,,,,,,,,Redacted note 007 +Rio Arriba,RA-108,Redacted note 008,2025-06-26,Person 005,Person 006,,Person 010,,Owner,Primary,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Monitoring complete,Redacted note 008,,,,TRUE,,,,,,,,,Redacted note 008 +Rio Arriba,RA-111,Redacted note 009,2025-06-26,Person 005,Person 006,,Person 010,,Owner,Primary,505-555-0005,Mobile,,,user004@example.com,Primary,,,Address Line 009,,Physical,,Anytown,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 009,,TRUE,TRUE,,TRUE,,,414222,4021553,,,,,,,600,,Owner,Submersible pump,,,,0.5,,TOC,Livestock,"Active, pumping well",Monitoring complete,Redacted note 009,,,,TRUE,,,,,Site was being pumped,,,Redacted note 009, +Rio Arriba,RA-115,Redacted note 010,2025-06-10T09:04:00,Person 001,Person 002,,Person 011,,Owner,Primary,505-555-0006,Mobile,,,user005@example.com,Primary,,,Address Line 010,,Physical,NM,Anytown,87010,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 010,TRUE,TRUE,TRUE,TRUE,,Redacted note 010,352876,4080253,,,Global positioning system (GPS),RG-87518,08/2007,,260,130,At the time of drilling.,Submersible pump,,,,0.55,1.55,West side of well.,Domestic,"Active, pumping well",Monitoring complete,,Redacted note 010,,,TRUE,,,,,,,,, +Rio Arriba,RA-116,Redacted note 011,2025-06-10T11:39:00,Person 001,Person 002,,Person 012,,Owner,Primary,505-555-0007,Mobile,,,user006@example.com,Primary,,,Address Line 
011,,Physical,,Anytown,87011,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 011,Redacted note 011,TRUE,TRUE,FALSE,TRUE,,Redacted note 011,351184,4065957,,,,,,,650,,,,,,,,,,Domestic,"Active, pumping well",Monitoring complete,Redacted note 011,Redacted note 011,Redacted note 011,,TRUE,,,,,Obstruction was encountered in the well (no level recorded),,,Redacted note 011,Redacted note 011 +Rio Arriba,RA-117,Redacted note 012,2025-06-10T12:26:00,Person 001,Person 002,,Person 013,,Owner,Primary,505-555-0008,Mobile,,,,,,,Address Line 012,,Physical,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 012,Redacted note 012,,,,,,Redacted note 012,350549,4066414,,,,,,,,,,Submersible pump,,,,0.46,2.12,PVC,Domestic,"Active, pumping well",Monitoring complete,Redacted note 012,,Redacted note 012,,TRUE,,,,,,,,Redacted note 012,Redacted note 012 +Rio Arriba,RA-118,Redacted note 013,2025-06-10T14:15:00,Person 001,Person 002,,Person 014,Organization 002,Contact,Primary,505-555-0009,Mobile,,,user007@example.com,Primary,,,Address Line 013,,Physical,,,,Address Line 013,,Mailing,NM,Anytown,87013,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 013,Redacted note 013,TRUE,,TRUE,TRUE,,Redacted note 013,361207,4063581,,,,,,,78,,Owner,Submersible pump,,,,0.7,2.09,Top of red steel cap on well. 
Included height of cement in MP measurement.,Public supply,"Active, pumping well",Monitoring complete,Redacted note 013,,,,TRUE,,,,,,,,,Redacted note 013 +Rio Arriba,RA-119,Redacted note 014,2025-06-10T15:08:00,Person 001,Person 002,,Person 015,Organization 003,Owner,Primary,505-555-0010,Mobile,,,user008@example.com,Primary,,,Address Line 014,,Physical,,,,Address Line 014,,Mailing,NM,Anytown,87014,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 014,Redacted note 014,TRUE,TRUE,FALSE,TRUE,,Redacted note 014,360543,4064607,,,,,,,,,,Submersible pump,,,,0.47,-3.4,TOC,Domestic,"Active, pumping well",Monitoring complete,Redacted note 014,,,,TRUE,,,,,,,,,Redacted note 014 +Rio Arriba,RA-120,Redacted note 015,2025-06-11T09:20:00,Person 001,Person 002,,Person 016,Organization 004,Owner,Primary,505-555-0011,Home,,,user009@example.com,Primary,,,Address Line 015,,Physical,,Anytown,,Address Line 015,,Mailing,,Anytown,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 015,Redacted note 015,TRUE,TRUE,FALSE,TRUE,,Redacted note 015,360765,4069230,,,,,,,,,,Submersible pump,,,,,-1.05,"Casing is below floor of wellhouse, top of casing is below floor.",Domestic,"Active, pumping well",Monitoring complete,Redacted note 015,,,,TRUE,,,,,,,,Redacted note 015,Redacted note 015 +Rio Arriba,RA-121,Redacted note 016,2025-06-11T09:45:00,Person 001,Person 002,,Person 017,,Owner,Primary,505-555-0012,Home,505-555-0013,Mobile,user010@example.com,Primary,,,Address Line 016,,Physical,NM,Anytown,87016,Address Line 016,,Mailing,NM,Anytown,87016,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 016,Redacted note 016,TRUE,TRUE,FALSE,TRUE,,Redacted note 016,360837,4070065,,,,,Pit well dug ~30 years.,,,,,Submersible pump,,,,,2.92,Top of open pit well.,Domestic,"Active, pumping well",Monitoring complete,Redacted note 016,,Redacted note 016,,TRUE,,,,,,,,Redacted note 016,Redacted note 016 +Rio Arriba,RA-122,Redacted note 017,2025-06-12T08:40:00,Person 001,Person 002,,Person 
018,,Owner,Primary,505-555-0014,Mobile,,,user011@example.com,Primary,,,Address Line 017,,Physical,,Anytown,87017,Address Line 017,,Mailing,,Anytown,87017,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 017,357622,4063727,,,,,,,,,,,,,,0.52,1.45,TOC,,,Monitoring complete,Redacted note 017,,,,TRUE,,,,,,,,,Redacted note 017 +Rio Arriba,RA-123,Redacted note 018,2025-06-12T10:40:00,Person 001,Person 002,,Person 019,,Owner,Primary,505-555-0015,Mobile,,,user012@example.com,Primary,,,Address Line 018,,Physical,,,,Address Line 018,,Physical,NM,Anytown,87018,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 018,Redacted note 018,TRUE,TRUE,TRUE,TRUE,,Redacted note 018,351304,4065624,,,,,,,,,,Submersible pump,,,,0.4,1.87,Top of PVC casing.,Domestic,"Active, pumping well",Monitoring complete,,,,,TRUE,,,,,,,,,Redacted note 018 +Rio Arriba,RA-124,Redacted note 019,2025-06-12T12:30:00,Person 001,Person 002,,Person 020,,Owner,Primary,,,,,user013@example.com,Primary,,,Address Line 019,,Physical,,,,Address Line 019,,Physical,,Anytown,87019,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 019,Redacted note 019,TRUE,TRUE,,TRUE,,,370829,4067249,,,,,,,,,,Submersible pump,,,,0.46,-6,Top of casing in vault below ground.,Domestic,"Active, pumping well",Monitoring complete,Redacted note 019,Redacted note 019,,,TRUE,,,,,,,,,Redacted note 019 +Rio Arriba,RA-125,Redacted note 020,2025-06-12T14:15:00,Person 001,Person 002,,Person 021,,Owner,Primary,505-555-0016,Mobile,,,user014@example.com,Primary,,,Address Line 020,,Physical,NM,Anytown,87020,Address Line 020,,Mailing,NM,Anytown,87020,,,,,,,,,,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,,Redacted note 020,371293,4067919,,,,,~2008,,305,275 at time of drilling,,Submersible pump,,,,0.46,0.9,,,,Monitoring complete,Redacted note 020,,,,,,,,,,,,,Redacted note 020 +Rio Arriba,RA-126,Redacted note 021,2025-06-13T07:40:00,Person 001,Person 002,,Person 022,,Owner,Primary,505-555-0017,Mobile,,,user015@example.com,Primary,,,Address Line 021,,Physical,NM,Anytown,87021,Address Line 
021,,Mailing,NM,Anytown,87021,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 021,Redacted note 021,TRUE,TRUE,TRUE,TRUE,,Redacted note 021,369151,4048590,,,,RG-21554,,,2610,Early 2020s: 1100-ish ft,,Submersible pump,~1100,,,1.03,0.86,TOC,Public supply,"Active, pumping well",Monitoring complete,,Redacted note 021,Redacted note 021,,,,,,,,,,Redacted note 021,Redacted note 021 +Rio Arriba,RA-127,Redacted note 022,2025-06-13T09:00:00,Person 001,Person 002,,Person 023,,Owner,Primary,505-555-0018,Mobile,,,user016@example.com,Primary,,,Address Line 022,,Physical,NM,Anytown,87022,Address Line 022,,Mailing,NM,Anytown,87022,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 022,Redacted note 022,TRUE,TRUE,TRUE,TRUE,,Redacted note 022,364404,4049515,,,,,1999,Well owner,~320,~80,Well owner,Submersible pump,,,,0.55,0.95,TOC ,Domestic,"Active, pumping well",Monitoring complete,,Redacted note 022,,Redacted note 022,,,,,,,,,,Redacted note 022 +Rio Arriba,RA-128,Redacted note 023,2025-06-13T10:28:00,Person 001,Person 002,,Person 024,,Owner,Primary,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 023,,,,,,,Redacted note 023,360319,4065424,,,,,,,,,,Submersible pump,,,,,,,Public supply,"Active, pumping well",Monitoring complete,Redacted note 023,Redacted note 023,Redacted note 023,,TRUE,,,,,,,,Redacted note 023,Redacted note 023 +Rio Arriba,RA-129,Redacted note 024,2025-06-12T08:40:00,Person 001,Person 002,,Person 018,,Owner,Primary,505-555-0014,Mobile,,,user011@example.com,Primary,,,Address Line 024,Address Line 024,Physical,NM,Anytown,87024,Address Line 024,,Mailing,NM,Anytown,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 024,TRUE,TRUE,,TRUE,,Redacted note 024,357610,4063715,,,,,,,105,,,Submersible pump,,,,0.72,1.31,TOC ,Irrigation,"Active, pumping well",Monitoring complete,Redacted note 024,Redacted note 024,,Redacted note 024,TRUE,,,,,,,,, +Rio Arriba,RA-140,Redacted note 025,2025-06-10T10:45:00,Person 005,Person 006,,Person 025,Organization 
005,Owner,Primary,505-555-0019,Mobile,,,user017@example.com,Primary,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 025,Redacted note 025,TRUE,TRUE,,TRUE,,,388388,4009362,,,,,,,,,,,,,,0.5,2.03,TOC,Domestic,"Active, pumping well",Monitoring complete,,,,,TRUE,2025-06-10T11:00:00,Person 026,Steel-tape measurement,,Site was pumped recently,52.09,Water level accurate to within one foot,Redacted note 025, +Rio Arriba,RA-141,Redacted note 026,2025-06-10,Person 005,Person 006,,Person 025,Organization 005,Owner,Primary,505-555-0019,Mobile,,,user017@example.com,Primary,,,,,,,,,Address Line 026,,Mailing,NM,Anytown,87026,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 026,Redacted note 026,TRUE,TRUE,,TRUE,,,388471,4009927,,5971,,,,,,,,,,,,0.4,1.96,TOC,Domestic,"Active, pumping well",Monitoring complete,Redacted note 026,,,,FALSE,,,,,,,,, +Rio Arriba,RA-142,Redacted note 027,2025-06-10,Person 005,Person 006,,Person 025,Organization 005,Owner,Primary,505-555-0019,Mobile,,,user017@example.com,Primary,,,,,,,,,Address Line 027,,Mailing,NM,Anytown,87027,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 027,,TRUE,TRUE,,TRUE,,,388273,4009973,,5969,,,,,,,,,,,,0.4,1.85,TOC,Domestic,"Active, pumping well",Monitoring complete,Redacted note 027,,,,TRUE,,,,,,,,,Redacted note 027 +Rio Arriba,RA-143,Redacted note 028,2025-06-10T14:33:00,Person 005,Person 006,,Person 027,,Owner,Primary,505-555-0020,Mobile,,,user018@example.com,Primary,,,Address Line 028,,Physical,,Anytown,87028,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 028,Redacted note 028,TRUE,TRUE,,TRUE,,,367381,4012288,,6378,,,,,256,,,,,,,0.4,0.75,TOC,Domestic,,Monitoring complete,Redacted note 028,,,,TRUE,2025-06-10T14:40:00,Person 026,Steel-tape measurement,,Water level not affected,174.27,Water level accurate to within two hundreths of a foot,,Redacted note 028 +Rio Arriba,RA-144,Redacted note 029,2025-06-10T16:56:00,Person 005,Person 006,,Person 028,,Owner,Primary,505-555-0021,Mobile,,,user019@example.com,Primary,,,Address Line 
029,,Physical,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 029,,,,,,,385106,4009631,,6111,,RG-A1584,,,390,268,,Submersible pump,,,,0.5,6.4,TOC (top of casing?),Domestic,"Active, pumping well",Monitoring complete,Redacted note 029,,,,TRUE,,,,,,,,,Redacted note 029 +Rio Arriba,RA-145,Redacted note 030,2025-06-11T11:01:00,Person 005,Person 006,,Person 010,,Owner,Primary,505-555-0005,Mobile,,,user004@example.com,Primary,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 030,,TRUE,TRUE,,TRUE,,,352342,4040485,,,,,,,,"142.1 (""Depth to Water"")",,,,,,0.4,1.4,TOC ,,,Monitoring complete,,,,,TRUE,,,,,,,,,Redacted note 030 +Rio Arriba,RA-146,Redacted note 031,2025-06-11T12:19:00,Person 005,Person 006,,Person 010,,Owner,Primary,505-555-0022,Mobile,,,user004@example.com,Primary,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 031,Redacted note 031,TRUE,TRUE,,TRUE,,,348715,4043303,,,,,,,292,60,,,,,,0.5,,TOC ,,,Monitoring complete,Redacted note 031,,,,TRUE,,,,,,,,Redacted note 031, +Rio Arriba,RA-147,Redacted note 032,2025-06-11T14:15:00,Person 005,Person 006,,Person 010,,Owner,Primary,505-555-0023,Mobile,,,user004@example.com,Primary,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 032,Redacted note 032,TRUE,TRUE,,TRUE,,,351057,4045227,,,,,,,,,,,,,,,,,,,Monitoring complete,,,,,TRUE,,,,,,,,Redacted note 032, +Rio Arriba,RA-148,Redacted note 033,2025-06-11T17:00:00,Person 005,Person 006,,Person 029,,Owner,Primary,505-555-0024,Home,505-555-0025,Mobile,user020@example.com,Primary,,,Address Line 033,,Physical,NM,Anytown,87033,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 033,TRUE,TRUE,,TRUE,,,396122,3997771,,,,,,,,"23.09 (""Depth to Water"")",,,,,,0.55,0.45,TOC ,Domestic,"Active, pumping well",Monitoring complete,Redacted note 033,,,,TRUE,,,,,,,,,Redacted note 033 +Rio Arriba,RA-149,Redacted note 034,2025-06-12T09:15:00,Person 005,Person 006,,Person 030,,Owner,Primary,505-555-0026,Mobile,,,user021@example.com,Primary,,,Address Line 034,,Physical,,Anytown,87034,Address Line 
034,,Mailing,,Anytown,87034,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 034,Redacted note 034,TRUE,TRUE,,TRUE,,,390748,4010868,,,,RG-88003,,,500,"86 (""Depth to Water"")",,,,,,0.35,2.15,TOC,Domestic,"Active, pumping well",Monitoring complete,Redacted note 034,,,,TRUE,2025-06-12T09:30:00,Person 031,Steel-tape measurement,,Water level not affected,86,Water level accurate to within two hundreths of a foot,,Redacted note 034 +Rio Arriba,RA-150,Redacted note 035,2025-06-13T10:54:00,Person 005,Person 006,,Person 032,,Owner,Primary,505-555-0027,Mobile,,,,,,,Address Line 035,,Physical,,Anytown,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 035,,,,,,,428365,3998760,,,,,,,,"57 (""Depth to Water"")",,,,,,,,,Domestic,"Active, pumping well",Monitoring complete,Redacted note 035,,,,TRUE,,,,,,,,,Redacted note 035 +Rio Arriba,RA-155,Redacted note 036,2025-06-24T9:17:00,Person 005,Person 006,,Person 033,,Owner,Primary,505-555-0028,Mobile,,,user022@example.com,Primary,,,Address Line 036,,Physical,NM,Anytown,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 036,Redacted note 036,TRUE,TRUE,,TRUE,,,422664,4005784,,,,,,,,"8.78 (""Depth to Water"")",,Submersible pump,,,,0.55,2.65,TOC,Domestic,"Active, pumping well",Monitoring complete,Redacted note 036,,,,TRUE,,,,,,,,,Redacted note 036 +Rio Arriba,RA-156,Redacted note 037,2025-06-24T10:30:00,Person 005,Person 006,,Person 034,,Owner,Primary,,,,,user023@example.com,Primary,,,Address Line 037,,Physical,,,,Address Line 037,,Mailing,NM,Anytown,87037,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 037,TRUE,TRUE,,TRUE,,,422714,4005640,,,,RG-95412 PODI,,,180,"47.4 (""Depth to Water"")",NMOSE,,,,,0.55,1.95,TOC,Domestic,,Monitoring complete,,,,Redacted note 037,TRUE,,,,,,,,, +Rio Arriba,RA-157,Redacted note 038,2025-06-24,Person 005,Person 006,,Person 035,,Owner,Primary,505-555-0029,Mobile,,,user024@example.com,Primary,,,Address Line 038,,Physical,,Anytown,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 038,,TRUE,,,,,,420558,4006079,,,,,,,,,,,,,,3,0,At ground 
level.,,Abandoned,Monitoring complete,,,,,FALSE,,,,,,,,, +Rio Arriba,RA-158,Redacted note 039,2025-06-24T13:32:00,Person 005,Person 006,,Person 036,,Owner,Primary,505-555-0030,Mobile,,,user025@example.com,Primary,,,Address Line 039,,Physical,NM,Anytown,,,,,,,,,,Primary,505-555-0031,,,,,,,,,,,,,,,,,,,,Redacted note 039,,TRUE,TRUE,,TRUE,,,389606,4026793,,,,,,,,,,,,,,,1.9,0.5,,,Monitoring complete,Redacted note 039,,Redacted note 039,,FALSE,,,,,,,,, +Rio Arriba,RA-159,Redacted note 040,2025-06-25T8:00:00,Person 005,Person 006,,Person 037,,Owner,Primary,505-555-0032,Mobile,,,user026@example.com,Primary,,,Address Line 040,,Physical,,Anytown,,Address Line 040,,Mailing,,,87040,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 040,Redacted note 040,TRUE,TRUE,,TRUE,,,391763,4009306,,,,,,,,"106.42 (""Depth to Water"")",,Submersible pump,,,,0.35,0.9,TOC,Domestic,"Active, pumping well",Monitoring complete,Redacted note 040,,,,TRUE,,,,,,,,,Redacted note 040 +Rio Arriba,RA-160,Redacted note 041,2025-06-25T09:30:00,Person 005,Person 006,,Person 038,,Owner,Primary,505-555-0033,Mobile,505-555-0034,Mobile,user027@example.com,Primary,,,Address Line 041,,Physical,,Anytown,,Address Line 041,,Mailing,NM,Anytown,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 041,TRUE,TRUE,,TRUE,,,393034,4010098,,,,RG-99070,,,340,"254.57 (""Depth to Water"")",Well owner,Submersible pump,,,,0.5,3.8,TOC,Domestic,"Active, pumping well",Monitoring complete,,,,,TRUE,,,,,,,,, +Rio Arriba,RA-161,Redacted note 042,2025-06-25T11:48:00,Person 005,Person 006,,Person 039,,Owner,Primary,505-555-0035,Mobile,,,user028@example.com,Primary,,,Address Line 042,,Physical,,,,Address Line 042,,Mailing,NM,Anytown,87042,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 042,TRUE,TRUE,,TRUE,,,366251,4066434,,,,,,,,"48.48 (""Depth to Water"")",,,,,,0.55,0.98,TOC,Domestic,"Active, pumping well",Monitoring complete,Redacted note 042,,,,TRUE,,,,,,,,,Redacted note 042 +Rio Arriba,RA-162,Redacted note 043,2025-06-25T15:55:00,Person 005,Person 006,,Person 
040,,Owner,Primary,505-555-0036,Mobile,,,user029@example.com,Primary,,,Address Line 043,,Physical,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 043,TRUE,TRUE,,TRUE,,,366007,4066411,,,,,,,,"23.38 (""Depth to Water"")",,,,,,0.55,0.15,,Domestic,"Active, pumping well",Monitoring complete,,,,,TRUE,,,,,,,,, +Rio Arriba,RA-163,Redacted note 044,2025-06-26T10:00:00,Person 005,Person 006,,Person 041,Organization 006,Water operator,Primary,505-555-0037,Mobile,505-555-0038,Mobile,,,,,Address Line 044,,Physical,,Anytown,87044,Address Line 044,,Mailing,,,,,,,,,,,,,,,,,,,,,,,,,,,,,TRUE,TRUE,,TRUE,,,327887,4040522,,,,RG-90323,,,1155,"540 (""Depth to Water"")",,,,,,1,1.7,TOC,Public supply,"Active, pumping well",Monitoring complete,,,Redacted note 044,,TRUE,,,,,,,,Redacted note 044,Redacted note 044 +Rio Arriba,RA-164,Redacted note 045,2025-06-26T12:00:00,Person 005,Person 006,,Person 042,,Owner,Primary,505-555-0039,Mobile,,,user030@example.com,Primary,,,Address Line 045,,Physical,,Anytown,,Address Line 045,,Mailing,NM,Anytown,87045,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 045,TRUE,TRUE,,TRUE,,,384542,4009372,,,,,,,,"29.51 (""Depth to Water"")",,Submersible pump,,,,0.47,1.33,TOC,Domestic,"Active, pumping well",Monitoring complete,Redacted note 045,Redacted note 045,,,TRUE,,,,,,,,,Redacted note 045 +Rio Arriba,RA-165,Redacted note 046,2025-06-26T13:00:00,Person 005,Person 006,,Person 043,,Owner,Primary,505-555-0040,Mobile,,,,,,,Address Line 046,,Physical,,Anytown,,Address Line 046,,Mailing,NM,Anytown,87046,,,,,,,,,,,,,,,,,,,,,,,,,,TRUE,TRUE,,TRUE,,,388866,4008456,,,,,,,,"56.88 (""Depth to Water"")",,,,,,0.388,,TOC,Domestic,"Active, pumping well",Monitoring complete,,,,,TRUE,,,,,,,,, +Rio Arriba,RA-166,Redacted note 047,2025-06-26T14:15:00,Person 005,Person 006,,Person 044,,Owner,Primary,,,,,user031@example.com,Primary,,,Address Line 047,,Physical,,Anytown,,Address Line 047,,Mailing,NM,Anytown,87047,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 047,TRUE,TRUE,,TRUE,,,391992,4005488,,,,,,,,"70.4 
(""Depth to Water"")",,,,,,0.47,1.83,TOC,Domestic,"Active, pumping well",Monitoring complete,Redacted note 047,,,,TRUE,,,,,,,,,Redacted note 047 +Rio Arriba,RA-167,Redacted note 048,2025-06-26T15:20:00,Person 005,Person 006,,Person 045,,Owner,Primary,505-555-0041,Mobile,,,user032@example.com,,user033@example.com,,Address Line 048,,Physical,NM,Anytown,87048,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 048,TRUE,TRUE,,TRUE,,,394204,4003295,,,,,,,,,,Submersible pump,,,,0.6,5.5,TOC,Domestic,"Active, pumping well",Monitoring complete,Redacted note 048,,,,TRUE,,,,,,,,Redacted note 048,Redacted note 048 +San Acacia,SA-091,Redacted note 049,2025-02-15T10:30:00-08:00,Person 046,Person 047,,Person 048,Organization 007,,,505-555-0042,,,,user034@example.com,,,,Address Line 049,,,NM,Anytown,87049,,,,,,,,,,505-555-0042,,,,user034@example.com,,,,Address Line 049,,,NM,Anytown,87049,,,,,,,Redacted note 049,Redacted note 049,TRUE,TRUE,FALSE,TRUE,,Redacted note 049,330123.4,3976543.2,13,5012.3,,RG-0001,2014-07-10T00:00:00-08:00,Historic driller log,280,85.2,Measured 2018-06-01,,140,TRUE,TRUE,0.33,1.2,"Top of steel casing, north side",Domestic,"Active, pumping well",Annual water level,,,,,TRUE,,,,,,,,, +Water Level Network,WL-xxxx,Redacted note 050,2025-08-26T09:45:00,Person 049,Person 050,,Person 051,,Owner,Primary,505-555-0043,Home,,,user035@example.com,Primary,,,Address Line 050,Address Line 050,Physical,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,372949,3750634,13,,,,1961,,,,,,,,,,,,,,Annual water level,Redacted note 050,,Redacted note 050,,TRUE,,,,,,,,Redacted note 050, +Water Level Network,WL-xxxx,Redacted note 051,2025-08-26T09:45:00,Person 049,Person 050,,Person 051,,Owner,,505-555-0043,,,,,,,,Address Line 051,Address Line 051,Physical,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 051,TRUE,,,,,Redacted note 051,372980,3750627,13,,,RG-78079-S,Pre 1979,,250,,,Submersible pump,,,,0.68,0.43,Top of plate where electric enters well.,Irrigation,"Active, pumping well",Annual water 
level,,,,,TRUE,,,,,,,,, +Water Level Network,WL-xxxx,Redacted note 052,2025-11-06T10:00:00,Person 049,Person 050,,Person 052,,Owner,Primary,505-555-0044,Mobile,,,,,,,Address Line 052,,Physical,NM,Anytown,87052,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,,Redacted note 052,344100,3855426,13,,,RG-22666,2004,,205,,,Submersible pump,,,,0.33,1.96,TOC,Domestic,"Active, pumping well",Annual water level,,,,,TRUE,,,,,,,,, +Water Level Network,WL-xxxx,Redacted note 053,2025-11-06T11:45:00,Person 049,Person 050,,Person 053,,Owners,Primary,505-555-0045,Mobile,,,,,,,Address Line 053,,Physical,NM,Anytown,87053,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 053,TRUE,TRUE,TRUE,TRUE,,Redacted note 053,337309,3840339,13,,,#ID 12163,1995,,,,,Submersible pump,,,,,1.33,"TOC, opposite electric",Domestic,"Active, pumping well",Annual water level,,,,,TRUE,,,,,,,,, +Water Level Network,WL-xxxx,Redacted note 054,2025-11-06T11:00:00,Person 049,Person 050,,Person 054,,Owner,Primary,505-555-0046,Mobile,,,,,,,Address Line 054,,Physical,NM,Anytown,87054,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 054,TRUE,TRUE,TRUE,TRUE,,Redacted note 054,345856,3857237,13,,,,~2000,,60,7,,Jet pump,,,,0.33,0.5,Illegible,Irrigation,"Active, pumping well",Annual water level,,,,,TRUE,,,,,,,,, +Water Level Network,WL-xxxx,Redacted note 055,2024-10-16,Person 049,Person 002,,Person 055,Organization 008,Owner,Primary,505-555-0047,Home,,,,,,,Address Line 055,,Physical,,Anytown,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 055,,TRUE,,TRUE,TRUE,,,448616,4031491,13,,,RG-40450-S-3,,,1130,~27,,Submersible pump/turbine well,,,TRUE,1.2,2.8,top of measuring port,Production,"Active, pumping well",Annual water level,Redacted note 055,,Redacted note 055,,TRUE,,,,,,,,, +Water Level Network,WL-xxxx,Redacted note 056,2024-10-16,Person 049,Person 002,,Person 055,Organization 008,Owner,Primary,505-555-0047,Home,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 056,Redacted note 
056,TRUE,,TRUE,TRUE,,,441566,4035871,13,,,,,,910,~550,,Submersible pump,,,,1.8,3.5,top of measuring port,Production,"Active, pumping well",Annual water level,,,,,TRUE,,,,,,,,, +Water Level Network,WL-xxxx,Redacted note 057,2024-10-16,Person 049,Person 002,,Person 055,Organization 008,Owner,Primary,505-555-0047,Home,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 057,,TRUE,,TRUE,TRUE,,,442411,4035102,13,,,,,,980,~530,,Submersible pump,,,,1.8,3.2,top of measuring port,Production,"Active, pumping well",Annual water level,Redacted note 057,,,,TRUE,,,,,,,,Redacted note 057, +Water Level Network,WL-xxxx,Redacted note 058,2024-10-16,Person 049,Person 002,,Person 055,Organization 008,Owner,Primary,505-555-0047,Home,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 058,,TRUE,,TRUE,TRUE,,,445451,4035015,13,,,,,,,,,,,,,,,,,"Destroyed, exists but not usable",Annual water level,Redacted note 058,Redacted note 058,Redacted note 058,,,,,,,,,,Redacted note 058, +San Acacia,SAC-xxxx,Redacted note 059,2025-11-14T15:34:00,Person 056,,,Person 057,,Owner,Primary,505-555-0048,Mobile,,,,,,,Address Line 059,,Physical,,Anytown,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 059,Redacted note 059,TRUE,TRUE,FALSE,FALSE,,Redacted note 059,312159,3740231,,,,RG-A0789-P001,,OSE,320,260,OSE,Submersible pump,,,FALSE,0.5,1.91,Top of casing at port.,Domestic,"Active, pumping well",Annual water level,Redacted note 059,,,,,2025-11-14,,Steel-tape measurement,,,208.64,,, +San Acacia,SAC-xxxx,Redacted note 060,2025-11-14T14:40:00,Person 056,,,Person 058,,Owner,Primary,505-555-0049,Mobile,,,,,,,Address Line 060,,,,Anytown,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 060,Redacted note 060,TRUE,TRUE,FALSE,TRUE,,Redacted note 060,324180,3782031,,,,RG 75545,,OSE,,,,Submersible pump,,,FALSE,,1.09,Top of casing.,Irrigation,"Active, pumping well",Annual water level,,,,,,2025-11-14 14:38:00,,Steel-tape measurement,,,12.24,,, +San Acacia,SAC-xxxx,Redacted note 061,2025-11-14T14:00:00,Person 056,,,Person 
059,,Owner,Primary,505-555-0050,Mobile,,,user036@example.com,Primary,,,Address Line 061,,Physical,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 061,Redacted note 061,TRUE,TRUE,FALSE,TRUE,,Redacted note 061,321274,3786654,,,,,,,,,,Submersible pump,,,FALSE,0.5,1.2,Top of casing at cap.,Domestic,"Active, pumping well",Annual water level,,,,,,2025-11-14 14:00:00,,Steel-tape measurement,,,270.76,,, +Water Level Network,WL-xxxx,Redacted note 062,2025-11-07T15:30:00,Person 056,Person 049,,Person 060,,Owner,Primary,505-555-0051,Mobile,,,,,,,Address Line 062,,Physical,NM,Anytown,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 062,Redacted note 062,TRUE,TRUE,,TRUE,,,Lat: 34.009134,Long: -107.2778,,,,RG-76705,2002-10-28,OSE,555,530,OSE,Submersible pump,,,FALSE,0.42,1.09,Top of casing across from wires.,,"Inactive, exists but not used",Annual water level,,,Redacted note 062,,,,,Steel-tape measurement,,,,,, +San Acacia,SAC-xxxx,Redacted note 063,2025-11-21T12:00:00,Person 056,,,Person 061,,Owner,Primary,505-555-0052,Mobile,,,,,,,Address Line 063,,Physical,,Anytown,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 063,Redacted note 063,TRUE,TRUE,FALSE,TRUE,,Redacted note 063,Lat: 33.972852,Long: -106.879441,,,,RG-45445-PODI,1986-04-18,OSE,73,51,OSE,Submersible pump,,,,0.42,1.15,Observation port in TOC.,Domestic,"Active, pumping well",Annual water level,,,,,,,,Steel-tape measurement,,,49.4,,, +San Acacia,SAC-xxxx,Redacted note 064,2025-11-21T12:35:00,Person 056,,,Person 062,,Owner,Primary,505-555-0053,Mobile,,,,,,,Address Line 064,,Physical,NM,Anytown,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 064,Redacted note 064,TRUE,FALSE,FALSE,TRUE,,Redacted note 064,Lat: 33.972562,Long: -106.880565,,,,,,Owner,80,,Owner,Submersible pump,,,,,0.42,Top of casing.,Domestic,"Active, pumping well",Annual water level,Redacted note 064,,,,,,,,,,,,, +San Acacia,SAC-xxxx,Redacted note 065,2025-11-21T16:00:00,Person 056,,,Person 063,,Owner,Primary,505-555-0054,Mobile,,,,,,,Address Line 
065,,Physical,NM,Anytown,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 065,TRUE,TRUE,FALSE,TRUE,,Redacted note 065,Lat: 34.149952,Long: -106.870350,,,,RG-79305-PODI,2003-10-06,OSE,156,80,OSE,Submersible pump,,TRUE,FALSE,0.6,1.15,TOC,Domestic,"Active, pumping well",Annual water level,,,,,,,,,,,,,Redacted note 065, +San Acacia,SAC-xxxx,Redacted note 066,2025-11-21T14:00:00,Person 056,,,Person 063,,Owner,Primary,505-555-0055,Mobile,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 066,Redacted note 066,TRUE,TRUE,FALSE,TRUE,,Redacted note 066,Lat: 34.091054,Long: -106.870633,,,,,,,,,,"Sandpoint, open well",,TRUE,,,0,Top of pipe,"Open, unequipped well","Active, pumping well",Annual water level,,,,,,2025-11-21 14:07:00,,,,,9.7,,, +San Acacia,SAC-xxxx,Redacted note 067,2025-11-21T15:45:00,Person 056,,,Person 063,,Owner,Primary,505-555-0056,Mobile,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 067,TRUE,TRUE,FALSE,TRUE,,Redacted note 067,Lat: 34.149738,Long: -106.875028,,,,,,,22,,,"Sandpoint, open well",,TRUE,,,3.97,Top of pipe,"Open, unequipped well","Active, pumping well",Annual water level,,,Redacted note 067,,,,,,,,,,, +Water Level Network,WL-0360,Redacted note 068,2025-09-18T11:00:00,Person 006,,,Person 064,,Owner,Primary,505-555-0057,Mobile,,,user037@example.com,Primary,,,Address Line 068,,Physical,NM,Anytown,87068,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 068,Redacted note 068,TRUE,,,,,Redacted note 068,343541,4057849,,7090,Global positioning system (GPS),,,,,,,Submersible pump,,,,0.55,-3.2,Top casing,Shared domestic,"Active, pumping well",Annual water level,,Redacted note 068,Redacted note 068,,,,,,,,,,Redacted note 068, +Water Level Network,WL-0361,Redacted note 069,2025-10-23T09:00:00,Person 006,,,Person 065,,Owner,Primary,505-555-0058,Mobile,505-555-0059,Mobile,user038@example.com,Primary,,,Address Line 069,,Physical,NM,Anytown,87069,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 069,Redacted note 069,TRUE,TRUE,TRUE,TRUE,,Redacted note 
069,443622,4030397,,6981,Global positioning system (GPS),,,,,,,Submersible pump,,,,0.45,-4.35,"1"" hole in top of casing, remove plug.",Shared domestic,"Active, pumping well",Annual water level,,,,,,2025-10-23 9:12:00,,,,,137.45,,, +Rio Arriba,RA-180,Redacted note 070,2025-11-18T11:47:00,Person 005,Person 066,Person 067,Person 068,,Owner,Primary,,,,,user039@example.com,Primary,,,Address Line 070,,Physical,NM,Anytown,87070,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 070,TRUE,TRUE,TRUE,TRUE,,,398252,3996265,,,,RG-A1644,,Well record,110,22,Well record,,,,,0.5,1.5,Top PVC casing,Domestic,"Active, pumping well",Monitoring complete,Redacted note 070,,,,TRUE,,,,,,38.7,,, +Rio Arriba,RA-181,Redacted note 071,2025-11-18T09:44:00,Person 005,Person 066,Person 067,Person 069,,Owner,Primary,505-555-0060,Mobile,,,,,,,Address Line 071,,Physical,NM,Anytown,87071,Address Line 071,,Mailing,NM,Anytown,87071,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 071,TRUE,,,TRUE,,,401398,3988703,,,,,,,89,,,,,,,0.53,0.4,TOC,Domestic,"Inactive, exists but not used",Monitoring complete,,Redacted note 071,,,FALSE,,,,,,19.76,,, +Rio Arriba,RA-182,Redacted note 072,2025-11-18T10:00:00,Person 005,Person 066,Person 067,Person 070,Organization 009,District Manager,Primary,505-555-0061,Mobile,,,user040@example.com,Primary,,,Address Line 072,,Physical,NM,Anytown,87072,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 072,TRUE,TRUE,,TRUE,,,401027,3988713,,,,,,,,,,,,,,0.37,1.58,TOC,Domestic,"Active, pumping well",Monitoring complete,Redacted note 072,,,,TRUE,,,,,,57.5,,,Redacted note 072 +Rio Arriba,RA-183,Redacted note 073,2025-11-18T13:13:00,Person 005,Person 066,Person 067,Person 071,,Owner,Primary,505-555-0062,Mobile,,,user041@example.com,Primary,,,Address Line 073,,Physical,NM,Anytown,87073,Address Line 073,,Mailing,NM,Anytown,87073,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 073,TRUE,TRUE,,TRUE,,,402620,3986887,,,,,1995,,,,,,,,,0.5,0.34,Top of electrical sleeve.,Irrigation,"Active, pumping well",Monitoring 
complete,Redacted note 073,,,,TRUE,,,,,,8.85,,,Redacted note 073 +Rio Arriba,RA-184,Redacted note 074,2025-11-18T15:00:00,Person 005,Person 066,Person 067,Person 072,,Owner,Primary,505-555-0063,Mobile,,,,,,,Address Line 074,,Physical,NM,Anytown,87074,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,399194,3993001,,,,,,,,,,,,,,,4,TOC PVC port,Domestic,"Active, pumping well",Monitoring complete,,,,,TRUE,,,,,,5.26,,, +Rio Arriba,RA-185,Redacted note 075,2025-11-19T08:56:00,Person 005,Person 066,Person 067,Person 073,Organization 010,Winter Operator,Primary,505-555-0064,Mobile,,,user042@example.com,Primary,,,Address Line 075,,Physical,NM,Anytown,87075,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 075,Redacted note 075,TRUE,TRUE,,TRUE,,,397813,3989397,,,,,,,,,,,,,,,,,Public supply,"Active, pumping well",Monitoring complete,Redacted note 075,,,,TRUE,,,,,,,,Redacted note 075,Redacted note 075 +Rio Arriba,RA-186,Redacted note 076,2025-11-19T11:25:00,Person 005,Person 066,Person 067,Person 074,,Owner,Primary,505-555-0065,Mobile,,,,,,,Address Line 076,Address Line 076,Physical,NM,Anytown,87076,Address Line 076,,Mailing,NM,Anytown,87076,,,,,,,,,,,,,,,,,,,,,,,,,,TRUE,TRUE,,TRUE,,,401403,3992181,,,,,,,,,,,,,,,5.33,Top of casing electric wire entrance.,Irrigation,"Active, pumping well",Monitoring complete,,,,,TRUE,2025-11-19 11:18:00,,,,,62.68,,, +Rio Arriba,RA-187,Redacted note 077,2025-11-19T11:45:00,Person 005,Person 066,Person 067,Person 075,,Owner,Primary,505-555-0066,Home,,,user043@example.com,Primary,,,Address Line 077,,Physical,NM,Anytown,87077,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 077,TRUE,TRUE,,TRUE,,,401162,3988918,,,,,,,,,,,,,,,1.06,Top of casing electric.,Domestic,"Active, pumping well",Monitoring complete,,,,,TRUE,2025-11-19 12:01:00,,,,,29.1,,, +Rio Arriba,RA-188,Redacted note 078,2025-11-19T12:30:00,Person 005,Person 066,Person 067,Person 076,,Owner,Primary,505-555-0067,Mobile,,,,,,,Address Line 078,,Physical,NM,Anytown,87078,Address Line 
078,,Mailing,NM,Anytown,87078,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 078,TRUE,TRUE,,TRUE,,,396955,3995733,,,,,,,,,,,,,,,,,Domestic,"Active, pumping well",Monitoring complete,,Redacted note 078,,,TRUE,,,,,,,,, +Rio Arriba,RA-189,Redacted note 079,2025-11-19T15:30:00,Person 005,Person 066,Person 067,Person 077,,Owner,Primary,,,,,user044@example.com,Primary,,,Address Line 079,,Physical,NM,Anytown,87079,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 079,TRUE,TRUE,,TRUE,,,396456,3996143,,,,,,,52,,,,,,,,-4.72,TOC in vault.,Domestic,"Active, pumping well",Monitoring complete,,,,,TRUE,,,,,,28.3,,, +Rio Arriba,RA-190,Redacted note 080,2025-11-19T14:30:00,Person 005,,,Person 078,,Owner,Primary,505-555-0068,Mobile,,,user045@example.com,Primary,,,Address Line 080,,Physical,NM,Anytown,87080,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,TRUE,TRUE,,TRUE,,,396597,3996277,,,,,,,,,,,,,,,,TOC ,Domestic,"Active, pumping well",Monitoring complete,,,,,TRUE,,,,,,9.3,,, +Water Level Network,WL-0231,Redacted note 081,2021-04-01T11:00:00,Person 079,,,Person 073,Organization 011,Owner,Primary,505-555-0069,Mobile,505-555-0070,Mobile,user042@example.com,Primary,,,,,,,,,Address Line 081,,Mailing,NM,Anytown,87081,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 081,Redacted note 081,TRUE,TRUE,TRUE,TRUE,,Redacted note 081,400516,3992975,,5738,Global positioning system (GPS),,,,,,,,,TRUE,TRUE,0.7,1.15,Top of casing opposite pump wires.,Public supply,"Active, pumping well",Annual water level,Redacted note 081,,,,TRUE,2021-04-01 11:15:00,,Steel-tape measurement,,,85.53,,, +Water Level Network,WL-0232,Redacted note 082,2021-04-01T11:35:00,Person 079,,,Person 073,Organization 011,Owner,Primary,505-555-0069,Mobile,505-555-0070,Mobile,user042@example.com,Primary,,,,,,,,,Address Line 082,,Mailing,NM,Anytown,87082,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 082,Redacted note 082,TRUE,TRUE,TRUE,TRUE,,Redacted note 082,400226,3993394,,5740,Global positioning system (GPS),,,,,,,Submersible pump,,TRUE,TRUE,"6.5""",1.75,"Top of casing, top of 
sounding tube.",Public supply,"Active, pumping well",Annual water level,Redacted note 082,,,,TRUE,2021-04-01 11:45:00,,Electric tape measurement (E-probe),,,72.4,,, +Water Level Network,WL-xxxx,Redacted note 083,2025-07-25T10:00:00,Person 006,,,Person 080,Organization 012,Owner,Primary,,,,,user046@example.com,Primary,,,Address Line 083,,Physical,NM,Anytown,87083,Address Line 083,,Mailing,NM,Anytown,87083,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 083,Redacted note 083,TRUE,TRUE,TRUE,TRUE,,Redacted note 083,421579,3939470,,7690,Global positioning system (GPS),RG-54390-5,,,760,,,Submersible pump,,,,"6""",3.08,Top casing opposite pump wires.,Public supply,"Inactive, exists but not used",,,,,,,,,Steel-tape measurement,,,82.85,,, +Water Level Network,WL-xxxx,Redacted note 084,2025-07-25T09:00:00,Person 006,,,Person 080,Organization 012,Owner,Primary,,,,,user046@example.com,Primary,,,Address Line 084,,Physical,NM,Anytown,87084,Address Line 084,,Mailing,NM,Anytown,87084,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 084,Redacted note 084,TRUE,TRUE,TRUE,TRUE,,Redacted note 084,422118,3938758,,7507,Global positioning system (GPS),RG-54390-2,,,650,,,Open well,,TRUE,TRUE,0.5,1.46,"Top of 2"" opening on top of 6"" steel casing. 
Remove plug from opening.","Open, unequipped well",,,,,,,,2025-07-25 9:10:00,,Electric tape measurement (E-probe),,,80.09,,, +Water Level Network,WL-xxxx,Redacted note 085,2026-01-21T15:38:00,Person 056,,,Person 081,Organization 013,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 085,,Physical,NM,Anytown,87085,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 085,Redacted note 085,TRUE,TRUE,FALSE,TRUE,Redacted note 085,Redacted note 085,324579,3606008,13N,,,,,,,,,Submersible,,FALSE,FALSE,0.5,0.8,Cap port hole,wildlife,"Inactive, exists but not used",,,Redacted note 085,,,FALSE,2026-01-21 13:00:00,Person 056,Steel-tape measurement,0.8,,333.05,Water level accurate to within two hundreths of a foot,Redacted note 085, +Water Level Network,WL-xxxx,Redacted note 086,2026-01-21T13:00:01,Person 056,,,Person 081,Organization 013,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 086,,Physical,NM,Anytown,87086,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 086,Redacted note 086,TRUE,TRUE,FALSE,TRUE,Redacted note 086,Redacted note 086,318494,3601464,13N,,,,,,,,,Windmill,,FALSE,FALSE,0.5,,,Unused,"Inactive, exists but not used",,,,Redacted note 086,,FALSE,,,,,,,,Redacted note 086, +Water Level Network,WL-xxxx,Redacted note 087,2026-01-21T15:00:02,Person 056,,,Person 081,Organization 013,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 087,,Physical,NM,Anytown,87087,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 087,Redacted note 087,TRUE,TRUE,FALSE,TRUE,Redacted note 087,Redacted note 087,318709,3602162,13N,,,,,,,,,Open,,TRUE,FALSE,0.83,-0.6,TOC,Unused,"Inactive, exists but not used",,,,,,FALSE,2026-01-21 15:20:00,Person 056,Electric tape measurement (E-probe),-0.6,,450.09,Water level accurate to within two 
hundreths of a foot,Redacted note 087, +Water Level Network,WL-xxxx,Redacted note 088,2026-01-21T16:00:03,Person 056,,,Person 081,Organization 013,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 088,,Physical,NM,Anytown,87088,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 088,,TRUE,TRUE,FALSE,TRUE,Redacted note 088,Redacted note 088,318173,3600199,13N,,,,,,,,,Open,,TRUE,FALSE,1.5,,,Unused,"Inactive, exists but not used",,,,Redacted note 088,,FALSE,,,,,,,,Redacted note 088, +Water Level Network,WL-xxxx,Redacted note 089,2026-01-21T14:00:04,Person 056,,,Person 081,Organization 013,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 089,,Physical,NM,Anytown,87089,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 089,Redacted note 089,TRUE,TRUE,FALSE,TRUE,Redacted note 089,Redacted note 089,319585,3606318,13N,,,,,,,,,Open,,TRUE,FALSE,0.5,2.16,TOC,Unused,"Inactive, exists but not used",,,Redacted note 089,,,FALSE,2026-01-21 14:30:00,Person 056,Steel-tape measurement,2.16,,307.36,Water level accurate to within two hundreths of a foot,Redacted note 089, +Water Level Network,WL-xxxx,Redacted note 090,2025-12-17T12:00:00,Person 056,,,Person 081,Organization 014,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 090,,Physical,NM,Anytown,87090,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 090,Redacted note 090,TRUE,TRUE,FALSE,TRUE,Redacted note 090,Redacted note 090,336307,3610089,13N,,,,,,,,,Submersible,,FALSE,FALSE,0.43,1.27,Observation port in cap,Domestic,"Active, pumping well",,,Redacted note 090,,,FALSE,2025-12-17 12:20:00,Person 056,Steel-tape measurement,1.27,,264.73,Water level accurate to within two hundreths of a foot,, +Water Level Network,WL-xxxx,Redacted note 091,2025-12-16T11:30:00,Person 056,,,Person 
081,Organization 014,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 091,,Physical,NM,Anytown,87091,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 091,Redacted note 091,TRUE,TRUE,FALSE,TRUE,Redacted note 091,Redacted note 091,342987,3605396,13N,,,,,,,,,Submersible,,FALSE,FALSE,0.5,2.15,Hole in top of cap. Tap to side.,Livestock,"Active, pumping well",,,Redacted note 091,,,FALSE,2025-12-16 12:00:00,Person 056,Steel-tape measurement,2.15,,369.2,Water level accurate to within two hundreths of a foot,Redacted note 091, +Water Level Network,WL-xxxx,Redacted note 092,2025-12-17T14:30:00,Person 056,,,Person 081,Organization 014,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 092,,Physical,NM,Anytown,87092,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 092,Redacted note 092,TRUE,TRUE,FALSE,TRUE,Redacted note 092,Redacted note 092,340033,3618417,13N,,,LRG-15946,,,500,,,Submersible,,FALSE,FALSE,1.5,1.6,TOC,Livestock,"Active, pumping well",,,Redacted note 092,,,FALSE,2025-12-17 13:00:00,Person 056,Steel-tape measurement,1.6,,395.52,Water level accurate to within two hundreths of a foot,, +Water Level Network,WL-xxxx,Redacted note 093,2025-12-16T09:45:00,Person 056,,,Person 081,Organization 013,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 093,,Physical,NM,Anytown,87093,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 093,Redacted note 093,TRUE,TRUE,FALSE,TRUE,Redacted note 093,Redacted note 093,330549,3600679,13N,,,LRG-05315,12/4/1970,OSE,375,293,OSE,Submersible,,FALSE,FALSE,0.5,0.18,TOC,Livestock,"Active, pumping well",,,Redacted note 093,,,FALSE,2025-12-16 10:10:00,Person 056,Steel-tape measurement,0.18,,294.65,Water level accurate to within two hundreths of a foot,Redacted note 093, +Water Level 
Network,WL-xxxx,Redacted note 094,2025-12-16T11:00:00,Person 056,,,Person 081,Organization 014,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 094,,Physical,NM,Anytown,87094,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 094,Redacted note 094,TRUE,TRUE,FALSE,TRUE,Redacted note 094,Redacted note 094,338536,3595230,13N,,,,,,,,,Open,,TRUE,FALSE,0.5,0.8,TOC,Unused,"Inactive, exists but not used",,,,Redacted note 094,,FALSE,2025-12-16 11:10:00,Person 056,Electric tape measurement (E-probe),0.8,,,,Redacted note 094, +Water Level Network,WL-xxxx,Redacted note 095,2025-12-17T12:45:00,Person 056,,,Person 081,Organization 014,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 095,,Physical,NM,Anytown,87095,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 095,Redacted note 095,TRUE,TRUE,FALSE,TRUE,Redacted note 095,Redacted note 095,336697,3610187,13N,,,LRG-04676,12/31/1902,OSE,,,,Submersible,,FALSE,FALSE,0.5,1.61,Port in cap,Livestock,"Active, pumping well",,,Redacted note 095,,,TRUE,2025-12-17 12:55:00,Person 056,Steel-tape measurement,1.61,,248.96,Water level accurate to within two hundreths of a foot,,Redacted note 095 +Water Level Network,WL-xxxx,Redacted note 096,2025-12-17T11:30:01,Person 056,,,Person 081,Organization 014,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 096,,Physical,NM,Anytown,87096,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 096,Redacted note 096,TRUE,TRUE,FALSE,TRUE,Redacted note 096,Redacted note 096,336348,3610095,13N,,,,,,,,,Submersible,,FALSE,FALSE,0.67,0.3,TOC at power cable hole,Domestic,"Active, pumping well",,,Redacted note 096,,,FALSE,2025-12-17 11:40:01,Person 056,Steel-tape measurement,0.3,,260.96,Water level accurate to within two hundreths of a foot,Redacted note 096, 
+Water Level Network,WL-xxxx,Redacted note 097,2025-12-16T14:00:00,Person 056,,,Person 081,Organization 014,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 097,,Physical,NM,Anytown,87097,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 097,,TRUE,TRUE,FALSE,TRUE,Redacted note 097,Redacted note 097,332463,3618452,13N,,,,,,,,,Submersible,,FALSE,FALSE,0.5,1.38,Bottom of bent pipe in cap,Livestock,"Active, pumping well",,,Redacted note 097,,,FALSE,2025-12-16 14:09:00,Person 056,Steel-tape measurement,1.38,,239.2,Water level accurate to within two hundreths of a foot,, +Water Level Network,WL-xxxx,Redacted note 098,2025-12-16T09:00:00,Person 056,,,Person 081,Organization 013,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 098,,Physical,NM,Anytown,87098,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 098,,TRUE,TRUE,FALSE,TRUE,Redacted note 098,Redacted note 098,320319,3602573,13N,,,ID 4217 C,,,,,,Submersible,,FALSE,FALSE,0.5,,,Livestock,"Active, pumping well",,,Redacted note 098,Redacted note 098,,FALSE,,,,,,,,Redacted note 098, +Water Level Network,WL-xxxx,Redacted note 099,2025-12-16T10:30:00,Person 056,,,Person 081,Organization 014,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 099,,Physical,NM,Anytown,87099,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 099,Redacted note 099,TRUE,TRUE,FALSE,TRUE,Redacted note 099,Redacted note 099,335957,3600935,13N,,,LRG-15829-POD1,7/25/2014,OSE,492,390,OSE,Submersible,,FALSE,FALSE,0.43,,,Unused,"Inactive, exists but not used",,,Redacted note 099,Redacted note 099,,FALSE,,,,,,,,Redacted note 099, +Water Level Network,WL-xxxx,Redacted note 100,2025-12-16T16:40:02,Person 056,,,Person 081,Organization 
014,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 100,,Physical,NM,Anytown,87100,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 100,Redacted note 100,TRUE,TRUE,FALSE,TRUE,Redacted note 100,Redacted note 100,326608,3609014,13N,,,,1906,Owner,300,274,Owner,Submersible,,FALSE,FALSE,0.3,1.86,Hole in cap,Unused,"Inactive, exists but not used",,,Redacted note 100,,,FALSE,2025-12-16 16:50:00,Person 056,Steel-tape measurement,1.86,,276.31,Water level accurate to within two hundreths of a foot,Redacted note 100, +Water Level Network,WL-xxxx,Redacted note 101,2025-12-17T10:00:01,Person 056,,,Person 081,Organization 014,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 101,,Physical,NM,Anytown,87101,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 101,Redacted note 101,TRUE,TRUE,FALSE,TRUE,Redacted note 101,Redacted note 101,333235,3607526,13N,,,LRG-4677,4/30/1984,OSE,415,280,OSE,Submersible,,FALSE,FALSE,0.5,1.4,Top of pipe fitting in cap,Livestock,"Active, pumping well",,,,,,FALSE,2025-12-17 11:00:01,Person 056,Steel-tape measurement,1.4,,285.98,Water level accurate to within two hundreths of a foot,Redacted note 101, +Water Level Network,WL-xxxx,Redacted note 102,2025-12-17T13:15:02,Person 056,,,Person 081,Organization 014,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 102,,Physical,NM,Anytown,87102,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 102,Redacted note 102,TRUE,TRUE,FALSE,TRUE,Redacted note 102,Redacted note 102,343020,3613531,13N,,,,1912,Owner,510,229,Owner,Open,,TRUE,TRUE,0.5,1.8,TOC,Unused,"Inactive, exists but not used",,,Redacted note 102,,,FALSE,2025-12-17 11:00:01,Person 056,Electric tape measurement (E-probe),1.8,,433.8,Water level accurate to within two hundreths of a foot,Redacted note 
102, +Water Level Network,WL-xxxx,Redacted note 103,2025-12-16T14:00:00,Person 056,,,Person 081,Organization 014,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 103,,Physical,NM,Anytown,87103,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 103,Redacted note 103,TRUE,TRUE,FALSE,TRUE,Redacted note 103,Redacted note 103,329024,3620539,13N,,,,3/18/1905,Owner,350,232,Owner,Submersible,,FALSE,FALSE,0.25,0.4,Top of cap on casing,Livestock,"Inactive, exists but not used",,,Redacted note 103,,,FALSE,2025-12-16 15:15:00,Person 056,Steel-tape measurement,0.4,,246.1,Water level accurate to within two hundreths of a foot,Redacted note 103, +Water Level Network,WL-xxxx,Redacted note 104,2025-12-16T15:37:00,Person 056,,,Person 081,Organization 014,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 104,,Physical,NM,Anytown,87104,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 104,,TRUE,TRUE,FALSE,TRUE,Redacted note 104,Redacted note 104,327482,3614828,13N,,,,1967,Owner,350,309,Owner,Submersible,,FALSE,FALSE,0.5,0.97,Hole in cap,Livestock,"Active, pumping well",,,Redacted note 104,,,FALSE,2025-12-16 16:15:00,Person 056,Steel-tape measurement,0.97,,305.4,Water level accurate to nearest tenth of a foot (USGS accuracy level),Redacted note 104, +Water Level Network,WL-xxxx,Redacted note 105,2025-12-17T09:00:01,Person 056,,,Person 081,Organization 014,Manager,Primary,505-555-0071,Primary,,,user047@example.com,Primary,,,Address Line 105,,Physical,NM,Anytown,87105,,,,,,,,Manager,Secondary,505-555-0072,Primary,,,user048@example.com,Primary,,,,,,,,,,,,,,,Redacted note 105,Redacted note 105,TRUE,TRUE,FALSE,TRUE,Redacted note 105,Redacted note 105,329963,3604962,13N,,,LRG-07947,3/20/1992,OSE,534,320,OSE,Submersible,,FALSE,FALSE,0.5,1.6,Port in cap,Unused,"Inactive, exists but not used",,,Redacted note 
105,,,FALSE,2025-12-17 9:45:01,Person 056,Steel-tape measurement,1.6,,328.4,Water level accurate to nearest tenth of a foot (USGS accuracy level),Redacted note 105, +Gila River,,Redacted note 106,1/12/2026 14:37,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 106,Redacted note 106,730484,3658132,12N,4625.92,Survey-grade GPS,,,,10.93,,Measured,Open,,TRUE,TRUE,,1.63,TOC,Observation,"Inactive, exists but not used",,,Redacted note 106,,,FALSE,1/12/2026 14:37,Person 049,Electric tape measurement (E-probe),,,9.08,Water level accurate to within two hundreths of a foot,, +Gila River,,Redacted note 107,1/12/2026 12:38,Person 049,,,Person 082,Organization 015,Contractor,Secondary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 107,Redacted note 107,730322,3658119,12N,4624.38,Survey-grade GPS,,,,10.93,,Measured,Open,,TRUE,TRUE,,0.33,TOC,Observation,"Inactive, exists but not used",,,Redacted note 107,,,FALSE,1/12/2026 12:38,Person 049,Electric tape measurement (E-probe),,,7.59,Water level accurate to within two hundreths of a foot,, +Gila River,,Redacted note 108,1/12/2026 12:36,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 108,Redacted note 108,730318,3658119,12N,4625.03,Survey-grade GPS,,,,12.34,,Measured,Open,,TRUE,TRUE,,0.64,TOC,Observation,"Inactive, exists but not used",,,Redacted note 108,,,FALSE,1/12/2026 12:36,Person 049,Electric tape measurement (E-probe),,,8.61,Water level accurate to within two hundreths of a foot,, +Gila River,,Redacted note 109,1/12/2026 12:28,Person 049,,,Person 
082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 109,Redacted note 109,730255,3658153,12N,4624.02,Survey-grade GPS,,,,8.56,,Measured,Open,,TRUE,TRUE,,1.98,TOC,Observation,"Inactive, exists but not used",,,Redacted note 109,,,FALSE,1/12/2026 12:28,Person 049,Electric tape measurement (E-probe),,,7.52,Water level accurate to within two hundreths of a foot,, +Gila River,,Redacted note 110,1/12/2026 13:50,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 110,Redacted note 110,730409,3657504,12N,4619.09,Survey-grade GPS,,,,15.45,,Measured,Open,,TRUE,TRUE,,1.03,TOC,Observation,"Inactive, exists but not used",,,Redacted note 110,,,FALSE,1/12/2026 13:50,Person 049,Electric tape measurement (E-probe),,,11.71,Water level accurate to within two hundreths of a foot,, +Gila River,,Redacted note 111,1/12/2026 13:47,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 111,Redacted note 111,730396,3657505,12N,4612.01,Survey-grade GPS,,,,10.17,,Measured,Open,,TRUE,TRUE,,1.25,TOC,Observation,"Inactive, exists but not used",,,Redacted note 111,,,FALSE,1/12/2026 13:47,Person 049,Electric tape measurement (E-probe),,,5.29,Water level accurate to within two hundreths of a foot,, +Gila River,,Redacted note 112,1/12/2026 13:40,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 112,Redacted note 
112,730322,3657516,12N,4615.06,Survey-grade GPS,,,,12.47,,Measured,Open,,TRUE,TRUE,,0.65,TOC,Observation,"Inactive, exists but not used",,,Redacted note 112,,,FALSE,1/12/2026 13:40,Person 049,Electric tape measurement (E-probe),,,8.03,Water level accurate to within two hundreths of a foot,, +Gila River,,Redacted note 113,1/12/2026 13:17,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 113,Redacted note 113,730143,3657537,12N,4610.83,Survey-grade GPS,,,,9.15,,Measured,Open,,TRUE,TRUE,,1.13,TOC,Observation,"Inactive, exists but not used",,,Redacted note 113,,,FALSE,1/12/2026 13:17,Person 049,Electric tape measurement (E-probe),,,3.85,Water level accurate to within two hundreths of a foot,, +Gila River,,Redacted note 114,1/13/2026 11:42,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 114,Redacted note 114,729147,3655595,12N,4583.63,Survey-grade GPS,,,,12.01,,Measured,Open,,TRUE,TRUE,,1.6,TOC,Observation,"Inactive, exists but not used",,,Redacted note 114,,,FALSE,1/13/2026 11:42,Person 049,Electric tape measurement (E-probe),,,2.9,Water level accurate to within two hundreths of a foot,, +Gila River,,Redacted note 115,1/13/2026 11:28,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 115,Redacted note 115,729005,3655639,12N,4584.19,Survey-grade GPS,,,,12.11,,Measured,Open,,TRUE,TRUE,,1.18,TOC,Observation,"Inactive, exists but not used",,,Redacted note 115,,,FALSE,1/13/2026 11:28,Person 049,Electric tape measurement (E-probe),,,6.06,Water 
level accurate to within two hundreths of a foot,, +Gila River,,Redacted note 116,1/13/2026 11:06,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 116,Redacted note 116,728866,3655679,12N,4583.53,Survey-grade GPS,,,,14.57,,Measured,Open,,TRUE,TRUE,,0.07,TOC,Observation,"Inactive, exists but not used",,,Redacted note 116,,,FALSE,1/13/2026 11:06,Person 049,Electric tape measurement (E-probe),,,14.3,Water level accurate to within two hundreths of a foot,, +Gila River,,Redacted note 117,1/13/2026 11:12,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 117,Redacted note 117,728812,3655674,12N,4584.06,Survey-grade GPS,,,,14.07,,Measured,Open,,TRUE,TRUE,,1.37,TOC,Observation,"Inactive, exists but not used",,,Redacted note 117,,,FALSE,1/13/2026 11:12,Person 049,Electric tape measurement (E-probe),,,10.82,Water level accurate to within two hundreths of a foot,, +Gila River,,Redacted note 118,,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 118,Redacted note 118,724155,3646184,12N,4452.95,Survey-grade GPS,,,,15.29,,Measured,Open,,TRUE,TRUE,,0.82,TOC,Observation,"Inactive, exists but not used",,,Redacted note 118,,,FALSE,,,,,,,,, +Gila River,,Redacted note 119,,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 119,Redacted note 119,724101,3646130,12N,4454.46,Survey-grade 
GPS,,,,19.39,,Measured,Open,,TRUE,TRUE,,1.51,TOC,Observation,"Inactive, exists but not used",,,Redacted note 119,,,FALSE,,,,,,,,, +Gila River,,Redacted note 120,,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 120,Redacted note 120,724043,3646057,12N,4451.90,Survey-grade GPS,,,,9.09,,Measured,Open,,TRUE,TRUE,,1.21,TOC,Observation,"Inactive, exists but not used",,,Redacted note 120,,,FALSE,,,,,,,,, +Gila River,,Redacted note 121,,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 121,Redacted note 121,724048,3646047,12N,4452.89,Survey-grade GPS,,,,12.17,,Measured,Open,,TRUE,TRUE,,1.08,TOC,Observation,"Inactive, exists but not used",,,Redacted note 121,,,FALSE,,,,,,,,, +Gila River,,Redacted note 122,1/13/2026 13:48,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 122,Redacted note 122,724447,3634150,12N,4326.84,Survey-grade GPS,,,,17.32,,Measured,Open,,TRUE,TRUE,,1.80,TOC,Observation,"Inactive, exists but not used",,,Redacted note 122,,,FALSE,1/13/2026 13:48,Person 049,Electric tape measurement (E-probe),,,11.95,Water level accurate to within two hundreths of a foot,, +Gila River,,Redacted note 123,1/13/2026 14:00,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 123,Redacted note 123,724333,3634083,12N,4325.10,Survey-grade 
GPS,,,,16.21,,Measured,Open,,TRUE,TRUE,,1.18,TOC,Observation,"Inactive, exists but not used",,,Redacted note 123,,,FALSE,1/13/2026 14:00,Person 049,Electric tape measurement (E-probe),,,10.03,Water level accurate to within two hundreths of a foot,, +Gila River,,Redacted note 124,1/13/2026 14:11,Person 049,,,Person 082,Organization 015,Contractor,Primary,505-555-0073,,,,user049@example.com,,,,,,,,,,,,,,,,Person 083,Manager,Secondary,,Primary,,,user050@example.com,Primary,,,,,,,,,,,,,,,,,TRUE,TRUE,TRUE,TRUE,Redacted note 124,Redacted note 124,724192,3634012,12N,4322.34,Survey-grade GPS,,,,15.24,,Measured,Open,,TRUE,TRUE,,1.11,TOC,Observation,"Inactive, exists but not used",,,Redacted note 124,,,FALSE,1/13/2026 14:11,Person 049,Electric tape measurement (E-probe),,,6.65,Water level accurate to within two hundreths of a foot,, +Water Level Network,,Redacted note 125,1/13/2026 16:14,Person 049,,,Person 084,Organization 016,owner,Primary,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,TRUE,TRUE,,TRUE,,,728132,3655594,12N,,,,,,70,,Owner,Submersible,,FALSE,FALSE,0.52,0.7,TOC,Domestic,"Active, pumping well",,,,,,TRUE,1/13/2026 16:14,Person 049,Steel-tape measurement,,,18.48,Water level accurate to within two hundreths of a foot,, +Water Level Network,,Redacted note 126,1/13/2026 16:46,Person 049,,,Person 082,,owner,Primary,505-555-0073,,,,user049@example.com,,,,Address Line 126,,Primary,NM,Anytown,87126,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,TRUE,TRUE,,TRUE,,,723327,3649169,12N,,,,,,,,,Submersible,,FALSE,FALSE,,0.75,TOC,Domestic,"Active, pumping well",,,,,,TRUE,1/13/2026 16:46,Person 049,Steel-tape measurement,,,25.58,Water level accurate to within two hundreths of a foot,, +Water Level Network,,Redacted note 127,,Person 049,,,Person 085,Organization 017,Water Operator,Primary,,,,,,,,,,,,NM,Anytown,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,TRUE,TRUE,,TRUE,,Redacted note 127,752465.13,3534595.03,12N,,,,,,,,,Submersible,,FALSE,FALSE,0.83,1.9,TOC,Production,"Active, pumping 
well",,,,,,TRUE,1/28/2026 15:00,Person 049,Steel-tape measurement,,,299.35,Water level accurate to within two hundreths of a foot,, +Water Level Network,,Redacted note 128,1/16/2026,Person 049,,,Person 086,Organization 018,owner,Primary,,,,,user051@example.com,,,,Address Line 128,,,NM,Anytown,87128,,,,,,,Person 087,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 128,TRUE,TRUE,,TRUE,,,755935.79,3641249.74,12N,,,,,,,,,Submersible,,FALSE,FALSE,0.83,1.3,TOC,Production,"Active, pumping well",,,,,,TRUE,1/16/2026 12:00,Person 049,Sonic water level meter (acoustic pulse),,,590,Water level accurate to within one foot,, +Water Level Network,,Redacted note 129,1/16/2026,Person 049,,,Person 086,Organization 018,owner,Primary,,,,,user051@example.com,,,,Address Line 129,,,NM,Anytown,87129,,,,,,,Person 087,,,,,,,,,,,,,,,,,,,,,,,,Redacted note 129,TRUE,TRUE,,TRUE,,,756655.59,3641238.69,12N,,,,,,,,,Submersible,,FALSE,FALSE,0.83,1.05,hole in top of casing,Production,"Active, pumping well",,,,,,TRUE,1/16/2026 13:00,Person 049,Sonic water level meter (acoustic pulse),,,759.7,Water level accurate to within one foot,, diff --git a/tests/features/environment.py b/tests/features/environment.py index a02c1273..0e9ada2a 100644 --- a/tests/features/environment.py +++ b/tests/features/environment.py @@ -16,8 +16,6 @@ import random from datetime import datetime, timedelta -from sqlalchemy import select - from db import ( Location, Thing, @@ -48,6 +46,8 @@ Sample, ) from db.engine import session_ctx +from services.util import get_bool_env +from sqlalchemy import select from transfers.transfer import _drop_and_rebuild_db @@ -502,7 +502,8 @@ def add_geologic_formation(context, session, formation_code, well): def before_all(context): context.objects = {} - rebuild = False + rebuild_raw = get_bool_env("DROP_AND_REBUILD_DB") + rebuild = rebuild_raw if isinstance(rebuild_raw, bool) else False erase_data = False if rebuild: _drop_and_rebuild_db() diff --git a/tests/features/steps/admin-minor-trace-chemistry.py 
b/tests/features/steps/admin-minor-trace-chemistry.py index acfcb434..9b193168 100644 --- a/tests/features/steps/admin-minor-trace-chemistry.py +++ b/tests/features/steps/admin-minor-trace-chemistry.py @@ -18,11 +18,10 @@ These are fast integration tests - no HTTP calls, direct module testing. """ +from admin.views.minor_trace_chemistry import MinorTraceChemistryAdmin from behave import when, then from behave.runner import Context -from admin.views.minor_trace_chemistry import MinorTraceChemistryAdmin - ADMIN_IDENTITY = MinorTraceChemistryAdmin.identity ADMIN_BASE_URL = f"/admin/{ADMIN_IDENTITY}" @@ -42,7 +41,7 @@ def _ensure_admin_mounted(context): @when("I check the registered admin views") -def step_impl(context: Context): +def step_when_i_check_the_registered_admin_views(context: Context): from admin.config import create_admin from fastapi import FastAPI @@ -52,7 +51,9 @@ def step_impl(context: Context): @then('"{view_name}" should be in the list of admin views') -def step_impl(context: Context, view_name: str): +def step_then_view_name_should_be_in_the_list_of_admin_views( + context: Context, view_name: str +): assert view_name in context.admin_views, ( f"Expected '{view_name}' to be registered in admin views. 
" f"Found: {context.admin_views}" @@ -60,7 +61,9 @@ def step_impl(context: Context, view_name: str): @then("the Minor Trace Chemistry admin view should not allow create") -def step_impl(context: Context): +def step_then_the_minor_trace_chemistry_admin_view_should_not_allow_create( + context: Context, +): from db.nma_legacy import NMA_MinorTraceChemistry view = MinorTraceChemistryAdmin(NMA_MinorTraceChemistry) @@ -68,7 +71,9 @@ def step_impl(context: Context): @then("the Minor Trace Chemistry admin view should not allow edit") -def step_impl(context: Context): +def step_then_the_minor_trace_chemistry_admin_view_should_not_allow_edit( + context: Context, +): from db.nma_legacy import NMA_MinorTraceChemistry view = MinorTraceChemistryAdmin(NMA_MinorTraceChemistry) @@ -76,7 +81,9 @@ def step_impl(context: Context): @then("the Minor Trace Chemistry admin view should not allow delete") -def step_impl(context: Context): +def step_then_the_minor_trace_chemistry_admin_view_should_not_allow_delete( + context: Context, +): from db.nma_legacy import NMA_MinorTraceChemistry view = MinorTraceChemistryAdmin(NMA_MinorTraceChemistry) @@ -84,13 +91,15 @@ def step_impl(context: Context): @when("I request the Minor Trace Chemistry admin list page") -def step_impl(context: Context): +def step_when_i_request_the_minor_trace_chemistry_admin_list_page(context: Context): _ensure_admin_mounted(context) context.response = context.client.get(f"{ADMIN_BASE_URL}/list") @when("I request the Minor Trace Chemistry admin detail page for an existing record") -def step_impl(context: Context): +def step_when_i_request_the_minor_trace_chemistry_admin_detail_page_for( + context: Context, +): _ensure_admin_mounted(context) from db.engine import session_ctx from db.nma_legacy import NMA_MinorTraceChemistry @@ -107,14 +116,18 @@ def step_impl(context: Context): @then("the response status should be {status_code:d}") -def step_impl(context: Context, status_code: int): +def 
step_then_the_response_status_should_be_status_code_d( + context: Context, status_code: int +): assert ( context.response.status_code == status_code ), f"Expected status {status_code}, got {context.response.status_code}" @then("the Minor Trace Chemistry admin view should have these fields configured:") -def step_impl(context: Context): +def step_then_the_minor_trace_chemistry_admin_view_should_have_these_fields( + context: Context, +): from admin.views.minor_trace_chemistry import MinorTraceChemistryAdmin expected_fields = [row["field"] for row in context.table] diff --git a/tests/features/steps/api_common.py b/tests/features/steps/api_common.py index 1899a2c0..98d14cd9 100644 --- a/tests/features/steps/api_common.py +++ b/tests/features/steps/api_common.py @@ -14,8 +14,6 @@ # limitations under the License. # =============================================================================== from behave import then, given, when -from starlette.testclient import TestClient - from core.dependencies import ( viewer_function, amp_viewer_function, @@ -24,6 +22,7 @@ amp_admin_function, ) from core.initializers import register_routes +from starlette.testclient import TestClient @given("a functioning api") @@ -65,7 +64,7 @@ def closure(): @when("the user retrieves the well by ID via path parameter") -def step_impl(context): +def step_when_the_user_retrieves_the_well_by_id_via_path_parameter(context): context.response = context.client.get( f"thing/water-well/{context.objects['wells'][0].id}" ) @@ -76,7 +75,7 @@ def step_impl(context): @then( "null values in the response should be represented as JSON null (not placeholder strings)" ) -def step_impl(context): +def step_step_step(context): data = context.response.json() for k, v in data.items(): if v == "": @@ -84,14 +83,14 @@ def step_impl(context): @then("I should receive a successful response") -def step_impl(context): +def step_then_i_should_receive_a_successful_response(context): assert ( context.response.status_code == 200 
), f"Unexpected response: {context.response.text}" @then("the system returns a 201 Created status code") -def step_impl(context): +def step_then_the_system_returns_a_201_created_status_code(context): assert context.response.status_code == 201, ( f"Unexpected response status code " f"{context.response.status_code}. " @@ -100,35 +99,35 @@ def step_impl(context): @then("the system should return a 200 status code") -def step_impl(context): +def step_then_the_system_should_return_a_200_status_code(context): assert ( context.response.status_code == 200 ), f"Unexpected response status code {context.response.status_code}" @then("the system should return a 404 status code") -def step_impl(context): +def step_then_the_system_should_return_a_404_status_code(context): assert ( context.response.status_code == 404 ), f"Unexpected response status code {context.response.status_code}" @then("the system returns a 400 status code") -def step_impl(context): +def step_then_the_system_returns_a_400_status_code(context): assert ( context.response.status_code == 400 ), f"Unexpected response status code {context.response.status_code}" @then("the system returns a 422 Unprocessable Entity status code") -def step_impl(context): +def step_then_the_system_returns_a_422_unprocessable_entity_status_code(context): assert ( context.response.status_code == 422 ), f"Unexpected response status code {context.response.status_code}" @then("the response should be paginated") -def step_impl(context): +def step_then_the_response_should_be_paginated(context): data = context.response.json() assert "items" in data, "Response is not paginated" assert "total" in data, "Response is not paginated" @@ -137,14 +136,14 @@ def step_impl(context): @then("the system should return a response in JSON format") -def step_impl(context): +def step_then_the_system_should_return_a_response_in_json_format(context): assert ( context.response.headers["Content-Type"] == "application/json" ), f"Unexpected response type 
{context.response.headers['Content-Type']}" @then("the items should be an empty list") -def step_impl(context): +def step_then_the_items_should_be_an_empty_list(context): data = context.response.json() assert len(data["items"]) == 0, f'Unexpected items {data["items"]}' assert data["total"] == 0, f'Unexpected total {data["total"]}' diff --git a/tests/features/steps/cli-associate-assets.py b/tests/features/steps/cli-associate-assets.py index e7b8ecef..ad4cfdf9 100644 --- a/tests/features/steps/cli-associate-assets.py +++ b/tests/features/steps/cli-associate-assets.py @@ -11,16 +11,15 @@ from behave import given, when, then from behave.runner import Context -from sqlalchemy import select - from cli.service_adapter import associate_assets from db import Thing, Asset from db.engine import session_ctx from services.gcs_helper import get_storage_bucket +from sqlalchemy import select @given('a local directory named "asset_import_batch"') -def step_impl(context: Context): +def step_given_a_local_directory_named_asset_import_batch(context: Context): context.source_directory = ( Path("tests") / "features" / "data" / "asset_import_batch" ) @@ -29,7 +28,9 @@ def step_impl(context: Context): @given('the directory contains a manifest file named "manifest.txt"') -def step_impl(context: Context): +def step_given_the_directory_contains_a_manifest_file_named_manifest_txt( + context: Context, +): context.manifest_file = context.source_directory / "manifest.txt" assert context.manifest_file.exists() @@ -37,7 +38,7 @@ def step_impl(context: Context): @given( "the manifest file is a 2-column CSV with headers asset_file_name and thing_name" ) -def step_impl(context: Context): +def step_step_step(context: Context): header = ["asset_file_name", "thing_name"] with open(context.manifest_file) as f: reader = csv.DictReader(f) @@ -48,7 +49,9 @@ def step_impl(context: Context): @given("the directory contains a set of asset files referenced in the manifest") -def step_impl(context: Context): +def 
step_given_the_directory_contains_a_set_of_asset_files_referenced_in( + context: Context, +): for a in context.asset_file_names: p = context.source_directory / a assert p.exists() @@ -60,7 +63,9 @@ def step_impl(context: Context): @given('the manifest contains a row for "{asset_file_name}" with thing "{thing_name}"') -def step_impl(context: Context, asset_file_name, thing_name): +def step_given_the_manifest_contains_a_row_for_asset_file_name_with( + context: Context, asset_file_name, thing_name +): with open(context.manifest_file) as f: reader = csv.DictReader(f) for r in reader: @@ -72,7 +77,9 @@ def step_impl(context: Context, asset_file_name, thing_name): @given('the directory contains a asset file named "{asset_file_name}"') -def step_impl(context: Context, asset_file_name): +def step_given_the_directory_contains_a_asset_file_named_asset_file_name( + context: Context, asset_file_name +): for path in context.source_directory.iterdir(): if path.name == asset_file_name: break @@ -81,13 +88,15 @@ def step_impl(context: Context, asset_file_name): @when('I run the "associate_assets" command on the directory') -def step_impl(context: Context): +def step_when_i_run_the_associate_assets_command_on_the_directory(context: Context): uris = associate_assets(context.source_directory) context.uris = uris @then('the app should upload "{asset_file_name}" to Google Cloud Storage') -def step_impl(context: Context, asset_file_name): +def step_then_the_app_should_upload_asset_file_name_to_google_cloud( + context: Context, asset_file_name +): bucket = get_storage_bucket() head, ext = asset_file_name.split(".") for uri in context.uris: @@ -104,7 +113,7 @@ def step_impl(context: Context, asset_file_name): @then( 'the app should create an association between the uploaded asset and thing "{thing_name}"' ) -def step_impl(context: Context, thing_name): +def step_step_step_2(context: Context, thing_name): with session_ctx() as session: sql = select(Thing).where(Thing.name == thing_name) 
thing = session.scalars(sql).one_or_none() @@ -125,18 +134,22 @@ def step_impl(context: Context, thing_name): @given( 'the manifest contains a row for "missing-asset.jpg" with a valid thing_name and asset_type' ) -def step_impl(context: Context): +def step_step_step_3(context: Context): context.manifest_file = context.source_directory / "manifest-missing-asset.txt" assert context.manifest_file.exists() @given('the directory does not contain a file named "missing-asset.jpg"') -def step_impl(context: Context): +def step_given_the_directory_does_not_contain_a_file_named_missing_asset( + context: Context, +): assert not (context.source_directory / "missing-asset.jpg").exists() @then("each photo listed in the manifest should be uploaded exactly once to GCS") -def step_impl(context: Context): +def step_then_each_photo_listed_in_the_manifest_should_be_uploaded_exactly( + context: Context, +): bucket = get_storage_bucket() for uri in context.uris: blob = uri.split("/")[-1] @@ -146,7 +159,7 @@ def step_impl(context: Context): @then( "each uploaded photo should be associated exactly once to its corresponding thing" ) -def step_impl(context: Context): +def step_step_step_4(context: Context): with session_ctx() as session: for uri in context.uris: sql = select(Asset).where(Asset.uri == uri) @@ -159,7 +172,7 @@ def step_impl(context: Context): @when( 'I run the "associate photos" command on the same directory again with the same manifest' ) -def step_impl(context: Context): +def step_step_step_5(context: Context): uris = associate_assets(context.source_directory) context.uris = uris diff --git a/tests/features/steps/geojson-response.py b/tests/features/steps/geojson-response.py index 4244ec4e..ecddd130 100644 --- a/tests/features/steps/geojson-response.py +++ b/tests/features/steps/geojson-response.py @@ -18,34 +18,34 @@ @when("the user requests all the wells as geojson") -def step_impl(context): +def step_when_the_user_requests_all_the_wells_as_geojson(context): 
context.response = context.client.get( "/geospatial", params={"thing_type": "water well"} ) @then("the system should return a response in GEOJSON format") -def step_impl(context): +def step_then_the_system_should_return_a_response_in_geojson_format(context): assert context.response.headers["Content-Type"] == "application/geo+json" @then("the response should be a feature collection") -def step_impl(context): +def step_then_the_response_should_be_a_feature_collection(context): assert context.response.json()["type"] == "FeatureCollection" @then("the feature collection should have 3 features") -def step_impl(context): +def step_then_the_feature_collection_should_have_3_features(context): assert len(context.response.json()["features"]) == 3 @when("the user requests all the wells for group Collabnet") -def step_impl(context): +def step_when_the_user_requests_all_the_wells_for_group_collabnet(context): context.response = context.client.get("/geospatial", params={"group": "Collabnet"}) @then("the feature collection should have 2 features") -def step_impl(context): +def step_then_the_feature_collection_should_have_2_features(context): obj = context.response.json() features = obj["features"] assert ( diff --git a/tests/features/steps/location-notes.py b/tests/features/steps/location-notes.py index 8ec7486c..f2350564 100644 --- a/tests/features/steps/location-notes.py +++ b/tests/features/steps/location-notes.py @@ -17,43 +17,43 @@ @when("the user retrieves the location by ID via path parameter") -def step_impl(context): +def step_when_the_user_retrieves_the_location_by_id_via_path_parameter(context): location_id = context.objects["locations"][0].id context.response = context.client.get(f"location/{location_id}") @then("the response should include a current location") -def step_impl(context): +def step_then_the_response_should_include_a_current_location(context): assert context.response.json()["current_location"] @then("the current location should include notes") -def 
step_impl(context): +def step_then_the_current_location_should_include_notes(context): context.notes = context.response.json()["current_location"]["properties"]["notes"] assert context.notes @then("the notes should be a list of dictionaries") -def step_impl(context): +def step_then_the_notes_should_be_a_list_of_dictionaries(context): assert isinstance(context.notes, list) assert all(isinstance(n, dict) for n in context.notes) @then('each note dictionary should have "content" and "note_type" keys') -def step_impl(context): +def step_then_each_note_dictionary_should_have_content_and_note_type_keys(context): for note in context.notes: assert "content" in note assert "note_type" in note @then("each note in the notes list should be a non-empty string") -def step_impl(context): +def step_then_each_note_in_the_notes_list_should_be_a_non(context): for note in context.notes: assert note["content"], "Note is empty" @then("the location response should include notes") -def step_impl(context): +def step_then_the_location_response_should_include_notes(context): context.notes = context.response.json()["notes"] assert context.notes diff --git a/tests/features/steps/sensor-notes.py b/tests/features/steps/sensor-notes.py index c40e60de..0323158e 100644 --- a/tests/features/steps/sensor-notes.py +++ b/tests/features/steps/sensor-notes.py @@ -18,19 +18,19 @@ @when("the user requests the sensor with ID 1") -def step_impl(context: Context): +def step_when_the_user_requests_the_sensor_with_id_1(context: Context): context.response = context.client.get("sensor/1") @when("the user requests the sensor with ID 9999") -def step_impl(context: Context): +def step_when_the_user_requests_the_sensor_with_id_9999(context: Context): context.response = context.client.get("sensor/9999") @then( "the response should include an error message indicating the sensor was not found" ) -def step_impl(context: Context): +def step_step_step(context: Context): assert {"detail": "Sensor with ID 9999 not found."} == 
context.response.json() diff --git a/tests/features/steps/thing-path.py b/tests/features/steps/thing-path.py index 0452ad90..e6cf2692 100644 --- a/tests/features/steps/thing-path.py +++ b/tests/features/steps/thing-path.py @@ -18,30 +18,30 @@ @when('the user requests things with type "water well"') -def step_impl(context): +def step_when_the_user_requests_things_with_type_water_well(context): context.response = context.client.get("/thing/water-well") @then("the response should include at least one thing") -def step_impl(context): +def step_then_the_response_should_include_at_least_one_thing(context): data = context.response.json() context.data = data["items"] assert len(context.data) > 0 @then('the response should only include things of type "water well"') -def step_impl(context): +def step_then_the_response_should_only_include_things_of_type_water_well(context): for d in context.data: assert d["thing_type"] == "water well" @when('the user requests things with type "spring"') -def step_impl(context): +def step_when_the_user_requests_things_with_type_spring(context): context.response = context.client.get("/thing/spring") @then('the response should only include things of type "spring"') -def step_impl(context): +def step_then_the_response_should_only_include_things_of_type_spring(context): for d in context.data: assert d["thing_type"] == "spring" diff --git a/tests/features/steps/transducer.py b/tests/features/steps/transducer.py index 9030ba02..e7925f77 100644 --- a/tests/features/steps/transducer.py +++ b/tests/features/steps/transducer.py @@ -14,14 +14,13 @@ # limitations under the License. 
# =============================================================================== from behave import when, then, given -from sqlalchemy import select - from db import Thing, TransducerObservation from db.engine import session_ctx +from sqlalchemy import select @given("the system has valid well and transducer data in the database") -def step_impl(context): +def step_given_the_system_has_valid_well_and_transducer_data_in_the(context): with session_ctx() as session: sql = select(Thing).where(Thing.thing_type == "water well") wells = session.execute(sql).unique().scalars().all() @@ -33,27 +32,29 @@ def step_impl(context): @when("the user requests transducer data for a non-existing well") -def step_impl(context): +def step_when_the_user_requests_transducer_data_for_a_non_existing_well(context): context.response = context.client.get( "/observation/transducer-groundwater-level?thing_id=9999" ) @when("the user requests transducer data for a well") -def step_impl(context): +def step_when_the_user_requests_transducer_data_for_a_well(context): context.response = context.client.get( f"/observation/transducer-groundwater-level?thing_id={context.objects['wells'][0].id}", ) @then("each page should be an array of transducer data") -def step_impl(context): +def step_then_each_page_should_be_an_array_of_transducer_data(context): data = context.response.json() assert len(data["items"]) > 0, "Expected at least one transducer data entry" @then("each transducer data entry should include a timestamp, value, status") -def step_impl(context): +def step_then_each_transducer_data_entry_should_include_a_timestamp_value_status( + context, +): data = context.response.json() items = data["items"][0] item = items["observation"] @@ -69,7 +70,7 @@ def step_impl(context): @then("the timestamp should be in ISO 8601 format") -def step_impl(context): +def step_then_the_timestamp_should_be_in_iso_8601_format(context): # assert that time stamp is in ISO 8601 format from datetime import datetime @@ -80,12 
+81,12 @@ def step_impl(context): @then("the value should be a numeric type") -def step_impl(context): +def step_then_the_value_should_be_a_numeric_type(context): assert isinstance(context.value, (int, float)) @then('the status should be one of "approved", "not reviewed"') -def step_impl(context): +def step_then_the_status_should_be_one_of_approved_not_reviewed(context): assert context.status in ( "approved", "not reviewed", diff --git a/tests/features/steps/water-levels-csv.py b/tests/features/steps/water-levels-csv.py index b8955a03..4a8d6b57 100644 --- a/tests/features/steps/water-levels-csv.py +++ b/tests/features/steps/water-levels-csv.py @@ -20,7 +20,6 @@ from behave import given, when, then from behave.runner import Context - from db import Observation from db.engine import session_ctx from services.water_level_csv import bulk_upload_water_levels @@ -116,18 +115,20 @@ def _ensure_stdout_json(context: Context) -> Dict[str, Any]: # Scenario: Uploading a valid water level entry CSV containing required fields # ============================================================================ @given("a valid CSV file for bulk water level entry upload") -def step_impl(context: Context): +def step_given_a_valid_csv_file_for_bulk_water_level_entry_upload(context: Context): rows = _build_valid_rows(context) _set_rows(context, rows) @given("my CSV file contains multiple rows of water level entry data") -def step_impl(context: Context): +def step_given_my_csv_file_contains_multiple_rows_of_water_level_entry( + context: Context, +): assert len(context.csv_rows) >= 2 @given("the water level CSV includes required fields:") -def step_impl(context: Context): +def step_given_the_water_level_csv_includes_required_fields(context: Context): field_name = context.table.headings[0] expected_fields = [row[field_name].strip() for row in context.table] headers = set(context.csv_headers) @@ -136,7 +137,7 @@ def step_impl(context: Context): @given('each "well_name_point_id" value matches an 
existing well') -def step_impl(context: Context): +def step_given_each_well_name_point_id_value_matches_an_existing_well(context: Context): available = set(_available_well_names(context)) for row in context.csv_rows: assert ( @@ -147,7 +148,7 @@ def step_impl(context: Context): @given( '"measurement_date_time" values are valid ISO 8601 timestamps with timezone offsets (e.g. "2025-02-15T10:30:00-08:00")' ) -def step_impl(context: Context): +def step_step_step(context: Context): for row in context.csv_rows: assert row["measurement_date_time"].startswith("2025-02") assert "T" in row["measurement_date_time"] @@ -163,7 +164,7 @@ def step_impl(context: Context): @when("I run the CLI command:") -def step_impl(context: Context): +def step_when_i_run_the_cli_command(context: Context): command_text = (context.text or "").strip() context.command_text = command_text output_json = "--output json" in command_text.lower() @@ -175,12 +176,12 @@ def step_impl(context: Context): @then("stdout should be valid JSON") -def step_impl(context: Context): +def step_then_stdout_should_be_valid_json(context: Context): _ensure_stdout_json(context) @then("stdout includes a summary containing:") -def step_impl(context: Context): +def step_then_stdout_includes_a_summary_containing(context: Context): payload = _ensure_stdout_json(context) summary = payload.get("summary", {}) for row in context.table: @@ -194,7 +195,9 @@ def step_impl(context: Context): @then("stdout includes an array of created water level entry objects") -def step_impl(context: Context): +def step_then_stdout_includes_an_array_of_created_water_level_entry_objects( + context: Context, +): payload = _ensure_stdout_json(context) rows = payload.get("water_levels", []) assert rows, "Expected created water level records" @@ -207,7 +210,7 @@ def step_impl(context: Context): @then("stderr should be empty") -def step_impl(context: Context): +def step_then_stderr_should_be_empty(context: Context): assert context.cli_result.stderr == "" @@ 
-217,7 +220,7 @@ def step_impl(context: Context): @given( "my water level CSV file contains all required headers but in a different column order" ) -def step_impl(context: Context): +def step_step_step_2(context: Context): rows = _build_valid_rows(context) headers = list(reversed(list(rows[0].keys()))) _set_rows(context, rows, headers=headers) @@ -225,7 +228,7 @@ def step_impl(context: Context): @then("all water level entries are imported") -def step_impl(context: Context): +def step_then_all_water_level_entries_are_imported(context: Context): payload = _ensure_stdout_json(context) summary = payload["summary"] assert summary["total_rows_processed"] == summary["total_rows_imported"] @@ -236,7 +239,7 @@ def step_impl(context: Context): # Scenario: Upload succeeds when CSV contains extra columns # ============================================================================ @given("my water level CSV file contains extra columns but is otherwise valid") -def step_impl(context: Context): +def step_given_my_water_level_csv_file_contains_extra_columns_but_is(context: Context): rows = _build_valid_rows(context) for idx, row in enumerate(rows): row["custom_note"] = f"extra-{idx}" @@ -251,7 +254,7 @@ def step_impl(context: Context): @given( 'my water level CSV contains 3 rows with 2 valid rows and 1 row missing the required "well_name_point_id"' ) -def step_impl(context: Context): +def step_step_step_3(context: Context): rows = _build_valid_rows(context, count=3) rows[2]["well_name_point_id"] = "" _set_rows(context, rows) @@ -261,12 +264,12 @@ def step_impl(context: Context): @then( 'stderr should contain a validation error for the row missing "well_name_point_id"' ) -def step_impl(context: Context): +def step_step_step_4(context: Context): assert "well_name_point_id" in context.cli_result.stderr @then("no water level entries are imported") -def step_impl(context: Context): +def step_then_no_water_level_entries_are_imported(context: Context): payload = 
_ensure_stdout_json(context) summary = payload["summary"] assert summary["total_rows_imported"] == 0 @@ -278,7 +281,7 @@ def step_impl(context: Context): @given( 'my water level CSV file contains a row missing the required "{required_field}" field' ) -def step_impl(context: Context, required_field: str): +def step_step_step_5(context: Context, required_field: str): rows = _build_valid_rows(context, count=1) rows[0][required_field] = "" _set_rows(context, rows) @@ -286,7 +289,9 @@ def step_impl(context: Context, required_field: str): @then('stderr should contain a validation error for the "{required_field}" field') -def step_impl(context: Context, required_field: str): +def step_then_stderr_should_contain_a_validation_error_for_the_required_field( + context: Context, required_field: str +): assert required_field in context.cli_result.stderr @@ -296,7 +301,7 @@ def step_impl(context: Context, required_field: str): @given( 'my CSV file contains invalid ISO 8601 date values in the "measurement_date_time" field' ) -def step_impl(context: Context): +def step_step_step_6(context: Context): rows = _build_valid_rows(context, count=1) rows[0]["measurement_date_time"] = "02/15/2025 10:30" _set_rows(context, rows) @@ -304,7 +309,9 @@ def step_impl(context: Context): @then("stderr should contain validation errors identifying the invalid field and row") -def step_impl(context: Context): +def step_then_stderr_should_contain_validation_errors_identifying_the_invalid_field_and( + context: Context, +): stderr = context.cli_result.stderr assert stderr, "Expected stderr output" for field in getattr(context, "invalid_fields", []): @@ -318,7 +325,7 @@ def step_impl(context: Context): @given( 'my CSV file contains values that cannot be parsed as numeric in numeric-required fields such as "mp_height" or "depth_to_water_ft"' ) -def step_impl(context: Context): +def step_step_step_7(context: Context): rows = _build_valid_rows(context, count=1) rows[0]["mp_height"] = "one point five" 
rows[0]["depth_to_water_ft"] = "forty" @@ -332,7 +339,7 @@ def step_impl(context: Context): @given( 'my CSV file contains invalid lexicon values for "sampler", "sample_method", "level_status", or "data_quality"' ) -def step_impl(context: Context): +def step_step_step_8(context: Context): rows = _build_valid_rows(context, count=1) rows[0]["sampler"] = "Unknown Team" rows[0]["sample_method"] = "mystery" diff --git a/tests/features/steps/well-additional-information.py b/tests/features/steps/well-additional-information.py index 8eecef15..c34f17b6 100644 --- a/tests/features/steps/well-additional-information.py +++ b/tests/features/steps/well-additional-information.py @@ -9,7 +9,7 @@ @then( "the response should include whether repeat measurement permission is granted for the well" ) -def step_impl(context): +def step_step_step(context): permission_type = "Water Level Sample" assert "permissions" in context.water_well_data @@ -42,7 +42,9 @@ def step_impl(context): @then("the response should include whether sampling permission is granted for the well") -def step_impl(context): +def step_then_the_response_should_include_whether_sampling_permission_is_granted_for( + context, +): permission_type = "Water Chemistry Sample" assert "permissions" in context.water_well_data @@ -77,7 +79,7 @@ def step_impl(context): @then( "the response should include whether datalogger installation permission is granted for the well" ) -def step_impl(context): +def step_step_step_2(context): permission_type = "Datalogger Installation" assert "permissions" in context.water_well_data @@ -115,7 +117,7 @@ def step_impl(context): @then("the response should include the completion date of the well") -def step_impl(context): +def step_then_the_response_should_include_the_completion_date_of_the_well(context): assert "well_completion_date" in context.water_well_data assert context.water_well_data["well_completion_date"] == context.objects["wells"][ 0 @@ -123,7 +125,9 @@ def step_impl(context): @then("the 
response should include the source of the completion information") -def step_impl(context): +def step_then_the_response_should_include_the_source_of_the_completion_information( + context, +): assert "well_completion_date_source" in context.water_well_data assert ( @@ -133,7 +137,7 @@ def step_impl(context): @then("the response should include the driller name") -def step_impl(context): +def step_then_the_response_should_include_the_driller_name(context): assert "well_driller_name" in context.water_well_data assert ( context.water_well_data["well_driller_name"] @@ -142,7 +146,7 @@ def step_impl(context): @then("the response should include the construction method") -def step_impl(context): +def step_then_the_response_should_include_the_construction_method(context): assert "well_construction_method" in context.water_well_data assert ( context.water_well_data["well_construction_method"] @@ -151,7 +155,9 @@ def step_impl(context): @then("the response should include the source of the construction information") -def step_impl(context): +def step_then_the_response_should_include_the_source_of_the_construction_information( + context, +): assert "well_construction_method_source" in context.water_well_data assert ( context.water_well_data["well_construction_method_source"] @@ -165,7 +171,7 @@ def step_impl(context): @then("the response should include the casing diameter in inches") -def step_impl(context): +def step_then_the_response_should_include_the_casing_diameter_in_inches(context): assert "well_casing_diameter" in context.water_well_data assert "well_casing_diameter_unit" in context.water_well_data @@ -177,7 +183,7 @@ def step_impl(context): @then("the response should include the casing depth in feet below ground surface") -def step_impl(context): +def step_then_the_response_should_include_the_casing_depth_in_feet_below(context): assert "well_casing_depth" in context.water_well_data assert "well_casing_depth_unit" in context.water_well_data @@ -189,7 +195,7 @@ def 
step_impl(context): @then("the response should include the casing materials") -def step_impl(context): +def step_then_the_response_should_include_the_casing_materials(context): assert "well_casing_materials" in context.water_well_data assert set(context.water_well_data["well_casing_materials"]) == { m.material for m in context.objects["wells"][0].well_casing_materials @@ -197,7 +203,7 @@ def step_impl(context): @then("the response should include the well pump type (previously well_type field)") -def step_impl(context): +def step_then_the_response_should_include_the_well_pump_type_previously_well(context): assert "well_pump_type" in context.water_well_data assert ( context.water_well_data["well_pump_type"] @@ -206,7 +212,7 @@ def step_impl(context): @then("the response should include the well pump depth in feet (new field)") -def step_impl(context): +def step_then_the_response_should_include_the_well_pump_depth_in_feet(context): assert "well_pump_depth" in context.water_well_data assert "well_pump_depth_unit" in context.water_well_data @@ -220,7 +226,7 @@ def step_impl(context): @then( "the response should include whether the well is open and suitable for a datalogger" ) -def step_impl(context): +def step_step_step_3(context): assert "datalogger_installation_status" in context.water_well_data assert "open_status" in context.water_well_data assert ( @@ -241,7 +247,7 @@ def step_impl(context): @then( "the response should include the formation as the formation zone of well completion" ) -def step_impl(context): +def step_step_step_4(context): assert "formation_completion_code" in context.water_well_data assert ( context.water_well_data["formation_completion_code"] @@ -252,7 +258,7 @@ def step_impl(context): @then( "the response should include the aquifer class code to classify the aquifer into aquifer system." 
) -def step_impl(context): +def step_step_step_5(context): for aquifer in context.water_well_data["aquifers"]: assert "aquifer_system" in aquifer assert {a.get("aquifer_system") for a in context.water_well_data["aquifers"]} == { @@ -263,7 +269,7 @@ def step_impl(context): @then( "the response should include the aquifer type as the type of aquifers penetrated by the well" ) -def step_impl(context): +def step_step_step_6(context): for aquifer in context.water_well_data["aquifers"]: assert "aquifer_types" in aquifer diff --git a/tests/features/steps/well-core-information.py b/tests/features/steps/well-core-information.py index f389d6af..cdd2cf34 100644 --- a/tests/features/steps/well-core-information.py +++ b/tests/features/steps/well-core-information.py @@ -1,7 +1,6 @@ from behave import then -from geoalchemy2.shape import to_shape - from core.constants import SRID_WGS84, SRID_UTM_ZONE_13N +from geoalchemy2.shape import to_shape from services.util import ( transform_srid, convert_m_to_ft, @@ -10,7 +9,7 @@ @then("the response should be in JSON format") -def step_impl(context): +def step_then_the_response_should_be_in_json_format(context): assert context.response["Content-Type"] == "application/json" @@ -20,14 +19,14 @@ def step_impl(context): @then("the response should include the well name (point ID) (i.e. 
NM-1234)") -def step_impl(context): +def step_then_the_response_should_include_the_well_name_point_id_i(context): assert "name" in context.water_well_data assert context.water_well_data["name"] == context.objects["wells"][0].name @then("the response should include the project(s) or group(s) associated with the well") -def step_impl(context): +def step_then_the_response_should_include_the_project_s_or_group_s(context): assert "groups" in context.water_well_data assert ( @@ -54,7 +53,7 @@ def step_impl(context): @then("the response should include the purpose of the well (current use)") -def step_impl(context): +def step_then_the_response_should_include_the_purpose_of_the_well_current(context): assert "well_purposes" in context.water_well_data assert "Domestic" in context.water_well_data["well_purposes"] @@ -73,7 +72,7 @@ def step_impl(context): @then( "the response should include the well hole status of the well as the status of the hole in the ground (from previous Status field)" ) -def step_impl(context): +def step_step_step(context): assert "well_status" in context.water_well_data well_status_record = retrieve_latest_polymorphic_history_table_record( @@ -83,7 +82,7 @@ def step_impl(context): @then("the response should include the monitoring frequency (new field)") -def step_impl(context): +def step_then_the_response_should_include_the_monitoring_frequency_new_field(context): assert "monitoring_frequencies" in context.water_well_data assert len(context.water_well_data["monitoring_frequencies"]) == 1 @@ -97,7 +96,7 @@ def step_impl(context): @then( "the response should include whether the well is currently being monitored with status text if applicable (from previous MonitoringStatus field)" ) -def step_impl(context): +def step_step_step_2(context): assert "monitoring_status" in context.water_well_data monitoring_status_record = retrieve_latest_polymorphic_history_table_record( @@ -115,7 +114,7 @@ def step_impl(context): @then("the response should include the 
release status of the well record") -def step_impl(context): +def step_then_the_response_should_include_the_release_status_of_the_well(context): assert "release_status" in context.water_well_data assert ( @@ -130,7 +129,7 @@ def step_impl(context): @then("the response should include the hole depth in feet") -def step_impl(context): +def step_then_the_response_should_include_the_hole_depth_in_feet(context): assert "hole_depth" in context.water_well_data assert "hole_depth_unit" in context.water_well_data @@ -141,7 +140,7 @@ def step_impl(context): @then("the response should include the well depth in feet") -def step_impl(context): +def step_then_the_response_should_include_the_well_depth_in_feet(context): assert "well_depth" in context.water_well_data assert "well_depth_unit" in context.water_well_data @@ -152,7 +151,7 @@ def step_impl(context): @then("the response should include the source of the well depth information") -def step_impl(context): +def step_then_the_response_should_include_the_source_of_the_well_depth(context): assert "well_depth_source" in context.water_well_data data_provenance_records = context.objects["data_provenance"] @@ -174,7 +173,9 @@ def step_impl(context): @then("the response should include the description of the measuring point") -def step_impl(context): +def step_then_the_response_should_include_the_description_of_the_measuring_point( + context, +): assert "measuring_point_description" in context.water_well_data assert ( @@ -184,7 +185,7 @@ def step_impl(context): @then("the response should include the measuring point height in feet") -def step_impl(context): +def step_then_the_response_should_include_the_measuring_point_height_in_feet(context): assert "measuring_point_height" in context.water_well_data assert "measuring_point_height_unit" in context.water_well_data @@ -202,7 +203,7 @@ def step_impl(context): @then( "the response should include location information in GeoJSON spec format RFC 7946" ) -def step_impl(context): +def 
step_step_step_3(context): assert "current_location" in context.water_well_data assert "type" in context.water_well_data["current_location"] assert "geometry" in context.water_well_data["current_location"] @@ -216,7 +217,7 @@ def step_impl(context): @then( 'the response should include a geometry object with type "Point" and coordinates array [longitude, latitude, elevation]' ) -def step_impl(context): +def step_step_step_4(context): point_wkb = context.objects["locations"][0].point point_wkt = to_shape(point_wkb) latitude = point_wkt.y @@ -232,7 +233,7 @@ def step_impl(context): @then( "the response should include the elevation in feet with vertical datum NAVD88 in the properties" ) -def step_impl(context): +def step_step_step_5(context): assert "elevation" in context.water_well_data["current_location"]["properties"] assert "elevation_unit" in context.water_well_data["current_location"]["properties"] assert "vertical_datum" in context.water_well_data["current_location"]["properties"] @@ -256,7 +257,7 @@ def step_impl(context): @then( "the response should include the elevation method (i.e. interpolated from digital elevation model) in the properties" ) -def step_impl(context): +def step_step_step_6(context): assert ( "elevation_method" in context.water_well_data["current_location"]["properties"] ) @@ -279,7 +280,7 @@ def step_impl(context): @then( "the response should include the UTM coordinates with datum NAD83 in the properties" ) -def step_impl(context): +def step_step_step_7(context): assert ( "utm_coordinates" in context.water_well_data["current_location"]["properties"] @@ -307,7 +308,7 @@ def step_impl(context): @then( "the response should include any alternate IDs for the well like the NMBGMR site_name (i.e. 
John Smith Well), USGS site number, or the OSE well ID and OSE well tag ID" ) -def step_impl(context): +def step_step_step_8(context): assert "alternate_ids" in context.water_well_data assert len(context.water_well_data["alternate_ids"]) == 3 diff --git a/tests/features/steps/well-inventory-csv-given.py b/tests/features/steps/well-inventory-csv-given.py index 70d3bdb6..f02144fc 100644 --- a/tests/features/steps/well-inventory-csv-given.py +++ b/tests/features/steps/well-inventory-csv-given.py @@ -24,11 +24,15 @@ def _set_file_content(context: Context, name): path = Path("tests") / "features" / "data" / name + _set_file_content_from_path(context, path, name) + + +def _set_file_content_from_path(context: Context, path: Path, name: str | None = None): context.file_path = path - with open(path, "r") as f: - context.file_name = name + with open(path, "r", encoding="utf-8", newline="") as f: + context.file_name = name or path.name context.file_content = f.read() - if name.endswith(".csv"): + if context.file_name.endswith(".csv"): context.rows = list(csv.DictReader(context.file_content.splitlines())) context.row_count = len(context.rows) context.file_type = "text/csv" @@ -41,14 +45,14 @@ def _set_file_content(context: Context, name): @given( 'my CSV file contains a row with a contact but is missing the required "contact_role" field for that contact' ) -def step_impl(context: Context): +def step_step_step(context: Context): _set_file_content(context, "well-inventory-missing-contact-role.csv") @given( "my CSV file contains a row that has an invalid postal code format in contact_1_address_1_postal_code" ) -def step_impl(context: Context): +def step_step_step_2(context: Context): _set_file_content(context, "well-inventory-invalid-postal-code.csv") @@ -57,42 +61,55 @@ def step_impl_valid_csv_file(context: Context): _set_file_content(context, "well-inventory-valid.csv") +@given("I use the real user-entered well inventory CSV file") +def step_impl_real_user_csv(context: 
Context): + path = ( + Path("tests") + / "features" + / "data" + / "well-inventory-real-user-entered-data.csv" + ) + _set_file_content_from_path(context, path) + + @given('my CSV file contains rows missing a required field "well_name_point_id"') -def step_impl(context: Context): +def step_given_my_csv_file_contains_rows_missing_a_required_field_well( + context: Context, +): _set_file_content(context, "well-inventory-missing-required.csv") @given('my CSV file contains one or more duplicate "well_name_point_id" values') -def step_impl(context: Context): +def step_given_my_csv_file_contains_one_or_more_duplicate_well_name(context: Context): _set_file_content(context, "well-inventory-duplicate.csv") @given( 'my CSV file contains invalid lexicon values for "contact_role" or other lexicon fields' ) -def step_impl(context: Context): +def step_step_step_3(context: Context): _set_file_content(context, "well-inventory-invalid-lexicon.csv") @given('my CSV file contains invalid ISO 8601 date values in the "date_time" field') -def step_impl(context: Context): +def step_given_my_csv_file_contains_invalid_iso_8601_date_values_in(context: Context): _set_file_content(context, "well-inventory-invalid-date.csv") @given( 'my CSV file contains values that cannot be parsed as numeric in numeric-required fields such as "utm_easting"' ) -def step_impl(context: Context): +def step_step_step_4(context: Context): _set_file_content(context, "well-inventory-invalid-numeric.csv") @given("my CSV file contains column headers but no data rows") -def step_impl(context: Context): +def step_given_my_csv_file_contains_column_headers_but_no_data_rows(context: Context): _set_file_content(context, "well-inventory-no-data-headers.csv") @given("my CSV file is empty") -def step_impl(context: Context): +def step_given_my_csv_file_is_empty(context: Context): # context.file_content = "" # context.rows = [] # context.file_type = "text/csv" @@ -100,7 +117,7 @@ def step_impl(context: Context): @given("I have a 
non-CSV file") -def step_impl(context: Context): +def step_given_i_have_a_non_csv_file(context: Context): _set_file_content(context, "well-inventory-invalid-filetype.txt") @@ -123,85 +140,107 @@ def step_impl_csv_file_is_encoded_utf8(context: Context): @given( "my CSV file contains a row with a contact with a phone number that is not in the valid format" ) -def step_impl(context: Context): +def step_step_step_5(context: Context): _set_file_content(context, "well-inventory-invalid-phone-number.csv") @given( "my CSV file contains a row with a contact with an email that is not in the valid format" ) -def step_impl(context: Context): +def step_step_step_6(context: Context): _set_file_content(context, "well-inventory-invalid-email.csv") @given( 'my CSV file contains a row with a contact but is missing the required "contact_type" field for that contact' ) -def step_impl(context: Context): +def step_step_step_7(context: Context): _set_file_content(context, "well-inventory-missing-contact-type.csv") @given( 'my CSV file contains a row with a contact_type value that is not in the valid lexicon for "contact_type"' ) -def step_impl(context: Context): +def step_step_step_8(context: Context): _set_file_content(context, "well-inventory-invalid-contact-type.csv") @given( 'my CSV file contains a row with a contact with an email but is missing the required "email_type" field for that email' ) -def step_impl(context: Context): +def step_step_step_9(context: Context): _set_file_content(context, "well-inventory-missing-email-type.csv") @given( 'my CSV file contains a row with a contact with a phone but is missing the required "phone_type" field for that phone' ) -def step_impl(context: Context): +def step_step_step_10(context: Context): _set_file_content(context, "well-inventory-missing-phone-type.csv") @given( 'my CSV file contains a row with a contact with an address but is missing the required "address_type" field for that address' ) -def step_impl(context: Context): +def 
step_step_step_11(context: Context): _set_file_content(context, "well-inventory-missing-address-type.csv") @given( "my CSV file contains a row with utm_easting utm_northing and utm_zone values that are not within New Mexico" ) -def step_impl(context: Context): +def step_step_step_12(context: Context): _set_file_content(context, "well-inventory-invalid-utm.csv") @given( 'my CSV file contains invalid ISO 8601 date values in the "date_time" or "date_drilled" field' ) -def step_impl(context: Context): +def step_step_step_13(context: Context): _set_file_content(context, "well-inventory-invalid-date-format.csv") @given("my CSV file contains all required headers but in a different column order") -def step_impl(context: Context): +def step_given_my_csv_file_contains_all_required_headers_but_in_a(context: Context): _set_file_content(context, "well-inventory-valid-reordered.csv") @given("my CSV file contains extra columns but is otherwise valid") -def step_impl(context: Context): +def step_given_my_csv_file_contains_extra_columns_but_is_otherwise_valid( + context: Context, +): _set_file_content(context, "well-inventory-valid-extra-columns.csv") @given( - 'my CSV file contains 3 rows of data with 2 valid rows and 1 row missing the required "well_name_point_id"' + 'my CSV file contains 3 rows of data with 2 valid rows and 1 row with a blank "well_name_point_id"' ) -def step_impl(context: Context): - _set_file_content(context, "well-inventory-invalid-partial.csv") +def step_step_step_14(context: Context): + df = _get_valid_df(context) + + # Start from two valid rows, add a third valid row, then blank only well_name_point_id. + df = pd.concat([df, df.iloc[[0]].copy()], ignore_index=True) + # Ensure copied row does not violate unique contact constraints. 
+ if "field_staff" in df.columns: + df.loc[2, "field_staff"] = "AutoGen Staff 3" + if "field_staff_2" in df.columns: + df.loc[2, "field_staff_2"] = "AutoGen Staff 3B" + if "field_staff_3" in df.columns: + df.loc[2, "field_staff_3"] = "AutoGen Staff 3C" + if "contact_1_name" in df.columns: + df.loc[2, "contact_1_name"] = "AutoGen Contact 3A" + if "contact_2_name" in df.columns: + df.loc[2, "contact_2_name"] = "AutoGen Contact 3B" + + df.loc[2, "well_name_point_id"] = "" + + _set_content_from_df(context, df) @given('my CSV file contains a row missing the required "{required_field}" field') -def step_impl(context, required_field): +def step_given_my_csv_file_contains_a_row_missing_the_required_required( + context, required_field +): _set_file_content(context, "well-inventory-valid.csv") df = pd.read_csv(context.file_path, dtype={"contact_2_address_1_postal_code": str}) @@ -217,19 +256,19 @@ def step_impl(context, required_field): @given( 'my CSV file contains a row with an invalid boolean value "maybe" in the "is_open" field' ) -def step_impl(context: Context): +def step_step_step_15(context: Context): _set_file_content(context, "well-inventory-invalid-boolean-value-maybe.csv") @given("my CSV file contains a valid but duplicate header row") -def step_impl(context: Context): +def step_given_my_csv_file_contains_a_valid_but_duplicate_header_row(context: Context): _set_file_content(context, "well-inventory-duplicate-header.csv") @given( 'my CSV file header row contains the "contact_1_email_1" column name more than once' ) -def step_impl(context: Context): +def step_step_step_16(context: Context): _set_file_content(context, "well-inventory-duplicate-columns.csv") @@ -244,10 +283,14 @@ def _set_content_from_df(context: Context, df: pd.DataFrame, delimiter: str = ", df.to_csv(buffer, index=False, sep=delimiter) context.file_content = buffer.getvalue() context.rows = list(csv.DictReader(context.file_content.splitlines())) + context.row_count = len(context.rows) + 
context.file_type = "text/csv" @given("my CSV file contains more rows than the configured maximum for bulk upload") -def step_impl(context: Context): +def step_given_my_csv_file_contains_more_rows_than_the_configured_maximum( + context: Context, +): df = _get_valid_df(context) df = pd.concat([df.iloc[:2]] * 1001, ignore_index=True) @@ -256,14 +299,14 @@ def step_impl(context: Context): @given("my file is named with a .csv extension") -def step_impl(context: Context): +def step_given_my_file_is_named_with_a_csv_extension(context: Context): _set_file_content(context, "well-inventory-valid.csv") @given( 'my file uses "{delimiter_description}" as the field delimiter instead of commas' ) -def step_impl(context, delimiter_description: str): +def step_step_step_17(context, delimiter_description: str): df = _get_valid_df(context) if delimiter_description == "semicolons": @@ -276,32 +319,33 @@ def step_impl(context, delimiter_description: str): @given("my CSV file header row contains all required columns") -def step_impl(context: Context): +def step_given_my_csv_file_header_row_contains_all_required_columns(context: Context): _set_file_content(context, "well-inventory-valid.csv") @given( 'my CSV file contains a data row where the "site_name" field value includes a comma and is enclosed in quotes' ) -def step_impl(context: Context): +def step_step_step_18(context: Context): _set_file_content(context, "well-inventory-valid-comma-in-quotes.csv") @given( "my CSV file contains a data row where a field begins with a quote but does not have a matching closing quote" ) -def step_impl(context: Context): +def step_step_step_19(context: Context): df = _get_valid_df(context) df.loc[0, "well_name_point_id"] = '"well-name-point-id' _set_content_from_df(context, df) @given( - 'my CSV file contains all valid columns but uses "XY-" prefix for well_name_point_id values' + 'my CSV file contains all valid columns but uses uppercase "-xxxx" placeholders and blank values for well_name_point_id' 
) -def step_impl(context: Context): +def step_step_step_20(context: Context): df = _get_valid_df(context) - df["well_name_point_id"] = df["well_name_point_id"].apply(lambda x: "XY-") + df.loc[0, "well_name_point_id"] = "" + df.loc[1, "well_name_point_id"] = "SAC-xxxx" # change contact name df.loc[0, "contact_1_name"] = "Contact 1" @@ -314,7 +358,7 @@ def step_impl(context: Context): @given( "my csv file contains a row where some but not all water level entry fields are filled" ) -def step_impl(context): +def step_step_step_21(context): _set_file_content(context, "well-inventory-missing-wl-fields.csv") diff --git a/tests/features/steps/well-inventory-csv-validation-error.py b/tests/features/steps/well-inventory-csv-validation-error.py index 7dfceac5..8aecbeae 100644 --- a/tests/features/steps/well-inventory-csv-validation-error.py +++ b/tests/features/steps/well-inventory-csv-validation-error.py @@ -34,7 +34,7 @@ def _handle_validation_error(context, expected_errors): @then( 'the response includes a validation error indicating the missing "address_type" value' ) -def step_impl(context: Context): +def step_step_step(context: Context): expected_errors = [ { "field": "composite field error", @@ -45,7 +45,9 @@ def step_impl(context: Context): @then("the response includes a validation error indicating the invalid UTM coordinates") -def step_impl(context: Context): +def step_then_the_response_includes_a_validation_error_indicating_the_invalid_utm( + context: Context, +): expected_errors = [ { "field": "composite field error", @@ -62,7 +64,7 @@ def step_impl(context: Context): @then( 'the response includes a validation error indicating an invalid "contact_type" value' ) -def step_impl(context): +def step_step_step_2(context): expected_errors = [ { "field": "contact_1_type", @@ -75,7 +77,7 @@ def step_impl(context): @then( 'the response includes a validation error indicating the missing "email_type" value' ) -def step_impl(context): +def step_step_step_3(context): 
expected_errors = [ { "field": "composite field error", @@ -88,7 +90,7 @@ def step_impl(context): @then( 'the response includes a validation error indicating the missing "phone_type" value' ) -def step_impl(context): +def step_step_step_4(context): expected_errors = [ { "field": "composite field error", @@ -101,7 +103,7 @@ def step_impl(context): @then( 'the response includes a validation error indicating the missing "contact_role" field' ) -def step_impl(context): +def step_step_step_5(context): expected_errors = [ { "field": "composite field error", @@ -114,7 +116,7 @@ def step_impl(context): @then( "the response includes a validation error indicating the invalid postal code format" ) -def step_impl(context): +def step_step_step_6(context): expected_errors = [ { "field": "contact_1_address_1_postal_code", @@ -127,7 +129,7 @@ def step_impl(context): @then( "the response includes a validation error indicating the invalid phone number format" ) -def step_impl(context): +def step_step_step_7(context): expected_errors = [ { "field": "contact_1_phone_1", @@ -138,7 +140,9 @@ def step_impl(context): @then("the response includes a validation error indicating the invalid email format") -def step_impl(context): +def step_then_the_response_includes_a_validation_error_indicating_the_invalid_email( + context, +): expected_errors = [ { "field": "contact_1_email_1", @@ -151,7 +155,7 @@ def step_impl(context): @then( 'the response includes a validation error indicating the missing "contact_type" value' ) -def step_impl(context): +def step_step_step_8(context): expected_errors = [ { "field": "composite field error", @@ -162,13 +166,17 @@ def step_impl(context): @then("the response includes a validation error indicating a repeated header row") -def step_impl(context: Context): +def step_then_the_response_includes_a_validation_error_indicating_a_repeated_header( + context: Context, +): expected_errors = [{"field": "header", "error": "Duplicate header row"}] 
_handle_validation_error(context, expected_errors) @then("the response includes a validation error indicating duplicate header names") -def step_impl(context: Context): +def step_then_the_response_includes_a_validation_error_indicating_duplicate_header_names( + context: Context, +): expected_errors = [ {"field": "['contact_1_email_1']", "error": "Duplicate columns found"} @@ -179,7 +187,7 @@ def step_impl(context: Context): @then( 'the response includes a validation error indicating an invalid boolean value for the "is_open" field' ) -def step_impl(context: Context): +def step_step_step_9(context: Context): expected_errors = [ { "field": "is_open", @@ -192,7 +200,7 @@ def step_impl(context: Context): @then( "the response includes validation errors for each missing water level entry field" ) -def step_impl(context): +def step_step_step_10(context): expected_errors = [ { "field": "composite field error", diff --git a/tests/features/steps/well-inventory-csv.py b/tests/features/steps/well-inventory-csv.py index 824d4213..8b23b0be 100644 --- a/tests/features/steps/well-inventory-csv.py +++ b/tests/features/steps/well-inventory-csv.py @@ -5,12 +5,11 @@ from behave import given, when, then from behave.runner import Context -from sqlalchemy import select - from cli.service_adapter import well_inventory_csv from db.engine import session_ctx from db.lexicon import LexiconCategory from services.util import convert_dt_tz_naive_to_tz_aware +from sqlalchemy import select @given("valid lexicon values exist for:") @@ -34,7 +33,7 @@ def step_impl_csv_includes_required_fields(context: Context): @given('each "well_name_point_id" value is unique per row') -def step_impl(context: Context): +def step_given_each_well_name_point_id_value_is_unique_per_row(context: Context): """Verifies that each "well_name_point_id" value is unique per row.""" seen_ids = set() for row in context.rows: @@ -46,7 +45,7 @@ def step_impl(context: Context): @given("the CSV includes optional fields when 
available:") -def step_impl(context: Context): +def step_given_the_csv_includes_optional_fields_when_available(context: Context): optional_fields = [row[0] for row in context.table] keys = context.rows[0].keys() @@ -56,7 +55,9 @@ def step_impl(context: Context): @given("the csv includes optional water level entry fields when available:") -def step_impl(context: Context): +def step_given_the_csv_includes_optional_water_level_entry_fields_when_available( + context: Context, +): optional_fields = [row[0] for row in context.table] context.water_level_optional_fields = optional_fields @@ -64,7 +65,7 @@ def step_impl(context: Context): @given( 'the required "date_time" values are valid ISO 8601 timezone-naive datetime strings (e.g. "2025-02-15T10:30:00")' ) -def step_impl(context: Context): +def step_step_step(context: Context): """Verifies that "date_time" values are valid ISO 8601 timezone-naive datetime strings.""" for row in context.rows: try: @@ -79,7 +80,7 @@ def step_impl(context: Context): @given( 'the optional "water_level_date_time" values are valid ISO 8601 timezone-naive datetime strings (e.g. 
"2025-02-15T10:30:00") when provided' ) -def step_impl(context: Context): +def step_step_step_2(context: Context): """Verifies that "water_level_date_time" values are valid ISO 8601 timezone-naive datetime strings.""" for row in context.rows: if row.get("water_level_date_time", None): @@ -96,7 +97,7 @@ def step_impl(context: Context): @when("I upload the file to the bulk upload endpoint") @when("I run the well inventory bulk upload command") -def step_impl(context: Context): +def step_when_i_run_the_well_inventory_bulk_upload_command(context: Context): suffix = Path(getattr(context, "file_name", "upload.csv")).suffix or ".csv" with tempfile.NamedTemporaryFile(mode="w", suffix=suffix, delete=False) as fp: fp.write(context.file_content) @@ -142,7 +143,7 @@ def json(self): @then( "all datetime objects are assigned the correct Mountain Time timezone offset based on the date value." ) -def step_impl(context: Context): +def step_step_step_3(context: Context): """Converts all datetime strings in the CSV rows to timezone-aware datetime objects with Mountain Time offset.""" for i, row in enumerate(context.rows): # Convert date_time field @@ -194,7 +195,7 @@ def step_impl(context: Context): @then("the response includes a summary containing:") -def step_impl(context: Context): +def step_then_the_response_includes_a_summary_containing(context: Context): response_json = context.response.json() summary = response_json.get("summary", {}) for row in context.table: @@ -207,7 +208,7 @@ def step_impl(context: Context): @then("the response includes an array of created well objects") -def step_impl(context: Context): +def step_then_the_response_includes_an_array_of_created_well_objects(context: Context): response_json = context.response.json() wells = response_json.get("wells", []) assert ( @@ -216,7 +217,9 @@ def step_impl(context: Context): @then("the response includes validation errors for all rows missing required fields") -def step_impl(context: Context): +def 
step_then_the_response_includes_validation_errors_for_all_rows_missing_required( + context: Context, +): response_json = context.response.json() validation_errors = response_json.get("validation_errors", []) assert len(validation_errors) == len( @@ -231,7 +234,9 @@ def step_impl(context: Context): @then("the response identifies the row and field for each error") -def step_impl(context: Context): +def step_then_the_response_identifies_the_row_and_field_for_each_error( + context: Context, +): response_json = context.response.json() validation_errors = response_json.get("validation_errors", []) for error in validation_errors: @@ -240,14 +245,16 @@ def step_impl(context: Context): @then("no wells are imported") -def step_impl(context: Context): +def step_then_no_wells_are_imported(context: Context): response_json = context.response.json() wells = response_json.get("wells", []) assert len(wells) == 0, "Expected no wells to be imported" @then("the response includes validation errors indicating duplicated values") -def step_impl(context: Context): +def step_then_the_response_includes_validation_errors_indicating_duplicated_values( + context: Context, +): response_json = context.response.json() validation_errors = response_json.get("validation_errors", []) @@ -263,7 +270,7 @@ def step_impl(context: Context): @then("each error identifies the row and field") -def step_impl(context: Context): +def step_then_each_error_identifies_the_row_and_field(context: Context): response_json = context.response.json() validation_errors = response_json.get("validation_errors", []) for error in validation_errors: @@ -272,7 +279,9 @@ def step_impl(context: Context): @then("the response includes validation errors identifying the invalid field and row") -def step_impl(context: Context): +def step_then_the_response_includes_validation_errors_identifying_the_invalid_field_and( + context: Context, +): response_json = context.response.json() validation_errors = 
response_json.get("validation_errors", []) for error in validation_errors: @@ -281,7 +290,9 @@ def step_impl(context: Context): @then("the response includes an error message indicating unsupported file type") -def step_impl(context: Context): +def step_then_the_response_includes_an_error_message_indicating_unsupported_file_type( + context: Context, +): response_json = context.response.json() assert "detail" in response_json, "Expected response to include an detail object" assert ( @@ -290,7 +301,9 @@ def step_impl(context: Context): @then("the response includes an error message indicating an empty file") -def step_impl(context: Context): +def step_then_the_response_includes_an_error_message_indicating_an_empty_file( + context: Context, +): response_json = context.response.json() assert "detail" in response_json, "Expected response to include an detail object" assert ( @@ -299,7 +312,9 @@ def step_impl(context: Context): @then("the response includes an error indicating that no data rows were found") -def step_impl(context: Context): +def step_then_the_response_includes_an_error_indicating_that_no_data_rows( + context: Context, +): response_json = context.response.json() assert "detail" in response_json, "Expected response to include an detail object" assert ( @@ -308,7 +323,7 @@ def step_impl(context: Context): @then("all wells are imported") -def step_impl(context: Context): +def step_then_all_wells_are_imported(context: Context): response_json = context.response.json() assert "wells" in response_json, "Expected response to include wells" assert len(response_json["wells"]) == context.row_count @@ -317,7 +332,7 @@ def step_impl(context: Context): @then( 'the response includes a validation error for the row missing "well_name_point_id"' ) -def step_impl(context: Context): +def step_step_step_4(context: Context): response_json = context.response.json() assert "summary" in response_json, "Expected summary in response" summary = response_json["summary"] @@ -343,7 +358,9 
@@ def step_impl(context: Context): @then('the response includes a validation error for the "{required_field}" field') -def step_impl(context: Context, required_field: str): +def step_then_the_response_includes_a_validation_error_for_the_required_field( + context: Context, required_field: str +): response_json = context.response.json() assert "validation_errors" in response_json, "Expected validation errors" vs = response_json["validation_errors"] @@ -352,7 +369,9 @@ def step_impl(context: Context, required_field: str): @then("the response includes an error message indicating the row limit was exceeded") -def step_impl(context: Context): +def step_then_the_response_includes_an_error_message_indicating_the_row_limit( + context: Context, +): response_json = context.response.json() assert "detail" in response_json, "Expected response to include an detail object" assert ( @@ -361,7 +380,9 @@ def step_impl(context: Context): @then("the response includes an error message indicating an unsupported delimiter") -def step_impl(context: Context): +def step_then_the_response_includes_an_error_message_indicating_an_unsupported_delimiter( + context: Context, +): response_json = context.response.json() assert "detail" in response_json, "Expected response to include an detail object" assert ( @@ -371,7 +392,9 @@ def step_impl(context: Context): @then("all wells are imported with system-generated unique well_name_point_id values") -def step_impl(context: Context): +def step_then_all_wells_are_imported_with_system_generated_unique_well_name( + context: Context, +): response_json = context.response.json() assert "wells" in response_json, "Expected response to include wells" wells = response_json["wells"] diff --git a/tests/features/steps/well-inventory-real-user-csv.py b/tests/features/steps/well-inventory-real-user-csv.py new file mode 100644 index 00000000..79839f9c --- /dev/null +++ b/tests/features/steps/well-inventory-real-user-csv.py @@ -0,0 +1,72 @@ +from behave import then 
+from behave.runner import Context + + +@then("the response summary reports all rows were processed from the source CSV") +def step_then_the_response_summary_reports_all_rows_were_processed_from_the( + context: Context, +): + response_json = context.response.json() + summary = response_json.get("summary", {}) + assert ( + summary.get("total_rows_processed") == context.row_count + ), "Expected total_rows_processed to match CSV row count" + + +@then("the response summary includes import and validation counts") +def step_then_the_response_summary_includes_import_and_validation_counts( + context: Context, +): + response_json = context.response.json() + summary = response_json.get("summary", {}) + assert "total_rows_imported" in summary, "Expected total_rows_imported in summary" + assert ( + "validation_errors_or_warnings" in summary + ), "Expected validation_errors_or_warnings in summary" + + +@then("the command exit code matches whether validation errors were reported") +def step_then_the_command_exit_code_matches_whether_validation_errors_were_reported( + context: Context, +): + response_json = context.response.json() + has_validation_errors = bool(response_json.get("validation_errors")) + if has_validation_errors: + assert ( + context.cli_result.exit_code != 0 + ), "Expected non-zero exit code when validation errors exist" + else: + assert ( + context.cli_result.exit_code == 0 + ), "Expected zero exit code when validation errors do not exist" + + +@then("the response includes one or more validation errors") +def step_then_the_response_includes_one_or_more_validation_errors(context: Context): + response_json = context.response.json() + validation_errors = response_json.get("validation_errors", []) + assert validation_errors, "Expected one or more validation errors" + + +@then("each validation error contains row field and error details") +def step_then_each_validation_error_contains_row_field_and_error_details( + context: Context, +): + response_json = 
context.response.json() + validation_errors = response_json.get("validation_errors", []) + assert validation_errors, "Expected one or more validation errors" + for error in validation_errors: + assert "row" in error, "Expected validation error to include row" + assert "field" in error, "Expected validation error to include field" + assert "error" in error, "Expected validation error to include error" + + +@then("no wells are imported when validation errors are present") +def step_then_no_wells_are_imported_when_validation_errors_are_present( + context: Context, +): + response_json = context.response.json() + validation_errors = response_json.get("validation_errors", []) + wells = response_json.get("wells", []) + if validation_errors: + assert wells == [], "Expected no wells to be imported when errors are present" diff --git a/tests/features/steps/well-location.py b/tests/features/steps/well-location.py index 665fcdf3..68a95dc9 100644 --- a/tests/features/steps/well-location.py +++ b/tests/features/steps/well-location.py @@ -19,7 +19,7 @@ # TODO: should this use fixtures to populate and access data from the database? 
@given("the system has valid well and location data in the database") -def step_impl(context): +def step_given_the_system_has_valid_well_and_location_data_in_the(context): context.database = { "Well-Alpha": { "location": {"type": "Point", "coordinates": [32.222222, -110.999999]}, @@ -58,7 +58,9 @@ def step_impl_well_with_location(context: Context, well_name: str): @when('the technician retrieves the location for the well "{well_name}"') -def step_impl(context: Context, well_name: str): +def step_when_the_technician_retrieves_the_location_for_the_well_well_name( + context: Context, well_name: str +): """ :type context: behave.runner.Context """ @@ -66,7 +68,9 @@ def step_impl(context: Context, well_name: str): @then("the system should return the location details for that well") -def step_impl(context: Context): +def step_then_the_system_should_return_the_location_details_for_that_well( + context: Context, +): """ :type context: behave.runner.Context """ diff --git a/tests/features/steps/well-notes.py b/tests/features/steps/well-notes.py index 9b424f98..645dae99 100644 --- a/tests/features/steps/well-notes.py +++ b/tests/features/steps/well-notes.py @@ -17,18 +17,18 @@ @when("the user retrieves the well 9999") -def step_impl(context): +def step_when_the_user_retrieves_the_well_9999(context): context.response = context.client.get("thing/water-well/9999") context.notes = {} @then("the response should include an error message indicating the well was not found") -def step_impl(context): +def step_then_the_response_should_include_an_error_message_indicating_the_well(context): assert {"detail": "Thing with ID 9999 not found."} == context.response.json() @then("the notes should be a non-empty string") -def step_impl(context): +def step_then_the_notes_should_be_a_non_empty_string(context): for k, note in context.notes.items(): assert note, f"{k} Note is empty" @@ -36,7 +36,7 @@ def step_impl(context): @then( "the response should include location notes (i.e. 
driving directions and geographic well location notes)" ) -def step_impl(context): +def step_step_step(context): data = context.response.json() location = data["current_location"] assert "notes" in location["properties"], "Response does not include location notes" @@ -47,7 +47,7 @@ def step_impl(context): @then( "the response should include construction notes (i.e. pump notes and other construction notes)" ) -def step_impl(context): +def step_step_step_2(context): data = context.response.json() assert "construction_notes" in data, "Response does not include construction notes" assert data["construction_notes"] is not None, "Construction notes is null" @@ -55,7 +55,7 @@ def step_impl(context): @then("the response should include general well notes (catch all notes field)") -def step_impl(context): +def step_then_the_response_should_include_general_well_notes_catch_all_notes(context): data = context.response.json() assert "general_notes" in data, "Response does not include notes" assert data["general_notes"] is not None, "Notes is null" @@ -65,7 +65,7 @@ def step_impl(context): @then( "the response should include sampling procedure notes (notes about sampling procedures for all sample types, like water levels and water chemistry)" ) -def step_impl(context): +def step_step_step_3(context): data = context.response.json() assert ( "sampling_procedure_notes" in data @@ -79,7 +79,7 @@ def step_impl(context): @then( "the response should include water notes (i.e. 
water bearing zone information and other info from ose reports)" ) -def step_impl(context): +def step_step_step_4(context): data = context.response.json() assert "water_notes" in data, "Response does not include water notes" assert data["water_notes"] is not None, "Water notes is null" diff --git a/tests/features/well-inventory-csv.feature b/tests/features/well-inventory-csv.feature index 165fddba..e2d4e80e 100644 --- a/tests/features/well-inventory-csv.feature +++ b/tests/features/well-inventory-csv.feature @@ -184,8 +184,8 @@ Feature: Bulk upload well inventory from CSV via CLI And all wells are imported @positive @validation @autogenerate_ids @BDMS-TBD - Scenario: Upload succeeds and system auto-generates well_name_point_id when prefixed with "XY- - Given my CSV file contains all valid columns but uses "XY-" prefix for well_name_point_id values + Scenario: Upload succeeds and system auto-generates well_name_point_id for uppercase prefix placeholders and blanks + Given my CSV file contains all valid columns but uses uppercase "-xxxx" placeholders and blank values for well_name_point_id When I run the well inventory bulk upload command Then the command exits with code 0 And the system should return a response in JSON format @@ -194,14 +194,13 @@ Feature: Bulk upload well inventory from CSV via CLI ########################################################################### # NEGATIVE VALIDATION SCENARIOS ########################################################################### - @negative @validation @transactional_import @BDMS-TBD - Scenario: No wells are imported when any row fails validation - Given my CSV file contains 3 rows of data with 2 valid rows and 1 row missing the required "well_name_point_id" + @positive @validation @autogenerate_ids @BDMS-TBD + Scenario: Blank well_name_point_id values are auto-generated with the default prefix + Given my CSV file contains 3 rows of data with 2 valid rows and 1 row with a blank "well_name_point_id" When I run the 
well inventory bulk upload command - Then the command exits with a non-zero exit code + Then the command exits with code 0 And the system should return a response in JSON format - And the response includes a validation error for the row missing "well_name_point_id" - And no wells are imported + And all wells are imported with system-generated unique well_name_point_id values @negative @validation @BDMS-TBD Scenario: Upload fails when a row has an invalid postal code format @@ -293,16 +292,6 @@ Feature: Bulk upload well inventory from CSV via CLI And the response includes a validation error indicating the invalid UTM coordinates And no wells are imported - @negative @validation @BDMS-TBD - Scenario: Upload fails when required fields are missing - Given my CSV file contains rows missing a required field "well_name_point_id" - When I run the well inventory bulk upload command - Then the command exits with a non-zero exit code - And the system should return a response in JSON format - And the response includes validation errors for all rows missing required fields - And the response identifies the row and field for each error - And no wells are imported - @negative @validation @required_fields @BDMS-TBD Scenario Outline: Upload fails when a required field is missing Given my CSV file contains a row missing the required "" field diff --git a/tests/features/well-inventory-real-user-csv.feature b/tests/features/well-inventory-real-user-csv.feature new file mode 100644 index 00000000..0ec43b6d --- /dev/null +++ b/tests/features/well-inventory-real-user-csv.feature @@ -0,0 +1,39 @@ +@backend +@cli +Feature: Well inventory CLI with real user-entered CSV data + As a CLI user + I want to run the well inventory import against real user-entered data + So that parsing and summary behavior is validated against production-like input + + Background: + Given a functioning cli + And valid lexicon values exist for: + | lexicon category | + | role | + | contact_type | + | phone_type | + 
| email_type | + | address_type | + | elevation_method | + | well_pump_type | + | well_purpose | + | status_value | + | monitoring_frequency | + | sample_method | + | level_status | + | data_quality | + + @validation + Scenario: Run CLI import on the real user-entered well inventory CSV file with validation-heavy input + Given I use the real user-entered well inventory CSV file + And my CSV file is encoded in UTF-8 and uses commas as separators + And my CSV file contains multiple rows of well inventory data + When I run the well inventory bulk upload command + Then the command exits with a non-zero exit code + And the system should return a response in JSON format + And the response includes one or more validation errors + And each validation error contains row field and error details + And the response summary reports all rows were processed from the source CSV + And the response summary includes import and validation counts + And no wells are imported when validation errors are present + And the command exit code matches whether validation errors were reported diff --git a/tests/test_cli_commands.py b/tests/test_cli_commands.py index 220535ae..f70d8613 100644 --- a/tests/test_cli_commands.py +++ b/tests/test_cli_commands.py @@ -19,12 +19,12 @@ import uuid from pathlib import Path -from sqlalchemy import select -from typer.testing import CliRunner - from cli.cli import cli +from cli.service_adapter import WellInventoryResult from db import FieldActivity, FieldEvent, Observation, Sample from db.engine import session_ctx +from sqlalchemy import select +from typer.testing import CliRunner def test_initialize_lexicon_invokes_initializer(monkeypatch): @@ -70,14 +70,74 @@ def test_well_inventory_csv_command_calls_service(monkeypatch, tmp_path): def fake_well_inventory(file_path): captured["path"] = file_path + return WellInventoryResult( + exit_code=0, + stdout="", + stderr="", + payload={ + "summary": { + "total_rows_processed": 1, + "total_rows_imported": 1, + 
"validation_errors_or_warnings": 0, + }, + "validation_errors": [], + "wells": [{}], + }, + ) monkeypatch.setattr("cli.service_adapter.well_inventory_csv", fake_well_inventory) runner = CliRunner() result = runner.invoke(cli, ["well-inventory-csv", str(inventory_file)]) - assert result.exit_code == 0 + assert result.exit_code == 0, result.output assert Path(captured["path"]) == inventory_file + assert "[WELL INVENTORY IMPORT] SUCCESS" in result.output + + +def test_well_inventory_csv_command_reports_validation_errors(monkeypatch, tmp_path): + inventory_file = tmp_path / "inventory.csv" + inventory_file.write_text("header\nvalue\n") + + def fake_well_inventory(_file_path): + return WellInventoryResult( + exit_code=1, + stdout="", + stderr="", + payload={ + "summary": { + "total_rows_processed": 2, + "total_rows_imported": 0, + "validation_errors_or_warnings": 2, + }, + "validation_errors": [ + { + "row": 1, + "field": "contact_1_phone_1", + "error": "Invalid phone", + "value": "555-INVALID", + }, + { + "row": 2, + "field": "date_time", + "error": "Invalid datetime", + "value": "1/12/2026 14:37", + }, + ], + "wells": [], + }, + ) + + monkeypatch.setattr("cli.service_adapter.well_inventory_csv", fake_well_inventory) + + runner = CliRunner() + result = runner.invoke(cli, ["well-inventory-csv", str(inventory_file)]) + + assert result.exit_code == 1 + assert "Validation errors: 2" in result.output + assert "Row 1 (1 issue)" in result.output + assert "1. 
contact_1_phone_1: Invalid phone" in result.output + assert "input: 555-INVALID" in result.output def test_water_levels_bulk_upload_default_output(monkeypatch, tmp_path): diff --git a/tests/test_well_inventory.py b/tests/test_well_inventory.py index 95d43c79..010d4d6e 100644 --- a/tests/test_well_inventory.py +++ b/tests/test_well_inventory.py @@ -12,8 +12,6 @@ from pathlib import Path import pytest -from shapely import Point - from cli.service_adapter import well_inventory_csv from core.constants import SRID_UTM_ZONE_13N, SRID_WGS84 from db import ( @@ -28,6 +26,7 @@ ) from db.engine import session_ctx from services.util import transform_srid, convert_ft_to_m +from shapely import Point def test_well_inventory_db_contents(): @@ -481,12 +480,49 @@ def test_upload_duplicate_well_ids(self): errors = result.payload.get("validation_errors", []) assert any("Duplicate" in str(e) for e in errors) - def test_upload_missing_required_field(self): - """Upload fails when required field is missing.""" - file_path = Path("tests/features/data/well-inventory-missing-required.csv") - if file_path.exists(): - result = well_inventory_csv(file_path) - assert result.exit_code == 1 + def test_upload_blank_well_name_point_id_autogenerates(self, tmp_path): + """Upload succeeds when well_name_point_id is blank and auto-generates IDs.""" + source_path = Path("tests/features/data/well-inventory-valid.csv") + assert source_path.exists(), "Test data file does not exist." 
+ with open(source_path, "r", encoding="utf-8", newline="") as rf: + reader = csv.DictReader(rf) + rows = list(reader) + fieldnames = reader.fieldnames + + for row in rows: + row["well_name_point_id"] = "" + + file_path = tmp_path / "well-inventory-blank-point-id.csv" + with open(file_path, "w", encoding="utf-8", newline="") as wf: + writer = csv.DictWriter(wf, fieldnames=fieldnames) + writer.writeheader() + writer.writerows(rows) + + result = well_inventory_csv(file_path) + assert result.exit_code == 0 + + def test_upload_reuses_existing_contact_name_organization(self, tmp_path): + """Upload succeeds when rows repeat contact name+organization values.""" + source_path = Path("tests/features/data/well-inventory-valid.csv") + assert source_path.exists(), "Test data file does not exist." + with open(source_path, "r", encoding="utf-8", newline="") as rf: + reader = csv.DictReader(rf) + rows = list(reader) + fieldnames = reader.fieldnames + + # Force duplicate contact identity across rows. + if len(rows) >= 2: + rows[1]["contact_1_name"] = rows[0]["contact_1_name"] + rows[1]["contact_1_organization"] = rows[0]["contact_1_organization"] + + file_path = tmp_path / "well-inventory-duplicate-contact-name-org.csv" + with open(file_path, "w", encoding="utf-8", newline="") as wf: + writer = csv.DictWriter(wf, fieldnames=fieldnames) + writer.writeheader() + writer.writerows(rows) + + result = well_inventory_csv(file_path) + assert result.exit_code == 0 def test_upload_invalid_date_format(self): """Upload fails when date format is invalid.""" @@ -787,20 +823,44 @@ def test_generate_autogen_well_id_with_offset(self): assert well_id == "XY-0011" assert offset == 11 - def test_autogen_regex_pattern(self): - """Test the AUTOGEN_REGEX pattern matches correctly.""" - from services.well_inventory_csv import AUTOGEN_REGEX + def test_extract_autogen_prefix_pattern(self): + """Test auto-generation prefix extraction for supported placeholders.""" + from services.well_inventory_csv import 
_extract_autogen_prefix + + # Existing supported form + assert _extract_autogen_prefix("XY-") == "XY-" + assert _extract_autogen_prefix("AB-") == "AB-" + + # New supported form (2-3 uppercase letter prefixes) + assert _extract_autogen_prefix("WL-XXXX") == "WL-" + assert _extract_autogen_prefix("SAC-XXXX") == "SAC-" + assert _extract_autogen_prefix("ABC -xxxx") == "ABC-" + + # Blank values use default prefix + assert _extract_autogen_prefix("") == "NM-" + assert _extract_autogen_prefix(" ") == "NM-" + + # Unsupported forms + assert _extract_autogen_prefix("XY-001") is None + assert _extract_autogen_prefix("XYZ-") == "XYZ-" + assert _extract_autogen_prefix("X-") is None + assert _extract_autogen_prefix("123-") is None + assert _extract_autogen_prefix("USER-XXXX") is None + assert _extract_autogen_prefix("wl-xxxx") is None + + def test_make_row_models_missing_well_name_point_id_column_errors(self): + """Missing well_name_point_id column should fail validation (blank cell is separate).""" + from unittest.mock import MagicMock + + from services.well_inventory_csv import _make_row_models - # Should match - assert AUTOGEN_REGEX.match("XY-") is not None - assert AUTOGEN_REGEX.match("AB-") is not None - assert AUTOGEN_REGEX.match("ab-") is not None + rows = [{"project": "ProjectA", "site_name": "Site1"}] + models, validation_errors = _make_row_models(rows, MagicMock()) - # Should not match - assert AUTOGEN_REGEX.match("XY-001") is None - assert AUTOGEN_REGEX.match("XYZ-") is None - assert AUTOGEN_REGEX.match("X-") is None - assert AUTOGEN_REGEX.match("123-") is None + assert models == [] + assert len(validation_errors) == 1 + assert validation_errors[0]["field"] == "well_name_point_id" + assert validation_errors[0]["error"] == "Field required" def test_generate_autogen_well_id_non_numeric_suffix(self): """Test auto-generation when existing well has non-numeric suffix.""" diff --git a/transfers/sensor_transfer.py b/transfers/sensor_transfer.py index 09dd1ffd..61aea732 100644 
--- a/transfers/sensor_transfer.py +++ b/transfers/sensor_transfer.py @@ -202,7 +202,7 @@ def _group_step(self, session: Session, row: pd.Series, db_item: Base): row, installation_date, removal_date ) - if recording_interval: + if recording_interval is not None: recording_interval_unit = unit logger.info( f"name={sensor.name}, serial_no={sensor.serial_no}. " @@ -218,9 +218,11 @@ def _group_step(self, session: Session, row: pd.Series, db_item: Base): logger.critical( f"name={sensor.name}, serial_no={sensor.serial_no} error={error}" ) + self._capture_error( pointid, - f"name={sensor.name}, row.SerialNo={row.SerialNo}. error={error}", + f"name={sensor.name}, row.SerialNo={row.SerialNo}. " + f"error=Could not estimate recording interval. estimator error: {error}", "RecordingInterval", ) diff --git a/transfers/well_transfer.py b/transfers/well_transfer.py index 9b199589..60fa3ff6 100644 --- a/transfers/well_transfer.py +++ b/transfers/well_transfer.py @@ -279,146 +279,123 @@ def _get_dfs(self): cleaned_df = cleaned_df[cleaned_df["PointID"].isin(self.pointids)] return input_df, cleaned_df - def _step(self, session: Session, df: pd.DataFrame, i: int, row: pd.Series): - - try: - first_visit_date = get_first_visit_date(row) - well_purposes = ( - [] if isna(row.CurrentUse) else self._extract_well_purposes(row) - ) - well_casing_materials = ( - [] if isna(row.CasingDescription) else extract_casing_materials(row) - ) - well_pump_type = extract_well_pump_type(row) - - wcm = None - if notna(row.ConstructionMethod): - wcm = self._get_lexicon_value( - row, f"LU_ConstructionMethod:{row.ConstructionMethod}", "Unknown" - ) - - mpheight = row.MPHeight - mpheight_description = row.MeasuringPoint - if mpheight is None: - mphs = self._measuring_point_estimator.estimate_measuring_point_height( - row - ) - if mphs: - try: - mpheight = mphs[0][0] - mpheight_description = mphs[1][0] - except IndexError: - if self.verbose: - logger.warning( - f"Measuring point height estimation failed for 
well {row.PointID}, {mphs}" - ) - - data = CreateWell( - location_id=0, - name=row.PointID, - first_visit_date=first_visit_date, - hole_depth=row.HoleDepth, - well_depth=row.WellDepth, - well_casing_diameter=( - row.CasingDiameter * 12 if row.CasingDiameter else None - ), - well_casing_depth=row.CasingDepth, - release_status="public" if row.PublicRelease else "private", - measuring_point_height=mpheight, - measuring_point_description=mpheight_description, - notes=( - [{"content": row.Notes, "note_type": "General"}] - if row.Notes - else [] - ), - well_completion_date=row.CompletionDate, - well_driller_name=row.DrillerName, - well_construction_method=wcm, - well_pump_type=well_pump_type, - ) - - CreateWell.model_validate(data) - except ValidationError as e: - self._capture_validation_error(row.PointID, e) - return - - well = None - try: - well_data = data.model_dump(exclude=EXCLUDED_FIELDS) - well_data["thing_type"] = "water well" - well_data["nma_pk_welldata"] = row.WellID - well_data["nma_pk_location"] = row.LocationId - - well = Thing(**well_data) - session.add(well) - - if well_purposes: - for wp in well_purposes: - # TODO: add validation logic here - if wp in WellPurposeEnum: - wp_obj = WellPurpose(thing=well, purpose=wp) - session.add(wp_obj) - else: - logger.critical(f"{well.name}. Invalid well purpose: {wp}") - - if well_casing_materials: - for wcm in well_casing_materials: - # TODO: add validation logic here - if wcm in WellCasingMaterialEnum: - wcm_obj = WellCasingMaterial(thing=well, material=wcm) - session.add(wcm_obj) - else: - logger.critical( - f"{well.name}. 
Invalid well casing material: {wcm}" - ) - except Exception as e: - if well is not None: - session.expunge(well) - - self._capture_error(row.PointID, str(e), "UnknownField") - - logger.critical(f"Error creating well for {row.PointID}: {e}") - return - - try: - location, elevation_method, notes = make_location( - row, self._cached_elevations - ) - session.add(location) - # session.flush() - self._added_locations[row.PointID] = (elevation_method, notes) - except Exception as e: - import traceback - - traceback.print_exc() - self._capture_error(row.PointID, str(e), str(e), "Location") - logger.critical(f"Error making location for {row.PointID}: {e}") - - return - - assoc = LocationThingAssociation( - effective_start=datetime.now(tz=ZoneInfo("UTC")) - ) - - assoc.location = location - assoc.thing = well - session.add(assoc) - - if isna(row.AquiferType): - if self.verbose: - logger.info( - f"No AquiferType for {well.name}. Skipping aquifer association." - ) - else: - if self.verbose: - logger.info(f"Trying to associate aquifer for {well.name}") - try: - self._add_aquifers(session, row, well) - except Exception as e: - logger.critical( - f"Error creating aquifer association for {well.name}: {e}" - ) - + # def _step(self, session: Session, df: pd.DataFrame, i: int, row: pd.Series): + # + # try: + # first_visit_date = get_first_visit_date(row) + # well_purposes = ( + # [] if isna(row.CurrentUse) else self._extract_well_purposes(row) + # ) + # well_casing_materials = ( + # [] if isna(row.CasingDescription) else extract_casing_materials(row) + # ) + # well_pump_type = extract_well_pump_type(row) + # + # wcm = None + # if notna(row.ConstructionMethod): + # wcm = self._get_lexicon_value( + # row, f"LU_ConstructionMethod:{row.ConstructionMethod}", "Unknown" + # ) + # + # mpheight = row.MPHeight + # mpheight_description = row.MeasuringPoint + # if mpheight is None: + # mphs = self._measuring_point_estimator.estimate_measuring_point_height( + # row + # ) + # if mphs: + # try: + # 
mpheight = mphs[0][0] + # mpheight_description = mphs[1][0] + # except IndexError: + # if self.verbose: + # logger.warning( + # f"Measuring point height estimation failed for well {row.PointID}, {mphs}" + # ) + # + # data = CreateWell( + # location_id=0, + # name=row.PointID, + # first_visit_date=first_visit_date, + # hole_depth=row.HoleDepth, + # well_depth=row.WellDepth, + # well_casing_diameter=( + # row.CasingDiameter * 12 if row.CasingDiameter else None + # ), + # well_casing_depth=row.CasingDepth, + # release_status="public" if row.PublicRelease else "private", + # measuring_point_height=mpheight, + # measuring_point_description=mpheight_description, + # notes=( + # [{"content": row.Notes, "note_type": "General"}] + # if row.Notes + # else [] + # ), + # well_completion_date=row.CompletionDate, + # well_driller_name=row.DrillerName, + # well_construction_method=wcm, + # well_pump_type=well_pump_type, + # ) + # + # CreateWell.model_validate(data) + # except ValidationError as e: + # self._capture_validation_error(row.PointID, e) + # return + # + # well = None + # try: + # well_data = data.model_dump(exclude=EXCLUDED_FIELDS) + # well_data["thing_type"] = "water well" + # well_data["nma_pk_welldata"] = row.WellID + # well_data["nma_pk_location"] = row.LocationId + # + # well = Thing(**well_data) + # session.add(well) + # + # if well_purposes: + # for wp in well_purposes: + # # TODO: add validation logic here + # if wp in WellPurposeEnum: + # wp_obj = WellPurpose(thing=well, purpose=wp) + # session.add(wp_obj) + # else: + # logger.critical(f"{well.name}. Invalid well purpose: {wp}") + # + # if well_casing_materials: + # for wcm in well_casing_materials: + # # TODO: add validation logic here + # if wcm in WellCasingMaterialEnum: + # wcm_obj = WellCasingMaterial(thing=well, material=wcm) + # session.add(wcm_obj) + # else: + # logger.critical( + # f"{well.name}. 
Invalid well casing material: {wcm}" + # ) + # except Exception as e: + # if well is not None: + # session.expunge(well) + # + # self._capture_error(row.PointID, str(e), "UnknownField") + # + # logger.critical(f"Error creating well for {row.PointID}: {e}") + # return + # + # try: + # location, elevation_method, notes = make_location( + # row, self._cached_elevations + # ) + # session.add(location) + # # session.flush() + # self._added_locations[row.PointID] = (elevation_method, notes) + # except Exception as e: + # import traceback + # + # traceback.print_exc() + # self._capture_error(row.PointID, str(e), str(e), "Location") + # logger.critical(f"Error making location for {row.PointID}: {e}") + # + # return + # def _extract_well_purposes(self, row) -> list[str]: cu = row.CurrentUse @@ -643,9 +620,10 @@ def _build_well_payload(self, row) -> CreateWell | None: wcm = None if notna(row.ConstructionMethod): + cm = row.ConstructionMethod.strip() wcm = self._get_lexicon_value_safe( row, - f"LU_ConstructionMethod:{row.ConstructionMethod}", + f"LU_ConstructionMethod:{cm}", "Unknown", [], ) diff --git a/uv.lock b/uv.lock index 88f9dda5..faba9d95 100644 --- a/uv.lock +++ b/uv.lock @@ -1466,8 +1466,8 @@ requires-dist = [ { name = "pyasn1", specifier = "==0.6.2" }, { name = "pyasn1-modules", specifier = "==0.4.2" }, { name = "pycparser", specifier = "==2.23" }, - { name = "pydantic", specifier = "==2.11.7" }, - { name = "pydantic-core", specifier = "==2.33.2" }, + { name = "pydantic", specifier = "==2.12.5" }, + { name = "pydantic-core", specifier = "==2.41.5" }, { name = "pygments", specifier = "==2.19.2" }, { name = "pyjwt", specifier = "==2.11.0" }, { name = "pyproj", specifier = "==3.7.2" }, @@ -1939,7 +1939,7 @@ wheels = [ [[package]] name = "pydantic" -version = "2.11.7" +version = "2.12.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-types" }, @@ -1947,37 +1947,62 @@ dependencies = [ { name = "typing-extensions" }, { name = 
"typing-inspection" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/00/dd/4325abf92c39ba8623b5af936ddb36ffcfe0beae70405d456ab1fb2f5b8c/pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db", size = 788350, upload-time = "2025-06-14T08:33:17.137Z" } +sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6a/c0/ec2b1c8712ca690e5d61979dee872603e92b8a32f94cc1b72d53beab008a/pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b", size = 444782, upload-time = "2025-06-14T08:33:14.905Z" }, + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, ] [[package]] name = "pydantic-core" -version = "2.33.2" +version = "2.41.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688, upload-time = 
"2025-04-23T18:31:53.175Z" }, - { url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808, upload-time = "2025-04-23T18:31:54.79Z" }, - { url = "https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580, upload-time = "2025-04-23T18:31:57.393Z" }, - { url = "https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859, upload-time = "2025-04-23T18:31:59.065Z" }, - { url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810, upload-time = "2025-04-23T18:32:00.78Z" }, - { url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498, upload-time = "2025-04-23T18:32:02.418Z" }, - { url = "https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", 
size = 2000611, upload-time = "2025-04-23T18:32:04.152Z" }, - { url = "https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924, upload-time = "2025-04-23T18:32:06.129Z" }, - { url = "https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196, upload-time = "2025-04-23T18:32:08.178Z" }, - { url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389, upload-time = "2025-04-23T18:32:10.242Z" }, - { url = "https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223, upload-time = "2025-04-23T18:32:12.382Z" }, - { url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473, upload-time = "2025-04-23T18:32:14.034Z" }, - { url = "https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269, upload-time = "2025-04-23T18:32:15.783Z" }, - { url = 
"https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", size = 1893921, upload-time = "2025-04-23T18:32:18.473Z" }, - { url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" }, - { url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" }, - { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, + { url = 
"https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, + { url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, + { url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, + { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = 
"2025-11-04T13:40:40.289Z" }, + { url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" }, + { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, + { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, + { url = "https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, + { url = 
"https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, + { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, + { url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, + { url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, + { url = 
"https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" }, + { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, + { url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, + { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, + { url = 
"https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, + { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, + { url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, + { url = 
"https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, + { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" }, + { url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, + { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = 
"2025-11-04T13:41:47.474Z" }, + { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, + { url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, + { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, + { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, ] [[package]]