-
Notifications
You must be signed in to change notification settings - Fork 959
Sql autorunner #398
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Sql autorunner #398
Changes from all commits
cf333a0
3eda95a
9089fe8
f842ff4
5fd7625
aa79e26
78001ed
2843051
4e74f54
0349f73
4ee1854
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,3 @@ | ||
| pytest>=7.4 | ||
| pytest-timeout>=2.3 | ||
| pytest-json-report>=1.5 |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,118 @@ | ||
name: SQL Unit Tests

on:
  pull_request:
    types: [opened, synchronize, reopened]

permissions:
  contents: read
  pull-requests: write

jobs:
  run-assignment-queries:
    if: startsWith(github.head_ref, 'assignment-')
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: write

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          ref: ${{ github.head_ref }}

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install dependencies
        run: |
          pip install -r ./.github/scripts/requirements.txt

      - name: Get changed files
        id: changes
        run: |
          git fetch origin main
          git diff --name-only origin/main > changed_files.txt

          echo "Changed files:"
          cat changed_files.txt
          # Search the files modified in this PR for an assignment SQL file and
          # prefer assignment2 over assignment1: students often submit PRs out
          # of order, so we always run the latest assignment that was touched.
          assignment_changed=""

          # Priority: assignment2 > assignment1
          if grep -qE '(^|/)assignment2\.sql$' changed_files.txt; then
            assignment_changed=$(grep -E '(^|/)assignment2\.sql$' changed_files.txt | head -n1)
          elif grep -qE '(^|/)assignment1\.sql$' changed_files.txt; then
            assignment_changed=$(grep -E '(^|/)assignment1\.sql$' changed_files.txt | head -n1)
          fi

          if [ -n "$assignment_changed" ]; then
            echo "assignment_changed=$assignment_changed" >> "$GITHUB_OUTPUT"
          fi

      - name: Run tests
        id: pytest
        # Skip entirely when the PR did not touch an assignment SQL file:
        # an empty --file_path would otherwise guarantee a failing run.
        if: steps.changes.outputs.assignment_changed != ''
        run: |
          pytest tests/test_assignment.py --file_path="${{ steps.changes.outputs.assignment_changed }}" --tb=short --disable-warnings \
            --junitxml=pytest-report.xml || true

      - name: Post test results to PR
        if: steps.changes.outputs.assignment_changed != ''
        uses: actions/github-script@v7
        with:
          script: |
            function jsonToMarkdownTable(data) {
              if (!Array.isArray(data) || data.length === 0) {
                return 'No data';
              }

              const headers = Object.keys(data[0]);

              // Header row
              const headerRow = `| ${headers.join(' | ')} |`;

              // Separator row
              const separatorRow = `| ${headers.map(() => '---').join(' | ')} |`;

              // Data rows
              const rows = data.map(row =>
                `| ${headers.map(h => formatValue(row[h])).join(' | ')} |`
              );

              return [headerRow, separatorRow, ...rows].join('\n');
            }

            function formatValue(value) {
              if (value === null || value === undefined) return '';
              if (typeof value === 'object') return `\`${JSON.stringify(value)}\``;
              return String(value);
            }

            const fs = require('fs')
            if (!fs.existsSync('test-results.json')) {
              // The runner crashed before producing results (e.g. the SQL file
              // could not be parsed) — nothing to comment, and readFileSync
              // would fail this step with a confusing error.
              core.info('No test-results.json found; skipping PR comment.')
              return
            }
            const file_read_result = fs.readFileSync('test-results.json', 'utf8')
            const results = JSON.parse(file_read_result)
            // Format PR comment
            let body = `### 🧪 SQL Queries Run Results (up to 3 rows)\n\n`
            body += `<details> <summary>Click to expand/collapse assignment queries execution results</summary>`
            for (const result of results) {
              // result.result is an array of row objects. The previous check
              // called .trim() on it, which throws for arrays and aborted the
              // whole comment as soon as a query actually returned rows.
              if (Array.isArray(result.result) && result.result.length > 0) {
                body += `✅ Query ${result.number}: \n\n *${result.query}*, \n\n **Results**: \n`
                const table = jsonToMarkdownTable(result.result)
                body += `${table} \n`
                body += `\n`
                body += `-------------------------------------------------------- \n`
              }
            }
            body += `</details>`

            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body
            })
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,26 @@ | ||
import os
import sqlite3
from pathlib import Path

import pytest
|
|
||
@pytest.fixture
def sqlite_db():
    """Yield an in-memory SQLite connection seeded from the course database.

    The on-disk source defaults to the farmers-market DB but can be overridden
    with the ``SQL_SOURCE_DB`` environment variable, so parallel cohorts /
    micro-credential runs can point the autorunner at a different database.
    Working on an in-memory copy keeps the checked-in DB file read-only.
    """
    source_db = Path(os.environ.get("SQL_SOURCE_DB", "05_src/sql/farmersmarket.db"))
    # Create in-memory DB
    conn = sqlite3.connect(":memory:")
    conn.row_factory = sqlite3.Row  # rows convert cleanly via dict(row) downstream

    # Copy the on-disk database into memory; close the disk handle even when
    # backup() raises so the file descriptor is never leaked.
    disk_db = sqlite3.connect(source_db)
    try:
        disk_db.backup(conn)
    finally:
        disk_db.close()

    yield conn
    conn.close()
|
|
||
def pytest_addoption(parser):
    """Register the ``--file_path`` CLI option selecting the assignment SQL file."""
    # Default points at the first assignment of the current cohort; CI passes
    # the path of whichever assignment file the pull request actually changed.
    default_assignment = "02_activities/assignments/DC_Cohort/assignment1.sql"
    parser.addoption(
        "--file_path",
        action="store",
        default=default_assignment,
    )
|
|
||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We can choose to save these filepaths in environment variable (default="02_activities/assignments/DC_Cohort/assignment1.sql", "05_src/sql/farmersmarket.db"). Maybe under "env" key in .yml file
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The intent is to pass the file name as a parameter because we have multiple cohorts running in parallel (it can also be used for micro-credentials, etc.), so the hard-coded path is only a default. At the same time, this path is extracted from the list of files modified in the pull request. |
||
| def pytest_generate_tests(metafunc): | ||
| option_value = metafunc.config.option.file_path | ||
| if 'file_path' in metafunc.fixturenames and option_value is not None: | ||
| metafunc.parametrize("file_path", [option_value]) | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,83 @@ | ||
| import json | ||
| from pathlib import Path | ||
| import sqlite3 | ||
| import re | ||
|
|
||
def load_queries(sql_file):
    """Parse an assignment SQL file into its marked queries.

    Queries are delimited by ``-- QUERY <n>`` / ``-- END QUERY`` comment
    markers (case-insensitive). Whitespace after ``--`` is optional, so the
    historical ``--QUERY <n>`` spelling keeps working; previously the
    documented ``-- QUERY <n>`` form (with a space, exactly as the error
    message below suggests) was never detected at all.

    Returns a list of ``{"number": int, "query": str}`` dicts.

    Raises AssertionError for nested blocks, malformed markers, empty query
    bodies, or a file containing no markers at all.
    """
    lines = Path(sql_file).read_text().splitlines()

    # Compiled once, outside the loop; both markers tolerate whitespace
    # between '--' and the keyword.
    start_marker = re.compile(r"--\s*query\b\s*(.*)$", re.IGNORECASE)
    end_marker = re.compile(r"--\s*end\b", re.IGNORECASE)

    queries = []
    current_query = None
    buffer = []

    for line in lines:
        stripped = line.strip()

        # Start marker: -- QUERY <n>
        start = start_marker.match(stripped)
        if start:
            if current_query is not None:
                raise AssertionError("Nested QUERY blocks are not allowed")

            number_part = start.group(1).split()
            if not number_part:
                raise AssertionError(f"Invalid QUERY marker: {line}")

            try:
                current_query = int(number_part[0])
            except ValueError:
                raise AssertionError(f"Invalid QUERY number: {line}")

            buffer = []
            continue

        # End marker: -- END QUERY or -- END STATEMENT
        if end_marker.match(stripped):
            if current_query is None:
                continue  # ignore stray END

            query = "\n".join(buffer).strip().rstrip(";")
            if not query:
                # Markers present but no SQL between them: name the missing
                # query explicitly instead of failing with a generic error.
                raise AssertionError(
                    f"Query {current_query} is empty. Add SQL between the markers."
                )

            queries.append({
                "number": current_query,
                "query": query,
            })

            current_query = None
            buffer = []
            continue

        # Collect lines inside a query block
        if current_query is not None:
            buffer.append(line)

    if not queries:
        raise AssertionError(
            "No queries found. Use '-- QUERY <n>' and '-- END QUERY' markers."
        )

    return queries
|
|
||
def run_query(conn, query):
    """Execute *query* on *conn* and return every row as a plain dict.

    Assumes the connection's row_factory is sqlite3.Row so that rows are
    dict-convertible — TODO confirm against the sqlite_db fixture.
    """
    result_cursor = conn.cursor()
    result_cursor.execute(query)
    return [dict(fetched) for fetched in result_cursor.fetchall()]
|
|
||
def test_assignment(sqlite_db, file_path):
    """Pytest entry point: run every marked query from *file_path* against the
    in-memory database fixture and write the results report (see run_assignment).
    """
    run_assignment(sqlite_db, file_path)
|
|
||
def run_assignment(sqlite_db, file_path):
    """Execute every marked query in *file_path* and persist a results report.

    Writes ``test-results.json``: one entry per query containing its number,
    its SQL text, and up to the first 3 result rows. A query that fails to
    execute is recorded with an ``"error"`` field instead of being silently
    dropped from the report, so the PR comment reflects every query.
    """
    queries = load_queries(file_path)

    test_result = []
    for parsed_query in queries:
        entry = {
            "number": parsed_query["number"],
            "query": parsed_query["query"],
        }
        try:
            rows = run_query(sqlite_db, parsed_query["query"])
            entry["result"] = rows[:3]  # cap the PR comment at 3 rows per query
        except Exception as e:
            # Keep going: one broken query must not hide the others' output.
            print(f"An unexpected error occurred: {e}")
            entry["error"] = str(e)
        test_result.append(entry)

    # Open the report only after all queries ran; previously the file was
    # created up front, so a crash in load_queries left a zero-byte file that
    # broke JSON.parse in the PR-comment step.
    with open("test-results.json", "w") as json_file:
        json.dump(test_result, json_file, indent=2)
    # NOTE: kept as plain reporting for now; this can become real unit-test
    # assertions if DSI later adopts a unit-testing style for assignments.
|
|
||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.