diff --git a/.github/scripts/requirements.txt b/.github/scripts/requirements.txt
new file mode 100644
index 000000000..e336b28f4
--- /dev/null
+++ b/.github/scripts/requirements.txt
@@ -0,0 +1,3 @@
+pytest>=7.4
+pytest-timeout>=2.3
+pytest-json-report>=1.5
diff --git a/.github/workflows/sql_assignment_runner.yml b/.github/workflows/sql_assignment_runner.yml
new file mode 100644
index 000000000..fb5392176
--- /dev/null
+++ b/.github/workflows/sql_assignment_runner.yml
@@ -0,0 +1,118 @@
name: SQL Unit Tests

on:
  pull_request:
    types: [opened, synchronize, reopened]

permissions:
  contents: read
  pull-requests: write

jobs:
  run-assignment-queries:
    # Only student submission branches trigger the query runner.
    if: startsWith(github.head_ref, 'assignment-')
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: write

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          ref: ${{ github.head_ref }}
          # Full history: the default depth-1 clone has no origin/main to diff against.
          fetch-depth: 0

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install dependencies
        run: |
          pip install -r ./.github/scripts/requirements.txt

      - name: Get changed files
        id: changes
        run: |
          git fetch origin main
          # Triple-dot: only files changed on the PR side since the merge-base,
          # not files that moved on main after the student branched.
          git diff --name-only origin/main...HEAD > changed_files.txt

          echo "Changed files:"
          cat changed_files.txt
          # Search the files modified in this PR for an assignment file, giving
          # priority in the order 2, 1: students often submit PRs without
          # following the assignment order / git flow, so we run the latest
          # assignment that was touched.
          assignment_changed=""

          # Priority: assignment2 > assignment1
          if grep -qE '(^|/)assignment2\.sql$' changed_files.txt; then
            assignment_changed=$(grep -E '(^|/)assignment2\.sql$' changed_files.txt | head -n1)
          elif grep -qE '(^|/)assignment1\.sql$' changed_files.txt; then
            assignment_changed=$(grep -E '(^|/)assignment1\.sql$' changed_files.txt | head -n1)
          fi

          if [ -n "$assignment_changed" ]; then
            echo "assignment_changed=$assignment_changed" >> "$GITHUB_OUTPUT"
          fi

      - name: Run tests
        id: pytest
        # Skip entirely when the PR touched no assignment file; running pytest
        # with --file_path="" would only produce a confusing parse error.
        if: steps.changes.outputs.assignment_changed != ''
        run: |
          pytest tests/test_assignment.py --file_path="${{ steps.changes.outputs.assignment_changed }}" --tb=short --disable-warnings \
            --junitxml=pytest-report.xml || true

      - name: Post test results to PR
        if: steps.changes.outputs.assignment_changed != ''
        uses: actions/github-script@v7
        with:
          script: |
            function jsonToMarkdownTable(data) {
              if (!Array.isArray(data) || data.length === 0) {
                return 'No data';
              }

              const headers = Object.keys(data[0]);

              // Header row
              const headerRow = `| ${headers.join(' | ')} |`;

              // Separator row
              const separatorRow = `| ${headers.map(() => '---').join(' | ')} |`;

              // Data rows
              const rows = data.map(row =>
                `| ${headers.map(h => formatValue(row[h])).join(' | ')} |`
              );

              return [headerRow, separatorRow, ...rows].join('\n');
            }

            function formatValue(value) {
              if (value === null || value === undefined) return '';
              if (typeof value === 'object') return `\`${JSON.stringify(value)}\``;
              return String(value);
            }

            const fs = require('fs')
            const file_read_result = fs.readFileSync('test-results.json', 'utf8')
            const results = JSON.parse(file_read_result)
            // Format PR comment
            let body = `### 🧪 SQL Queries Run Results (up to 3 rows)\n\n`
            // NOTE(review): the original appears to have lost its <details> HTML
            // tags around this text; restored so long outputs stay collapsible.
            body += `<details>\n<summary>Click to expand/collapse assignment queries execution results</summary>\n\n`
            for (const result of results) {
              // result.result is a JSON array of row objects (written by
              // run_assignment), so test its length — calling .trim() on an
              // array throws a TypeError and kills the whole comment step.
              if (Array.isArray(result.result) && result.result.length > 0) {
                body += `✅ Query ${result.number}: \n\n *${result.query}*, \n\n **Results**: \n`
                const table = jsonToMarkdownTable(result.result)
                body += `${table} \n`
                body += `\n`
                body += `-------------------------------------------------------- \n`
              }
            }
            body += `\n</details>\n`

            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body
            })
+
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 000000000..5415915f8
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,26 @@
+import sqlite3
+import pytest
+from pathlib import Path
+
@pytest.fixture
def sqlite_db():
    """Yield an in-memory copy of the farmers-market SQLite database.

    Tests run against the copy, so the on-disk database file is never mutated
    by student queries.
    """
    source_db = Path("05_src/sql/farmersmarket.db")
    # sqlite3.connect() silently creates an empty DB for a missing path, which
    # would make every query fail with a confusing "no such table" error.
    if not source_db.exists():
        raise FileNotFoundError(f"Source database not found: {source_db}")

    # Create in-memory DB; Row factory lets callers do dict(row).
    conn = sqlite3.connect(":memory:")
    conn.row_factory = sqlite3.Row

    # source.backup(target): copy the disk DB into the in-memory connection.
    disk_db = sqlite3.connect(source_db)
    try:
        disk_db.backup(conn)
    finally:
        # Close the disk handle even if the backup fails.
        disk_db.close()

    yield conn
    conn.close()
+
def pytest_addoption(parser):
    """Register the --file_path CLI option CI uses to select the submitted SQL file."""
    parser.addoption(
        "--file_path",
        action="store",
        default="02_activities/assignments/DC_Cohort/assignment1.sql",
    )
+
def pytest_generate_tests(metafunc):
    """Inject the --file_path option into any test declaring a `file_path` argument."""
    selected = metafunc.config.option.file_path
    if "file_path" in metafunc.fixturenames and selected is not None:
        metafunc.parametrize("file_path", [selected])
diff --git a/tests/test_assignment.py b/tests/test_assignment.py
new file mode 100644
index 000000000..db0dee2ef
--- /dev/null
+++ b/tests/test_assignment.py
@@ -0,0 +1,83 @@
+import json
+from pathlib import Path
+import sqlite3
+import re
+
def load_queries(sql_file):
    """Parse numbered SQL blocks out of *sql_file*.

    Blocks are delimited by '-- QUERY <n>' start markers and '-- END ...'
    end markers; both '--QUERY 1' and the documented '-- QUERY 1' spellings
    are accepted, case-insensitively.

    Returns a list of {"number": int, "query": str} dicts in file order.
    Raises AssertionError on nested, malformed, unterminated, or missing
    markers.
    """
    lines = Path(sql_file).read_text().splitlines()

    # The previous startswith("--query") check rejected the documented
    # '-- QUERY 1' form (space after the dashes) that the error message below
    # prescribes; match both spellings with regexes instead.
    start_re = re.compile(r"--\s*query(.*)", re.IGNORECASE)
    end_re = re.compile(r"--\s*end", re.IGNORECASE)

    queries = []
    current_query = None
    buffer = []

    for line in lines:
        stripped = line.strip()

        # End marker first ('-- END QUERY' / '-- END STATEMENT'), so the
        # start regex never has to consider those lines.
        if end_re.match(stripped):
            if current_query is None:
                continue  # ignore stray END

            query = "\n".join(buffer).strip().rstrip(";")
            queries.append({
                "number": current_query,
                "query": query
            })
            current_query = None
            buffer = []
            continue

        # Start marker: -- QUERY <n>
        start = start_re.match(stripped)
        if start:
            if current_query is not None:
                raise AssertionError("Nested QUERY blocks are not allowed")

            tokens = start.group(1).split()
            if not tokens:
                raise AssertionError(f"Invalid QUERY marker: {line}")
            try:
                current_query = int(tokens[0])
            except ValueError:
                raise AssertionError(f"Invalid QUERY number: {line}")

            buffer = []
            continue

        # Collect lines inside an open query block.
        if current_query is not None:
            buffer.append(line)

    # Previously a start marker with no matching END was silently dropped.
    if current_query is not None:
        raise AssertionError(f"QUERY {current_query} is missing an END marker")

    if not queries:
        raise AssertionError(
            "No queries found. Use '-- QUERY ' and '-- END QUERY' markers."
        )

    return queries
+
def run_query(conn, query):
    """Execute *query* on connection *conn* and return all rows as plain dicts."""
    cur = conn.cursor()
    cur.execute(query)
    return [dict(record) for record in cur.fetchall()]
+
def test_assignment(sqlite_db, file_path):
    """Pytest entry point: run the submitted SQL file against the fixture DB.

    ``file_path`` is injected by ``pytest_generate_tests`` (conftest) from the
    --file_path CLI option; ``sqlite_db`` is the in-memory database fixture.
    Results land in test-results.json for the PR-comment workflow step.
    """
    run_assignment(sqlite_db, file_path)
+
def run_assignment(sqlite_db, file_path):
    """Execute every marked query from *file_path* against *sqlite_db*.

    Writes test-results.json for the PR-comment step: one entry per query
    with its number, text, and up to 3 result rows. A failing query is
    recorded with an "error" message instead of silently vanishing from
    the report.
    """
    queries = load_queries(file_path)  # may raise before anything is written

    test_result = []
    for parsed_query in queries:
        entry = {
            "number": parsed_query["number"],
            "query": parsed_query["query"],
            "result": [],
        }
        try:
            rows = run_query(sqlite_db, parsed_query["query"])
            entry["result"] = rows[:3]  # cap rows so the PR comment stays short
        except Exception as e:
            # One broken query must not hide the other queries' results.
            entry["error"] = str(e)
            print(f"Query {parsed_query['number']} failed: {e}")
        test_result.append(entry)

    # Write only after all queries ran, so an early crash can't leave a
    # truncated or empty results file behind (the file used to be opened
    # before any work was done).
    with open("test-results.json", "w") as json_file:
        json.dump(test_result, json_file, indent=2)
    # NOTE: intentionally assertion-free; DSI may later switch to asserting
    # on test_result for a unit-testing style of grading.
+
+