diff --git a/packages/check-core/src/perf/perf-stats.spec.ts b/packages/check-core/src/perf/perf-stats.spec.ts
new file mode 100644
index 00000000..b21e023c
--- /dev/null
+++ b/packages/check-core/src/perf/perf-stats.spec.ts
@@ -0,0 +1,87 @@
+// Copyright (c) 2026 Climate Interactive / New Venture Fund
+
+import { describe, expect, it } from 'vitest'
+
+import { PerfStats } from './perf-stats'
+
+/**
+ * Add a sequence of run times to a fresh `PerfStats` instance.
+ */
+function makeStats(times: number[]): PerfStats {
+ const stats = new PerfStats()
+ for (const t of times) {
+ stats.addRun(t)
+ }
+ return stats
+}
+
+describe('PerfStats', () => {
+ it('should produce zeroed report when no runs were added', () => {
+ const report = new PerfStats().toReport()
+ expect(report.minTime).toBe(0)
+ expect(report.maxTime).toBe(0)
+ expect(report.avgTime).toBe(0)
+ expect(report.medianTime).toBe(0)
+ expect(report.p95Time).toBe(0)
+ expect(report.stdDev).toBe(0)
+ expect(report.allTimes).toEqual([])
+ })
+
+ it('should report raw min and max from all samples', () => {
+ const report = makeStats([20, 10, 30, 15, 25]).toReport()
+ expect(report.minTime).toBe(10)
+ expect(report.maxTime).toBe(30)
+ })
+
+ it('should sort allTimes ascending in the report', () => {
+ const report = makeStats([20, 10, 30, 15, 25]).toReport()
+ expect(report.allTimes).toEqual([10, 15, 20, 25, 30])
+ })
+
+ it('should compute the trimmed mean (interquartile mean) for avgTime', () => {
+ // For 8 samples, the middle 50% is the 4 middle values [3,4,5,6] -> avg 4.5
+ const report = makeStats([1, 2, 3, 4, 5, 6, 7, 100]).toReport()
+ expect(report.avgTime).toBeCloseTo((3 + 4 + 5 + 6) / 4, 6)
+ })
+
+ it('should ignore extreme outliers when computing avgTime', () => {
+ // With heavy outliers on both ends, trimmed mean should sit near the bulk
+ const samples = [20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 1, 200]
+ const report = makeStats(samples).toReport()
+ expect(report.avgTime).toBeCloseTo(20, 6)
+ })
+
+ it('should compute median as 50th percentile (linear interpolation)', () => {
+ // Odd count: median is the middle value
+ expect(makeStats([1, 2, 3, 4, 5]).toReport().medianTime).toBeCloseTo(3, 6)
+ // Even count: median is interpolated midpoint
+ expect(makeStats([1, 2, 3, 4]).toReport().medianTime).toBeCloseTo(2.5, 6)
+ })
+
+ it('should compute p95 as the 95th percentile (linear interpolation)', () => {
+ // For samples 1..100 sorted ascending, p95 ≈ 95.05 with linear interpolation
+ const samples: number[] = []
+ for (let i = 1; i <= 100; i++) {
+ samples.push(i)
+ }
+ const report = makeStats(samples).toReport()
+ expect(report.p95Time).toBeCloseTo(95.05, 2)
+ })
+
+ it('should compute the population stddev across all samples', () => {
+ // Mean = 30, variance = ((10-30)^2 + (20-30)^2 + (30-30)^2 + (40-30)^2 + (50-30)^2) / 5 = 200
+ // stddev = sqrt(200) ≈ 14.142
+ const report = makeStats([10, 20, 30, 40, 50]).toReport()
+ expect(report.stdDev).toBeCloseTo(Math.sqrt(200), 4)
+ })
+
+ it('should produce stable percentiles for a single sample', () => {
+ const report = makeStats([42]).toReport()
+ expect(report.minTime).toBe(42)
+ expect(report.maxTime).toBe(42)
+ expect(report.avgTime).toBe(42)
+ expect(report.medianTime).toBe(42)
+ expect(report.p95Time).toBe(42)
+ expect(report.stdDev).toBe(0)
+ })
+})
diff --git a/packages/check-core/src/perf/perf-stats.ts b/packages/check-core/src/perf/perf-stats.ts
index 7a9794fd..118de209 100644
--- a/packages/check-core/src/perf/perf-stats.ts
+++ b/packages/check-core/src/perf/perf-stats.ts
@@ -1,47 +1,122 @@
-// Copyright (c) 2021-2022 Climate Interactive / New Venture Fund
+// Copyright (c) 2021-2026 Climate Interactive / New Venture Fund
+/**
+ * A summary of timing samples collected during a performance run.
+ */
export interface PerfReport {
+ /** Minimum sample time, in milliseconds. */
readonly minTime: number
+ /** Maximum sample time, in milliseconds. */
readonly maxTime: number
+ /**
+ * Trimmed mean (interquartile mean) computed from the middle 50% of samples,
+ * in milliseconds. This is more robust against outliers than a simple mean.
+ */
readonly avgTime: number
+ /** Median (50th percentile) sample time, in milliseconds. */
+ readonly medianTime: number
+ /** 95th percentile sample time, in milliseconds. */
+ readonly p95Time: number
+ /** Population standard deviation across all samples, in milliseconds. */
+ readonly stdDev: number
+ /** All recorded sample times, sorted ascending, in milliseconds. */
readonly allTimes: number[]
}
+/**
+ * Return the linearly-interpolated percentile of the given sorted array.
+ *
+ * @param sorted The samples sorted in ascending order. Must be non-empty.
+ * @param p The percentile to compute, in the range [0, 1].
+ * @returns The interpolated percentile value.
+ */
+function percentile(sorted: number[], p: number): number {
+ if (sorted.length === 1) {
+ return sorted[0]
+ }
+ const rank = p * (sorted.length - 1)
+ const lo = Math.floor(rank)
+ const hi = Math.ceil(rank)
+ if (lo === hi) {
+ return sorted[lo]
+ }
+ const frac = rank - lo
+ return sorted[lo] + (sorted[hi] - sorted[lo]) * frac
+}
+
+/**
+ * Collect performance timing samples and produce a robust statistical summary.
+ */
export class PerfStats {
private readonly times: number[] = []
+ /**
+ * Record a single run time sample.
+ *
+ * @param timeInMillis The run time in milliseconds.
+ */
addRun(timeInMillis: number): void {
this.times.push(timeInMillis)
}
+ /**
+ * Get the raw run time samples that have been recorded.
+ *
+ * @returns A copy of the recorded run times, in insertion order.
+ */
+ getTimes(): number[] {
+ return this.times.slice()
+ }
+
+ /**
+ * Produce a `PerfReport` summarizing the recorded samples.
+ *
+ * @returns The summary report.
+ */
toReport(): PerfReport {
if (this.times.length === 0) {
return {
minTime: 0,
maxTime: 0,
avgTime: 0,
+ medianTime: 0,
+ p95Time: 0,
+ stdDev: 0,
allTimes: []
}
}
- // Get the absolute min and max times, just for informational
- // purposes (these will be thrown out before computing the average)
- const minTime = Math.min(...this.times)
- const maxTime = Math.max(...this.times)
+ // Sort the samples ascending for percentile and trimmed-mean calculations
+ const sortedTimes = this.times.slice().sort((a, b) => a - b)
+ const n = sortedTimes.length
+
+ // Raw min/max
+ const minTime = sortedTimes[0]
+ const maxTime = sortedTimes[n - 1]
- // Sort the run times, then keep only the middle 50% so that we
- // ignore outliers for computing the average time
- const sortedTimes = this.times.sort()
- const minIndex = Math.floor(sortedTimes.length / 4)
- const maxIndex = minIndex + Math.ceil(sortedTimes.length / 2)
+ // Trimmed mean across the middle 50% of samples (interquartile mean).
+ // This matches the historical behavior of `avgTime`.
+ const minIndex = Math.floor(n / 4)
+ const maxIndex = minIndex + Math.max(1, Math.ceil(n / 2))
const middleTimes = sortedTimes.slice(minIndex, maxIndex)
- const totalTime = middleTimes.reduce((a, b) => a + b, 0)
- const avgTime = totalTime / middleTimes.length
+ const avgTime = middleTimes.reduce((a, b) => a + b, 0) / middleTimes.length
+
+ // Robust quantiles
+ const medianTime = percentile(sortedTimes, 0.5)
+ const p95Time = percentile(sortedTimes, 0.95)
+
+ // Population standard deviation across all samples
+ const mean = sortedTimes.reduce((a, b) => a + b, 0) / n
+ const variance = sortedTimes.reduce((acc, t) => acc + (t - mean) * (t - mean), 0) / n
+ const stdDev = Math.sqrt(variance)
return {
minTime,
maxTime,
avgTime,
+ medianTime,
+ p95Time,
+ stdDev,
allTimes: sortedTimes
}
}
diff --git a/packages/check-ui-shell/src/components/perf/dot-plot-vm.ts b/packages/check-ui-shell/src/components/perf/dot-plot-vm.ts
index 8dee37f2..eb5decd3 100644
--- a/packages/check-ui-shell/src/components/perf/dot-plot-vm.ts
+++ b/packages/check-ui-shell/src/components/perf/dot-plot-vm.ts
@@ -1,30 +1,69 @@
-// Copyright (c) 2021-2022 Climate Interactive / New Venture Fund
+// Copyright (c) 2021-2026 Climate Interactive / New Venture Fund
+/**
+ * View model for a single horizontal dot plot.
+ */
export interface DotPlotViewModel {
- /** Raw values of the dots. */
+ /** Raw values of the dots (full set, including any that overflow the visible domain). */
values: number[]
/** Raw average value. */
avg: number
- /** Positions of the dots, in the range [0, 100]. */
+ /** Lower bound of the visible domain (the value at the left tick). */
+ min: number
+ /** Upper bound of the visible domain (the value at the right tick). */
+ max: number
+ /** Positions of the in-range dots, in the range [0, 100]. */
points: number[]
- /** Position of the average line, in the range [0, 100]. */
+ /** Position of the average line, in the range [0, 100] (clamped). */
avgPoint: number
+ /** Number of samples that exceed the upper bound (rendered as an overflow indicator). */
+ overflowCount: number
}
+/**
+ * Build a `DotPlotViewModel` for the given samples. Values that exceed `max`
+ * are excluded from the rendered dots and counted in `overflowCount` so the
+ * caller can display a "tail beyond the visible range" indicator.
+ *
+ * @param values The raw sample values.
+ * @param min The lower bound of the visible domain.
+ * @param max The upper bound of the visible domain.
+ * @param avg The average value to highlight (clamped to the visible range).
+ * @returns A populated dot plot view model.
+ */
export function createDotPlotViewModel(values: number[], min: number, max: number, avg: number): DotPlotViewModel {
- // Convert raw values to percentages
const spread = max - min
function pct(x: number): number {
- if (spread !== 0) {
- return ((x - min) / (max - min)) * 100
- } else {
+ if (spread === 0) {
+ return 0
+ }
+ const p = ((x - min) / spread) * 100
+ if (p < 0) {
return 0
}
+ if (p > 100) {
+ return 100
+ }
+ return p
}
+
+ const points: number[] = []
+ let overflowCount = 0
+ for (const v of values) {
+ if (v > max) {
+ overflowCount++
+ } else {
+ points.push(pct(v))
+ }
+ }
+
return {
values,
avg,
- points: values.map(p => pct(p)),
- avgPoint: pct(avg)
+ min,
+ max,
+ points,
+ avgPoint: pct(avg),
+ overflowCount
}
}
diff --git a/packages/check-ui-shell/src/components/perf/dot-plot.svelte b/packages/check-ui-shell/src/components/perf/dot-plot.svelte
index 106a32ba..504a3a0a 100644
--- a/packages/check-ui-shell/src/components/perf/dot-plot.svelte
+++ b/packages/check-ui-shell/src/components/perf/dot-plot.svelte
@@ -1,4 +1,4 @@
-
+
@@ -17,6 +20,22 @@ export let colorClass: string
{/each}
+ {#if viewModel.overflowCount > 0}
+ +{viewModel.overflowCount}
+ {/if}
+ {#if showAxisLabels}
+ {viewModel.min.toFixed(1)}
+ {viewModel.max.toFixed(1)}
+ {/if}
+ {#if avgLabelPosition === 'below'}
+
+ {viewModel.avg.toFixed(1)}
+
+ {:else if avgLabelPosition === 'above'}
+
+ {viewModel.avg.toFixed(1)}
+
+ {/if}
@@ -65,4 +84,56 @@ $line-color: #555;
border-radius: $dot-size * 0.5;
opacity: 0.2;
}
+
+.overflow {
+ position: absolute;
+ left: 100%;
+ top: 0;
+ height: $height;
+ display: flex;
+ align-items: center;
+ margin-left: 0.4rem;
+ color: #888;
+ font-family: monospace;
+ font-size: 0.75rem;
+ white-space: nowrap;
+}
+
+.axis-label {
+ position: absolute;
+ top: $height;
+ margin-top: 0.1rem;
+ color: #888;
+ font-family: monospace;
+ font-size: 0.75rem;
+ white-space: nowrap;
+ transform: translateX(-50%);
+
+ &.axis-label-left {
+ left: 0;
+ }
+
+ &.axis-label-right {
+ left: 100%;
+ }
+}
+
+.avg-label {
+ position: absolute;
+ font-family: monospace;
+ font-size: 0.75rem;
+ white-space: nowrap;
+
+ &.avg-label-below {
+ top: $height;
+ margin-top: 0.1rem;
+ transform: translateX(-50%);
+ }
+
+ &.avg-label-above {
+ top: 0;
+ margin-top: -0.1rem;
+ transform: translate(-50%, -100%);
+ }
+}
diff --git a/packages/check-ui-shell/src/components/perf/perf-table-row-vm.ts b/packages/check-ui-shell/src/components/perf/perf-table-row-vm.ts
index 87bccee9..9147bd93 100644
--- a/packages/check-ui-shell/src/components/perf/perf-table-row-vm.ts
+++ b/packages/check-ui-shell/src/components/perf/perf-table-row-vm.ts
@@ -1,15 +1,47 @@
-// Copyright (c) 2021-2022 Climate Interactive / New Venture Fund
+// Copyright (c) 2021-2026 Climate Interactive / New Venture Fund
import type { DotPlotViewModel } from './dot-plot-vm'
+/**
+ * Indicates how to color a percent-change value.
+ * - 'better': improvement (faster); rendered green.
+ * - 'worse': regression (slower); rendered red.
+ * - 'neutral': no change or not applicable; rendered dim.
+ */
+export type PerfPctChangeKind = 'better' | 'worse' | 'neutral'
+
+/**
+ * A single row in the perf-runner results table. A row is either:
+ * - a per-run row, displaying stats from one Run button click, or
+ * - a summary row, pooling samples from every per-run row.
+ */
export interface PerfTableRowViewModel {
- num: number
- minTimeL: string
+ /** Label shown in the leftmost column ("1", "2", ... or "all" for summary). */
+ label: string
+ /** True if this row aggregates samples across all runs. */
+ isSummary: boolean
+ /** Median time for the left bundle, formatted to one decimal place. */
+ medianTimeL: string
+ /** Median time for the right bundle, formatted to one decimal place. */
+ medianTimeR: string
+ /** Trimmed-mean ("avg") time for the left bundle, formatted to one decimal place. */
avgTimeL: string
- maxTimeL: string
- minTimeR: string
+ /** Trimmed-mean ("avg") time for the right bundle, formatted to one decimal place. */
avgTimeR: string
- maxTimeR: string
+ /** Signed percent change of the right avgTime relative to the left (e.g. "+2.5%"). */
+ pctChange: string
+ /** Hint for coloring the percent-change value. */
+ pctChangeKind: PerfPctChangeKind
+ /** 95th-percentile time for the left bundle, formatted to one decimal place. */
+ p95TimeL: string
+ /** 95th-percentile time for the right bundle, formatted to one decimal place. */
+ p95TimeR: string
+ /** Standard deviation for the left bundle, formatted to one decimal place. */
+ stdDevL: string
+ /** Standard deviation for the right bundle, formatted to one decimal place. */
+ stdDevR: string
+ /** Dot plot for the left bundle's samples. */
dotPlotL: DotPlotViewModel
+ /** Dot plot for the right bundle's samples. */
dotPlotR: DotPlotViewModel
}
diff --git a/packages/check-ui-shell/src/components/perf/perf-vm.spec.ts b/packages/check-ui-shell/src/components/perf/perf-vm.spec.ts
new file mode 100644
index 00000000..7310483c
--- /dev/null
+++ b/packages/check-ui-shell/src/components/perf/perf-vm.spec.ts
@@ -0,0 +1,105 @@
+// Copyright (c) 2026 Climate Interactive / New Venture Fund
+
+import { get } from 'svelte/store'
+import { describe, expect, it } from 'vitest'
+
+import { PerfStats } from '@sdeverywhere/check-core'
+
+import { createPerfViewModel } from './perf-vm'
+
+/**
+ * Build a `PerfReport` from the given run-time samples.
+ */
+function reportOf(times: number[]) {
+ const stats = new PerfStats()
+ for (const t of times) {
+ stats.addRun(t)
+ }
+ return stats.toReport()
+}
+
+describe('PerfViewModel', () => {
+ it('should append a per-run row plus a summary row when one run is added', () => {
+ const vm = createPerfViewModel()
+ vm.addRow(reportOf([10, 11, 12, 13, 14]), reportOf([20, 21, 22, 23, 24]))
+
+ const rows = get(vm.rows)
+ expect(rows.length).toBe(2)
+ expect(rows[0].label).toBe('1')
+ expect(rows[0].isSummary).toBe(false)
+ expect(rows[1].label).toBe('all')
+ expect(rows[1].isSummary).toBe(true)
+ })
+
+ it('should pool samples across runs in the summary row', () => {
+ const vm = createPerfViewModel()
+ vm.addRow(reportOf([10, 10, 10, 10]), reportOf([20, 20, 20, 20]))
+ vm.addRow(reportOf([12, 12, 12, 12]), reportOf([24, 24, 24, 24]))
+
+ const rows = get(vm.rows)
+ expect(rows.length).toBe(3)
+ const summary = rows[2]
+ // Pooled left samples are [10,10,10,10,12,12,12,12]; trimmed mean of middle 50% = 11
+ expect(summary.avgTimeL).toBe('11.0')
+ // Pooled right samples = [20,20,20,20,24,24,24,24]; trimmed mean = 22
+ expect(summary.avgTimeR).toBe('22.0')
+ })
+
+ it('should compute percent change of avg from L to R', () => {
+ const vm = createPerfViewModel()
+ // L avg = 100, R avg = 90 -> -10%
+ vm.addRow(reportOf([100, 100, 100, 100]), reportOf([90, 90, 90, 90]))
+
+ const rows = get(vm.rows)
+ expect(rows[0].pctChange).toBe('-10.0%')
+ expect(rows[0].pctChangeKind).toBe('better')
+ })
+
+ it('should classify a regression as worse', () => {
+ const vm = createPerfViewModel()
+ vm.addRow(reportOf([100, 100, 100, 100]), reportOf([110, 110, 110, 110]))
+
+ const rows = get(vm.rows)
+ expect(rows[0].pctChange).toBe('+10.0%')
+ expect(rows[0].pctChangeKind).toBe('worse')
+ })
+
+ it('should classify a tiny change as neutral and emit no pct text', () => {
+ const vm = createPerfViewModel()
+ vm.addRow(reportOf([100, 100, 100, 100]), reportOf([100, 100, 100, 100]))
+
+ const rows = get(vm.rows)
+ expect(rows[0].pctChange).toBe('')
+ expect(rows[0].pctChangeKind).toBe('neutral')
+ })
+
+ it('should expose median, p95, and stddev formatted to one decimal', () => {
+ const vm = createPerfViewModel()
+ vm.addRow(reportOf([1, 2, 3, 4, 5]), reportOf([10, 20, 30, 40, 50]))
+
+ const rows = get(vm.rows)
+ expect(rows[0].medianTimeL).toBe('3.0')
+ expect(rows[0].medianTimeR).toBe('30.0')
+ expect(rows[0].p95TimeL).toMatch(/^4\.[6-9]/)
+ expect(rows[0].p95TimeR).toMatch(/^4[6-9]/)
+ expect(rows[0].stdDevL).not.toBe('0.0')
+ expect(rows[0].stdDevR).not.toBe('0.0')
+ })
+
+ it('should produce dot plot bounds based on pooled p95 so outliers do not skew the scale', () => {
+ const vm = createPerfViewModel()
+ // Most samples cluster around 20; one extreme outlier at 1000
+ const dense: number[] = []
+ for (let i = 0; i < 100; i++) {
+ dense.push(20)
+ }
+ dense.push(1000)
+ vm.addRow(reportOf(dense), reportOf(dense.slice()))
+
+ const rows = get(vm.rows)
+ // Pooled p95 should be near 20 (since the 1000 is way past the p95 of the 101 pooled samples)
+ // and the 1000-sample should land in overflowCount, not in the visible points
+ expect(rows[0].dotPlotL.overflowCount).toBeGreaterThanOrEqual(1)
+ expect(rows[0].dotPlotR.overflowCount).toBeGreaterThanOrEqual(1)
+ })
+})
diff --git a/packages/check-ui-shell/src/components/perf/perf-vm.ts b/packages/check-ui-shell/src/components/perf/perf-vm.ts
index 009870ff..29948795 100644
--- a/packages/check-ui-shell/src/components/perf/perf-vm.ts
+++ b/packages/check-ui-shell/src/components/perf/perf-vm.ts
@@ -1,66 +1,167 @@
-// Copyright (c) 2021-2022 Climate Interactive / New Venture Fund
+// Copyright (c) 2021-2026 Climate Interactive / New Venture Fund
import type { Readable, Writable } from 'svelte/store'
-import { get, writable } from 'svelte/store'
+import { writable } from 'svelte/store'
import type { PerfReport } from '@sdeverywhere/check-core'
+import { PerfStats } from '@sdeverywhere/check-core'
import type { DotPlotViewModel } from './dot-plot-vm'
import { createDotPlotViewModel } from './dot-plot-vm'
-import type { PerfTableRowViewModel } from './perf-table-row-vm'
+import type { PerfPctChangeKind, PerfTableRowViewModel } from './perf-table-row-vm'
+/**
+ * View model for the perf-runner results table. Holds a row per Run-button click
+ * plus a summary row that pools samples across all runs once at least one run
+ * has completed.
+ */
export class PerfViewModel {
private readonly writableRows: Writable<PerfTableRowViewModel[]>
+ /** The visible rows, including the summary row when present. */
public readonly rows: Readable<PerfTableRowViewModel[]>
- private minTime = Number.MAX_VALUE
- private maxTime = 0
+
+ /** Raw timing samples for the left bundle, partitioned by run. */
+ private readonly samplesByRunL: number[][] = []
+ /** Raw timing samples for the right bundle, partitioned by run. */
+ private readonly samplesByRunR: number[][] = []
constructor() {
this.writableRows = writable([])
this.rows = this.writableRows
}
+ /**
+ * Append a new run to the table and refresh the summary row.
+ *
+ * @param reportL The performance report for the left bundle.
+ * @param reportR The performance report for the right bundle.
+ */
addRow(reportL: PerfReport, reportR: PerfReport): void {
- // Compute the min/max time across all rows
- const reportMinTime = Math.min(reportL.minTime, reportR.minTime)
- const reportMaxTime = Math.max(reportL.maxTime, reportR.maxTime)
- const overallMinTime = Math.min(this.minTime, reportMinTime)
- const overallMaxTime = Math.max(this.maxTime, reportMaxTime)
- this.minTime = overallMinTime
- this.maxTime = overallMaxTime
-
- function updateBounds(vm: DotPlotViewModel): DotPlotViewModel {
- return createDotPlotViewModel(vm.values, overallMinTime, overallMaxTime, vm.avg)
- }
+ this.samplesByRunL.push(reportL.allTimes)
+ this.samplesByRunR.push(reportR.allTimes)
+
+ const summaryReportL = pooledReport(this.samplesByRunL)
+ const summaryReportR = pooledReport(this.samplesByRunR)
- // Update the dot plots for all existing rows so that they all use the
- // same min/max bounds
- const allRows = get(this.writableRows)
- for (const row of allRows) {
- row.dotPlotL = updateBounds(row.dotPlotL)
- row.dotPlotR = updateBounds(row.dotPlotR)
+ // Determine shared bounds for all dot plots. We use the absolute min as
+ // the lower bound and the pooled-p95 as the upper bound so a single
+ // outlier sample doesn't squish every plot to the left edge. Samples
+ // beyond p95 are rendered as an overflow indicator on the right.
+ const lowerBound = Math.min(summaryReportL.minTime, summaryReportR.minTime)
+ const upperBound = Math.max(summaryReportL.p95Time, summaryReportR.p95Time)
+
+ const perRunRows: PerfTableRowViewModel[] = []
+ for (let i = 0; i < this.samplesByRunL.length; i++) {
+ const runReportL = reportFromTimes(this.samplesByRunL[i])
+ const runReportR = reportFromTimes(this.samplesByRunR[i])
+ perRunRows.push(buildRow(`${i + 1}`, false, runReportL, runReportR, lowerBound, upperBound))
}
- function dotPlot(report: PerfReport): DotPlotViewModel {
- return createDotPlotViewModel(report.allTimes, overallMinTime, overallMaxTime, report.avgTime)
+ const summaryRow = buildRow('all', true, summaryReportL, summaryReportR, lowerBound, upperBound)
+
+ this.writableRows.set([...perRunRows, summaryRow])
+ }
+}
+
+/**
+ * Compute a `PerfReport` from the given list of run-time samples.
+ */
+function reportFromTimes(times: number[]): PerfReport {
+ const stats = new PerfStats()
+ for (const t of times) {
+ stats.addRun(t)
+ }
+ return stats.toReport()
+}
+
+/**
+ * Compute a pooled `PerfReport` across multiple runs by treating every sample
+ * equally.
+ */
+function pooledReport(samplesByRun: number[][]): PerfReport {
+ const stats = new PerfStats()
+ for (const runSamples of samplesByRun) {
+ for (const t of runSamples) {
+ stats.addRun(t)
}
+ }
+ return stats.toReport()
+}
+
+/**
+ * Compute the percent change of `r` relative to `l` (i.e. (r - l) / l * 100).
+ * Returns 0 if `l` is zero.
+ */
+function pctChange(l: number, r: number): number {
+ if (l === 0) {
+ return 0
+ }
+ return ((r - l) / l) * 100
+}
+
+/**
+ * Format a signed percentage with a leading "+" or "-" and a "%" suffix.
+ * Returns an empty string if `value` is exactly 0.
+ */
+function formatSignedPct(value: number): string {
+ if (value === 0) {
+ return ''
+ }
+ const sign = value > 0 ? '+' : ''
+ return `${sign}${value.toFixed(1)}%`
+}
+
+/**
+ * Classify the kind of percent change for color hinting. A value below the
+ * threshold (default 0.05%) is treated as neutral to avoid noisy coloring.
+ */
+function classifyPctChange(value: number): PerfPctChangeKind {
+ if (Math.abs(value) < 0.05) {
+ return 'neutral'
+ }
+ return value < 0 ? 'better' : 'worse'
+}
- // Add the new row
- allRows.push({
- num: allRows.length + 1,
- minTimeL: reportL.minTime.toFixed(1),
- avgTimeL: reportL.avgTime.toFixed(1),
- maxTimeL: reportL.maxTime.toFixed(1),
- minTimeR: reportR.minTime.toFixed(1),
- avgTimeR: reportR.avgTime.toFixed(1),
- maxTimeR: reportR.maxTime.toFixed(1),
- dotPlotL: dotPlot(reportL),
- dotPlotR: dotPlot(reportR)
- })
- this.writableRows.set(allRows)
+/**
+ * Build a single `PerfTableRowViewModel`.
+ */
+function buildRow(
+ label: string,
+ isSummary: boolean,
+ reportL: PerfReport,
+ reportR: PerfReport,
+ lowerBound: number,
+ upperBound: number
+): PerfTableRowViewModel {
+ const change = pctChange(reportL.avgTime, reportR.avgTime)
+ return {
+ label,
+ isSummary,
+ medianTimeL: reportL.medianTime.toFixed(1),
+ medianTimeR: reportR.medianTime.toFixed(1),
+ avgTimeL: reportL.avgTime.toFixed(1),
+ avgTimeR: reportR.avgTime.toFixed(1),
+ pctChange: formatSignedPct(change),
+ pctChangeKind: classifyPctChange(change),
+ p95TimeL: reportL.p95Time.toFixed(1),
+ p95TimeR: reportR.p95Time.toFixed(1),
+ stdDevL: reportL.stdDev.toFixed(1),
+ stdDevR: reportR.stdDev.toFixed(1),
+ dotPlotL: dotPlot(reportL, lowerBound, upperBound),
+ dotPlotR: dotPlot(reportR, lowerBound, upperBound)
}
}
+/**
+ * Build a `DotPlotViewModel` for the given report and bounds.
+ */
+function dotPlot(report: PerfReport, lower: number, upper: number): DotPlotViewModel {
+ return createDotPlotViewModel(report.allTimes, lower, upper, report.avgTime)
+}
+
+/**
+ * Create an empty `PerfViewModel`.
+ */
export function createPerfViewModel(): PerfViewModel {
return new PerfViewModel()
}
diff --git a/packages/check-ui-shell/src/components/perf/perf.svelte b/packages/check-ui-shell/src/components/perf/perf.svelte
index 12b3edab..fc8babfc 100644
--- a/packages/check-ui-shell/src/components/perf/perf.svelte
+++ b/packages/check-ui-shell/src/components/perf/perf.svelte
@@ -1,4 +1,4 @@
-
+