From 0582e5e24c32192111a33d64cd221f1e9f639875 Mon Sep 17 00:00:00 2001
From: Olmo Maldonado
Date: Tue, 12 May 2026 18:08:05 -0700
Subject: [PATCH 1/2] make sure we pass fetch even with supportsStreaming false

---
 packages/proxy/src/providers/openai.test.ts | 27 +++++++++++++++++++++
 packages/proxy/src/proxy.ts                 |  6 +++++
 2 files changed, 33 insertions(+)

diff --git a/packages/proxy/src/providers/openai.test.ts b/packages/proxy/src/providers/openai.test.ts
index 2a9ed639..124690e7 100644
--- a/packages/proxy/src/providers/openai.test.ts
+++ b/packages/proxy/src/providers/openai.test.ts
@@ -379,6 +379,33 @@ it("handles /responses as endpoint_path", async () => {
   });
 });
 
+it("uses injected fetch when supportsStreaming is false", async () => {
+  const { fetch, requests } = createCapturingFetch({ captureOnly: true });
+
+  await callProxyV1({
+    body: {
+      model: "gpt-4o-mini",
+      messages: [{ role: "user", content: "hello" }],
+      stream: true,
+    },
+    fetch,
+    getApiSecrets: async () => [
+      {
+        type: "openai",
+        name: "openai",
+        secret: "provider-secret",
+        metadata: {
+          api_base: "http://test.com/v1",
+          supportsStreaming: false,
+        },
+      },
+    ],
+  });
+
+  expect(requests.length).toBe(1);
+  expect(requests[0].url).toBe("http://test.com/v1/chat/completions");
+});
+
 it("uses model path for azure when metadata.deployment is non-string", async () => {
   const { fetch, requests } = createCapturingFetch({ captureOnly: true });
 
diff --git a/packages/proxy/src/proxy.ts b/packages/proxy/src/proxy.ts
index cd414cff..7d6e28c7 100644
--- a/packages/proxy/src/proxy.ts
+++ b/packages/proxy/src/proxy.ts
@@ -2336,6 +2336,7 @@ async function fetchOpenAI(
     bodyData,
     setHeader,
     signal,
+    fetch,
   });
 }
 
@@ -2494,6 +2495,7 @@ async function fetchOpenAIFakeStream({
   bodyData,
   setHeader,
   signal,
+  fetch,
 }: {
   method: "GET" | "POST";
   fullURL: URL;
@@ -2501,6 +2503,7 @@ async function fetchOpenAIFakeStream({
   bodyData: null | any;
   setHeader: (name: string, value: string) => void;
   signal?: AbortSignal;
+  fetch: FetchFn;
 }): Promise<ModelResponse> {
   let isStream = false;
   if (bodyData) {
@@ -2610,11 +2613,13 @@ async function fetchVertexAnthropicMessages({
   modelSpec,
   body,
   signal,
+  fetch,
 }: {
   secret: APISecret;
   modelSpec: ModelSpec | null;
   body: unknown;
   signal?: AbortSignal;
+  fetch: FetchFn;
 }): Promise<ModelResponse> {
   const { baseUrl, accessToken } = await vertexEndpointInfo({
     secret,
@@ -2689,6 +2694,7 @@ async function fetchAnthropicMessages({
         modelSpec,
         body,
         signal,
+        fetch: customFetch,
       });
     default:
       throw new ProxyBadRequestError(

From d371a98917b7698098cad9349ed91ba31734de11 Mon Sep 17 00:00:00 2001
From: Olmo Maldonado
Date: Fri, 15 May 2026 11:57:40 -0700
Subject: [PATCH 2/2] fix: use injected fetch for provider token requests

- Pass the configured fetch implementation into Databricks OAuth exchanges.
- Use injected fetch for Google service account token requests across
  OpenAI, Vertex, Anthropic, and Gemini paths.
- Add coverage to ensure Databricks OAuth avoids global fetch and applies
  the returned bearer token.
---
 packages/proxy/src/providers/databricks.ts  |  6 +-
 packages/proxy/src/providers/openai.test.ts | 96 +++++++++++++++++++++
 packages/proxy/src/proxy.ts                 | 18 +++-
 3 files changed, 116 insertions(+), 4 deletions(-)

diff --git a/packages/proxy/src/providers/databricks.ts b/packages/proxy/src/providers/databricks.ts
index 32f91100..867455a5 100644
--- a/packages/proxy/src/providers/databricks.ts
+++ b/packages/proxy/src/providers/databricks.ts
@@ -18,6 +18,7 @@ export async function getDatabricksOAuthAccessToken({
   digest,
   cacheGet,
   cachePut,
+  fetch = globalThis.fetch,
 }: {
   secret: z.infer;
   apiBase: string;
@@ -29,6 +30,7 @@
     value: string,
     ttl_seconds?: number,
   ) => Promise<void>;
+  fetch?: typeof globalThis.fetch;
 }): Promise<string> {
   const { client_id, client_secret } = secret;
   const tokenUrl = `${apiBase}/oidc/v1/token`;
@@ -59,7 +61,9 @@
   });
   if (!res.ok) {
     throw new Error(
-      `Databricks OAuth error (${res.status}): ${res.statusText} ${await res.text()}`,
+      `Databricks OAuth error (${res.status}): ${
+        res.statusText
+      } ${await res.text()}`,
     );
   }
 
diff --git a/packages/proxy/src/providers/openai.test.ts b/packages/proxy/src/providers/openai.test.ts
index 124690e7..715481d5 100644
--- a/packages/proxy/src/providers/openai.test.ts
+++ b/packages/proxy/src/providers/openai.test.ts
@@ -3,6 +3,7 @@ import {
   OpenAIChatCompletionChunk,
   OpenAIChatCompletionCreateParams,
 } from "@types";
+import { type APISecret } from "@schema";
 import { bypass, http, HttpResponse, JsonBodyType } from "msw";
 import { setupServer } from "msw/node";
 import { ChatCompletionContentPart } from "openai/resources";
@@ -18,6 +19,7 @@ import {
 } from "vitest";
 import { callProxyV1, createCapturingFetch } from "../../utils/tests";
 import * as proxyUtil from "../util";
+import { type FetchFn } from "../proxy";
 import { normalizeOpenAIContent } from "./openai";
 import * as util from "./util";
 import {
@@ -30,6 +32,26 @@ import {
   CSV_DATA_URL,
 } from "../../tests/fixtures/base64";
 
+function fetchInputUrl(input: Parameters<FetchFn>[0]): string {
+  if (typeof input === "string") {
+    return input;
+  }
+  if (input instanceof URL) {
+    return input.toString();
+  }
+  return input.url;
+}
+
+function fetchHeaderValue(
+  headers: HeadersInit | undefined,
+  name: string,
+): string | null {
+  if (!headers) {
+    return null;
+  }
+  return new Headers(headers).get(name);
+}
+
 it("should deny reasoning_effort for unsupported models non-streaming", async () => {
   const { json } = await callProxyV1<
     OpenAIChatCompletionCreateParams,
@@ -406,6 +428,80 @@ it("uses injected fetch when supportsStreaming is false", async () => {
   expect(requests[0].url).toBe("http://test.com/v1/chat/completions");
 });
 
+it("uses injected fetch for Databricks OAuth token exchange", async () => {
+  vi.stubGlobal("fetch", async () => {
+    throw new Error("global fetch was called");
+  });
+
+  try {
+    const calls: Array<{
+      url: string;
+      body: BodyInit | null | undefined;
+      headers: HeadersInit | undefined;
+    }> = [];
+    const fetch: FetchFn = async (input, init) => {
+      const url = fetchInputUrl(input);
+      calls.push({ url, body: init?.body, headers: init?.headers });
+
+      if (url === "https://dbc.example/oidc/v1/token") {
+        return new Response(
+          JSON.stringify({
+            access_token: "databricks-token",
+            token_type: "Bearer",
+            expires_in: 3600,
+          }),
+          { headers: { "content-type": "application/json" } },
+        );
+      }
+
+      return new Response(JSON.stringify({ choices: [] }), {
+        headers: { "content-type": "application/json" },
"content-type": "application/json" }, + }); + }; + const getApiSecrets = async (): Promise => [ + { + type: "databricks", + name: "databricks", + secret: JSON.stringify({ + client_id: "client-id", + client_secret: "client-secret", + }), + metadata: { + api_base: "https://dbc.example", + auth_type: "service_principal_oauth", + supportsStreaming: true, + }, + }, + ]; + + await callProxyV1({ + body: { + model: "databricks-model", + messages: [{ role: "user", content: "hello" }], + }, + fetch, + ...{ getApiSecrets }, + }); + + expect(calls.map((call) => call.url)).toEqual([ + "https://dbc.example/oidc/v1/token", + "https://dbc.example/serving-endpoints/databricks-model/invocations", + ]); + expect(calls[0]?.body?.toString()).toBe( + "grant_type=client_credentials&scope=all-apis", + ); + expect(fetchHeaderValue(calls[0]?.headers, "authorization")).toBe( + // Base64 for the fake "client-id:client-secret" fixture above. + "Basic Y2xpZW50LWlkOmNsaWVudC1zZWNyZXQ=", // gitleaks:allow + ); + expect(fetchHeaderValue(calls[1]?.headers, "authorization")).toBe( + "Bearer databricks-token", + ); + } finally { + vi.unstubAllGlobals(); + } +}); + it("uses model path for azure when metadata.deployment is non-string", async () => { const { fetch, requests } = createCapturingFetch({ captureOnly: true }); diff --git a/packages/proxy/src/proxy.ts b/packages/proxy/src/proxy.ts index 7d6e28c7..10575886 100644 --- a/packages/proxy/src/proxy.ts +++ b/packages/proxy/src/proxy.ts @@ -2135,7 +2135,7 @@ async function fetchOpenAI( bearerToken = secret.secret; } else { // authType === "service_account_key" - bearerToken = await getGoogleAccessToken(secret.secret); + bearerToken = await getGoogleAccessToken(secret.secret, fetch); } } else { const metadataApiBase = @@ -2215,6 +2215,7 @@ async function fetchOpenAI( digest, cacheGet, cachePut, + fetch, }); } else { bearerToken = secret.secret; @@ -2584,10 +2585,12 @@ async function vertexEndpointInfo({ secret: { secret, metadata }, modelSpec, defaultLocation, + fetch = globalThis.fetch, }: { secret: APISecret; modelSpec: ModelSpec | null; defaultLocation: string; + fetch?: FetchFn; }): Promise { const { project, location, authType, api_base } = VertexMetadataSchema.parse(metadata); @@ -2598,7 +2601,9 @@ async function vertexEndpointInfo({ }); const apiBase = getVertexBaseUrl(api_base, resolvedLocation); const accessToken = - authType === "access_token" ? secret : await getGoogleAccessToken(secret); + authType === "access_token" + ? 
+      : await getGoogleAccessToken(secret, fetch);
   if (!accessToken) {
     throw new Error("Failed to get Google access token");
   }
@@ -2625,6 +2630,7 @@ async function fetchVertexAnthropicMessages({
     secret,
     modelSpec,
     defaultLocation: "us-east5",
+    fetch,
   });
   const { model, ...rest } = z
     .object({
@@ -2968,6 +2974,7 @@ async function fetchAnthropicChatCompletions({
       secret,
      modelSpec,
       defaultLocation: "us-east5",
+      fetch: customFetch,
     });
     fullURL = new URL(
       `${baseUrl}/${params.model}:${
@@ -3156,7 +3163,10 @@ async function openAIToolsToGoogleTools(params: {
   return out;
 }
 
-async function getGoogleAccessToken(secret: string): Promise<string> {
+async function getGoogleAccessToken(
+  secret: string,
+  fetch: FetchFn = globalThis.fetch,
+): Promise<string> {
   const {
     private_key_id: kid,
     private_key: pk,
@@ -3240,6 +3250,7 @@ async function fetchGoogleGenerateContent({
     secret,
     modelSpec,
     defaultLocation: "us-central1",
+    fetch,
  });
   const url = new URL(`${baseUrl}/${model}:${method}`);
   if (method === "streamGenerateContent") {
@@ -3376,6 +3387,7 @@ async function fetchGoogleChatCompletions({
     secret,
     modelSpec,
     defaultLocation: "us-central1",
+    fetch,
   });
   fullURL = new URL(
     `${baseUrl}/${model}:${
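
Note: both commits apply the same dependency-injection move — helpers accept a
FetchFn, default it to globalThis.fetch, and forward it through every hop so a
token exchange never silently falls back to the global fetch. A minimal sketch
of the pattern follows; getAccessToken, callProvider, and the URLs are
hypothetical names for illustration, not part of the diff above.

// Sketch only: FetchFn mirrors the global fetch signature so tests can
// substitute a capturing (or deliberately failing) implementation.
type FetchFn = typeof globalThis.fetch;

// A client-credentials token exchange that uses the injected fetch.
// Defaulting the parameter to globalThis.fetch keeps existing call sites
// source-compatible, which is why fetch can be threaded through incrementally.
async function getAccessToken(
  tokenUrl: string,
  basicAuth: string,
  fetch: FetchFn = globalThis.fetch,
): Promise<string> {
  const res = await fetch(tokenUrl, {
    method: "POST",
    headers: {
      authorization: `Basic ${basicAuth}`,
      "content-type": "application/x-www-form-urlencoded",
    },
    body: "grant_type=client_credentials&scope=all-apis",
  });
  if (!res.ok) {
    throw new Error(`token exchange failed (${res.status})`);
  }
  const body = (await res.json()) as { access_token: string };
  return body.access_token;
}

// The invariant the new tests pin down: every network hop, including the
// token exchange, goes through the injected fetch.
async function callProvider(
  invocationUrl: string,
  tokenUrl: string,
  basicAuth: string,
  payload: unknown,
  fetch: FetchFn = globalThis.fetch,
): Promise<Response> {
  const token = await getAccessToken(tokenUrl, basicAuth, fetch); // forwarded
  return fetch(invocationUrl, {
    method: "POST",
    headers: {
      authorization: `Bearer ${token}`,
      "content-type": "application/json",
    },
    body: JSON.stringify(payload),
  });
}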