From 37520fb574795879d91e2e6ba2e75627cc798d62 Mon Sep 17 00:00:00 2001
From: ankrgyl <565363+ankrgyl@users.noreply.github.com>
Date: Sun, 17 May 2026 08:46:59 +0000
Subject: [PATCH] chore: sync new models

---
 packages/proxy/schema/index.ts        |  2 +
 packages/proxy/schema/model_list.json | 59 +++++++++++++++++++-------
 2 files changed, 47 insertions(+), 14 deletions(-)

diff --git a/packages/proxy/schema/index.ts b/packages/proxy/schema/index.ts
index 78accd54..44b4ea36 100644
--- a/packages/proxy/schema/index.ts
+++ b/packages/proxy/schema/index.ts
@@ -614,6 +614,7 @@ export const AvailableEndpointTypes: { [name: string]: ModelEndpointType[] } = {
   "accounts/fireworks/models/deepseek-v4-pro": ["fireworks"],
   "accounts/fireworks/models/minimax-m2p7": ["fireworks"],
   "grok-4.3": ["xAI"],
+  "grok-4.3-latest": ["xAI"],
   "mistral-large-2512": ["mistral"],
   "mistral-small-2603": ["mistral"],
   "codestral-2508": ["mistral"],
@@ -816,6 +817,7 @@ export const AvailableEndpointTypes: { [name: string]: ModelEndpointType[] } = {
   "gpt-audio": ["openai", "azure"],
   "gpt-audio-mini": ["openai", "azure"],
   "gpt-realtime-2025-08-28": ["openai", "azure"],
+  "gpt-realtime-2": ["openai", "azure"],
   "gpt-realtime-1.5": ["openai", "azure"],
   "gpt-realtime-mini-2025-12-15": ["openai", "azure"],
   "gpt-realtime-mini-2025-10-06": ["openai", "azure"],
diff --git a/packages/proxy/schema/model_list.json b/packages/proxy/schema/model_list.json
index 9ddeff51..504336d4 100644
--- a/packages/proxy/schema/model_list.json
+++ b/packages/proxy/schema/model_list.json
@@ -6869,6 +6869,37 @@
       "xAI"
     ]
   },
+  "grok-4.3": {
+    "format": "openai",
+    "flavor": "chat",
+    "multimodal": true,
+    "input_cost_per_mil_tokens": 1.25,
+    "output_cost_per_mil_tokens": 2.5,
+    "input_cache_read_cost_per_mil_tokens": 0.2,
+    "reasoning": true,
+    "displayName": "Grok 4.3",
+    "max_input_tokens": 1000000,
+    "max_output_tokens": 1000000,
+    "available_providers": [
+      "xAI"
+    ]
+  },
+  "grok-4.3-latest": {
+    "format": "openai",
+    "flavor": "chat",
+    "multimodal": true,
+    "input_cost_per_mil_tokens": 1.25,
+    "output_cost_per_mil_tokens": 2.5,
+    "input_cache_read_cost_per_mil_tokens": 0.2,
+    "reasoning": true,
+    "displayName": "Grok 4.3",
+    "parent": "grok-4.3",
+    "max_input_tokens": 1000000,
+    "max_output_tokens": 1000000,
+    "available_providers": [
+      "xAI"
+    ]
+  },
   "grok-4": {
     "format": "openai",
     "flavor": "chat",
@@ -7223,20 +7254,6 @@
       "xAI"
     ]
   },
-  "grok-4.3": {
-    "format": "openai",
-    "flavor": "chat",
-    "multimodal": true,
-    "input_cost_per_mil_tokens": 1.25,
-    "output_cost_per_mil_tokens": 2.5,
-    "input_cache_read_cost_per_mil_tokens": 0.2,
-    "displayName": "Grok 4.3",
-    "max_input_tokens": 1000000,
-    "max_output_tokens": 1000000,
-    "available_providers": [
-      "xAI"
-    ]
-  },
   "amazon.nova-pro-v1:0": {
     "format": "converse",
     "flavor": "chat",
@@ -11215,6 +11232,20 @@
       "azure"
     ]
   },
+  "gpt-realtime-2": {
+    "format": "openai",
+    "flavor": "chat",
+    "input_cost_per_mil_tokens": 4,
+    "output_cost_per_mil_tokens": 16,
+    "input_cache_read_cost_per_mil_tokens": 0.4,
+    "displayName": "GPT Realtime 2",
+    "max_input_tokens": 32000,
+    "max_output_tokens": 4096,
+    "available_providers": [
+      "openai",
+      "azure"
+    ]
+  },
   "gpt-realtime-1.5": {
     "format": "openai",
     "flavor": "chat",