Skip to content

Commit 4122767

Browse files
authored
Merge pull request #35 from jcommaret/mistral-new-models
new models and capabilities
2 parents 975816a + a60c159 commit 4122767

2 files changed

Lines changed: 37 additions & 17 deletions

File tree

src/vs/workbench/contrib/cortexide/common/modelCapabilities.ts

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -250,18 +250,19 @@ export const defaultModelsOfProvider = {
250250
// NOTE: Keep this list in sync with Mistral's current models.
251251
// Reference: https://docs.mistral.ai/getting-started/models/ (checked 2025-11-30)
252252
// Latest general models:
253-
'mistral-medium-3.1', // Premier: Frontier-class multimodal model (Aug 2025)
254-
'mistral-small-3.2', // Open: Update to previous small model (June 2025)
253+
'mistral-large-latest',
254+
'mistral-medium-latest', // Premier: Frontier-class multimodal model (Aug 2025)
255+
'mistral-small-latest', // Open: Update to previous small model (June 2025)
255256
// Reasoning models:
256-
'magistral-medium-1.2', // Premier: Frontier-class multimodal reasoning model (Sept 2025)
257-
'magistral-small-1.2', // Open: Small multimodal reasoning model (Sept 2025)
257+
'magistral-medium-latest', // Premier: Frontier-class multimodal reasoning model (Sept 2025)
258+
'magistral-small-latest', // Open: Small multimodal reasoning model (Sept 2025)
258259
// Edge models:
259260
'ministral-8b', // Premier: Powerful edge model with high performance/price ratio
260261
'ministral-3b', // Premier: World's best edge model
261262
// Code models:
262263
'codestral-latest', // Premier: Cutting-edge language model for coding (July 2025)
263-
'devstral-medium-1.0', // Premier: Enterprise-grade text model for SWE use cases (July 2025)
264-
'devstral-small-1.1', // Open: Open source model that excels at SWE use cases (July 2025)
264+
'devstral-medium-latest',// Premier: Enterprise-grade text model for SWE use cases (July 2025)
265+
'devstral-small-latest', // Open: Open source model that excels at SWE use cases (July 2025)
265266
// Audio models:
266267
'voxtral-mini-transcribe', // Premier: Efficient audio input model for transcription (July 2025)
267268
'voxtral-mini', // Open: Mini version of first audio input model (July 2025)
@@ -1357,7 +1358,7 @@ const mistralModelOptions = { // https://mistral.ai/products/la-plateforme#prici
13571358
contextWindow: 131_000,
13581359
reservedOutputTokenSpace: 8_192,
13591360
cost: { input: 2.00, output: 6.00 },
1360-
supportsFIM: false,
1361+
supportsFIM: true,
13611362
downloadable: { sizeGb: 73 },
13621363
supportsSystemMessage: 'system-role',
13631364
reasoningCapabilities: false,
@@ -1366,7 +1367,7 @@ const mistralModelOptions = { // https://mistral.ai/products/la-plateforme#prici
13661367
contextWindow: 131_000,
13671368
reservedOutputTokenSpace: 8_192,
13681369
cost: { input: 0.40, output: 2.00 },
1369-
supportsFIM: false,
1370+
supportsFIM: true,
13701371
downloadable: { sizeGb: 'not-known' },
13711372
supportsSystemMessage: 'system-role',
13721373
reasoningCapabilities: false,

src/vs/workbench/contrib/cortexide/electron-main/llmMessage/sendLLMMessage.impl.ts

Lines changed: 28 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -280,14 +280,14 @@ const newOpenAICompatibleSDK = async ({ settingsOfProvider, providerName, includ
280280
}
281281
else if (providerName === 'awsBedrock') {
282282
/**
283-
* We treat Bedrock as *OpenAI-compatible only through a proxy*:
284-
* • LiteLLM default → http://localhost:4000/v1
285-
* • Bedrock-Access-Gateway → https://<api-id>.execute-api.<region>.amazonaws.com/openai/
286-
*
287-
* The native Bedrock runtime endpoint
288-
* https://bedrock-runtime.<region>.amazonaws.com
289-
* is **NOT** OpenAI-compatible, so we do *not* fall back to it here.
290-
*/
283+
* We treat Bedrock as *OpenAI-compatible only through a proxy*:
284+
* • LiteLLM default → http://localhost:4000/v1
285+
* • Bedrock-Access-Gateway → https://<api-id>.execute-api.<region>.amazonaws.com/openai/
286+
*
287+
* The native Bedrock runtime endpoint
288+
* https://bedrock-runtime.<region>.amazonaws.com
289+
* is **NOT** OpenAI-compatible, so we do *not* fall back to it here.
290+
*/
291291
const { endpoint, apiKey } = settingsOfProvider.awsBedrock
292292

293293
// ① use the user-supplied proxy if present
@@ -660,7 +660,26 @@ const _sendOpenAICompatibleChat = async ({ messages, onText, onFinalMessage, onE
660660

661661
// message
662662
const newText = chunk.choices[0]?.delta?.content ?? ''
663-
fullTextSoFar += newText
663+
664+
// Handle Mistral's object content
665+
if (providerName === 'mistral' && typeof newText === 'object' && newText !== null) {
666+
// Parse Mistral's content object
667+
if (Array.isArray(newText)) {
668+
for (const item of newText as any[]) {
669+
if (item.type === 'text' && item.text) {
670+
fullTextSoFar += item.text
671+
} else if (item.type === 'thinking' && item.thinking) {
672+
for (const thinkingItem of item.thinking as any[]) {
673+
if (thinkingItem.type === 'text' && thinkingItem.text) {
674+
fullReasoningSoFar += thinkingItem.text
675+
}
676+
}
677+
}
678+
}
679+
}
680+
} else {
681+
fullTextSoFar += newText
682+
}
664683

665684
// tool call
666685
for (const tool of chunk.choices[0]?.delta?.tool_calls ?? []) {

0 commit comments

Comments
 (0)