Skip to content

Commit 9c96c9c

Browse files
authored
Merge pull request #40 from OpenCortexIDE/feature/pollinations-provider
Feature/pollinations provider
2 parents c7f9dbd + 4dd7377 commit 9c96c9c

11 files changed

Lines changed: 156 additions & 56 deletions

File tree

build/filters.js

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,7 @@ module.exports.indentationFilter = [
109109
'!build/win32/**',
110110
'!build/checker/**',
111111
'!src/vs/workbench/contrib/terminal/common/scripts/psreadline/**',
112+
'!src/vs/workbench/contrib/cortexide/**',
112113

113114
// except multiple specific files
114115
'!**/package.json',
@@ -187,7 +188,7 @@ module.exports.copyrightFilter = [
187188
'!extensions/html-language-features/server/src/modes/typescript/*',
188189
'!extensions/*/server/bin/*',
189190
'!src/vs/workbench/contrib/terminal/common/scripts/psreadline/**',
190-
'!src/vs/workbench/contrib/cortexide/browser/react/**',
191+
'!src/vs/workbench/contrib/cortexide/**',
191192
];
192193

193194
module.exports.tsFormattingFilter = [

src/vs/workbench/contrib/cortexide/browser/chatThreadService.ts

Lines changed: 20 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -3465,23 +3465,27 @@ Output ONLY the JSON, no other text. Start with { and end with }.`
34653465
// Mark stream as complete with 0 tokens on error
34663466
chatLatencyAudit.markStreamComplete(finalRequestId, 0)
34673467

3468-
// Audit log: record error
3469-
// PERFORMANCE: Reuse cached auditEnabled check from earlier in function
3470-
if (auditEnabled && modelSelection) {
3471-
await this._auditLogService.append({
3472-
ts: Date.now(),
3473-
action: 'reply',
3474-
model: `${modelSelection.providerName}/${modelSelection.modelName}`,
3475-
ok: false,
3476-
meta: {
3477-
threadId,
3478-
requestId: finalRequestId,
3479-
error: error?.message,
3480-
},
3481-
});
3482-
}
3468+
// Clear stream state immediately so submit button becomes active (avoids stuck "Waiting for model response..." if audit or resolve fails)
3469+
this._setStreamState(threadId, { isRunning: undefined, error })
34833470

3484-
resMessageIsDonePromise({ type: 'llmError', error: error })
3471+
try {
3472+
// Audit log: record error
3473+
if (auditEnabled && modelSelection) {
3474+
await this._auditLogService.append({
3475+
ts: Date.now(),
3476+
action: 'reply',
3477+
model: `${modelSelection.providerName}/${modelSelection.modelName}`,
3478+
ok: false,
3479+
meta: {
3480+
threadId,
3481+
requestId: finalRequestId,
3482+
error: error?.message,
3483+
},
3484+
});
3485+
}
3486+
} finally {
3487+
resMessageIsDonePromise({ type: 'llmError', error: error })
3488+
}
34853489
},
34863490
onAbort: () => {
34873491
// stop the loop to free up the promise, but don't modify state (already handled by whatever stopped it)

src/vs/workbench/contrib/cortexide/browser/react/src/sidebar-tsx/SidebarChat.tsx

Lines changed: 29 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -4526,42 +4526,52 @@ export const SidebarChat = () => {
45264526
{(isRunning === 'LLM' || isRunning === 'preparing') && !displayContentSoFar && !reasoningSoFar ? (
45274527
<ProseWrapper>
45284528
<div
4529-
className="flex items-center gap-2 text-sm opacity-70 loading-state-transition"
4529+
className="flex flex-col gap-1"
45304530
role="status"
45314531
aria-live="polite"
45324532
aria-atomic="true"
45334533
>
4534-
{isRunning === 'preparing' && currThreadStreamState?.llmInfo?.displayContentSoFar ? (
4535-
<>
4536-
<span className="text-void-fg-2" aria-hidden="false">{currThreadStreamState.llmInfo.displayContentSoFar}</span>
4537-
<IconLoading state="thinking" inline />
4538-
</>
4539-
) : isRunning === 'preparing' ? (
4540-
<>
4541-
<span className="text-void-fg-2" aria-hidden="false">Preparing request</span>
4542-
<IconLoading state="thinking" inline />
4543-
</>
4544-
) : (
4545-
<>
4546-
<span className="text-void-fg-2" aria-hidden="false">Generating response</span>
4547-
<IconLoading state="typing" inline />
4548-
</>
4549-
)}
4534+
<div className="flex items-center gap-2 text-sm opacity-70 loading-state-transition">
4535+
{isRunning === 'preparing' && currThreadStreamState?.llmInfo?.displayContentSoFar ? (
4536+
<>
4537+
<span className="text-void-fg-2" aria-hidden="false">{currThreadStreamState.llmInfo.displayContentSoFar}</span>
4538+
<IconLoading state="thinking" inline />
4539+
</>
4540+
) : isRunning === 'preparing' ? (
4541+
<>
4542+
<span className="text-void-fg-2" aria-hidden="false">Preparing request</span>
4543+
<IconLoading state="thinking" inline />
4544+
</>
4545+
) : (
4546+
<>
4547+
<span className="text-void-fg-2" aria-hidden="false">Generating response</span>
4548+
<IconLoading state="typing" inline />
4549+
</>
4550+
)}
4551+
</div>
4552+
<span className="text-xs text-void-fg-3 opacity-60">Press Escape to cancel</span>
45504553
</div>
45514554
</ProseWrapper>
45524555
) : null}
45534556

4557+
{/* Escape hint when streaming (e.g. "Waiting for model response...") */}
4558+
{(isRunning === 'LLM' || isRunning === 'preparing') && (displayContentSoFar || reasoningSoFar) ? (
4559+
<p className="text-xs text-void-fg-3 opacity-60 mt-1" role="status">Press Escape to cancel</p>
4560+
) : null}
4561+
45544562

45554563
{/* error message */}
45564564
{latestError === undefined ? null :
4557-
<div className='px-2 my-1 message-enter'>
4565+
<div className='px-2 my-1 message-enter space-y-2'>
45584566
<ErrorDisplay
45594567
message={latestError.message}
45604568
fullError={latestError.fullError}
45614569
onDismiss={() => { chatThreadsService.dismissStreamError(currentThread.id) }}
45624570
showDismiss={true}
45634571
/>
4564-
4572+
<p className="text-sm text-void-fg-3 px-1">
4573+
You can try again or open settings to change the model.
4574+
</p>
45654575
<WarningBox className='text-sm my-1 mx-3' onClick={() => { commandService.executeCommand(CORTEXIDE_OPEN_SETTINGS_ACTION_ID) }} text='Open settings' />
45664576
</div>
45674577
}

src/vs/workbench/contrib/cortexide/browser/react/src/util/visionModelHelper.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ import { SettingsOfProvider, ModelSelection, ProviderName } from '../../../../co
88
/**
99
* Vision-capable providers that require API keys
1010
*/
11-
const VISION_PROVIDERS: ProviderName[] = ['anthropic', 'openAI', 'gemini'];
11+
const VISION_PROVIDERS: ProviderName[] = ['anthropic', 'openAI', 'gemini', 'pollinations'];
1212

1313
/**
1414
* Checks if user has any vision-capable API keys configured

src/vs/workbench/contrib/cortexide/browser/react/src/void-onboarding/VoidOnboarding.tsx

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -125,9 +125,9 @@ const cloudProviders: ProviderName[] = ['googleVertex', 'liteLLM', 'microsoftAzu
125125

126126
// Data structures for provider tabs
127127
const providerNamesOfTab: Record<TabName, ProviderName[]> = {
128-
Free: ['gemini', 'openRouter'],
128+
Free: ['gemini', 'openRouter', 'pollinations'],
129129
Local: localProviderNames,
130-
Paid: providerNames.filter(pn => !(['gemini', 'openRouter', ...localProviderNames, ...cloudProviders] as string[]).includes(pn)) as ProviderName[],
130+
Paid: providerNames.filter(pn => !(['gemini', 'openRouter', 'pollinations', ...localProviderNames, ...cloudProviders] as string[]).includes(pn)) as ProviderName[],
131131
'Cloud/Other': cloudProviders,
132132
};
133133

@@ -237,14 +237,16 @@ const AddProvidersPage = ({ pageIndex, setPageIndex }: { pageIndex: number, setP
237237
<div className="flex items-center justify-between mb-3">
238238
<div className="text-xl font-medium text-void-fg-0 flex items-center gap-2">
239239
Add {displayInfoOfProviderName(providerName).title}
240-
{(providerName === 'gemini' || providerName === 'openRouter') && (
240+
{(providerName === 'gemini' || providerName === 'openRouter' || providerName === 'pollinations') && (
241241
<span
242242
data-tooltip-id="void-tooltip-provider-info"
243243
data-tooltip-place="right"
244244
className="text-xs text-blue-400"
245245
data-tooltip-content={providerName === 'gemini'
246246
? 'Gemini 2.5 Pro offers 25 free chats daily, Flash offers ~500. Upgrade later if you exhaust credits.'
247-
: 'OpenRouter grants 50 free chats a day (1000 with a $10 deposit) on models tagged :free.'}
247+
: providerName === 'openRouter'
248+
? 'OpenRouter grants 50 free chats a day (1000 with a $10 deposit) on models tagged :free.'
249+
: 'Cheap API with many models (Pollen credits). Get your key at enter.pollinations.ai.'}
248250
>
249251
Details
250252
</span>
@@ -614,7 +616,7 @@ const VoidOnboardingContent = () => {
614616
const providerNamesOfWantToUseOption: { [wantToUseOption in WantToUseOption]: ProviderName[] } = {
615617
smart: ['anthropic', 'openAI', 'gemini', 'openRouter'],
616618
private: ['ollama', 'vLLM', 'openAICompatible', 'lmStudio'],
617-
cheap: ['gemini', 'deepseek', 'openRouter', 'ollama', 'vLLM'],
619+
cheap: ['gemini', 'deepseek', 'openRouter', 'pollinations', 'ollama', 'vLLM'],
618620
all: providerNames,
619621
}
620622

src/vs/workbench/contrib/cortexide/common/cortexideSettingsService.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -709,7 +709,7 @@ class VoidSettingsService extends Disposable implements ICortexideSettingsServic
709709
}
710710

711711
// Try to find the first available configured model (prefer online models first, then local)
712-
const providerNames: ProviderName[] = ['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM']
712+
const providerNames: ProviderName[] = ['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM', 'pollinations']
713713

714714
for (const providerName of providerNames) {
715715
const providerSettings = this.state.settingsOfProvider[providerName]

src/vs/workbench/contrib/cortexide/common/cortexideSettingsTypes.ts

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -106,6 +106,9 @@ export const displayInfoOfProviderName = (providerName: ProviderName): DisplayIn
106106
else if (providerName === 'awsBedrock') {
107107
return { title: 'AWS Bedrock', }
108108
}
109+
else if (providerName === 'pollinations') {
110+
return { title: 'Pollinations', }
111+
}
109112

110113
throw new Error(`descOfProviderName: Unknown provider name: "${providerName}"`)
111114
}
@@ -128,6 +131,7 @@ export const subTextMdOfProviderName = (providerName: ProviderName): string => {
128131
if (providerName === 'vLLM') return 'Read more about custom [Endpoints here](https://docs.vllm.ai/en/latest/getting_started/quickstart.html#openai-compatible-server).'
129132
if (providerName === 'lmStudio') return 'Read more about custom [Endpoints here](https://lmstudio.ai/docs/app/api/endpoints/openai).'
130133
if (providerName === 'liteLLM') return 'Read more about endpoints [here](https://docs.litellm.ai/docs/providers/openai_compatible).'
134+
if (providerName === 'pollinations') return 'Get your [API Key here](https://enter.pollinations.ai/). [API Docs](https://enter.pollinations.ai/api/docs).'
131135

132136
throw new Error(`subTextMdOfProviderName: Unknown provider name: "${providerName}"`)
133137
}
@@ -156,7 +160,8 @@ export const displayInfoOfSettingName = (providerName: ProviderName, settingName
156160
providerName === 'googleVertex' ? 'AIzaSy...' :
157161
providerName === 'microsoftAzure' ? 'key-...' :
158162
providerName === 'awsBedrock' ? 'key-...' :
159-
'',
163+
providerName === 'pollinations' ? 'sk-... or pk-...' :
164+
'',
160165

161166
isPasswordField: true,
162167
}
@@ -352,6 +357,12 @@ export const defaultSettingsOfProvider: SettingsOfProvider = {
352357
...modelInfoOfDefaultModelNames(defaultModelsOfProvider.awsBedrock),
353358
_didFillInProviderSettings: undefined,
354359
},
360+
pollinations: {
361+
...defaultCustomSettings,
362+
...defaultProviderSettings.pollinations,
363+
...modelInfoOfDefaultModelNames(defaultModelsOfProvider.pollinations),
364+
_didFillInProviderSettings: undefined,
365+
},
355366
}
356367

357368

src/vs/workbench/contrib/cortexide/common/errorDetectionService.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -300,8 +300,8 @@ class ErrorDetectionService extends Disposable implements IErrorDetectionService
300300

301301
// Resolve auto model selection
302302
if (modelSelection.providerName === 'auto' && modelSelection.modelName === 'auto') {
303-
const providerNames: Array<'anthropic' | 'openAI' | 'gemini' | 'xAI' | 'mistral' | 'deepseek' | 'groq' | 'ollama' | 'vLLM' | 'lmStudio' | 'openAICompatible' | 'openRouter' | 'liteLLM'> =
304-
['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM'];
303+
const providerNames: Array<'anthropic' | 'openAI' | 'gemini' | 'xAI' | 'mistral' | 'deepseek' | 'groq' | 'ollama' | 'vLLM' | 'lmStudio' | 'openAICompatible' | 'openRouter' | 'liteLLM' | 'pollinations'> =
304+
['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM', 'pollinations'];
305305

306306
for (const providerName of providerNames) {
307307
const providerSettings = settings.settingsOfProvider[providerName];

src/vs/workbench/contrib/cortexide/common/modelCapabilities.ts

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,9 @@ export const defaultProviderSettings = {
8585
region: 'us-east-1', // add region setting
8686
endpoint: '', // optionally allow overriding default
8787
},
88+
pollinations: {
89+
apiKey: '',
90+
},
8891

8992
} as const
9093

@@ -278,6 +281,14 @@ export const defaultModelsOfProvider = {
278281
microsoftAzure: [],
279282
awsBedrock: [],
280283
liteLLM: [],
284+
pollinations: [ // https://enter.pollinations.ai/api/docs, https://pollinations.ai/llms.txt
285+
'openai',
286+
'gemini',
287+
'gemini-large',
288+
'claude',
289+
'deepseek',
290+
'qwen3-coder-30b',
291+
],
281292

282293

283294
} as const satisfies Record<ProviderName, string[]>
@@ -1701,6 +1712,22 @@ const liteLLMSettings: VoidStaticProviderInfo = { // https://docs.litellm.ai/doc
17011712
},
17021713
}
17031714

1715+
// ---------------- POLLINATIONS ----------------
1716+
const pollinationsSettings: VoidStaticProviderInfo = {
1717+
modelOptionsFallback: (modelName) => {
1718+
const fallback = extensiveModelOptionsFallback(modelName);
1719+
if (fallback && !fallback.specialToolFormat) {
1720+
fallback.specialToolFormat = 'openai-style';
1721+
}
1722+
return fallback;
1723+
},
1724+
modelOptions: {},
1725+
providerReasoningIOSettings: {
1726+
input: { includeInPayload: openAICompatIncludeInPayloadReasoning },
1727+
output: { nameOfFieldInDelta: 'reasoning_content' },
1728+
},
1729+
}
1730+
17041731

17051732
// ---------------- OPENROUTER ----------------
17061733
const openRouterModelOptions_assumingOpenAICompat = {
@@ -1929,6 +1956,8 @@ const modelSettingsOfProvider: { [providerName in ProviderName]: VoidStaticProvi
19291956
liteLLM: liteLLMSettings,
19301957
lmStudio: lmStudioSettings,
19311958

1959+
pollinations: pollinationsSettings,
1960+
19321961
googleVertex: googleVertexSettings,
19331962
microsoftAzure: microsoftAzureSettings,
19341963
awsBedrock: awsBedrockSettings,

src/vs/workbench/contrib/cortexide/common/nlShellParserService.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -69,8 +69,8 @@ class NLShellParserService implements INLShellParserService {
6969
// If auto is selected, try to find a fallback model
7070
if (modelSelection.providerName === 'auto' && modelSelection.modelName === 'auto') {
7171
// Try to find the first available configured model (prefer online models first, then local)
72-
const providerNames: Array<'anthropic' | 'openAI' | 'gemini' | 'xAI' | 'mistral' | 'deepseek' | 'groq' | 'ollama' | 'vLLM' | 'lmStudio' | 'openAICompatible' | 'openRouter' | 'liteLLM'> =
73-
['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM'];
72+
const providerNames: Array<'anthropic' | 'openAI' | 'gemini' | 'xAI' | 'mistral' | 'deepseek' | 'groq' | 'ollama' | 'vLLM' | 'lmStudio' | 'openAICompatible' | 'openRouter' | 'liteLLM' | 'pollinations'> =
73+
['anthropic', 'openAI', 'gemini', 'xAI', 'mistral', 'deepseek', 'groq', 'ollama', 'vLLM', 'lmStudio', 'openAICompatible', 'openRouter', 'liteLLM', 'pollinations'];
7474
let fallbackModel: { providerName: string; modelName: string } | null = null;
7575

7676
for (const providerName of providerNames) {

0 commit comments

Comments (0)