Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
49 changes: 34 additions & 15 deletions packages/core/src/llm-core/platform/model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -239,23 +239,29 @@
const latestTokenUsage = this._createTokenUsageTracker()
let stream: AsyncGenerator<ChatGenerationChunk> | null = null
let hasChunk = false
let hasResponse = false
let hasToolCallChunk = false

try {
stream = await this._createStream(streamParams)

for await (const chunk of stream) {
hasToolCallChunk =
this._handleStreamChunk(
chunk,
runManager,
latestTokenUsage
) || hasToolCallChunk
const hasTool = this._handleStreamChunk(
chunk,
runManager,
latestTokenUsage
)
hasToolCallChunk = hasTool || hasToolCallChunk
hasChunk = true
hasResponse =
hasResponse ||
hasTool ||
getMessageContent(chunk.message.content).trim()

Check warning on line 259 in packages/core/src/llm-core/platform/model.ts

View check run for this annotation

codefactor.io / CodeFactor

packages/core/src/llm-core/platform/model.ts#L259

Insert `.length·>` (prettier/prettier)

Check failure on line 259 in packages/core/src/llm-core/platform/model.ts

View workflow job for this annotation

GitHub Actions / lint

Insert `.length·>`
.length > 0

Check warning on line 260 in packages/core/src/llm-core/platform/model.ts

View check run for this annotation

codefactor.io / CodeFactor

packages/core/src/llm-core/platform/model.ts#L260

Delete `·.length·>` (prettier/prettier)

Check failure on line 260 in packages/core/src/llm-core/platform/model.ts

View workflow job for this annotation

GitHub Actions / lint

Delete `·.length·>`
yield chunk
}

this._ensureChunksReceived(hasChunk)
this._ensureChunksReceived(hasChunk, hasResponse)
this._finalizeStream(
hasToolCallChunk,
latestTokenUsage,
Expand All @@ -268,12 +274,12 @@
if (
this._shouldRethrowStreamError(
error,
hasChunk,
hasResponse,
attempt,
maxRetries
)
) {
if (hasChunk) {
if (hasResponse) {
logger.debug(
'Stream failed after yielding chunks, cannot retry'
)
Expand Down Expand Up @@ -322,10 +328,11 @@
return hasToolCallChunk
}

private _hasToolCallChunk(message?: AIMessageChunk): boolean {
private _hasToolCallChunk(message?: AIMessage | AIMessageChunk): boolean {
return (
(message?.tool_calls?.length ?? 0) > 0 ||
(message?.tool_call_chunks?.length ?? 0) > 0 ||
((message as AIMessageChunk | undefined)?.tool_call_chunks
?.length ?? 0) > 0 ||
(message?.invalid_tool_calls?.length ?? 0) > 0
)
}
Expand All @@ -347,8 +354,8 @@
latestTokenUsage.output_token_details = usage.output_token_details
}

private _ensureChunksReceived(hasChunk: boolean) {
if (hasChunk) {
private _ensureChunksReceived(hasChunk: boolean, hasResponse: boolean) {
if (hasChunk && hasResponse) {
return
}

Expand Down Expand Up @@ -391,12 +398,12 @@

private _shouldRethrowStreamError(
error: unknown,
hasChunk: boolean,
hasResponse: boolean,
attempt: number,
maxRetries: number
): boolean {
return (

Check warning on line 405 in packages/core/src/llm-core/platform/model.ts

View check run for this annotation

codefactor.io / CodeFactor

packages/core/src/llm-core/platform/model.ts#L405

Replace `·hasResponse·||` with `⏎············hasResponse·||⏎···········` (prettier/prettier)
this._isAbortError(error) || hasChunk || attempt === maxRetries - 1
this._isAbortError(error) || hasResponse || attempt === maxRetries - 1

Check failure on line 406 in packages/core/src/llm-core/platform/model.ts

View workflow job for this annotation

GitHub Actions / lint

Replace `·hasResponse·||` with `⏎············hasResponse·||⏎···········`
)
}

Expand Down Expand Up @@ -495,6 +502,18 @@
})
}

if (
getMessageContent(response.message.content).trim()
.length < 1 &&
this._hasToolCallChunk(
response.message as AIMessage | AIMessageChunk
) !== true
) {
throw new ChatLunaError(
ChatLunaErrorCode.API_REQUEST_FAILED
)
}
Comment on lines +505 to +515
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

这里的空响应检查逻辑在 `options.stream` 为 `true` 时是冗余的。因为 `_streamResponseChunks` 内部已经通过 `_ensureChunksReceived` 确保了 `hasResponse` 为 `true` 才会正常返回。如果流式响应为空,`_streamResponseChunks` 会直接抛出 `API_REQUEST_FAILED`。虽然保留此处的检查作为兜底是安全的,但将其逻辑明确区分或仅针对非流式路径(`_completion`)会更清晰。


return response
} catch (error) {
if (
Expand Down
Loading