4 changes: 4 additions & 0 deletions go/README.md
@@ -474,6 +474,7 @@ Genkit provides a unified interface across all major AI providers. Use whichever
| **Google AI** | `googlegenai.GoogleAI` | Gemini 2.5 Flash, Gemini 2.5 Pro, and more |
| **Vertex AI** | `vertexai.VertexAI` | Gemini models via Google Cloud |
| **Anthropic** | `anthropic.Anthropic` | Claude 3.5, Claude 3 Opus, and more |
| **OpenAI** | `openai.OpenAI` | GPT-5, GPT-5-mini, GPT-5-nano, GPT-4o, and more |
| **Ollama** | `ollama.Ollama` | Llama, Mistral, and other open models |
| **OpenAI Compatible** | `compat_oai` | Any OpenAI-compatible API |

@@ -484,6 +485,9 @@ g := genkit.Init(ctx, genkit.WithPlugins(&googlegenai.GoogleAI{}))
// Anthropic
g := genkit.Init(ctx, genkit.WithPlugins(&anthropic.Anthropic{}))

// OpenAI
g := genkit.Init(ctx, genkit.WithPlugins(&openai.OpenAI{}))

// Ollama (local models)
g := genkit.Init(ctx, genkit.WithPlugins(&ollama.Ollama{
ServerAddress: "http://localhost:11434",
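
Once a plugin is initialized, generation goes through the same Genkit API regardless of provider. As a minimal sketch (assuming the OpenAI plugin registers its models under an `openai/` prefix — check the plugin docs for the exact model names):

```go
resp, err := genkit.Generate(ctx, g,
	ai.WithModelName("openai/gpt-5"), // assumed model name
	ai.WithPrompt("Write a haiku about Go."),
)
if err != nil {
	log.Fatal(err)
}
fmt.Println(resp.Text())
```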
1 change: 1 addition & 0 deletions go/go.mod
@@ -28,6 +28,7 @@ require (
github.com/jba/slog v0.2.0
github.com/lib/pq v1.10.9
github.com/mark3labs/mcp-go v0.29.0
github.com/openai/openai-go/v3 v3.16.0
github.com/pgvector/pgvector-go v0.3.0
github.com/stretchr/testify v1.10.0
github.com/weaviate/weaviate v1.30.0
2 changes: 2 additions & 0 deletions go/go.sum
@@ -308,6 +308,8 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/openai/openai-go v1.8.2 h1:UqSkJ1vCOPUpz9Ka5tS0324EJFEuOvMc+lA/EarJWP8=
github.com/openai/openai-go v1.8.2/go.mod h1:g461MYGXEXBVdV5SaR/5tNzNbSfwTBBefwc+LlDCK0Y=
github.com/openai/openai-go/v3 v3.16.0 h1:VdqS+GFZgAvEOBcWNyvLVwPlYEIboW5xwiUCcLrVf8c=
github.com/openai/openai-go/v3 v3.16.0/go.mod h1:cdufnVK14cWcT9qA1rRtrXx4FTRsgbDPW7Ia7SS5cZo=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
10 changes: 10 additions & 0 deletions go/plugins/internal/models.go
@@ -46,4 +46,14 @@ var (
Media: true,
Constrained: ai.ConstrainedSupportNone,
}

// Media describes model capabilities for models that support media and text input and output
Media = ai.ModelSupports{
Multiturn: false,
Tools: false,
ToolChoice: false,
SystemRole: false,
Media: true,
Constrained: ai.ConstrainedSupportNone,
}
)
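
As an illustrative sketch of how such a capability set is consumed (the model info below is hypothetical; `Label` and `Supports` are fields of `ai.ModelInfo`):

```go
// A plugin can point a model's metadata at the shared capability set above
// when it registers the model with Genkit.
info := ai.ModelInfo{
	Label:    "Example media model", // hypothetical label
	Supports: &internal.Media,       // the Media capability set defined above
}
_ = info // handed to the model definition at registration time
```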
135 changes: 135 additions & 0 deletions go/plugins/openai/generate.go
@@ -0,0 +1,135 @@
// Copyright 2026 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package openai

import (
"context"
"fmt"

"github.com/firebase/genkit/go/ai"
"github.com/openai/openai-go/v3"
"github.com/openai/openai-go/v3/responses"
)

// generate is the entry point for requesting content generation from the OpenAI client
func generate(ctx context.Context, client *openai.Client, model string, input *ai.ModelRequest, cb func(context.Context, *ai.ModelResponseChunk) error,
) (*ai.ModelResponse, error) {
req, err := toOpenAIResponseParams(model, input)
if err != nil {
return nil, err
}

// stream mode
if cb != nil {
resp, err := generateStream(ctx, client, req, input, cb)
if err != nil {
return nil, err
}
return resp, nil
}

resp, err := generateComplete(ctx, client, req, input)
if err != nil {
return nil, err
}
return resp, nil
}

// generateStream starts a new streaming response
func generateStream(ctx context.Context, client *openai.Client, req *responses.ResponseNewParams, input *ai.ModelRequest, cb func(context.Context, *ai.ModelResponseChunk) error) (*ai.ModelResponse, error) {
stream := client.Responses.NewStreaming(ctx, *req)
defer stream.Close()

var (
toolRefMap = make(map[string]string)
finalResp *responses.Response
)

for stream.Next() {
evt := stream.Current()
chunk := &ai.ModelResponseChunk{}

switch v := evt.AsAny().(type) {
case responses.ResponseTextDeltaEvent:
chunk.Content = append(chunk.Content, ai.NewTextPart(v.Delta))

case responses.ResponseReasoningTextDeltaEvent:
chunk.Content = append(chunk.Content, ai.NewReasoningPart(v.Delta, nil))

case responses.ResponseFunctionCallArgumentsDeltaEvent:
name := toolRefMap[v.ItemID]
chunk.Content = append(chunk.Content, ai.NewToolRequestPart(&ai.ToolRequest{
Ref: v.ItemID,
Name: name,
Input: v.Delta,
}))

case responses.ResponseOutputItemAddedEvent:
switch item := v.Item.AsAny().(type) {
case responses.ResponseFunctionToolCall:
toolRefMap[item.CallID] = item.Name
chunk.Content = append(chunk.Content, ai.NewToolRequestPart(&ai.ToolRequest{
Ref: item.CallID,
Name: item.Name,
}))
}

case responses.ResponseCompletedEvent:
finalResp = &v.Response
}

if len(chunk.Content) > 0 {
if err := cb(ctx, chunk); err != nil {
return nil, fmt.Errorf("callback error: %w", err)
}
}
}

if err := stream.Err(); err != nil {
return nil, fmt.Errorf("stream error: %w", err)
}

if finalResp != nil {
mResp, err := translateResponse(finalResp)
if err != nil {
return nil, err
}
mResp.Request = input
return mResp, nil
}

// avoid returning an error if the stream does not provide a [responses.ResponseCompletedEvent];
// the user might already have received the chunks throughout the loop
return &ai.ModelResponse{
Request: input,
Message: &ai.Message{Role: ai.RoleModel},
}, nil
Comment on lines +114 to +119
Contributor (severity: high)

The function returns a default ai.ModelResponse if the stream ends without a ResponseCompletedEvent. This can hide potential issues, as a stream should ideally end with either a completion event or an error. Returning a default response might lead to silent failures or confusing behavior for the caller, who would receive an empty response without usage statistics.

Consider returning an error if finalResp is nil and stream.Err() is also nil to make failures more explicit.

Suggested change
-// avoid returning an error if the stream does not provide a [responses.ResponseCompletedEvent];
-// the user might already have received the chunks throughout the loop
-return &ai.ModelResponse{
-Request: input,
-Message: &ai.Message{Role: ai.RoleModel},
-}, nil
+return nil, fmt.Errorf("stream finished without a completion event")

Contributor Author
@hugoaguirre Jan 13, 2026

Note for the future: this is an unlikely scenario, but since we are talking about streaming, the user might already have received all the chunks, and failing at this point would cause the whole request to fail. This edge case covers the possible scenario where responses.ResponseCompletedEvent is never sent.
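
For context, a caller-side sketch of why that matters (assuming the standard ai.WithStreaming option and an illustrative openai/gpt-5 model name): by the time generateStream reaches its fallback, the callback below may already have delivered every chunk, so returning an error there would fail a request whose output the caller has fully consumed.

```go
final, err := genkit.Generate(ctx, g,
	ai.WithModelName("openai/gpt-5"), // illustrative model name
	ai.WithPrompt("Stream a short poem."),
	ai.WithStreaming(func(ctx context.Context, chunk *ai.ModelResponseChunk) error {
		fmt.Print(chunk.Text()) // chunks arrive here before the final response
		return nil
	}),
)
if err != nil {
	log.Fatal(err) // would fire even though chunks were already streamed
}
fmt.Println(final.Text())
```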

}

// generateComplete issues a non-streaming completion request
func generateComplete(ctx context.Context, client *openai.Client, req *responses.ResponseNewParams, input *ai.ModelRequest) (*ai.ModelResponse, error) {
resp, err := client.Responses.New(ctx, *req)
if err != nil {
return nil, err
}

modelResp, err := translateResponse(resp)
if err != nil {
return nil, err
}
modelResp.Request = input
return modelResp, nil
}