Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
152 changes: 152 additions & 0 deletions sdk/agentserver/azure-ai-agentserver-responses/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,152 @@
# Python TypeSpec Code Generation Tooling
# Targets: generate-models, clean, install-typespec-deps

# Package directory that receives generated model code (?= allows CLI override).
OUTPUT_DIR ?= azure/ai/responses/server/models/_generated
# Root of the TypeSpec project (synced/compiled by tsp-client / tsp below).
TYPESPEC_DIR ?= type_spec
# OpenAPI document produced under the tsp-client sync output tree; input to
# generate-validators. Does not exist until a sync has run.
OPENAPI_SPEC ?= type_spec/TempTypeSpecFiles/Foundry/openapi3/v1/microsoft-foundry-openapi3.yaml
# Destination file for the generated JSON payload validators.
VALIDATORS_OUTPUT ?= $(OUTPUT_DIR)/_validators.py
# Root schema name(s) handed to scripts/generate_validators.py.
ROOT_SCHEMAS ?= CreateResponse
# npm packages installed locally (--no-save) so `npx tsp compile` can resolve
# the compiler and emitters without a package.json entry.
LOCAL_TYPESPEC_PACKAGES := @typespec/compiler @typespec/http @typespec/openapi @typespec/openapi3 @typespec/versioning @typespec/events @typespec/sse @azure-tools/typespec-python @azure-tools/typespec-azure-core @azure-tools/typespec-client-generator-core @azure-tools/openai-typespec
# Scratch directory for raw emitter output; deleted at the end of generate-models.
TEMP_OUTPUT_DIR := $(OUTPUT_DIR)/.tmp_codegen
# Location of the generated models package inside the emitter's output tree.
MODEL_PACKAGE_DIR := $(TEMP_OUTPUT_DIR)/azure/ai/responses/server/sdk/models
# Hand-written shim files overlaid onto the generated package after emission.
MODEL_SHIMS_DIR := scripts/generated_shims

.PHONY: generate-models generate-validators generate-contracts clean install-typespec-deps

# On Windows, run recipes under cmd.exe so the `if exist` / `xcopy` style
# commands in the Windows branches below work as written.
ifeq ($(OS),Windows_NT)
SHELL := cmd
.SHELLFLAGS := /c
endif

# --------------------------------------------------------------------------
# generate-validators: Generate JSON payload validators from OpenAPI
# --------------------------------------------------------------------------
# Both OS branches are equivalent: verify python is on PATH, verify the
# synced OpenAPI spec exists (it is produced by `tsp-client sync`; see
# generate-models / install-typespec-deps), then run the generator script.
ifeq ($(OS),Windows_NT)
generate-validators:
@where python >NUL 2>NUL || (echo Error: python is required and was not found on PATH. 1>&2 && exit /b 1)
@if not exist "$(OPENAPI_SPEC)" (echo Error: OpenAPI spec not found at $(OPENAPI_SPEC). 1>&2 && exit /b 1)
@echo Generating payload validators from $(OPENAPI_SPEC)...
python scripts/generate_validators.py --input "$(OPENAPI_SPEC)" --output "$(VALIDATORS_OUTPUT)" --root-schemas "$(ROOT_SCHEMAS)"
@echo Generated validators at $(VALIDATORS_OUTPUT)
else
generate-validators:
@command -v python >/dev/null 2>&1 || { \
echo "Error: python is required and was not found on PATH." >&2; \
exit 1; \
}
@test -f "$(OPENAPI_SPEC)" || { \
echo "Error: OpenAPI spec not found at $(OPENAPI_SPEC)." >&2; \
exit 1; \
}
@echo "Generating payload validators from $(OPENAPI_SPEC)..."
python scripts/generate_validators.py --input "$(OPENAPI_SPEC)" --output "$(VALIDATORS_OUTPUT)" --root-schemas "$(ROOT_SCHEMAS)"
@echo "Generated validators at $(VALIDATORS_OUTPUT)"
endif

# --------------------------------------------------------------------------
# generate-contracts: Generate models + validators artifacts
# --------------------------------------------------------------------------
# Run the two generators sequentially via sub-make instead of listing them as
# plain prerequisites: generate-validators reads $(OPENAPI_SPEC), which is
# created by the `tsp-client sync` step inside generate-models, so under
# `make -j` the prerequisite form could run both in parallel and fail
# spuriously. `$(MAKE)` (not bare `make`) propagates flags and the jobserver.
generate-contracts:
	$(MAKE) generate-models
	$(MAKE) generate-validators

# Emitter output directory for @azure-tools/typespec-python. `{cwd}` appears
# to be a placeholder resolved by the TypeSpec compiler configuration (the
# compile runs from inside $(TYPESPEC_DIR), hence the `../` back up to the
# repo-relative temp dir) — confirm against the TypeSpec config docs.
TYPESPEC_OUTPUT_DIR := {cwd}/../$(TEMP_OUTPUT_DIR)

# --------------------------------------------------------------------------
# generate-models: Compile TypeSpec definitions into Python model classes
# --------------------------------------------------------------------------
# Pipeline (both OS branches are equivalent):
#   1. check tooling is installed (tsp-client, npm)
#   2. `tsp-client sync` the upstream TypeSpec sources into $(TYPESPEC_DIR)
#   3. install the compiler/emitter npm packages locally (--no-save)
#   4. compile with the @azure-tools/typespec-python emitter into the temp dir
#   5. copy the generated models package to $(OUTPUT_DIR)/sdk/models, strip
#      client/async/operations scaffolding, overlay the hand-written shims
#   6. remove the temporary emitter output tree
ifeq ($(OS),Windows_NT)
generate-models:
@where tsp-client >NUL 2>NUL || (echo Error: tsp-client is not installed. 1>&2 && echo Run 'make install-typespec-deps' to install it. 1>&2 && exit /b 1)
@where npm >NUL 2>NUL || (echo Error: npm is required. Install Node.js ^(v18+^) from https://nodejs.org/ 1>&2 && exit /b 1)
@echo Syncing upstream TypeSpec sources...
cd /d $(TYPESPEC_DIR) && tsp-client sync
@echo Installing local TypeSpec compiler dependencies...
npm install --prefix $(TYPESPEC_DIR) --no-save $(LOCAL_TYPESPEC_PACKAGES)
@echo Generating Python models...
# Start from a clean output tree so stale generated files cannot survive.
@if exist "$(OUTPUT_DIR)" rmdir /s /q "$(OUTPUT_DIR)"
cd /d $(TYPESPEC_DIR) && npx tsp compile . --emit @azure-tools/typespec-python --option "@azure-tools/typespec-python.emitter-output-dir=$(TYPESPEC_OUTPUT_DIR)"
@if not exist "$(MODEL_PACKAGE_DIR)" (echo Error: generated model package was not found. 1>&2 && exit /b 1)
# NOTE(review): $(OUTPUT_DIR) uses forward slashes while the lines below
# append "\" components; verify the mixed separators work in cmd on Windows.
@if not exist "$(OUTPUT_DIR)\sdk" mkdir "$(OUTPUT_DIR)\sdk"
@xcopy /E /I /Y "$(MODEL_PACKAGE_DIR)" "$(OUTPUT_DIR)\sdk\models" >NUL
# Drop async client, operations, and client plumbing — only models are kept.
@if exist "$(OUTPUT_DIR)\sdk\models\aio" rmdir /s /q "$(OUTPUT_DIR)\sdk\models\aio"
@if exist "$(OUTPUT_DIR)\sdk\models\operations" rmdir /s /q "$(OUTPUT_DIR)\sdk\models\operations"
@if exist "$(OUTPUT_DIR)\sdk\models\_client.py" del /q "$(OUTPUT_DIR)\sdk\models\_client.py"
@if exist "$(OUTPUT_DIR)\sdk\models\_configuration.py" del /q "$(OUTPUT_DIR)\sdk\models\_configuration.py"
@if exist "$(OUTPUT_DIR)\sdk\models\_version.py" del /q "$(OUTPUT_DIR)\sdk\models\_version.py"
# Overlay the hand-maintained shim modules on top of the generated package.
@copy /Y "$(MODEL_SHIMS_DIR)\sdk_models__init__.py" "$(OUTPUT_DIR)\sdk\models\__init__.py" >NUL
@copy /Y "$(MODEL_SHIMS_DIR)\__init__.py" "$(OUTPUT_DIR)\__init__.py" >NUL
@copy /Y "$(MODEL_SHIMS_DIR)\_enums.py" "$(OUTPUT_DIR)\_enums.py" >NUL
@copy /Y "$(MODEL_SHIMS_DIR)\_models.py" "$(OUTPUT_DIR)\_models.py" >NUL
@copy /Y "$(MODEL_SHIMS_DIR)\_patch.py" "$(OUTPUT_DIR)\_patch.py" >NUL
@if exist "$(TEMP_OUTPUT_DIR)" rmdir /s /q "$(TEMP_OUTPUT_DIR)"
else
generate-models:
@command -v tsp-client >/dev/null 2>&1 || { \
echo "Error: tsp-client is not installed." >&2; \
echo "Run 'make install-typespec-deps' to install it." >&2; \
exit 1; \
}
@command -v npm >/dev/null 2>&1 || { \
echo "Error: npm is required. Install Node.js (v18+) from https://nodejs.org/" >&2; \
exit 1; \
}
@echo "Syncing upstream TypeSpec sources..."
cd $(TYPESPEC_DIR) && tsp-client sync
@echo "Installing local TypeSpec compiler dependencies..."
npm install --prefix $(TYPESPEC_DIR) --no-save $(LOCAL_TYPESPEC_PACKAGES)
@echo "Generating Python models..."
# Start from a clean output tree so stale generated files cannot survive.
rm -rf $(OUTPUT_DIR)
cd $(TYPESPEC_DIR) && npx tsp compile . --emit @azure-tools/typespec-python --option "@azure-tools/typespec-python.emitter-output-dir=$(TYPESPEC_OUTPUT_DIR)"
@test -d $(MODEL_PACKAGE_DIR) || { \
echo "Error: generated model package was not found." >&2; \
exit 1; \
}
mkdir -p $(OUTPUT_DIR)/sdk
cp -R $(MODEL_PACKAGE_DIR) $(OUTPUT_DIR)/sdk/models
# Drop async client, operations, and client plumbing — only models are kept.
rm -rf $(OUTPUT_DIR)/sdk/models/aio
rm -rf $(OUTPUT_DIR)/sdk/models/operations
rm -f $(OUTPUT_DIR)/sdk/models/_client.py
rm -f $(OUTPUT_DIR)/sdk/models/_configuration.py
rm -f $(OUTPUT_DIR)/sdk/models/_version.py
# Overlay the hand-maintained shim modules on top of the generated package.
cp $(MODEL_SHIMS_DIR)/sdk_models__init__.py $(OUTPUT_DIR)/sdk/models/__init__.py
cp $(MODEL_SHIMS_DIR)/__init__.py $(OUTPUT_DIR)/__init__.py
cp $(MODEL_SHIMS_DIR)/_enums.py $(OUTPUT_DIR)/_enums.py
cp $(MODEL_SHIMS_DIR)/_models.py $(OUTPUT_DIR)/_models.py
cp $(MODEL_SHIMS_DIR)/_patch.py $(OUTPUT_DIR)/_patch.py
rm -rf $(TEMP_OUTPUT_DIR)
endif

# --------------------------------------------------------------------------
# clean: Remove all previously generated Python model files
# --------------------------------------------------------------------------
# OUTPUT_DIR is overridable (?=), so guard the destructive delete against an
# empty value and quote the path in case an override contains spaces.
ifeq ($(OS),Windows_NT)
clean:
	@if exist "$(OUTPUT_DIR)" rmdir /s /q "$(OUTPUT_DIR)"
else
clean:
	$(if $(strip $(OUTPUT_DIR)),,$(error OUTPUT_DIR is empty; refusing to run rm -rf))
	rm -rf "$(OUTPUT_DIR)"
endif

# --------------------------------------------------------------------------
# install-typespec-deps: Install tsp-client CLI and sync TypeSpec sources
# --------------------------------------------------------------------------
# Both OS branches are equivalent: verify node/npm exist, install the
# tsp-client CLI globally, install the compiler/emitter packages locally
# (--no-save), then sync the upstream TypeSpec sources.
ifeq ($(OS),Windows_NT)
install-typespec-deps:
@where node >NUL 2>NUL || (echo Error: Node.js ^(v18+^) is required. Install from https://nodejs.org/ 1>&2 && exit /b 1)
@where npm >NUL 2>NUL || (echo Error: npm is required. Install Node.js ^(v18+^) from https://nodejs.org/ 1>&2 && exit /b 1)
npm install -g @azure-tools/typespec-client-generator-cli
npm install --prefix $(TYPESPEC_DIR) --no-save $(LOCAL_TYPESPEC_PACKAGES)
cd /d $(TYPESPEC_DIR) && tsp-client sync
else
install-typespec-deps:
@command -v node >/dev/null 2>&1 || { \
echo "Error: Node.js (v18+) is required. Install from https://nodejs.org/" >&2; \
exit 1; \
}
@command -v npm >/dev/null 2>&1 || { \
echo "Error: npm is required. Install Node.js (v18+) from https://nodejs.org/" >&2; \
exit 1; \
}
npm install -g @azure-tools/typespec-client-generator-cli
npm install --prefix $(TYPESPEC_DIR) --no-save $(LOCAL_TYPESPEC_PACKAGES)
cd $(TYPESPEC_DIR) && tsp-client sync
endif
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
"""Handler and context contracts for user-defined response execution."""

from __future__ import annotations

import asyncio
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, AsyncIterable, Awaitable, Callable, Mapping, Protocol, Sequence, runtime_checkable

from ._generated import CreateResponse, OutputItem, ResponseStreamEvent
from .models import ResponseModeFlags

# Async loader that resolves a sequence of output items on demand; used for
# lazy input/history resolution by RuntimeResponseContext.
OutputItemsLoader = Callable[[], Awaitable[Sequence[OutputItem]]]
# JSON-like shape of a raw request body: object, array, scalar, or null.
RawBodyType = Mapping[str, Any] | Sequence[Any] | str | int | float | bool | None


@runtime_checkable
class ResponseContext(Protocol):
    """Structural contract for the runtime context handed to response handlers.

    Mirrors the referenced .NET ``IResponseContext`` surface: a response
    identifier, a host-controlled shutdown flag, raw request-body access,
    and async resolution of input and history items.
    """

    @property
    def response_id(self) -> str:
        """Unique identifier of the response being produced."""
        ...

    @property
    def is_shutdown_requested(self) -> bool:
        """Whether the host has asked the handler to wind down."""
        ...

    @is_shutdown_requested.setter
    def is_shutdown_requested(self, value: bool) -> None:
        """Record the host's shutdown-request state."""
        ...

    @property
    def raw_body(self) -> RawBodyType:
        """Raw request body payload, for reading extension fields."""
        ...

    async def get_input_items(self) -> Sequence[OutputItem]:
        """Asynchronously resolve the request's input items."""
        ...

    async def get_history(self) -> Sequence[OutputItem]:
        """Asynchronously resolve prior conversation history items."""
        ...


@dataclass(slots=True)
class RuntimeResponseContext(ResponseContext):
    """Concrete :class:`ResponseContext` used by hosting orchestration.

    Input and history items are resolved lazily through the optional loader
    callables and memoized as tuples after the first successful load.
    """

    response_id: str
    mode_flags: ResponseModeFlags
    raw_body: RawBodyType = None
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    _is_shutdown_requested: bool = False
    _input_items_loader: OutputItemsLoader | None = None
    _history_loader: OutputItemsLoader | None = None
    _input_items_cache: Sequence[OutputItem] | None = None
    _history_cache: Sequence[OutputItem] | None = None

    @property
    def is_shutdown_requested(self) -> bool:
        """Whether the host has requested shutdown."""
        return self._is_shutdown_requested

    @is_shutdown_requested.setter
    def is_shutdown_requested(self, value: bool) -> None:
        """Update the host shutdown-request flag."""
        self._is_shutdown_requested = value

    async def get_input_items(self) -> Sequence[OutputItem]:
        """Return request input items, loading and memoizing on first call."""
        if self._input_items_cache is None:
            loader = self._input_items_loader
            # No loader configured means there are no input items to resolve.
            self._input_items_cache = () if loader is None else tuple(await loader())
        return self._input_items_cache

    async def get_history(self) -> Sequence[OutputItem]:
        """Return conversation history, loading and memoizing on first call."""
        if self._history_cache is None:
            loader = self._history_loader
            # No loader configured means history resolves to an empty tuple.
            self._history_cache = () if loader is None else tuple(await loader())
        return self._history_cache


@runtime_checkable
class ResponseHandler(Protocol):
    """Structural contract for the async handler driving route orchestration.

    Mirrors the single-method design of the referenced .NET
    ``IResponseHandler``.
    """

    def create_async(
        self,
        request: CreateResponse,
        context: ResponseContext,
        cancellation_signal: asyncio.Event,
    ) -> AsyncIterable[ResponseStreamEvent]:
        """Produce the complete event stream for one create request."""
        ...
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
"""Starlette hosting integration for the Responses server package."""

from __future__ import annotations

from typing import TYPE_CHECKING

from ._options import ResponsesServerOptions

if TYPE_CHECKING:
from starlette.applications import Starlette

from ._handlers import ResponseHandler


def map_responses_server(
    app: "Starlette",
    handler: "ResponseHandler",
    *,
    prefix: str = "",
    options: ResponsesServerOptions | None = None,
) -> None:
    """Attach the Responses API route table to a Starlette application.

    :param app: Target Starlette application to configure.
    :param handler: User-supplied response handler implementation.
    :param prefix: Optional prefix prepended to registered routes.
    :param options: Optional runtime configuration for the server.
    """
    # Routing layer is not built yet; fail loudly rather than silently no-op.
    raise NotImplementedError("Route registration will be implemented in Phase 2/3.")
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
"""Compatibility module for runtime response server models.

Canonical non-generated type definitions now live in ``azure.ai.responses.server.models``.
"""

# Re-export the runtime model types from their canonical home so existing
# imports of this compatibility module keep working.
from .models.runtime import (
    ResponseExecution,
    ResponseModeFlags,
    ResponseSession,
    ResponseStatus,
    StreamEventRecord,
    StreamReplayState,
    TerminalResponseStatus,
)

# Public surface of this shim — mirrors the names imported above.
__all__ = [
    "ResponseExecution",
    "ResponseModeFlags",
    "ResponseSession",
    "ResponseStatus",
    "StreamEventRecord",
    "StreamReplayState",
    "TerminalResponseStatus",
]
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
"""Observability and identity header helpers."""

from __future__ import annotations


def build_platform_server_header(sdk_name: str, version: str, runtime: str, extra: str | None = None) -> str:
    """Compose the platform server identity header value.

    :param sdk_name: SDK package name.
    :param version: SDK package version.
    :param runtime: Runtime marker, such as python/3.10.
    :param extra: Optional additional identity suffix.
    :returns: Formatted identity header value.
    """
    header = f"{sdk_name}/{version} ({runtime})"
    if not extra:
        # Falsy suffix (None or empty string): emit the base identity alone.
        return header
    # strip() guards against stray surrounding whitespace in the suffix.
    return f"{header} {extra}".strip()
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
"""Typed options for configuring the Responses server runtime."""

from __future__ import annotations

from dataclasses import dataclass


@dataclass(slots=True)
class ResponsesServerOptions:
"""Configuration values for hosting and runtime behavior.

This shape mirrors the .NET `ResponsesServerOptions` surface:
- SSE keep-alive is disabled by default.
- `default_model` is optional.
- `default_fetch_history_count` defaults to 100.
- `additional_server_identity` is optional.
"""

default_fetch_history_count_value: int = 100
additional_server_identity: str | None = None
default_model: str | None = None
default_fetch_history_count: int = default_fetch_history_count_value
sse_keep_alive_interval_seconds: int | None = None

def __post_init__(self) -> None:
"""Validate and normalize option values."""
if self.additional_server_identity is not None:
normalized = self.additional_server_identity.strip()
self.additional_server_identity = normalized or None

if self.default_model is not None:
normalized_model = self.default_model.strip()
self.default_model = normalized_model or None

if self.sse_keep_alive_interval_seconds is not None and self.sse_keep_alive_interval_seconds <= 0:
raise ValueError("sse_keep_alive_interval_seconds must be > 0 when set")

if self.default_fetch_history_count <= 0:
raise ValueError("default_fetch_history_count must be > 0")

@property
def sse_keep_alive_enabled(self) -> bool:
"""Return whether periodic SSE keep-alive comments are enabled."""
return self.sse_keep_alive_interval_seconds is not None
Loading
Loading