From 5b840dc3b601502531e433f40a6ec2ce596339a6 Mon Sep 17 00:00:00 2001 From: Bor4brn <117600305+Bor4brn@users.noreply.github.com> Date: Mon, 16 Mar 2026 17:55:22 +0300 Subject: [PATCH] docs: refresh docs theme and guides --- docs/.vitepress/config.mts | 23 ++- docs/.vitepress/theme/custom.css | 210 +++++++++++++++++++++++++++ docs/.vitepress/theme/index.ts | 6 + docs/contributing.md | 25 +++- docs/guide/ai-providers.md | 140 ++++++++++++++++++ docs/guide/client-integration.md | 116 +++++++++++++++ docs/guide/conversations-messages.md | 101 +++++++++++++ docs/guide/custom-adapters.md | 132 +++++++++++++++++ docs/index.md | 133 ++++++++--------- docs/public/AgentServer.svg | 1 + 10 files changed, 808 insertions(+), 79 deletions(-) create mode 100644 docs/.vitepress/theme/custom.css create mode 100644 docs/.vitepress/theme/index.ts create mode 100644 docs/guide/ai-providers.md create mode 100644 docs/guide/client-integration.md create mode 100644 docs/guide/conversations-messages.md create mode 100644 docs/guide/custom-adapters.md create mode 100644 docs/public/AgentServer.svg diff --git a/docs/.vitepress/config.mts b/docs/.vitepress/config.mts index 18c052d..228d2e9 100644 --- a/docs/.vitepress/config.mts +++ b/docs/.vitepress/config.mts @@ -2,13 +2,17 @@ import { defineConfig } from 'vitepress'; export default defineConfig({ title: 'Agent Server', - description: 'REST API server infrastructure for AI agents - supports agent-sdk and custom implementations', + description: 'REST API server infrastructure for AI agents with storage, auth, streaming, and framework adapters.', base: '/agent-server/', ignoreDeadLinks: true, + appearance: false, + themeConfig: { - logo: '/logo.svg', + logo: '/AgentServer.svg', + siteTitle: 'Agent Server', nav: [ { text: 'Guide', link: '/guide/getting-started' }, + { text: 'Architecture', link: '/guide/architecture' }, { text: 'API Reference', link: '/api/server' }, { text: 'Examples', link: '/examples/' }, { @@ -34,6 +38,8 @@ export 
default defineConfig({ items: [ { text: 'Express.js', link: '/guide/express' }, { text: 'Next.js', link: '/guide/nextjs' }, + { text: 'Custom Adapters', link: '/guide/custom-adapters' }, + { text: 'Client Integration', link: '/guide/client-integration' }, ], }, { @@ -41,9 +47,11 @@ export default defineConfig({ items: [ { text: 'Storage Providers', link: '/guide/storage' }, { text: 'Authentication', link: '/guide/authentication' }, + { text: 'Conversations & Messages', link: '/guide/conversations-messages' }, { text: 'File Management', link: '/guide/file-management' }, { text: 'Streaming', link: '/guide/streaming' }, { text: 'Swagger UI', link: '/guide/swagger' }, + { text: 'AI Providers', link: '/guide/ai-providers' }, ], }, { @@ -86,16 +94,19 @@ export default defineConfig({ { icon: 'github', link: 'https://github.com/Cognipeer/agent-server' }, ], footer: { - message: 'Released under the MIT License.', - copyright: 'Copyright © 2025 CognipeerAI', + message: 'Agent Server is part of the Cognipeer platform.', + copyright: 'Copyright © 2026 Cognipeer', }, search: { provider: 'local', }, }, head: [ - ['link', { rel: 'icon', type: 'image/svg+xml', href: '/agent-server/favicon.svg' }], - ['meta', { name: 'theme-color', content: '#3eaf7c' }], + ['link', { rel: 'preconnect', href: 'https://fonts.googleapis.com' }], + ['link', { rel: 'preconnect', href: 'https://fonts.gstatic.com', crossorigin: '' }], + ['link', { rel: 'stylesheet', href: 'https://fonts.googleapis.com/css2?family=Lexend+Deca:wght@400;500;600;700;800&display=swap' }], + ['link', { rel: 'icon', type: 'image/svg+xml', href: '/agent-server/AgentServer.svg' }], + ['meta', { name: 'theme-color', content: '#00b5a5' }], ['meta', { name: 'og:type', content: 'website' }], ['meta', { name: 'og:locale', content: 'en' }], ['meta', { name: 'og:site_name', content: 'Agent Server Documentation' }], diff --git a/docs/.vitepress/theme/custom.css b/docs/.vitepress/theme/custom.css new file mode 100644 index 0000000..641bec3 
--- /dev/null +++ b/docs/.vitepress/theme/custom.css @@ -0,0 +1,210 @@ +:root { + --vp-font-family-base: 'Lexend Deca', 'Segoe UI', sans-serif; + --vp-font-family-mono: 'SFMono-Regular', 'SF Mono', 'Menlo', 'Monaco', 'Consolas', monospace; + + --cp-c-ink-1: #212b57; + --cp-c-ink-2: #3f4d79; + --cp-c-ink-3: #6f7c99; + --cp-c-link-1: #1e3d74; + --cp-c-link-2: #17305d; + + --vp-c-brand-1: #00b5a5; + --vp-c-brand-2: #009689; + --vp-c-brand-3: #007c70; + --vp-c-brand-soft: rgba(0, 181, 165, 0.14); + + --vp-c-text-1: var(--cp-c-ink-1); + --vp-c-text-2: var(--cp-c-ink-2); + --vp-c-text-3: var(--cp-c-ink-3); + + --vp-c-bg: #f7f8f8; + --vp-c-bg-alt: #f1f3f3; + --vp-c-bg-soft: #ebeeee; + --vp-c-bg-elv: #ffffff; + + --vp-code-color: var(--cp-c-link-1); + --vp-code-link-color: var(--cp-c-link-1); + --vp-code-link-hover-color: var(--cp-c-link-2); + + --vp-home-hero-name-color: transparent; + --vp-home-hero-name-background: linear-gradient(135deg, #009689 0%, #00b5a5 45%, #07e3d0 100%); + + --vp-button-brand-bg: #00b5a5; + --vp-button-brand-hover-bg: #009689; + --vp-button-brand-active-bg: #007c70; + --vp-button-brand-border: transparent; + + --vp-button-alt-bg: #ffffff; + --vp-button-alt-border: rgba(0, 124, 112, 0.18); + --vp-button-alt-hover-bg: #f5fffd; + + --vp-custom-block-tip-bg: rgba(0, 181, 165, 0.08); + --vp-custom-block-tip-border: rgba(0, 181, 165, 0.28); + --vp-custom-block-tip-text: #0d6159; +} + +html, +body, +#app { + background: #f7f8f8; +} + +.Layout { + background: transparent; +} + +.VPNav { + background: rgba(255, 255, 255, 0.88) !important; + border-bottom: 1px solid rgba(0, 124, 112, 0.12); + backdrop-filter: blur(18px); +} + +.VPNavBarTitle .title { + gap: 0.8rem; +} + +.VPNavBarTitle .logo { + width: 34px; + height: 34px; + border-radius: 12px; +} + +.VPNavBarTitle .title span:last-child { + font-size: 0.98rem; + font-weight: 700; + letter-spacing: -0.02em; + color: var(--vp-c-text-1); +} + +.VPHomeHero .container { + align-items: center; +} + 
+.VPHomeHero .main { + max-width: 37rem; +} + +.VPHomeHero .name { + font-weight: 800; + letter-spacing: -0.04em; +} + +.VPHomeHero .text { + max-width: 13ch; + font-weight: 700; + letter-spacing: -0.045em; +} + +.VPHomeHero .tagline { + max-width: 35rem; + color: var(--vp-c-text-2); + font-size: 1.08rem; + line-height: 1.75; +} + +.VPFeature { + border: none; + border-radius: 24px; + background: transparent; +} + +.VPFeature.link:hover { + border-color: transparent; +} + +.VPFeature .title { + font-size: 1.18rem; + line-height: 1.35; +} + +.VPFeature .details { + font-size: 0.98rem; + line-height: 1.72; +} + +.VPFeature .box { + min-height: 100%; + border: 1px solid rgba(0, 124, 112, 0.12); + border-radius: 12px; + background: rgba(255, 255, 255, 0.74); +} + +.VPFeature.link:hover .box { + border-color: rgba(0, 124, 112, 0.24); + background: rgba(255, 255, 255, 0.92); +} + +.vp-doc h1, +.vp-doc h2, +.vp-doc h3 { + letter-spacing: -0.03em; +} + +.vp-doc a { + color: var(--cp-c-link-1); + font-weight: 600; + text-decoration-color: rgba(0, 181, 165, 0.35); + text-underline-offset: 0.16em; +} + +.vp-doc a:hover, +.vp-doc a > code, +.VPNavBarMenuLink:hover, +.VPNavBarMenuLink.active, +.VPLocalNav .menu-link:hover, +.VPLocalNav .menu-link.active, +.VPSidebarItem .item .link:hover .text, +.VPDocOutlineItem .outline-link:hover, +.VPDocOutlineItem.is-active > .outline-link, +.VPDocPager .link:hover .title { + color: var(--cp-c-link-2); +} + +.VPNavBarMenuLink, +.VPLocalNav .menu-link, +.VPSidebarItem .item .text, +.VPDocOutlineItem .outline-link, +.VPDocPager .title { + color: var(--vp-c-text-2); +} + +.VPLocalNav .menu-link[aria-current='true'], +.VPDocOutlineItem .outline-link.active { + color: var(--cp-c-link-2) !important; +} + +.VPSidebarItem.is-active > .item .text, +.VPSidebarItem.is-active > .item .link > .text, +.VPSidebarItem .item .link[aria-current='page'] .text, +.VPSidebarItem .item .link.active .text { + color: var(--vp-c-brand-1) !important; +} + +.vp-doc 
:not(pre, h1, h2, h3, h4, h5, h6) > code, +.vp-doc a > code, +.vp-doc td > code, +.vp-doc th > code { + color: var(--cp-c-link-2); +} + +.vp-doc table { + display: table; + width: 100%; + border-collapse: collapse; + overflow: hidden; + border-radius: 18px; +} + +.vp-doc table th { + background: rgba(0, 181, 165, 0.08); +} + +.vp-doc table th, +.vp-doc table td { + border-color: rgba(0, 124, 112, 0.12); +} + +.vp-doc div[class*='language-'] { + border: 1px solid rgba(0, 124, 112, 0.14); + border-radius: 20px; +} diff --git a/docs/.vitepress/theme/index.ts b/docs/.vitepress/theme/index.ts new file mode 100644 index 0000000..6643efe --- /dev/null +++ b/docs/.vitepress/theme/index.ts @@ -0,0 +1,6 @@ +import DefaultTheme from 'vitepress/theme-without-fonts'; +import './custom.css'; + +export default { + extends: DefaultTheme, +}; diff --git a/docs/contributing.md b/docs/contributing.md index f2514c6..74b69cc 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -1,6 +1,8 @@ # Contributing -Thank you for your interest in contributing to `@cognipeer/agent-server`! +Thank you for your interest in contributing to `@cognipeer/agent-server`. + +Agent Server docs live under `docs/` and are rendered with VitePress using the shared Cognipeer docs shell. ## Development Setup @@ -23,6 +25,24 @@ npm install npm run dev ``` +4. Run docs locally: + +```bash +npm run docs:dev +``` + +## Project Areas + +- `src/`: runtime source for the server, adapters, and providers +- `docs/`: VitePress documentation source +- `examples/`: integration examples and sample server setups + +## Documentation + +- Theme config: `docs/.vitepress/config.mts` +- Theme styling: `docs/.vitepress/theme/` +- Public docs assets: `docs/public/` + ## Running Tests ```bash @@ -33,6 +53,7 @@ npm test ```bash npm run build +npm run docs:build ``` ## Code Style @@ -48,7 +69,7 @@ npm run lint 1. Fork the repository 2. Create a feature branch 3. Make your changes -4. Run tests and linting +4. 
Run tests, linting, and docs build where relevant 5. Submit a pull request ## Reporting Issues diff --git a/docs/guide/ai-providers.md b/docs/guide/ai-providers.md new file mode 100644 index 0000000..6d2e559 --- /dev/null +++ b/docs/guide/ai-providers.md @@ -0,0 +1,140 @@ +# AI Providers + +Agent Server can expose more than SDK agents. It also knows how to register provider-backed agents directly through two built-in runtime surfaces: + +- OpenAI-compatible Chat Completions +- OpenAI Responses API + +That gives you a path to serve remote model-backed agents without first wrapping everything in `@cognipeer/agent-sdk`. + +## When this feature is useful + +Use provider-backed registration when: + +- you already have an OpenAI-compatible endpoint and want to expose it as an agent quickly +- you need a lightweight server integration before adopting a richer SDK runtime +- you want built-in Responses API features such as tools or response chaining +- you want a consistent REST surface while the actual model backend stays configurable + +## Chat Completions agents + +`registerChatCompletionsAgent(...)` connects an agent id to any OpenAI-compatible `/v1/chat/completions` endpoint. 
+ +The provider config supports: + +- `baseUrl` +- `apiKey` +- `model` +- optional `systemPrompt` +- optional `temperature` +- optional `maxTokens` +- optional path override +- optional extra headers +- optional extra body fields + +Typical use cases: + +- OpenAI or Azure OpenAI +- proxy gateways that expose an OpenAI-compatible path +- internal inference services that mirror Chat Completions behavior + +```ts +server.registerChatCompletionsAgent( + 'support-bot', + { + baseUrl: 'https://api.openai.com', + apiKey: process.env.OPENAI_API_KEY!, + model: 'gpt-4o-mini', + systemPrompt: 'You are a concise support assistant.', + }, + { + name: 'Support Bot', + description: 'Chat Completions backed support agent', + } +); +``` + +## Responses API agents + +`registerResponsesAgent(...)` connects an agent id to an OpenAI-style `/v1/responses` endpoint. + +This config supports: + +- `baseUrl` +- `apiKey` +- `model` +- optional `instructions` +- optional `temperature` +- optional `maxOutputTokens` +- optional path override +- optional extra headers +- optional `tools` +- optional extra body fields + +This path is useful when you want the newer Responses model interaction style, including built-in tool declarations such as web search or code interpreter. 
+ +```ts +server.registerResponsesAgent( + 'research-bot', + { + baseUrl: 'https://api.openai.com', + apiKey: process.env.OPENAI_API_KEY!, + model: 'gpt-4o', + instructions: 'You are a research assistant.', + tools: [{ type: 'web_search_preview' }], + }, + { + name: 'Research Bot', + description: 'Responses API backed agent', + } +); +``` + +## Runtime differences that matter + +| Concern | Chat Completions | Responses API | +| --- | --- | --- | +| Main endpoint | `/v1/chat/completions` | `/v1/responses` | +| System-level prompt field | `systemPrompt` | `instructions` | +| Token option | `maxTokens` | `maxOutputTokens` | +| Built-in tools config | Not part of this provider surface | Supported through `tools` | +| Response chaining | Standard message history replay | Supports `previous_response_id` chaining | + +Choose the provider based on the backend contract you actually have, not on naming preference. + +## How Agent Server uses them at message time + +When a conversation message is sent, Agent Server: + +1. loads the stored message history +2. normalizes messages into the provider-specific shape +3. invokes the selected provider +4. persists the assistant output back into the conversation transcript + +For Responses API agents, the runtime also stores the returned `responseId` in conversation state as `lastResponseId`, so later turns can continue the chain. + +## Streaming support + +Both provider surfaces have streaming helpers inside the runtime: + +- `streamChatCompletions(...)` +- `streamResponsesApi(...)` + +Those helpers feed the same Agent Server SSE contract used by the `/messages` route when the request body includes `stream: true`. + +That means your client integration can keep one streaming contract even if the underlying agent backend changes. + +## How to choose + +| If you need... | Start with... 
| +| --- | --- | +| broad compatibility with existing OpenAI-style gateways | Chat Completions | +| built-in Responses tools and response chaining | Responses API | +| the thinnest remote model wrapper | Chat Completions | +| a path closer to newer OpenAI response semantics | Responses API | + +## Where to look next + +- [Streaming](/guide/streaming) for SSE delivery behavior +- [Swagger UI](/guide/swagger) for the generated endpoint contract +- [API Reference](/api/server) for the core server surface diff --git a/docs/guide/client-integration.md b/docs/guide/client-integration.md new file mode 100644 index 0000000..6edc2d2 --- /dev/null +++ b/docs/guide/client-integration.md @@ -0,0 +1,116 @@ +# Client Integration + +Client integration is the layer where your frontend, mobile app, or SDK consumer meets the Agent Server REST surface. This deserves its own guide because the main runtime questions are not about storage internals. They are about which routes to call, how to pass identity, when to stream, and how to keep conversation state coherent in the UI. + +## What the client usually needs + +Most product integrations end up needing four flows: + +1. list or create conversations +2. load a conversation with its existing messages +3. send a new message, optionally with files +4. stream the assistant response when low-latency UX matters + +If you model those four flows cleanly, the rest of the client work becomes much simpler. + +## Recommended request flow + +Use this sequence for a normal chat-style product integration: + +1. call `GET /conversations` to load the user’s recent sessions +2. call `POST /conversations` when the user starts a new thread +3. call `GET /conversations/:conversationId` to hydrate the message list +4. call `POST /conversations/:conversationId/messages` for each new turn +5. 
call `GET /conversations/:conversationId/messages` when you need pagination or a sync refresh + +This keeps the client aligned with the runtime’s own conversation model instead of inventing a second local thread abstraction. + +## Identity and auth headers + +From the client’s point of view, identity can come from: + +- an `Authorization` header handled by the server auth provider +- framework-owned session logic that ultimately resolves a user id +- explicit request fields like `userId`, when your deployment intentionally allows that + +If the server is enforcing authenticated conversation ownership, treat the auth header as part of every conversation and message request, not only the send-message call. + +## Sending messages + +The main message route accepts: + +- `message` +- optional `files` +- optional `metadata` +- optional `stream` + +Standard JSON mode returns the persisted user message and the persisted assistant response in one payload. Use this when: + +- your UI is simple +- low latency is not critical +- you want the easiest integration path first + +## Streaming mode + +If the client sends `stream: true`, the same route switches to SSE output. + +Use streaming when: + +- you want partial text to appear while the response is generated +- your UI needs to surface tool activity or progress +- the server may take long enough that a single blocking response feels broken + +The event stream can emit: + +- `stream.start` +- `stream.text` +- `stream.tool_call` +- `stream.tool_result` +- `stream.progress` +- `stream.error` +- `stream.done` + +Design your client event handling around that contract instead of assuming text-only chunks. + +## File-aware clients + +Files can enter the system in two ways: + +- as attachments on `POST /conversations/:conversationId/messages` +- through the dedicated file routes under `/files` + +For product UIs, the first path is usually the better starting point because it keeps the message turn and attachment lifecycle together. 
+ +Use the direct file routes when: + +- you need separate upload workflows +- files must exist before a message is sent +- you need metadata lookup or download endpoints independently of a message composer + +## Swagger as an integration tool + +When Swagger is enabled, it is not just for backend developers. It is useful for frontend teams too: + +- inspect exact payload shapes +- verify whether auth is required on a route +- test conversation and file flows before writing a dedicated client +- compare docs claims against the generated runtime contract + +This shortens integration loops and catches drift earlier. + +## Good client defaults + +If you are building a new client against Agent Server, start with these defaults: + +- create the conversation explicitly before the first send +- keep the server’s `conversationId` as the canonical thread id +- prefer JSON mode first, then move to SSE once the base flow is stable +- treat auth headers as part of every request, not just writes +- let the server own transcript persistence instead of duplicating message state rules on the client + +## Where to look next + +- [Conversations & Messages](/guide/conversations-messages) for the lifecycle under the hood +- [Streaming](/guide/streaming) for SSE event handling +- [File Management](/guide/file-management) for upload and download paths +- [Swagger UI](/guide/swagger) for runtime contract inspection diff --git a/docs/guide/conversations-messages.md b/docs/guide/conversations-messages.md new file mode 100644 index 0000000..59d8aeb --- /dev/null +++ b/docs/guide/conversations-messages.md @@ -0,0 +1,101 @@ +# Conversations & Messages + +Conversations and messages are the core persistence layer of Agent Server. They sit above raw storage, but below framework adapters and client UX, which makes them the part you need to understand before auth, files, and streaming feel predictable together. 
+ +## What the runtime owns + +The server exposes a conversation lifecycle with built-in storage calls for: + +- creating a conversation +- listing conversations with pagination +- loading a conversation with its message history +- updating title and metadata +- deleting a conversation +- listing messages for a conversation +- sending a new message and persisting both user and assistant turns + +This is not just an API surface. The runtime also verifies conversation access, resolves the effective user id, and updates conversation state when an agent backend returns new state. + +## Conversation model + +At runtime, a conversation stores: + +- `id` +- `agentId` +- optional `userId` +- optional `title` +- optional `metadata` +- optional `state` +- `createdAt` +- `updatedAt` + +The `state` field matters when you use custom handlers or Responses API chaining. It is the durable place where runtime-specific state can survive between turns. + +## Message model + +Messages are persisted as a message-first transcript. A message can contain: + +- `role`: `user`, `assistant`, `system`, or `tool` +- `content`: either a plain string or structured content parts +- optional `name` +- optional `toolCalls` +- optional `toolCallId` +- optional `files` +- optional `metadata` + +That means the transcript can preserve more than just plain text. Tool call details, attachments, and richer content parts can all move through the same storage contract. 
+ +## Route surface + +These routes make up the main conversation flow: + +| Method | Route | What it does | +| --- | --- | --- | +| `GET` | `/conversations` | List conversations with pagination and optional `agentId` or `userId` filters | +| `POST` | `/conversations` | Create a new conversation for an agent | +| `GET` | `/conversations/:conversationId` | Load one conversation together with messages | +| `PATCH` | `/conversations/:conversationId` | Update title or metadata | +| `DELETE` | `/conversations/:conversationId` | Delete a conversation | +| `GET` | `/conversations/:conversationId/messages` | List messages with paging and order | +| `POST` | `/conversations/:conversationId/messages` | Send a message and persist the assistant response | + +The OpenAPI schema in the Swagger generator mirrors the same surface, so the docs and Swagger UI stay aligned. + +## User identity and access checks + +Conversation ownership is not inferred from one source only. The runtime resolves the acting user from: + +1. explicit request data such as `userId` +2. authenticated token user info +3. the optional `resolveUserId` callback in auth config + +When a conversation already has a `userId`, Agent Server verifies that the current request resolves to the same user before returning or mutating the record. + +This is why conversation handling should be treated as its own feature area instead of a side note under auth. + +## Sending a message + +Sending a message does more than append text: + +1. the server validates the request +2. it loads the conversation and verifies access +3. it persists the new user message +4. it loads prior messages as context +5. it routes execution to the registered agent backend +6. it persists the assistant response +7. it updates conversation state or title when needed + +If the request includes `stream: true`, the same route switches into SSE streaming mode instead of the standard JSON response path. 
+ +## Title generation + +If title generation is configured and the conversation is still untitled, the runtime can automatically generate a short title from the first user message. This happens after the first exchange and updates the stored conversation record. + +Use this when your client UI needs a readable conversation list without implementing a second naming pipeline. + +## Where to look next + +- [Authentication](/guide/authentication) for user resolution and token validation +- [Streaming](/guide/streaming) for the `stream: true` response path +- [File Management](/guide/file-management) for file uploads and stored attachments +- [Swagger UI](/guide/swagger) for the generated REST contract diff --git a/docs/guide/custom-adapters.md b/docs/guide/custom-adapters.md new file mode 100644 index 0000000..27e3f1b --- /dev/null +++ b/docs/guide/custom-adapters.md @@ -0,0 +1,132 @@ +# Custom Adapters + +Agent Server is framework-agnostic at the core. Express and Next.js are convenience adapters, not special runtime modes. If your stack uses another HTTP layer, the right approach is to keep `AgentServer` as the business runtime and write a thin adapter around `handleRequest(...)`. + +## What the core expects + +The runtime does not require a specific web framework. It needs only: + +- the HTTP method +- the request path +- query parameters +- an optional request body +- an optional authenticated user + +The core entry point is `server.handleRequest(method, path, ctx)`, where `ctx` carries: + +- `user` +- `query` +- `body` + +The response comes back as a `RouteResult` with: + +- `status` +- optional `body` +- optional `headers` +- optional `raw` +- optional `stream` + +That split is why adapters stay small. They translate framework request and response objects, but they do not own routing logic, auth policy, or storage behavior. 
+ +## What the built-in adapters demonstrate + +Use the existing adapters as reference implementations: + +- Express adapter: request filtering, auth header lookup, SSE streaming, and plain JSON or HTML responses +- Next.js adapter: App Router handler shape, query parsing, `Response` objects, and streaming via `ReadableStream` + +These are the key responsibilities every custom adapter should preserve. + +## Minimal adapter flow + +Every adapter should follow this order: + +1. detect whether the request path belongs to `server.getBasePath()` +2. parse query parameters into a plain object +3. parse request body when the method allows it +4. resolve the authenticated user if your host framework provides one +5. call `server.handleRequest(...)` +6. map `RouteResult` back into the framework response object +7. handle `result.stream` as SSE when present + +If you skip that order, the server may still work, but edge cases like excluded auth routes, Swagger HTML, or streaming can break. + +## Skeleton example + +```ts +async function handleFrameworkRequest(request: FrameworkRequest) { + const result = await server.handleRequest( + request.method, + request.path, + { + user: request.user, + query: request.query, + body: request.body, + } + ); + + if (result.stream) { + return streamSse(result.stream, result.status, result.headers); + } + + return sendFrameworkResponse({ + status: result.status, + headers: result.headers, + body: result.raw ?? result.body, + }); +} +``` + +## Authentication in custom adapters + +There are two valid patterns: + +- let the host framework authenticate first and pass `user` into the route context +- forward the raw auth header and let Agent Server run its configured auth provider path + +Whichever approach you choose, keep it consistent. Mixing partial framework auth with partial runtime auth is where access bugs usually start.
+ +If you use the Agent Server auth config, preserve: + +- `headerName` +- `excludeRoutes` +- the same route path shape relative to `basePath` + +## Streaming in custom adapters + +If `handleRequest(...)` returns `result.stream`, the adapter must return an SSE response instead of serializing JSON. + +At minimum, set headers equivalent to: + +- `Content-Type: text/event-stream` +- `Cache-Control: no-cache` +- `Connection: keep-alive` + +Then write each yielded chunk as-is. Do not repackage the chunks into a different event format, because client integrations and built-in docs already assume the server’s SSE contract. + +## Swagger and raw responses + +Not every response is JSON. Custom adapters should also preserve: + +- HTML responses for Swagger UI +- raw binary responses for file download paths + +If you force everything through JSON serialization, `/docs` and file routes will break. + +## When to write a custom adapter + +Write one when: + +- your host framework is neither Express nor Next.js +- you need tighter control over request lifecycle hooks +- you are embedding Agent Server into an existing platform router +- you need framework-specific auth or tracing before the server runtime executes + +Do not write one just to slightly restyle an existing integration. Start from Express or Next.js unless you have a real hosting mismatch. 
+ +## Where to look next + +- [Express.js](/guide/express) for the middleware pattern +- [Next.js](/guide/nextjs) for the App Router handler pattern +- [Client Integration](/guide/client-integration) for frontend-facing request expectations +- [Streaming](/guide/streaming) for the SSE response contract diff --git a/docs/index.md b/docs/index.md index 871f046..110c3b3 100644 --- a/docs/index.md +++ b/docs/index.md @@ -3,46 +3,53 @@ layout: home hero: name: Agent Server - text: REST API Infrastructure for AI Agents - tagline: Framework-agnostic server with built-in storage, authentication, streaming, and Swagger UI - image: - src: /agent-server/logo.svg - alt: Agent Server + text: Ship Agent APIs Without Rebuilding The Server Layer + tagline: Framework-agnostic infrastructure for storage, authentication, file handling, streaming, and Swagger-backed agent APIs across Express, Next.js, and custom runtimes. actions: - theme: brand text: Get Started link: /guide/getting-started - theme: alt - text: View on GitHub - link: https://github.com/Cognipeer/agent-server + text: Study Architecture + link: /guide/architecture features: - - icon: 🤖 - title: Agent SDK Integration - details: Register agents created with @cognipeer/agent-sdk directly and expose them as REST APIs. - - icon: 🔧 - title: Custom Handler Support - details: Integrate agents from any library with a simple handler interface. - - icon: 💾 - title: Multiple Storage Backends - details: Built-in PostgreSQL and MongoDB providers with auto-migration support. - - icon: 🔐 - title: Authentication - details: Token-based and JWT authentication with customizable providers. - - icon: 📡 - title: SSE Streaming - details: Real-time response streaming with Server-Sent Events. - - icon: 📚 - title: Swagger UI - details: Automatic OpenAPI documentation with interactive Swagger UI. - - icon: 📁 - title: File Management - details: Built-in file upload and download with storage integration. 
- - icon: 🌐 - title: Framework Agnostic - details: Works with Express, Next.js, and other frameworks via adapters. + - title: SDK Agents And Custom Handlers + details: Register agents built with `@cognipeer/agent-sdk` or expose your own runtime handlers through the same REST surface. + - title: Storage That Does Not Start From Scratch + details: Use the built-in PostgreSQL, MongoDB, and SQLite providers instead of rebuilding conversation and file persistence for every deployment. + - title: Auth, Files, And Streaming In One Runtime + details: Keep token or JWT auth, file uploads, and SSE response delivery inside the same server layer that already knows your agent routes. + - title: Framework Adapters That Match Real Integrations + details: Start with Express or Next.js and keep a clean path toward custom adapters when your hosting model does not fit the defaults. + - title: Swagger And REST Contracts Ready To Inspect + details: Publish interactive API docs and predictable endpoints without manually maintaining separate OpenAPI scaffolding. + - title: Examples And Guides That Stay Operational + details: Move from setup into adapters, providers, auth, and endpoint details without losing the shape of the existing docs tree. --- +## Start Here + +If you are integrating Agent Server for the first time, this is the shortest useful reading order: + +1. [Getting Started](/guide/getting-started) to boot a working server quickly. +2. [Core Concepts](/guide/core-concepts) to understand agents, conversations, storage, and files. +3. [Architecture](/guide/architecture) to see how adapters, providers, and API routes fit together. + +If you already know the basics, jump directly to the part that matches your work: + +- Wiring an app runtime? Start with [Express.js](/guide/express) or [Next.js](/guide/nextjs). +- Choosing persistence and auth? Start with [Storage Providers](/guide/storage) and [Authentication](/guide/authentication). +- Building against the runtime contract? 
Start with [API Reference](/api/server) and [REST Endpoints](/api/endpoints). + +## Choose Your Integration Path + +| Start with | Best for | What you get | +| --- | --- | --- | +| Guide | Teams standing up the server for the first time | Setup, architecture, adapters, storage, auth, files, and streaming guidance | +| API Reference | Backend teams implementing or extending the runtime | Server interfaces, types, adapters, providers, and endpoint contracts | +| Examples | Teams that want a runnable starting point | Express, Next.js, storage-backed, and auth-aware sample integrations | + ## Quick Start ::: code-group @@ -61,23 +68,18 @@ pnpm add @cognipeer/agent-server ::: -## Basic Usage - -```typescript +```ts import express from 'express'; import { createAgentServer, createPostgresProvider, createExpressMiddleware, } from '@cognipeer/agent-server'; -import { createSmartAgent } from '@cognipeer/agent-sdk'; -// Storage provider const storage = createPostgresProvider({ - connectionString: 'postgresql://user:pass@localhost:5432/mydb', + connectionString: process.env.DATABASE_URL!, }); -// Agent server const agentServer = createAgentServer({ basePath: '/api/agents', storage, @@ -88,43 +90,32 @@ const agentServer = createAgentServer({ }, }); -// Register your agent -const myAgent = createSmartAgent({ - name: 'Assistant', - model: myLLMModel, - tools: [...], -}); -agentServer.registerSDKAgent('assistant', myAgent, { - description: 'A helpful assistant', -}); - -// Express app const app = express(); app.use(express.json()); await storage.connect(); app.use(createExpressMiddleware(agentServer)); - -app.listen(3000, () => { - console.log('Server running at http://localhost:3000'); - console.log('Swagger UI at http://localhost:3000/api/agents/docs'); -}); +app.listen(3000); ``` -## API Endpoints - -| Method | Endpoint | Description | -|--------|----------|-------------| -| GET | /agents | List all agents | -| GET | /agents/:agentId | Get agent details | -| GET | 
/conversations | List conversations | -| POST | /conversations | Create new conversation | -| GET | /conversations/:id | Get conversation with messages | -| PATCH | /conversations/:id | Update conversation | -| DELETE | /conversations/:id | Delete conversation | -| GET | /conversations/:id/messages | List messages | -| POST | /conversations/:id/messages | Send message | -| POST | /files | Upload file | -| GET | /files/:fileId | Get file metadata | -| GET | /files/:fileId/content | Download file | -| DELETE | /files/:fileId | Delete file | +## Docs Map + +- [Guide](/guide/getting-started): setup, concepts, adapters, storage, auth, file management, streaming, and Swagger guidance. +- [Architecture](/guide/architecture): how the server runtime, adapters, providers, and API surface are layered. +- [API Reference](/api/server): core interfaces, types, adapters, providers, and REST endpoint contracts. +- [Examples](/examples/): runnable integration paths for Express, Next.js, custom agents, and auth flows. + +## Production Checklist + +- Decide early which framework adapter and storage backend own your runtime so conversations and files do not get reworked later. +- Confirm your auth model, base path, and streaming expectations before publishing endpoints to consuming clients. +- Keep Swagger enabled in environments where API inspection shortens integration loops. +- Validate file handling, conversation persistence, and error behavior together instead of treating them as separate subsystems. +- Run `npm run docs:build` when docs or examples change so links and frontmatter stay valid. + +## What This Site Covers + +- A practical path from install to a production-ready agent API server without rebuilding transport, persistence, and auth plumbing by hand. +- The runtime contracts behind adapters, providers, endpoint behavior, and framework integration. +- The operational guidance needed to move from local examples into real deployments. 
+- A docs shell aligned with the wider Cognipeer docs surfaces while preserving Agent Server's own structure and examples. diff --git a/docs/public/AgentServer.svg b/docs/public/AgentServer.svg new file mode 100644 index 0000000..84c5a12 --- /dev/null +++ b/docs/public/AgentServer.svg @@ -0,0 +1 @@ + \ No newline at end of file