|
| 1 | +"""Simple task server demonstrating MCP tasks over streamable HTTP.""" |
| 2 | + |
| 3 | +from collections.abc import AsyncIterator |
| 4 | +from contextlib import asynccontextmanager |
| 5 | +from dataclasses import dataclass |
| 6 | +from typing import Any |
| 7 | + |
| 8 | +import anyio |
| 9 | +import click |
| 10 | +import mcp.types as types |
| 11 | +from anyio.abc import TaskGroup |
| 12 | +from mcp.server.lowlevel import Server |
| 13 | +from mcp.server.streamable_http_manager import StreamableHTTPSessionManager |
| 14 | +from mcp.shared.experimental.tasks import InMemoryTaskStore, task_execution |
| 15 | +from starlette.applications import Starlette |
| 16 | +from starlette.routing import Mount |
| 17 | + |
| 18 | + |
@dataclass
class AppContext:
    """Per-server-lifetime state shared with request handlers via the lifespan context."""

    # Long-lived task group used to spawn background work for tasks.
    task_group: TaskGroup
    # In-memory store holding task state and results.
    store: InMemoryTaskStore
| 23 | + |
| 24 | + |
@asynccontextmanager
async def lifespan(server: Server[AppContext, Any]) -> AsyncIterator[AppContext]:
    """Create the server-lifetime task group and task store.

    Yields an AppContext holding both. Cleanup is wrapped in try/finally:
    with a bare ``yield``, an exception raised in the server body is thrown
    into the generator at the yield point and ``store.cleanup()`` would be
    skipped; ``finally`` guarantees it runs on both success and failure.
    """
    store = InMemoryTaskStore()
    async with anyio.create_task_group() as tg:
        try:
            yield AppContext(task_group=tg, store=store)
        finally:
            store.cleanup()
| 31 | + |
| 32 | + |
| 33 | +server: Server[AppContext, Any] = Server("simple-task-server", lifespan=lifespan) |
| 34 | + |
| 35 | + |
@server.list_tools()
async def list_tools() -> list[types.Tool]:
    """Advertise the single demo tool this server exposes."""
    long_running = types.Tool(
        name="long_running_task",
        description="A task that takes a few seconds to complete with status updates",
        inputSchema={"type": "object", "properties": {}},
    )
    return [long_running]
| 45 | + |
| 46 | + |
@server.call_tool()
async def handle_call_tool(name: str, arguments: dict[str, Any]) -> list[types.TextContent] | types.CreateTaskResult:
    """Create a task for the tool call and run the work in the background.

    Returns a CreateTaskResult immediately; the actual work runs in the
    app-level task group and reports progress through the task store.
    """
    ctx = server.request_context
    app_ctx = ctx.lifespan_context

    # This demo tool only works in task mode; reject direct invocations.
    if not ctx.experimental.is_task:
        return [types.TextContent(type="text", text="Error: This tool must be called as a task")]

    metadata = ctx.experimental.task_metadata
    assert metadata is not None
    task = await app_ctx.store.create_task(metadata)

    async def do_work() -> None:
        # task_execution scopes the work to this task's lifecycle in the store.
        async with task_execution(task.taskId, app_ctx.store) as task_ctx:
            for message in ("Starting work...", "Processing step 1...", "Processing step 2..."):
                await task_ctx.update_status(message)
                await anyio.sleep(1)
            final_result = types.CallToolResult(
                content=[types.TextContent(type="text", text="Task completed!")]
            )
            await task_ctx.complete(final_result)

    # Spawn outside the request so we can respond without waiting for the work.
    app_ctx.task_group.start_soon(do_work)
    return types.CreateTaskResult(task=task)
| 78 | + |
| 79 | + |
@server.experimental.get_task()
async def handle_get_task(request: types.GetTaskRequest) -> types.GetTaskResult:
    """Report the current status of a previously created task.

    Raises ValueError when the task id is unknown to the store.
    """
    task_id = request.params.taskId
    store = server.request_context.lifespan_context.store
    task = await store.get_task(task_id)
    if task is None:
        raise ValueError(f"Task {task_id} not found")
    return types.GetTaskResult(
        taskId=task.taskId,
        status=task.status,
        statusMessage=task.statusMessage,
        createdAt=task.createdAt,
        ttl=task.ttl,
        pollInterval=task.pollInterval,
    )
| 94 | + |
| 95 | + |
@server.experimental.get_task_result()
async def handle_get_task_result(request: types.GetTaskPayloadRequest) -> types.GetTaskPayloadResult:
    """Return the final payload of a completed task.

    Raises ValueError when no result is stored for the task id.
    """
    task_id = request.params.taskId
    store = server.request_context.lifespan_context.store
    result = await store.get_result(task_id)
    if result is None:
        raise ValueError(f"Result for task {task_id} not found")
    # Only CallToolResult payloads are produced by this server's tools.
    assert isinstance(result, types.CallToolResult)
    return types.GetTaskPayloadResult(**result.model_dump())
| 104 | + |
| 105 | + |
@click.command()
@click.option("--port", default=8000, help="Port to listen on")
def main(port: int) -> int:
    """Serve the MCP task server over streamable HTTP on localhost."""
    import uvicorn

    manager = StreamableHTTPSessionManager(app=server)

    @asynccontextmanager
    async def app_lifespan(app: Starlette) -> AsyncIterator[None]:
        # Keep the MCP session manager running for the web app's lifetime.
        async with manager.run():
            yield

    routes = [Mount("/mcp", app=manager.handle_request)]
    starlette_app = Starlette(routes=routes, lifespan=app_lifespan)

    print(f"Starting server on http://localhost:{port}/mcp")
    uvicorn.run(starlette_app, host="127.0.0.1", port=port)
    return 0
0 commit comments