Custom MCP requests + hooks #535

Open · wants to merge 21 commits into base: main
README.md (85 additions, 0 deletions)
@@ -41,6 +41,7 @@
- [Advanced Usage](#advanced-usage)
- [Low-Level Server](#low-level-server)
- [Writing MCP Clients](#writing-mcp-clients)
- [Custom Requests](#custom-requests)
- [MCP Primitives](#mcp-primitives)
- [Server Capabilities](#server-capabilities)
- [Documentation](#documentation)
@@ -621,6 +622,90 @@ if __name__ == "__main__":
asyncio.run(run())
```

### Custom Requests

The MCP SDK can be extended with custom requests to support use cases outside the [Model Context Protocol specification](https://spec.modelcontextprotocol.io).

*Warning:* this capability is opt-in and must be explicitly declared under `experimental_capabilities` in both the server's and the client's capabilities.

Example of server-side custom request processing:

```python
import asyncio as aio
from typing import Literal

import anyio

import mcp.types as types
from mcp.client.session import ClientSession
from mcp.server.lowlevel import Server
from mcp.shared.memory import create_client_server_memory_streams

## define custom request type


class AddOneParams(types.RequestParams):
    value: int


class AddOneRequest(types.CustomRequest[AddOneParams, Literal["add_one"]]):
    method: Literal["add_one"] = "add_one"
    params: AddOneParams


class AddOneResult(types.CustomResult):
    result: int


async def run_all():
    async with anyio.create_task_group() as tg:
        async with create_client_server_memory_streams() as (
            client_streams,
            server_streams,
        ):
            client_read, client_write = client_streams
            server_read, server_write = server_streams

            server = Server("my-add-one-server")

            ## handle custom request type
            @server.handle_custom_request(AddOneRequest)
            async def handle_add_one_request(req: AddOneRequest) -> AddOneResult:
                return AddOneResult(result=req.params.value + 1)

            tg.start_soon(
                lambda: server.run(
                    server_read,
                    server_write,
                    server.create_initialization_options(
                        experimental_capabilities={"custom_requests": {}},
                    ),
                    raise_exceptions=True,
                )
            )

            async with ClientSession(
                read_stream=client_read,
                write_stream=client_write,
                experimental_capabilities={"custom_requests": {}},
            ) as client_session:
                await client_session.initialize()

                ## send custom request type
                req = AddOneRequest(params=AddOneParams(value=1))
                res = await client_session.send_custom_request(
                    req, response_type=AddOneResult
                )
                print(res)

            tg.cancel_scope.cancel()


if __name__ == "__main__":
    aio.run(run_all())
```
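
Custom requests can also flow in the other direction: the client may register handlers for custom requests initiated by the server (see `examples/custom_requests/ttl.py` in this PR). Below is a minimal sketch of that hook, built on the `CustomRequestHandlerFnT` protocol and the `custom_request_handlers` argument added here; the `Echo*` types and the `"echo"` method name are illustrative only, not part of the SDK:

```python
from typing import Any, Literal

import mcp.types as types
from mcp.client.session import ClientSession, CustomRequestHandlerFnT
from mcp.shared.context import RequestContext


## illustrative request/result types (not part of the SDK)
class EchoParams(types.RequestParams):
    text: str


class EchoRequest(types.CustomRequest[EchoParams, Literal["echo"]]):
    method: Literal["echo"] = "echo"
    params: EchoParams


class EchoResult(types.CustomResult):
    echoed: str


## handler invoked when the server sends an "echo" request to the client
class EchoResponder(CustomRequestHandlerFnT[EchoRequest, EchoResult]):
    async def __call__(
        self,
        context: RequestContext["ClientSession", Any],
        message: EchoRequest,
    ) -> EchoResult | types.ErrorData:
        return EchoResult(echoed=message.params.text)


## registered when constructing the session (streams omitted for brevity):
## ClientSession(
##     read_stream=...,
##     write_stream=...,
##     experimental_capabilities={"custom_requests": {}},
##     custom_request_handlers={"echo": EchoResponder()},
## )
```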


### MCP Primitives

The MCP protocol defines three core primitives that servers can implement:
examples/custom_requests/ttl.py (139 additions, 0 deletions)
@@ -0,0 +1,139 @@
#!/usr/bin/env -S uv run --script
# /// script
# dependencies = [
#     "mcp",
# ]
# [tool.uv.sources]
# mcp = { path = "/workspace" }
# ///

##
## The goal of this example is to demonstrate a workflow where
## users can define their own message types for MCP and how to
## process them on the client and/or server side.
##
## In this concrete example we define a new set of message types
## such that the client sends a request to the server, the server
## sends a request back to the client, and so on back and forth
## until a TTL is reached.
##
## This is meant to demonstrate a possible future where MCP is used
## more bidirectionally, as defined by the user.
##


import asyncio as aio
from typing import Any, Literal

import anyio

import mcp.types as types
from mcp.client.session import ClientSession, CustomRequestHandlerFnT
from mcp.server.lowlevel import Server
from mcp.shared.context import RequestContext
from mcp.shared.memory import create_client_server_memory_streams

EXPERIMENTAL_CAPABILITIES: dict[str, dict[str, Any]] = {"custom_requests": {}}

## Define a simple ttl protocol, sending a request to/from the client/server
## back and forth until a TTL is reached.


class TTLParams(types.RequestParams):
    ttl: int


class TTLRequest(types.CustomRequest[TTLParams, Literal["ttl"]]):
    method: Literal["ttl"] = "ttl"
    params: TTLParams


class TTLPayloadResult(types.CustomResult):
    message: str


async def run_all():
    async with anyio.create_task_group() as tg:
        async with create_client_server_memory_streams() as (
            client_streams,
            server_streams,
        ):
            client_read, client_write = client_streams
            server_read, server_write = server_streams

            ## MCP Server code
            server = Server("my-custom-server")

            @server.handle_custom_request(TTLRequest)
            async def handle_ttl_request(req: TTLRequest) -> TTLPayloadResult:
                print(f"SERVER: RECEIVED REQUEST WITH TTL={req.params.ttl}")
                if req.params.ttl > 0:
                    tg.start_soon(
                        server.request_context.session.send_custom_request,
                        TTLRequest(
                            params=TTLParams(
                                ttl=req.params.ttl - 1,
                            )
                        ),
                        TTLPayloadResult,
                    )
                return TTLPayloadResult(message=f"Received ttl {req.params.ttl}!")

            tg.start_soon(
                lambda: server.run(
                    server_read,
                    server_write,
                    server.create_initialization_options(
                        experimental_capabilities=EXPERIMENTAL_CAPABILITIES,
                    ),
                    raise_exceptions=True,
                )
            )

            ## MCP Client code

            class TTLPayloadResponder(
                CustomRequestHandlerFnT[TTLRequest, TTLPayloadResult]
            ):
                async def __call__(
                    self,
                    context: RequestContext["ClientSession", Any],
                    message: TTLRequest,
                ) -> TTLPayloadResult | types.ErrorData:
                    print(f"CLIENT: RECEIVED REQUEST WITH TTL={message.params.ttl}")
                    if message.params.ttl > 0:
                        tg.start_soon(
                            context.session.send_custom_request,
                            TTLRequest(
                                params=TTLParams(
                                    ttl=message.params.ttl - 1,
                                )
                            ),
                            TTLPayloadResult,
                        )
                    return TTLPayloadResult(
                        message=f"Received ttl {message.params.ttl}!"
                    )

            async with ClientSession(
                read_stream=client_read,
                write_stream=client_write,
                experimental_capabilities=EXPERIMENTAL_CAPABILITIES,
                custom_request_handlers={
                    "ttl": TTLPayloadResponder(),
                },
            ) as client_session:
                await client_session.initialize()

                req = TTLRequest(params=TTLParams(ttl=8))
                print(f"Sending: {req}")
                await client_session.send_custom_request(
                    req, response_type=TTLPayloadResult
                )
                await anyio.sleep(1)

            tg.cancel_scope.cancel()


if __name__ == "__main__":
    aio.run(run_all())
src/mcp/__init__.py (6 additions, 0 deletions)
@@ -12,6 +12,9 @@
    CompleteRequest,
    CreateMessageRequest,
    CreateMessageResult,
    CustomRequest,
    CustomRequestWrapper,
    CustomRequestWrapperParams,
    ErrorData,
    GetPromptRequest,
    GetPromptResult,
@@ -66,6 +69,9 @@
    "CreateMessageRequest",
    "CreateMessageResult",
    "ErrorData",
    "CustomRequest",
    "CustomRequestWrapper",
    "CustomRequestWrapperParams",
    "GetPromptRequest",
    "GetPromptResult",
    "Implementation",
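
The additions above re-export the custom-request types at the package root, so they can be imported directly from `mcp`. A minimal, illustrative sketch of such an import (the `Hello*` classes are hypothetical, not part of the SDK; `CustomRequestWrapper` and `CustomRequestWrapperParams` are exported alongside `CustomRequest`):

```python
from typing import Literal

import mcp.types as types
from mcp import CustomRequest


## hypothetical params/request built on the re-exported CustomRequest base
class HelloParams(types.RequestParams):
    name: str


class HelloRequest(CustomRequest[HelloParams, Literal["hello"]]):
    method: Literal["hello"] = "hello"
    params: HelloParams
```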