Support nginx reverse proxy or configure base_url · Issue #242 · modelcontextprotocol/python-sdk · GitHub

Support nginx reverse proxy or configure base_url #242
Open
@MiV1N

Description

Is your feature request related to a problem? Please describe.

I want to deploy multiple MCP servers behind Nginx, using URL prefixes to reverse-proxy requests to the different servers, but I get an error when connecting:

graph LR
    subgraph "Application Host Process"

        C1[Client]

    end

    subgraph "Internet"

        N3[Nginx <br> 192.168.1.224:8889]
        S3[MCP Server <br> 192.168.1.224:18080]

        C1 --> N3
        N3 <--> S3
    end


nginx.conf

server {
    listen 8889;

    location /abcdef/dddddd/ {
        rewrite ^/abcdef/dddddd/(.*)$ /$1 break;
        proxy_pass http://192.168.1.224:18080;

        proxy_set_header Host $host;
        proxy_set_header X-Real-Scheme $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}

server.py

import argparse
import os
from dotenv import load_dotenv
from mcp.server.fastmcp import FastMCP
from starlette.applications import Starlette
from mcp.server.sse import SseServerTransport
from starlette.requests import Request
from starlette.routing import Mount, Route
from mcp.server import Server
from datetime import datetime
import logging
import warnings
import uvicorn

MCP_SERVER_NAME = "elasticsearch-mcp-sse"

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(MCP_SERVER_NAME)

mcp = FastMCP(MCP_SERVER_NAME)


@mcp.tool()
def get_time() -> str:
    """Return the current time as an ISO-8601 string."""
    return datetime.now().isoformat()


def create_starlette_app(mcp_server: Server, *, debug: bool = False) -> Starlette:
    """Create a Starlette application that can server the provied mcp server with SSE."""
    sse = SseServerTransport("/messages/")

    async def handle_sse(request: Request) -> None:
        # request._send is a private Starlette attribute; it hands the raw
        # ASGI send callable to the SSE transport.
        async with sse.connect_sse(
                request.scope,
                request.receive,
                request._send,
        ) as (read_stream, write_stream):
            await mcp_server.run(
                read_stream,
                write_stream,
                mcp_server.create_initialization_options(),
            )

    return Starlette(
        debug=debug,
        routes=[
            Route("/sse", endpoint=handle_sse),
            Mount("/messages/", app=sse.handle_post_message),
        ],
    )

if __name__ == "__main__":
    # Grab the underlying low-level Server instance wrapped by FastMCP
    mcp_server = mcp._mcp_server

    parser = argparse.ArgumentParser(description='Run MCP SSE-based server')
    parser.add_argument('--host', default='0.0.0.0', help='Host to bind to')
    parser.add_argument('--port', type=int, default=18080, help='Port to listen on')
    args = parser.parse_args()

    # Bind SSE request handling to MCP server
    starlette_app = create_starlette_app(mcp_server, debug=True)

    uvicorn.run(starlette_app, host=args.host, port=args.port)

uv run server.py

client.py

import asyncio
from contextlib import AsyncExitStack
import json
from mcp import ClientSession
from mcp.client.sse import sse_client
import os
from openai import AsyncOpenAI
import sys
from typing import Optional

class MCPClient:
    def __init__(self):
        # Initialize session and client objects
        self.session: Optional[ClientSession] = None
        self._streams_context = None
        self._session_context = None
        self.exit_stack = AsyncExitStack()
        self.client = AsyncOpenAI(
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
            api_key=os.getenv("OPENROUTER_API_KEY"),
        )

    async def connect_to_sse_server(self, server_url: str):
        """Connect to an MCP server running with SSE transport"""
        # Store the context managers so they stay alive
        self._streams_context = sse_client(url=server_url)
        streams = await self._streams_context.__aenter__()

        self._session_context = ClientSession(*streams)
        self.session: ClientSession = await self._session_context.__aenter__()

        # Initialize
        await self.session.initialize()

        # List available tools to verify connection
        print("Initialized SSE client...")
        print("Listing tools...")
        response = await self.session.list_tools()
        tools = response.tools
        print("\nConnected to server with tools:", [tool.name for tool in tools])

    async def cleanup(self):
        """Properly clean up the session and streams"""
        if self._session_context:
            await self._session_context.__aexit__(None, None, None)
        if self._streams_context:
            await self._streams_context.__aexit__(None, None, None)

    async def process_query(self, query: str) -> str:
        """Process a query using OpenAI and available tools"""
        messages = [
            {
                "role": "user",
                "content": query
            }
        ]

        response = await self.session.list_tools()
        available_tools = [{
            "type": "function",
            "function": {
                "name": tool.name,
                "description": tool.description,
                "parameters": tool.inputSchema
            }
        } for tool in response.tools]

        # Initial OpenAI API call
        response = await self.client.chat.completions.create(
            model="qwen-plus",
            messages=messages,
            tools=available_tools
        )
        
        # Process response and handle tool calls
        tool_results = []
        final_text = []

        message = response.choices[0].message
        final_text.append(message.content or "")
        print(final_text)

        while message.tool_calls:
            # Handle each tool call
            for tool_call in message.tool_calls:
                tool_name = tool_call.function.name
                tool_args = json.loads(tool_call.function.arguments)
                
                # Execute tool call
                result = await self.session.call_tool(tool_name, tool_args)
                tool_results.append({"call": tool_name, "result": result})
                final_text.append(f"[Calling tool {tool_name} with args {tool_args}]")

                # Add tool call and result to messages
                messages.append({
                    "role": "assistant",
                    "tool_calls": [
                        {
                            "id": tool_call.id,
                            "type": "function",
                            "function": {
                                "name": tool_name,
                                "arguments": json.dumps(tool_args)
                            }
                        }
                    ]
                })
                messages.append({
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": str(result.content)
                })

            # Get next response from OpenAI
            response = await self.client.chat.completions.create(
                model="qwen-plus",
                messages=messages,
                tools=available_tools
            )
            
            message = response.choices[0].message
            if message.content:
                final_text.append(message.content)

        return "\n".join(final_text)

    async def chat_loop(self):
        """Run an interactive chat loop"""
        print("\nMCP Client Started!")
        print("Type your queries or 'quit' to exit.")
        
        while True:
            try:
                query = input("\nQuery: ").strip()
                if query.lower() == 'quit':
                    break
                response = await self.process_query(query)
                print("\n" + response)
            except Exception as e:
                print(f"\nError: {str(e)}")

async def main():
    if len(sys.argv) < 2:
        print("Usage: uv run client.py <URL of SSE MCP server (i.e. http://localhost:18080/sse)>")
        sys.exit(1)
        
    client = MCPClient()
    try:
        await client.connect_to_sse_server(server_url=sys.argv[1])
        await client.chat_loop()
    finally:
        await client.cleanup()

if __name__ == "__main__":
    asyncio.run(main())

python3 client.py http://192.168.1.224:8889/abcdef/dddddd/sse

I received the following error:

ERROR:mcp.client.sse:Error in post_writer: Client error '404 Not Found' for url 'http://192.168.1.224:8889/messages/?session_id=bd882da221894cfcb535ef3907926e64'
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/404
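
For reference, the 404 seems to come from how the client resolves the endpoint event: the server advertises an absolute path (/messages/?session_id=...), and sse.py joins it to the connection URL with urllib.parse.urljoin, which replaces the entire path and silently drops the Nginx prefix. A minimal demonstration (the session id here is made up):

from urllib.parse import urljoin

base = "http://192.168.1.224:8889/abcdef/dddddd/sse"

# Absolute path: urljoin replaces the whole path, so the prefix is lost.
print(urljoin(base, "/messages/?session_id=abc"))
# -> http://192.168.1.224:8889/messages/?session_id=abc   (404 behind Nginx)

# Relative path: resolved against the SSE URL's directory, prefix survives.
print(urljoin(base, "messages/?session_id=abc"))
# -> http://192.168.1.224:8889/abcdef/dddddd/messages/?session_id=abc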

Describe the solution you'd like
Perhaps the SDK could support MCP servers behind an Nginx reverse proxy, either by letting the client configure a base_url or by changing how the endpoint URL is resolved.
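
In the meantime, a server-side workaround may be possible without any SDK change. The sketch below assumes the external prefix /abcdef/dddddd is known to the deployment; it passes the proxy-visible path to SseServerTransport so the endpoint event already carries the prefix, while the app still mounts the POST handler at the bare /messages/ path because Nginx strips the prefix before proxying:

def create_starlette_app(mcp_server: Server, *, debug: bool = False) -> Starlette:
    """Variant of the app factory that advertises the proxy-visible path."""
    public_prefix = "/abcdef/dddddd"  # assumption: must match the Nginx location

    # The string passed here is what the transport echoes back in the
    # "endpoint" SSE event, so it should be the path clients actually see.
    sse = SseServerTransport(f"{public_prefix}/messages/")

    async def handle_sse(request: Request) -> None:
        async with sse.connect_sse(
            request.scope, request.receive, request._send
        ) as (read_stream, write_stream):
            await mcp_server.run(
                read_stream,
                write_stream,
                mcp_server.create_initialization_options(),
            )

    return Starlette(
        debug=debug,
        routes=[
            Route("/sse", endpoint=handle_sse),
            # Nginx rewrites the prefix away, so internally the handler
            # still lives at /messages/.
            Mount("/messages/", app=sse.handle_post_message),
        ],
    )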

Describe alternatives you've considered

I modified line 64 of https://github.com/modelcontextprotocol/python-sdk/blob/main/src/mcp/client/sse.py to the following code, and now it works for me.

try:
    async for sse in event_source.aiter_sse():
        logger.debug(f"Received SSE event: {sse.event}")
        match sse.event:
            case "endpoint":
                # endpoint_url = urljoin(url, sse.data)  # original line
                endpoint_url = urljoin(
                    url,
                    sse.data[1:] if sse.data.startswith("/") else sse.data,
                )
                logger.info(f"Received endpoint URL: {endpoint_url}")
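
Note that stripping the leading slash only works because the client connects to .../abcdef/dddddd/sse, so the now-relative path resolves back under the prefix. A server that deliberately advertises an absolute path elsewhere would break with this patch, which is why a configurable base_url seems like the cleaner long-term fix.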
