Introduction to the MCP Protocol Client

Written by
Clara Bennett
Updated on: July 11, 2025

This article explores how an MCP protocol client is built and how it works, offering a practical guide for developers who don't use Cursor.

Core content:
1. How the MCP client works
2. How to build an MCP client
3. A closer look at the functions that connect to the MCP server

In this chapter, I will build an MCP client from scratch. I hope you find it useful.

1. Working Principle

An MCP client has two core components:

  • Session: A session between the MCP client and an MCP server. Once the connection is established, the client can discover the tools, resources, and prompts the server exposes, and can call the server's tools remotely.
  • LLM: The brain of the MCP client. Based on the user's request, it decides automatically whether a tool needs to be called and, if so, which one. The LLM must therefore support tool calling (see the sketch after this list).
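
Tool calling means the model can return a structured request to invoke a function instead of plain text. For reference, here is a sketch of the message an OpenAI-compatible API returns when the model decides to call a tool; the tool name get_weather and its arguments are hypothetical, not part of this article's server:

# Sketch of an OpenAI-compatible tool-call response message.
# The tool name "get_weather" and its arguments are made up for illustration.
assistant_message = {
    "role": "assistant",
    "content": None,  # no text answer yet; the model wants a tool result first
    "tool_calls": [
        {
            "id": "call_abc123",
            "type": "function",
            "function": {
                "name": "get_weather",
                "arguments": "{\"city\": \"Hangzhou\"}",  # a JSON string, not a dict
            },
        }
    ],
}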

2. MCP Client Construction

1. Create the MCPClient class

import asyncio
import json
import sys
import time
from typing import Optional
from contextlib import AsyncExitStack
from mcp.client.sse import sse_client
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from openai import AsyncOpenAI

class MCPClient:
    def __init__(self):
        # Initialize session and client objects
        self.session: Optional[ClientSession] = None
        self.exit_stack = AsyncExitStack()
        self.client = AsyncOpenAI(
            # Here we use Alibaba's qwen-plus model via its OpenAI-compatible endpoint
            api_key="your api key",
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )
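
Hardcoding an API key in source code is risky. A safer variant, sketched below, reads the key from an environment variable inside __init__; the variable name DASHSCOPE_API_KEY is my assumption, not part of the original code:

import os

# Assumed setup: export DASHSCOPE_API_KEY=<your key> before running.
# Drop-in replacement for the AsyncOpenAI construction above.
self.client = AsyncOpenAI(
    api_key=os.environ["DASHSCOPE_API_KEY"],
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)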

2. Create the functions that connect to the MCP server

MCP servers support two transports, stdio and SSE, so we create one connection function for each.

async def connect_to_server(self, server_script_path: str):
    """Connect to an MCP server over stdio.

    Args:
        server_script_path: Path to the server script (.py or .js)
    """
    is_python = server_script_path.endswith(".py")
    is_js = server_script_path.endswith(".js")
    if not (is_python or is_js):
        raise ValueError("Server script must be a .py or .js file")

    command = "python" if is_python else "node"
    server_params = StdioServerParameters(
        command=command, args=[server_script_path], env=None
    )

    stdio_transport = await self.exit_stack.enter_async_context(
        stdio_client(server_params)
    )
    self.stdio, self.write = stdio_transport
    self.session = await self.exit_stack.enter_async_context(
        ClientSession(self.stdio, self.write)
    )

    await self.session.initialize()

    # List available tools
    response = await self.session.list_tools()
    tools = response.tools
    print("\nConnected to server with tools:", [tool.name for tool in tools])

async def connect_to_sse_server(self, server_url: str):
    """Connect to an MCP server over SSE.

    Args:
        server_url: URL of the server's SSE endpoint
    """
    self._streams_context = sse_client(url=server_url)

    streams = await self._streams_context.__aenter__()
    self._session_context = ClientSession(*streams)
    self.session = await self._session_context.__aenter__()

    await self.session.initialize()

    # List available tools
    response = await self.session.list_tools()
    tools = response.tools
    print("\nConnected to server with tools:", [tool.name for tool in tools])

3. Process user queries

async def process_query(self, query: str) -> str:
    """Process a query using the LLM and the tools provided by the MCP server"""
    messages = [
        {
            "role": "user",
            "content": query
        }
    ]

    response = await self.session.list_tools()
    available_tools = [{
        "type": "function",
        "function": {
            "name": tool.name,
            "description": tool.description,
            "parameters": tool.inputSchema
        }
    } for tool in response.tools]

    # Initial LLM API call
    response = await self.client.chat.completions.create(
        model="qwen-plus",
        messages=messages,
        tools=available_tools  # Pass the tool list to the LLM
    )

    final_text = []
    message = response.choices[0].message
    print(response.choices[0])  # Debug output: inspect the raw model choice
    final_text.append(message.content or "")

    # Handle the response and process tool calls
    if message.tool_calls:
        # Process each tool call
        for tool_call in message.tool_calls:
            tool_name = tool_call.function.name
            tool_args = json.loads(tool_call.function.arguments)

            # Execute the tool call
            start_time = time.time()
            result = await self.session.call_tool(tool_name, tool_args)
            end_time = time.time()
            print(f"Tool {tool_name} took {end_time - start_time} seconds to execute")
            final_text.append(f"[Calling tool {tool_name} with args {tool_args}]")

            # Add the tool call and its result to the message history
            messages.append({
                "role": "assistant",
                "tool_calls": [
                    {
                        "id": tool_call.id,
                        "type": "function",
                        "function": {
                            "name": tool_name,
                            "arguments": json.dumps(tool_args)
                        }
                    }
                ]
            })
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": str(result.content)
            })

        # Send the tool results back to the LLM
        response = await self.client.chat.completions.create(
            model="qwen-plus",
            messages=messages,
            tools=available_tools
        )

        message = response.choices[0].message
        if message.content:
            final_text.append(message.content)

    return "\n".join(final_text)

4. Chat loop

async def chat_loop(self):
    """Run an interactive chat loop"""
    print("\nMCP Client Started!")
    print("Type your queries or 'quit' to exit.")

    while True:
        try:
            query = input("\nQuery: ").strip()

            if query.lower() == 'quit':
                break

            response = await self.process_query(query)
            print("\n" + response)

        except Exception as e:
            print(f"\nError: {str(e)}")

5. Session Cleanup

async def cleanup(self):
    """Clean up resources"""
    await self.exit_stack.aclose()
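
AsyncExitStack unwinds every context entered via enter_async_context in reverse order, so the session closes before the stdio transport underneath it. A small self-contained demo of that ordering (the resource helper is purely illustrative):

from contextlib import AsyncExitStack, asynccontextmanager

@asynccontextmanager
async def resource(name):
    print(f"open {name}")
    try:
        yield name
    finally:
        print(f"close {name}")

async def demo():
    async with AsyncExitStack() as stack:
        await stack.enter_async_context(resource("transport"))
        await stack.enter_async_context(resource("session"))
    # Prints: open transport, open session, close session, close transport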

6. Entry function

async def main():
    if len(sys.argv) < 2:
        print("Usage: python client.py <server script path or SSE URL>")
        sys.exit(1)

    client = MCPClient()
    try:
        # Pick the connect function that matches the server's transport
        # (hardcoded to SSE here; swap in connect_to_server for stdio)
        await client.connect_to_sse_server(sys.argv[1])
        await client.chat_loop()
    finally:
        await client.cleanup()


if __name__ == "__main__":
    # asyncio.run(main())
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())

7. Program Execution

# When the MCP server's transport is stdio, start with:
uv run client.py "full server script path"

# When the MCP server's transport is SSE, start with:
uv run client.py http://127.0.0.1:8000/sse
# Adjust the SSE address above to match your own server

8. Complete source code

import asyncio
import json
import sys
import time
from typing import Optional
from contextlib import AsyncExitStack
from mcp.client.sse import sse_client
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from openai import AsyncOpenAI


class MCPClient:
    def __init__(self):
        # Initialize session and client objects
        self.session: Optional[ClientSession] = None
        self.exit_stack = AsyncExitStack()
        self.client = AsyncOpenAI(
            # Replace with your Bailian (DashScope) API key, e.g. api_key="sk-xxx",
            # or read it from an environment variable as sketched earlier
            api_key="sk-xxx",
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )

    async def connect_to_server(self, server_script_path: str):
        """Connect to an MCP server over stdio.

        Args:
            server_script_path: Path to the server script (.py or .js)
        """
        is_python = server_script_path.endswith(".py")
        is_js = server_script_path.endswith(".js")
        if not (is_python or is_js):
            raise ValueError("Server script must be a .py or .js file")

        command = "python" if is_python else "node"
        server_params = StdioServerParameters(
            command=command, args=[server_script_path], env=None
        )

        stdio_transport = await self.exit_stack.enter_async_context(
            stdio_client(server_params)
        )
        self.stdio, self.write = stdio_transport
        self.session = await self.exit_stack.enter_async_context(
            ClientSession(self.stdio, self.write)
        )

        await self.session.initialize()

        # List available tools
        response = await self.session.list_tools()
        tools = response.tools
        print("\nConnected to server with tools:", [tool.name for tool in tools])

    async def connect_to_sse_server(self, server_url: str):
        """Connect to an MCP server over SSE.

        Args:
            server_url: URL of the server's SSE endpoint
        """
        self._streams_context = sse_client(url=server_url)

        streams = await self._streams_context.__aenter__()
        self._session_context = ClientSession(*streams)
        self.session = await self._session_context.__aenter__()

        await self.session.initialize()

        # List available tools
        response = await self.session.list_tools()
        tools = response.tools
        print("\nConnected to server with tools:", [tool.name for tool in tools])

    async def process_query(self, query: str) -> str:
        """Process a query using the LLM and the tools provided by the MCP server"""
        messages = [
            {
                "role": "user",
                "content": query
            }
        ]

        response = await self.session.list_tools()
        available_tools = [{
            "type": "function",
            "function": {
                "name": tool.name,
                "description": tool.description,
                "parameters": tool.inputSchema
            }
        } for tool in response.tools]

        # Initial LLM API call
        response = await self.client.chat.completions.create(
            model="qwen-plus",
            messages=messages,
            tools=available_tools  # Pass the tool list to the LLM
        )

        final_text = []
        message = response.choices[0].message
        print(response.choices[0])  # Debug output: inspect the raw model choice
        final_text.append(message.content or "")

        # Handle the response and process tool calls
        if message.tool_calls:
            # Process each tool call
            for tool_call in message.tool_calls:
                tool_name = tool_call.function.name
                tool_args = json.loads(tool_call.function.arguments)

                # Execute the tool call
                start_time = time.time()
                result = await self.session.call_tool(tool_name, tool_args)
                end_time = time.time()
                print(f"Tool {tool_name} took {end_time - start_time} seconds to execute")
                final_text.append(f"[Calling tool {tool_name} with args {tool_args}]")

                # Add the tool call and its result to the message history
                messages.append({
                    "role": "assistant",
                    "tool_calls": [
                        {
                            "id": tool_call.id,
                            "type": "function",
                            "function": {
                                "name": tool_name,
                                "arguments": json.dumps(tool_args)
                            }
                        }
                    ]
                })
                messages.append({
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": str(result.content)
                })

            # Send the tool results back to the LLM
            response = await self.client.chat.completions.create(
                model="qwen-plus",
                messages=messages,
                tools=available_tools
            )

            message = response.choices[0].message
            if message.content:
                final_text.append(message.content)

        return "\n".join(final_text)

    async def chat_loop(self):
        """Run an interactive chat loop"""
        print("\nMCP Client Started!")
        print("Type your queries or 'quit' to exit.")

        while True:
            try:
                query = input("\nQuery: ").strip()

                if query.lower() == 'quit':
                    break

                response = await self.process_query(query)
                print("\n" + response)

            except Exception as e:
                print(f"\nError: {str(e)}")

    async def cleanup(self):
        """Clean up resources"""
        await self.exit_stack.aclose()


async def main():
    if len(sys.argv) < 2:
        print("Usage: python client.py <server script path or SSE URL>")
        sys.exit(1)

    client = MCPClient()
    try:
        # Pick the connect function that matches the server's transport
        # (hardcoded to SSE here; swap in connect_to_server for stdio)
        await client.connect_to_sse_server(sys.argv[1])
        await client.chat_loop()
    finally:
        await client.cleanup()


if __name__ == "__main__":
    # asyncio.run(main())
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())

Conclusion

This article has shown how to build an MCP client in code. By walking through the construction step by step, you should now have a clear picture of how an MCP client works. From here, try building a personalized MCP client of your own, and use your imagination to pair it with MCP servers.