Introduction
Now that we understand MCP servers and clients, let's integrate them into a complete AI agent. This section shows how to bridge MCP's tool discovery with LLM function calling, creating agents that can dynamically use any MCP server.
The Goal: Build an agent that automatically discovers MCP tools, presents them to the LLM, executes tool calls, and handles results—all while maintaining security and proper error handling.
Architecture Overview
An MCP-powered agent combines multiple components:
πmcp_agent_architecture.txt
1MCP-POWERED AGENT ARCHITECTURE
2
3βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
4β AGENT HOST β
5β βββββββββββββββββββββββββββββββββββββββββββββββββββββββββ β
6β β AGENT CORE β β
7β β βββββββββββββββ βββββββββββββββ βββββββββββββ β β
8β β β System β β Agent β β Message β β β
9β β β Prompt β β Loop β β History β β β
10β β βββββββββββββββ ββββββββ¬βββββββ βββββββββββββ β β
11β β β β β
12β ββββββββββββββββββββββββββββββΌββββββββββββββββββββββββββββ β
13β β β
14β ββββββββββββββββββββββββββββββΌββββββββββββββββββββββββββββ β
15β β TOOL BRIDGE β β
16β β βββββββββββββββ βββββββββββββββ βββββββββββββ β β
17β β β Tool β β Schema β β Result β β β
18β β β Router βββββΆβ Converter βββββΆβ Formatter β β β
19β β ββββββββ¬βββββββ βββββββββββββββ βββββββββββββ β β
20β β β β β
21β βββββββββββΌβββββββββββββββββββββββββββββββββββββββββββββββ β
22β β β
23β βββββββββββΌβββββββββββββββββββββββββββββββββββββββββββββββ β
24β β MCP CLIENT LAYER β β
25β β βββββββββββββββ βββββββββββββββ βββββββββββββββββββ β β
26β β β GitHub β β Files β β Database β β β
27β β β Server β β Server β β Server β β β
28β β βββββββββββββββ βββββββββββββββ βββββββββββββββββββ β β
29β βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ β
30βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ| Component | Purpose |
|---|---|
| Agent Core | Manages conversation, system prompt, agent loop |
| Tool Bridge | Converts MCP tools to LLM format, routes calls |
| Schema Converter | Translates MCP schemas to provider-specific format |
| Result Formatter | Converts MCP results to LLM-friendly messages |
| MCP Client Layer | Manages connections to MCP servers |
Converting MCP Tools for LLMs
MCP tools must be converted to the format expected by your LLM provider:
MCP to Anthropic Format
πmcp_to_anthropic.py
1from dataclasses import dataclass
2from typing import Any
3
@dataclass
class MCPTool:
    """MCP tool as discovered from server.

    Mirrors one entry of an MCP server's tool listing, plus the name
    of the server it came from so calls can be routed back to the
    right connection.
    """
    name: str            # identifier the LLM uses to invoke the tool
    description: str     # human/LLM-readable summary of what it does
    input_schema: dict   # JSON Schema describing the tool's arguments
    server_name: str     # originating MCP server (used for routing)
11
def mcp_to_anthropic_tool(mcp_tool: MCPTool) -> dict:
    """Convert MCP tool to Anthropic Claude format.

    Anthropic's Messages API takes each tool as a flat dict with
    ``name``, ``description`` and a JSON-schema ``input_schema`` —
    the same fields MCP exposes — so this is a direct mapping that
    simply drops the MCP-only ``server_name`` field.
    """
    converted = {"name": mcp_tool.name}
    converted["description"] = mcp_tool.description
    converted["input_schema"] = mcp_tool.input_schema
    return converted
19
def mcp_tools_to_anthropic(tools: list[MCPTool]) -> list[dict]:
    """Convert all MCP tools to Anthropic format.

    Thin batch wrapper over mcp_to_anthropic_tool().
    """
    converted: list[dict] = []
    for tool in tools:
        converted.append(mcp_to_anthropic_tool(tool))
    return converted
23
# Example conversion: a tool as it would be discovered from a
# "filesystem" MCP server.
mcp_tool = MCPTool(
    name="read_file",
    description="Read the contents of a file",
    input_schema={
        "type": "object",
        "properties": {
            "path": {
                "type": "string",
                "description": "Path to the file to read"
            }
        },
        "required": ["path"]
    },
    server_name="filesystem"
)

# Note that server_name is dropped in the converted form -- the model
# only ever sees name/description/input_schema.
anthropic_tool = mcp_to_anthropic_tool(mcp_tool)
42# Result:
43# {
44# "name": "read_file",
45# "description": "Read the contents of a file",
46# "input_schema": {
47# "type": "object",
48# "properties": {
49# "path": {"type": "string", "description": "..."}
50# },
51# "required": ["path"]
52# }
# }
MCP to OpenAI Format
πmcp_to_openai.py
def mcp_to_openai_tool(mcp_tool: MCPTool) -> dict:
    """Convert MCP tool to OpenAI function calling format.

    OpenAI wraps each tool in {"type": "function", "function": {...}}
    and names the argument schema "parameters" rather than MCP's
    "input_schema"; the schema itself passes through unchanged.
    """
    function_spec = {
        "name": mcp_tool.name,
        "description": mcp_tool.description,
        "parameters": mcp_tool.input_schema,
    }
    return {"type": "function", "function": function_spec}
11
def mcp_tools_to_openai(tools: list[MCPTool]) -> list[dict]:
    """Convert all MCP tools to OpenAI format.

    Thin batch wrapper over mcp_to_openai_tool().
    """
    converted: list[dict] = []
    for tool in tools:
        converted.append(mcp_to_openai_tool(tool))
    return converted
15
16# OpenAI format example:
17# {
18# "type": "function",
19# "function": {
20# "name": "read_file",
21# "description": "Read the contents of a file",
22# "parameters": {
23# "type": "object",
24# "properties": {
25# "path": {"type": "string", "description": "..."}
26# },
27# "required": ["path"]
28# }
29# }
# }
Handling Tool Results
πresult_formatting.py
def format_mcp_result_for_anthropic(
    tool_name: str,
    tool_use_id: str,
    mcp_result: dict
) -> dict:
    """Format MCP tool result for Anthropic messages.

    Flattens the MCP result's "content" array into one text blob:
    "text" items contribute their text, "resource" items contribute
    the embedded resource's text (falling back to its str() form).
    The MCP "isError" flag maps onto Anthropic's "is_error".
    """
    pieces = []
    for entry in mcp_result.get("content", []):
        kind = entry.get("type")
        if kind == "text":
            pieces.append(entry.get("text", ""))
        elif kind == "resource":
            res = entry.get("resource", {})
            pieces.append(res.get("text", str(res)))

    return {
        "type": "tool_result",
        "tool_use_id": tool_use_id,
        "content": "\n".join(pieces),
        "is_error": mcp_result.get("isError", False),
    }
31
32
def format_mcp_result_for_openai(
    tool_call_id: str,
    mcp_result: dict
) -> dict:
    """Format MCP tool result for OpenAI messages.

    Returns an OpenAI chat message with role "tool". Only "text"
    content items are kept -- "resource" items are dropped here, and
    no error flag is propagated (the OpenAI tool message has no
    is_error equivalent in this format).
    """

    content = mcp_result.get("content", [])
    # Collect just the plain-text items, in order.
    text_parts = [
        item.get("text", "")
        for item in content
        if item.get("type") == "text"
    ]

    return {
        "role": "tool",
        "tool_call_id": tool_call_id,
        "content": "\n".join(text_parts)
    }
Agent Loop Integration
The agent loop must handle MCP tool discovery and execution:
πmcp_agent_loop.py
1import anthropic
2from dataclasses import dataclass
3from typing import Any
4
@dataclass
class AgentConfig:
    """Configuration for MCP-powered agent."""
    # Anthropic model id used for every completion call.
    model: str = "claude-sonnet-4-20250514"
    # Safety cap on LLM <-> tool round trips per run().
    max_iterations: int = 10
    system_prompt: str = "You are a helpful assistant with access to tools."
11
class MCPAgent:
    """Agent that uses MCP servers for tool execution.

    Holds the conversation history and drives an Anthropic tool-use
    loop: tools discovered via the MCP client are advertised to the
    model, tool_use responses are executed through MCP, and the
    results are appended back into the conversation.
    """

    def __init__(
        self,
        mcp_client: "MCPClientManager",
        config: "AgentConfig | None" = None
    ):
        # mcp_client is an already-connected manager used both for
        # tool discovery and execution.
        self.mcp = mcp_client
        self.config = config or AgentConfig()
        self.anthropic = anthropic.Anthropic()
        # Full message history; reused across run() calls.
        self.messages: list[dict] = []

    def get_available_tools(self) -> list[dict]:
        """Get all MCP tools in Anthropic format."""
        mcp_tools = self.mcp.list_tools()
        return mcp_tools_to_anthropic(mcp_tools)

    async def run(self, user_message: str) -> str:
        """Run the agent loop with MCP tools.

        Appends the user message, then alternates LLM calls and tool
        execution until the model ends its turn or the iteration cap
        is reached. Returns the model's final text (or a fallback).
        """

        self.messages.append({
            "role": "user",
            "content": user_message
        })

        # Tool list is snapshotted once per run, not per iteration.
        tools = self.get_available_tools()

        for iteration in range(self.config.max_iterations):
            # Call the LLM
            response = self.anthropic.messages.create(
                model=self.config.model,
                max_tokens=4096,
                system=self.config.system_prompt,
                tools=tools,
                messages=self.messages
            )

            # Check stop reason
            if response.stop_reason == "end_turn":
                # Extract final text response
                final_text = ""
                for block in response.content:
                    if block.type == "text":
                        final_text += block.text
                return final_text

            elif response.stop_reason == "tool_use":
                # Process tool calls, then loop back for another
                # model call with the results in the history.
                await self._handle_tool_calls(response)

            else:
                # Unexpected stop reason (e.g. max_tokens)
                break

        return "Max iterations reached"

    async def _handle_tool_calls(self, response) -> None:
        """Handle tool calls from LLM response.

        Records the assistant turn, executes every tool_use block via
        MCP, and appends all results as a single user message -- the
        shape the Anthropic tool-use protocol expects.
        """

        # Add assistant message with tool uses
        self.messages.append({
            "role": "assistant",
            "content": response.content
        })

        # Process each tool use
        tool_results = []
        for block in response.content:
            if block.type == "tool_use":
                result = await self._execute_tool(
                    block.name,
                    block.input
                )

                tool_results.append(
                    format_mcp_result_for_anthropic(
                        tool_name=block.name,
                        tool_use_id=block.id,
                        mcp_result=result
                    )
                )

        # Add tool results to messages
        self.messages.append({
            "role": "user",
            "content": tool_results
        })

    async def _execute_tool(
        self,
        name: str,
        arguments: dict
    ) -> dict:
        """Execute an MCP tool and return result.

        Never raises: failures are converted into an MCP-style
        isError result so the model can see and react to them.
        """
        try:
            result = await self.mcp.call_tool(name, arguments)
            return result
        except Exception as e:
            # Broad catch is deliberate -- any tool failure becomes
            # an error result instead of aborting the agent loop.
            return {
                "content": [{"type": "text", "text": f"Error: {str(e)}"}],
                "isError": True
            }
Complete Agent Example
Here's a complete example putting everything together:
πcomplete_mcp_agent.py
1import asyncio
2from dataclasses import dataclass, field
3from typing import Optional
4import anthropic
5
6# MCP Client (simplified from previous section)
7from mcp_client import MCPClientManager
8
@dataclass
class Tool:
    """A tool discovered from an MCP server.

    ``input_schema`` is the tool's JSON Schema for arguments;
    ``server_name`` records which server exposed it so a call can be
    routed back to the right connection.
    """
    name: str
    description: str
    input_schema: dict
    server_name: str
15
@dataclass
class AgentConfig:
    """Runtime settings for the MCP-powered agent."""
    # Anthropic model id used for every completion call.
    model: str = "claude-sonnet-4-20250514"
    # Per-response output token budget.
    max_tokens: int = 4096
    # Cap on LLM <-> tool round trips per chat() call.
    max_iterations: int = 10
    system_prompt: str = """You are a helpful AI assistant with access to various tools.

Use tools when needed to accomplish tasks. Always explain what you're doing.

Available capabilities:
- File system operations (read, write, list files)
- Code execution in a sandbox
- Web search and fetching

Think step by step and use tools appropriately."""
31
32
33class MCPPoweredAgent:
34 """Complete agent implementation with MCP integration."""
35
36 def __init__(
37 self,
38 config: AgentConfig = None,
39 mcp_servers: dict = None
40 ):
41 self.config = config or AgentConfig()
42 self.mcp = MCPClientManager()
43 self.client = anthropic.Anthropic()
44 self.messages: list = []
45 self.tools: list[Tool] = []
46 self._mcp_servers = mcp_servers or {}
47
48 async def initialize(self) -> None:
49 """Initialize MCP connections."""
50 for name, server_config in self._mcp_servers.items():
51 await self.mcp.add_server(
52 name=name,
53 command=server_config["command"],
54 args=server_config.get("args", [])
55 )
56
57 # Collect all tools
58 self.tools = self.mcp.list_tools()
59 print(f"Loaded {len(self.tools)} tools from {len(self._mcp_servers)} servers")
60
61 def _get_anthropic_tools(self) -> list[dict]:
62 """Convert tools to Anthropic format."""
63 return [
64 {
65 "name": tool.name,
66 "description": tool.description,
67 "input_schema": tool.input_schema
68 }
69 for tool in self.tools
70 ]
71
72 async def chat(self, message: str) -> str:
73 """Send a message and get a response."""
74
75 self.messages.append({
76 "role": "user",
77 "content": message
78 })
79
80 tools = self._get_anthropic_tools()
81
82 for iteration in range(self.config.max_iterations):
83 print(f"\n--- Iteration {iteration + 1} ---")
84
85 response = self.client.messages.create(
86 model=self.config.model,
87 max_tokens=self.config.max_tokens,
88 system=self.config.system_prompt,
89 tools=tools if tools else None,
90 messages=self.messages
91 )
92
93 print(f"Stop reason: {response.stop_reason}")
94
95 # Handle text response
96 if response.stop_reason == "end_turn":
97 self.messages.append({
98 "role": "assistant",
99 "content": response.content
100 })
101
102 # Extract final text
103 return self._extract_text(response.content)
104
105 # Handle tool use
106 elif response.stop_reason == "tool_use":
107 self.messages.append({
108 "role": "assistant",
109 "content": response.content
110 })
111
112 tool_results = await self._process_tool_calls(response.content)
113
114 self.messages.append({
115 "role": "user",
116 "content": tool_results
117 })
118
119 else:
120 return f"Unexpected stop reason: {response.stop_reason}"
121
122 return "Reached maximum iterations"
123
124 async def _process_tool_calls(self, content: list) -> list[dict]:
125 """Process tool use blocks and return results."""
126 results = []
127
128 for block in content:
129 if hasattr(block, "type") and block.type == "tool_use":
130 print(f"Calling tool: {block.name}")
131 print(f"Arguments: {block.input}")
132
133 try:
134 mcp_result = await self.mcp.call_tool(
135 block.name,
136 block.input
137 )
138
139 # Format result
140 result_text = self._format_mcp_result(mcp_result)
141 print(f"Result: {result_text[:200]}...")
142
143 results.append({
144 "type": "tool_result",
145 "tool_use_id": block.id,
146 "content": result_text
147 })
148
149 except Exception as e:
150 print(f"Tool error: {e}")
151 results.append({
152 "type": "tool_result",
153 "tool_use_id": block.id,
154 "content": f"Error: {str(e)}",
155 "is_error": True
156 })
157
158 return results
159
160 def _format_mcp_result(self, result: dict) -> str:
161 """Format MCP result to string."""
162 content = result.get("content", [])
163 parts = []
164
165 for item in content:
166 if isinstance(item, dict):
167 if item.get("type") == "text":
168 parts.append(item.get("text", ""))
169 elif item.get("type") == "resource":
170 resource = item.get("resource", {})
171 parts.append(resource.get("text", str(resource)))
172
173 return "\n".join(parts) if parts else str(result)
174
175 def _extract_text(self, content: list) -> str:
176 """Extract text from response content."""
177 parts = []
178 for block in content:
179 if hasattr(block, "type") and block.type == "text":
180 parts.append(block.text)
181 return "\n".join(parts)
182
183 async def cleanup(self) -> None:
184 """Clean up MCP connections."""
185 await self.mcp.disconnect_all()
186
187
188# Usage example
async def main():
    """Wire an interactive REPL around an MCP-powered agent."""
    # Each entry maps a server name to the subprocess that hosts it.
    server_specs = {
        "filesystem": {
            "command": "python",
            "args": ["servers/filesystem_server.py"]
        },
        "web": {
            "command": "python",
            "args": ["servers/web_server.py"]
        }
    }

    # Create the agent, then spawn/connect the MCP servers.
    agent = MCPPoweredAgent(mcp_servers=server_specs)
    await agent.initialize()

    try:
        print("Agent ready. Type 'quit' to exit.\n")

        while True:
            line = input("You: ").strip()
            if line.lower() == "quit":
                break
            if not line:
                continue
            reply = await agent.chat(line)
            print(f"\nAssistant: {reply}\n")

    finally:
        # Always tear down the MCP server subprocesses, even when the
        # loop exits via an exception.
        await agent.cleanup()
224
225
226if __name__ == "__main__":
    asyncio.run(main())
Production Enhancement
For production, add streaming support, better error handling, token counting, and cost tracking. Consider using Claude's extended thinking for complex reasoning tasks.
Best Practices
Key practices for robust MCP agent integration:
1. Dynamic Tool Discovery
πdynamic_tools.py
class DynamicToolAgent(MCPPoweredAgent):
    """Agent that refreshes tools when servers change."""

    async def refresh_tools(self) -> None:
        """Refresh tool list from all servers.

        Rebuilds self.tools from scratch by re-running discovery on
        every connected server.
        """
        self.tools = []
        for server in self.mcp.servers.values():
            # NOTE(review): reaches into the client's private
            # _discover_tools(); a public refresh API would be
            # cleaner -- confirm against MCPClientManager.
            await server._discover_tools()
            self.tools.extend(server.tools)

        print(f"Refreshed: {len(self.tools)} tools available")

    def add_tool_change_listener(self) -> None:
        """Listen for tool list changes."""
        # MCP servers can notify of tool changes; schedule the async
        # refresh from the synchronous notification callback.
        self.mcp.on_notification(
            "notifications/tools/list_changed",
            lambda: asyncio.create_task(self.refresh_tools())
        )
2. Error Recovery
πerror_recovery.py
async def _process_tool_calls_with_retry(
    self,
    content: list,
    max_retries: int = 3
) -> list[dict]:
    """Process tool calls with retry logic.

    Each tool_use block is attempted up to max_retries times. Only
    ConnectionError triggers a retry -- any other exception
    propagates to the caller. After the final failed attempt an
    is_error tool result is recorded so the model still receives a
    response for that call.
    """
    results = []

    for block in content:
        if block.type != "tool_use":
            continue

        for attempt in range(max_retries):
            try:
                result = await self.mcp.call_tool(
                    block.name,
                    block.input
                )

                results.append({
                    "type": "tool_result",
                    "tool_use_id": block.id,
                    "content": self._format_mcp_result(result)
                })
                break  # success -- stop retrying this block

            except ConnectionError as e:
                if attempt < max_retries - 1:
                    print(f"Retry {attempt + 1} for {block.name}")
                    # Fixed 1-second pause between attempts (no
                    # exponential backoff).
                    await asyncio.sleep(1)
                else:
                    # Out of retries: surface the failure to the LLM
                    # as an error result instead of raising.
                    results.append({
                        "type": "tool_result",
                        "tool_use_id": block.id,
                        "content": f"Failed after {max_retries} attempts: {e}",
                        "is_error": True
                    })
38
    return results
3. Tool Filtering
πtool_filtering.py
1def get_relevant_tools(
2 self,
3 context: str,
4 max_tools: int = 20
5) -> list[dict]:
6 """Return most relevant tools for current context."""
7
8 # Simple keyword matching (could use embeddings)
9 context_lower = context.lower()
10
11 scored_tools = []
12 for tool in self.tools:
13 score = 0
14
15 # Check name match
16 if any(word in tool.name for word in context_lower.split()):
17 score += 2
18
19 # Check description match
20 desc_lower = tool.description.lower()
21 for word in context_lower.split():
22 if word in desc_lower:
23 score += 1
24
25 scored_tools.append((tool, score))
26
27 # Sort by score and take top N
28 scored_tools.sort(key=lambda x: x[1], reverse=True)
29
30 return [
31 self._to_anthropic_format(tool)
32 for tool, score in scored_tools[:max_tools]
    ]
4. Caching Tool Results
πtool_caching.py
1from functools import lru_cache
2import hashlib
3import json
4
class CachingToolExecutor:
    """Cache tool results for idempotent operations.

    Entries are keyed on (tool name, canonical JSON of arguments).
    Callers opt in per call via ``cacheable`` since only
    side-effect-free tools are safe to cache.
    """

    def __init__(self, cache_size: int = 100):
        # Plain dict; insertion order doubles as age order for
        # eviction (FIFO, not true LRU -- cache hits do not refresh
        # an entry's position).
        self._cache: dict[str, dict] = {}
        self._cache_size = cache_size

    def _cache_key(self, name: str, args: dict) -> str:
        """Generate cache key for tool call."""
        # sort_keys makes the key independent of argument ordering.
        args_str = json.dumps(args, sort_keys=True)
        return hashlib.sha256(
            f"{name}:{args_str}".encode()
        ).hexdigest()

    async def execute(
        self,
        mcp: "MCPClientManager",
        name: str,
        args: dict,
        cacheable: bool = False
    ) -> dict:
        """Execute tool with optional caching.

        With cacheable=True, a prior result for the same name/args is
        returned without contacting the server; fresh results are
        stored and the oldest entry is evicted once the cache exceeds
        cache_size.
        """

        if cacheable:
            key = self._cache_key(name, args)
            if key in self._cache:
                print(f"Cache hit: {name}")
                return self._cache[key]

        result = await mcp.call_tool(name, args)

        if cacheable:
            self._cache[key] = result
            # Evict old entries if cache is full
            if len(self._cache) > self._cache_size:
                # next(iter(...)) yields the oldest inserted key.
                oldest = next(iter(self._cache))
                del self._cache[oldest]
        return result
Summary
Key points for integrating MCP with agents:
- Tool conversion: Translate MCP schemas to LLM provider formats (Anthropic, OpenAI)
- Result formatting: Convert MCP results back to message format
- Agent loop: Integrate tool discovery and execution into the reasoning loop
- Dynamic discovery: Refresh tools when servers change
- Error handling: Implement retries and graceful degradation
- Optimization: Filter relevant tools, cache idempotent results
Chapter Complete: You now understand MCP from protocol to integration. In the next chapter, we'll explore memory systemsβhow to give your agents persistent, contextual knowledge.