Introduction
Modern agent systems benefit from being platform-agnostic. Whether you're using Claude, GPT-4, Gemini, or open-source models, your agent architecture should adapt. This section explores patterns for building portable agents.
The Portability Principle: Design agents so the LLM is a replaceable component. This provides resilience, flexibility, and the ability to choose the best model for each task.
Why Multi-Platform
Business Reasons
- Vendor independence: Avoid lock-in to a single provider
- Cost optimization: Use cheaper models for simpler tasks
- Reliability: Fallback when one provider has issues
- Capability matching: Different models excel at different tasks
Technical Reasons
| Model | Strength | Best For |
|---|---|---|
| Claude Opus | Long context, nuanced reasoning | Complex analysis, long documents |
| GPT-4o | Speed, multimodal | Real-time applications, vision |
| o3 | Deep reasoning | Complex problem-solving |
| Gemini 2.5 Flash | Fast, cheap | High-volume, simple tasks |
| Gemini 2.5 Pro | Long context, multimodal | Large codebases, video |
| Open source (Llama) | Privacy, customization | On-premise, fine-tuning |
The LLM Abstraction Layer
A clean abstraction layer allows swapping providers without changing agent logic:
🐍llm_abstraction.py
1from abc import ABC, abstractmethod
2from dataclasses import dataclass
3from typing import Any
4
@dataclass
class Message:
    """A single turn in a provider-agnostic chat transcript."""

    role: str  # one of "user", "assistant", or "system"
    content: str
10@dataclass
11class ToolCall:
12 name: str
13 arguments: dict[str, Any]
14
15@dataclass
16class LLMResponse:
17 content: str | None
18 tool_calls: list[ToolCall]
19 finish_reason: str
20 usage: dict[str, int]
21
class LLMProvider(ABC):
    """Abstract base class for LLM providers.

    Agent code talks only to this interface, so concrete backends
    (Anthropic, OpenAI, ...) can be swapped without touching agent logic.
    """

    @abstractmethod
    def generate(
        self,
        messages: list[Message],
        tools: list[dict] | None = None,
        temperature: float = 0.7,
        max_tokens: int = 4096,
    ) -> LLMResponse:
        """Generate a response from the LLM.

        Args:
            messages: Conversation so far, oldest first.
            tools: Provider-agnostic tool schemas, or None for no tools.
            temperature: Sampling temperature.
            max_tokens: Upper bound on generated tokens.

        Returns:
            The model output normalized to an LLMResponse.
        """

    @abstractmethod
    def supports_tools(self) -> bool:
        """Check if this provider supports native tool calling."""

    @abstractmethod
    def max_context_length(self) -> int:
        """Return the maximum context length."""
        pass

Anthropic Implementation
🐍anthropic_provider.py
1import anthropic
2
class AnthropicProvider(LLMProvider):
    """Anthropic Claude implementation of the LLMProvider interface."""

    def __init__(self, model: str = "claude-sonnet-4-20250514"):
        # The client reads ANTHROPIC_API_KEY from the environment.
        self.client = anthropic.Anthropic()
        self.model = model

    def generate(
        self,
        messages: list[Message],
        tools: list[dict] | None = None,
        temperature: float = 0.7,
        max_tokens: int = 4096,
    ) -> LLMResponse:
        """Generate a response, translating to and from Anthropic's API shapes."""
        # Anthropic takes the system prompt as a separate parameter,
        # not as a message, so split it out of the transcript.
        anthropic_messages = [
            {"role": m.role, "content": m.content}
            for m in messages
            if m.role != "system"
        ]
        system = next(
            (m.content for m in messages if m.role == "system"),
            None,
        )

        # Only include optional parameters that are actually set: an explicit
        # None for `system`/`tools` may be rejected by the SDK (it expects the
        # parameters to be omitted instead) - safer to build kwargs conditionally.
        kwargs: dict = {
            "model": self.model,
            "max_tokens": max_tokens,
            "messages": anthropic_messages,
            "temperature": temperature,
        }
        if system is not None:
            kwargs["system"] = system
        if tools:
            # Anthropic uses "input_schema" where the common format says "parameters".
            kwargs["tools"] = [
                {
                    "name": t["name"],
                    "description": t["description"],
                    "input_schema": t["parameters"],
                }
                for t in tools
            ]

        response = self.client.messages.create(**kwargs)

        # Convert response to common format
        return self._convert_response(response)

    def _convert_response(self, response) -> LLMResponse:
        """Map an Anthropic Messages API response onto LLMResponse."""
        content = None
        tool_calls = []

        # Claude responses are a list of content blocks: text and/or tool_use.
        for block in response.content:
            if block.type == "text":
                content = block.text
            elif block.type == "tool_use":
                tool_calls.append(ToolCall(
                    name=block.name,
                    arguments=block.input,
                ))

        return LLMResponse(
            content=content,
            tool_calls=tool_calls,
            finish_reason=response.stop_reason,
            usage={
                "input_tokens": response.usage.input_tokens,
                "output_tokens": response.usage.output_tokens,
            },
        )

    def supports_tools(self) -> bool:
        """Claude supports native tool use."""
        return True

    def max_context_length(self) -> int:
        """Maximum context window in tokens."""
        return 200000  # Claude's context window

OpenAI Implementation
🐍openai_provider.py
import json

from openai import OpenAI
class OpenAIProvider(LLMProvider):
    """OpenAI GPT implementation of the LLMProvider interface."""

    def __init__(self, model: str = "gpt-4o"):
        # The client reads OPENAI_API_KEY from the environment.
        self.client = OpenAI()
        self.model = model

    def generate(
        self,
        messages: list[Message],
        tools: list[dict] | None = None,
        temperature: float = 0.7,
        max_tokens: int = 4096,
    ) -> LLMResponse:
        """Generate a response, translating to and from OpenAI's API shapes."""
        # OpenAI keeps the system prompt in the message list, so the
        # transcript maps across directly.
        openai_messages = [
            {"role": m.role, "content": m.content}
            for m in messages
        ]

        # Omit `tools` entirely when none are provided rather than sending None.
        kwargs: dict = {
            "model": self.model,
            "messages": openai_messages,
            "temperature": temperature,
            "max_tokens": max_tokens,
        }
        if tools:
            # OpenAI wraps each tool schema in a {"type": "function"} envelope.
            kwargs["tools"] = [
                {
                    "type": "function",
                    "function": {
                        "name": t["name"],
                        "description": t["description"],
                        "parameters": t["parameters"],
                    },
                }
                for t in tools
            ]

        response = self.client.chat.completions.create(**kwargs)

        return self._convert_response(response)

    def _convert_response(self, response) -> LLMResponse:
        """Map a Chat Completions response onto LLMResponse."""
        message = response.choices[0].message

        tool_calls = []
        if message.tool_calls:
            for tc in message.tool_calls:
                # OpenAI returns tool arguments as a JSON string; the
                # common format wants a parsed dict.
                tool_calls.append(ToolCall(
                    name=tc.function.name,
                    arguments=json.loads(tc.function.arguments),
                ))

        return LLMResponse(
            content=message.content,
            tool_calls=tool_calls,
            finish_reason=response.choices[0].finish_reason,
            usage={
                "input_tokens": response.usage.prompt_tokens,
                "output_tokens": response.usage.completion_tokens,
            },
        )

    def supports_tools(self) -> bool:
        """GPT models support native function calling."""
        return True

    def max_context_length(self) -> int:
        """Maximum context window in tokens."""
        return 128000  # GPT-4o context window

Tool Compatibility
Tools need a common format that can be translated to each provider:
🐍tool_compatibility.py
1from dataclasses import dataclass
2from typing import Any, Callable
3
@dataclass
class Tool:
    """Platform-agnostic tool definition.

    Holds one JSON-Schema-described tool plus the Python callable that
    executes it, and knows how to render itself in each provider's
    wire format.
    """

    name: str
    description: str
    parameters: dict[str, Any]      # JSON Schema for the tool's arguments
    handler: Callable[[dict], str]  # executes the tool; returns its result as text

    def to_anthropic(self) -> dict:
        """Convert to Anthropic tool format (schema goes under "input_schema")."""
        return {
            "name": self.name,
            "description": self.description,
            "input_schema": self.parameters,
        }

    def to_openai(self) -> dict:
        """Convert to OpenAI tool format ({"type": "function"} envelope)."""
        return {
            "type": "function",
            "function": {
                "name": self.name,
                "description": self.description,
                "parameters": self.parameters,
            },
        }

    def to_gemini(self) -> dict:
        """Convert to Gemini tool format (plain function declaration)."""
        return {
            "name": self.name,
            "description": self.description,
            "parameters": self.parameters,
        }
41# Example tool definition
42read_file_tool = Tool(
43 name="read_file",
44 description="Read the contents of a file at the given path",
45 parameters={
46 "type": "object",
47 "properties": {
48 "path": {
49 "type": "string",
50 "description": "The file path to read",
51 },
52 },
53 "required": ["path"],
54 },
55 handler=lambda args: open(args["path"]).read(),
)

Tool Registry
🐍tool_registry.py
class ToolRegistry:
    """Manage tools across platforms: lookup, execution, and format export."""

    def __init__(self):
        # Annotations referencing Tool are quoted: this snippet does not
        # import Tool, so eager evaluation would fail at class creation.
        self.tools: "dict[str, Tool]" = {}

    def register(self, tool: "Tool") -> None:
        """Add (or replace) a tool, keyed by its name."""
        self.tools[tool.name] = tool

    def get(self, name: str) -> "Tool | None":
        """Return the tool named `name`, or None if not registered."""
        return self.tools.get(name)

    def execute(self, name: str, arguments: dict) -> str:
        """Run a registered tool's handler on `arguments`.

        Raises:
            ValueError: If no tool with `name` is registered.
        """
        tool = self.get(name)
        if not tool:
            raise ValueError(f"Unknown tool: {name}")
        return tool.handler(arguments)

    def to_provider_format(self, provider: str) -> list[dict]:
        """Convert all tools to provider-specific format.

        Raises:
            ValueError: If `provider` is not anthropic/openai/gemini.
        """
        converters = {
            "anthropic": lambda t: t.to_anthropic(),
            "openai": lambda t: t.to_openai(),
            "gemini": lambda t: t.to_gemini(),
        }

        converter = converters.get(provider)
        if not converter:
            raise ValueError(f"Unknown provider: {provider}")
        return [converter(t) for t in self.tools.values()]

Implementation Patterns
Pattern 1: Provider Factory
🐍provider_factory.py
from enum import Enum


class ProviderType(Enum):
    """Identifier for each supported LLM backend."""

    ANTHROPIC = "anthropic"
    OPENAI = "openai"
    GEMINI = "gemini"
    LOCAL = "local"
class ProviderFactory:
    """Factory for creating LLM providers with sensible default models."""

    @staticmethod
    def create(
        provider_type: ProviderType,
        model: str | None = None,
    ) -> LLMProvider:
        """Instantiate the provider for `provider_type`.

        Args:
            provider_type: Which backend to build.
            model: Optional model name; each backend has a default.

        Raises:
            ValueError: If `provider_type` is not a known backend.
        """
        if provider_type == ProviderType.ANTHROPIC:
            return AnthropicProvider(model or "claude-sonnet-4-20250514")

        elif provider_type == ProviderType.OPENAI:
            return OpenAIProvider(model or "gpt-4o")

        elif provider_type == ProviderType.GEMINI:
            return GeminiProvider(model or "gemini-2.5-flash")

        elif provider_type == ProviderType.LOCAL:
            return LocalProvider(model or "llama3.3-70b")

        raise ValueError(f"Unknown provider: {provider_type}")


# Usage
provider = ProviderFactory.create(ProviderType.ANTHROPIC)
response = provider.generate(messages, tools=tools)

Pattern 2: Multi-Provider Agent
🐍multi_provider_agent.py
1class MultiProviderAgent:
2 """Agent that can use multiple LLM providers."""
3
4 def __init__(
5 self,
6 primary: LLMProvider,
7 fallback: LLMProvider | None = None,
8 task_router: dict[str, LLMProvider] | None = None,
9 ):
10 self.primary = primary
11 self.fallback = fallback
12 self.task_router = task_router or {}
13 self.tools = ToolRegistry()
14
15 def run(self, task: str) -> str:
16 # Route to specialized provider if available
17 provider = self._select_provider(task)
18
19 messages = [
20 Message(role="system", content=self.system_prompt),
21 Message(role="user", content=task),
22 ]
23
24 try:
25 return self._execute_with_provider(provider, messages)
26 except Exception as e:
27 if self.fallback:
28 print(f"Primary failed: {e}. Using fallback.")
29 return self._execute_with_provider(self.fallback, messages)
30 raise
31
32 def _select_provider(self, task: str) -> LLMProvider:
33 """Select provider based on task type."""
34 task_lower = task.lower()
35
36 for keyword, provider in self.task_router.items():
37 if keyword in task_lower:
38 return provider
39
40 return self.primary
41
42 def _execute_with_provider(
43 self,
44 provider: LLMProvider,
45 messages: list[Message],
46 ) -> str:
47 # Determine provider type for tool format
48 provider_name = provider.__class__.__name__.lower()
49 if "anthropic" in provider_name:
50 tools = self.tools.to_provider_format("anthropic")
51 elif "openai" in provider_name:
52 tools = self.tools.to_provider_format("openai")
53 else:
54 tools = self.tools.to_provider_format("gemini")
55
56 # Agent loop
57 while True:
58 response = provider.generate(messages, tools=tools)
59
60 if response.tool_calls:
61 # Execute tools
62 for tc in response.tool_calls:
63 result = self.tools.execute(tc.name, tc.arguments)
64 messages.append(Message(
65 role="assistant",
66 content=f"Tool {tc.name} called",
67 ))
68 messages.append(Message(
69 role="user",
70 content=f"Tool result: {result}",
71 ))
72 else:
73 return response.content
74
75
76# Usage
77agent = MultiProviderAgent(
78 primary=AnthropicProvider("claude-sonnet-4-20250514"),
79 fallback=OpenAIProvider("gpt-4o"),
80 task_router={
81 "analyze image": OpenAIProvider("gpt-4o"), # Vision tasks
82 "long document": AnthropicProvider("claude-opus-4-20250514"), # Long context
83 "quick fix": OpenAIProvider("gpt-4o-mini"), # Fast, cheap
84 },
)

Pattern 3: Capability-Based Routing
🐍capability_routing.py
from dataclasses import dataclass


@dataclass
class ModelCapabilities:
    """Static capability profile used to match tasks to models."""

    max_context: int            # context window in tokens
    supports_vision: bool
    supports_tools: bool
    reasoning_strength: int     # 1-10, higher is stronger
    speed: int                  # 1-10, higher is faster
    cost_per_1k_tokens: float


# Capability table keyed by model name; extend as new models are adopted.
CAPABILITIES = {
    "claude-opus-4-20250514": ModelCapabilities(
        max_context=200000,
        supports_vision=True,
        supports_tools=True,
        reasoning_strength=10,
        speed=5,
        cost_per_1k_tokens=0.015,
    ),
    "claude-sonnet-4-20250514": ModelCapabilities(
        max_context=200000,
        supports_vision=True,
        supports_tools=True,
        reasoning_strength=8,
        speed=7,
        cost_per_1k_tokens=0.003,
    ),
    "gpt-4o": ModelCapabilities(
        max_context=128000,
        supports_vision=True,
        supports_tools=True,
        reasoning_strength=8,
        speed=8,
        cost_per_1k_tokens=0.005,
    ),
    "gpt-4o-mini": ModelCapabilities(
        max_context=128000,
        supports_vision=True,
        supports_tools=True,
        reasoning_strength=6,
        speed=10,
        cost_per_1k_tokens=0.00015,
    ),
}
46
47
class CapabilityRouter:
    """Route tasks to models based on required capabilities."""

    def select_model(
        self,
        task: str,
        context_length: int,
        requires_vision: bool = False,
        requires_tools: bool = True,
        min_reasoning: int = 5,
        max_cost: float | None = None,
    ) -> str:
        """Pick the strongest model that satisfies every requirement.

        Args:
            task: Task description (currently unused; kept for future
                content-aware routing).
            context_length: Tokens the prompt needs to fit.
            requires_vision: Whether image input is needed.
            requires_tools: Whether native tool calling is needed.
            min_reasoning: Minimum acceptable reasoning strength (1-10).
            max_cost: Optional ceiling on cost per 1k tokens.

        Raises:
            ValueError: If no known model meets all requirements.
        """
        candidates = []

        for model, caps in CAPABILITIES.items():
            # Filter by hard requirements
            if context_length > caps.max_context:
                continue
            if requires_vision and not caps.supports_vision:
                continue
            if requires_tools and not caps.supports_tools:
                continue
            if caps.reasoning_strength < min_reasoning:
                continue
            # Fix: compare against None so a ceiling of 0 still filters
            # (a bare truthiness check silently ignored max_cost=0).
            if max_cost is not None and caps.cost_per_1k_tokens > max_cost:
                continue

            candidates.append((model, caps))

        if not candidates:
            raise ValueError("No model meets requirements")

        # Sort by reasoning strength (primary) then speed (secondary)
        candidates.sort(
            key=lambda x: (x[1].reasoning_strength, x[1].speed),
            reverse=True,
        )
        return candidates[0][0]

Start Simple
Begin with a single provider. Add multi-provider support when you have clear requirements - routing logic adds complexity.
Summary
Multi-platform agent design:
- Abstraction layer: Common interface for all LLM providers
- Tool compatibility: Convert tools to provider-specific formats
- Provider factory: Easy instantiation of different providers
- Task routing: Match tasks to optimal models
- Capability-based: Select models by requirements
Next: Let's explore safety and verification - how Codex ensures its actions are correct and secure.