Chapter 15
15 min read
Section 94 of 175

Conditional Routing

LangGraph Deep Dive

Introduction

Conditional routing is what makes LangGraph workflows dynamic and intelligent. Instead of following fixed paths, your graph can make decisions about which nodes to execute based on state, content, or LLM reasoning.

Section Overview: We'll explore various routing strategies, LLM-based decision making, parallel execution patterns, and graph composition with subgraphs.

Routing Strategies

State-Based Routing

🐍python
from typing import TypedDict, Annotated, Literal
from langgraph.graph import StateGraph, START, END
import operator


class TaskState(TypedDict):
    """Shared state for the task-routing examples.

    ``results`` uses the ``operator.add`` reducer, so every node's
    partial update is concatenated rather than overwritten.
    """
    task_type: str  # e.g. "code", "research", "writing", "analysis"
    priority: int   # numeric priority; thresholds consumed by route_by_priority
    content: str    # raw task text inspected by content-based routers
    results: Annotated[list[str], operator.add]  # appended-to by each handler
def route_by_type(state: "TaskState") -> str:
    """Return the handler node name for the task's declared type.

    Unknown types fall back to ``general_handler``.
    """
    routes = {
        "code": "code_handler",
        "research": "research_handler",
        "writing": "writing_handler",
        "analysis": "analysis_handler",
    }
    return routes.get(state["task_type"], "general_handler")
def route_by_priority(state: "TaskState") -> str:
    """Map the numeric priority to a handler node name.

    priority >= 9 -> urgent, >= 5 -> normal, otherwise low priority.
    """
    priority = state["priority"]
    if priority >= 9:
        return "urgent_handler"
    if priority >= 5:
        return "normal_handler"
    return "low_priority_handler"
def build_routing_graph():
    """Build a two-stage routing graph.

    Stage 1 routes from START by task type to one of five type handlers;
    stage 2 routes each type handler's output by priority; every priority
    handler then terminates the run at END.
    """
    graph = StateGraph(TaskState)

    type_handlers = [
        "code_handler", "research_handler", "writing_handler",
        "analysis_handler", "general_handler",
    ]
    priority_handlers = [
        "urgent_handler", "normal_handler", "low_priority_handler",
    ]

    def make_handler(name: str):
        # Factory binds `name` immediately; a bare closure created in the
        # loop below would late-bind and report the last handler's name.
        def handler_fn(state: TaskState) -> dict:
            return {"results": [f"Handled by {name}"]}
        return handler_fn

    for handler in type_handlers + priority_handlers:
        graph.add_node(handler, make_handler(handler))

    # NOTE: an unused "priority_router" node with no edges was removed here;
    # LangGraph rejects unreachable nodes when the graph is compiled.

    # Stage 1: type-based routing from START.
    graph.add_conditional_edges(
        START,
        route_by_type,
        {h: h for h in type_handlers},
    )

    # Stage 2: priority-based routing from every type handler.
    for handler in type_handlers:
        graph.add_conditional_edges(
            handler,
            route_by_priority,
            {h: h for h in priority_handlers},
        )

    # All priority handlers terminate the run.
    for handler in priority_handlers:
        graph.add_edge(handler, END)

    return graph.compile()

Content-Based Routing

🐍python
import re


def route_by_content(state: "TaskState") -> str:
    """Pick a handler by inspecting the task content.

    Keyword checks run first, in priority order (code > research > writing),
    then regex patterns for arithmetic and URLs; defaults to the general
    handler when nothing matches.
    """
    content = state["content"].lower()

    # Keyword matching, highest-priority category first.
    keyword_routes = [
        (("python", "javascript", "code", "function"), "code_handler"),
        (("research", "study", "investigate"), "research_handler"),
        (("write", "draft", "compose"), "writing_handler"),
    ]
    for keywords, handler in keyword_routes:
        if any(kw in content for kw in keywords):
            return handler

    # Simple arithmetic such as "12 + 7" routes to the calculator.
    if re.search(r"\d+\s*[+\-*/]\s*\d+", content):
        return "calculator_handler"

    # Any http(s) URL routes to the URL handler.
    if re.search(r"https?://", content):
        return "url_handler"

    return "general_handler"
def route_by_length(state: "TaskState") -> str:
    """Route by content size: under 50 chars quick, under 500 standard,
    everything longer goes to the long-content handler."""
    length = len(state["content"])
    if length < 50:
        return "quick_handler"
    if length < 500:
        return "standard_handler"
    return "long_content_handler"

LLM-Based Routing

For complex decisions, use an LLM to determine the routing:

🐍python
from langchain_openai import ChatOpenAI
from langchain_core.messages import SystemMessage, HumanMessage
from pydantic import BaseModel, Field
from typing import Literal


class RouterDecision(BaseModel):
    """Structured output schema for the LLM routing decision."""

    # Closed Literal set keeps the model's answer machine-checkable.
    route: Literal["technical", "creative", "analytical", "general"] = Field(
        description="The category of task to route to"
    )
    confidence: float = Field(
        description="Confidence in the routing decision (0-1)"
    )
    reasoning: str = Field(
        description="Brief explanation of the routing decision"
    )


# temperature=0 keeps routing as deterministic as the model allows;
# with_structured_output forces replies into the RouterDecision schema.
llm = ChatOpenAI(model="gpt-4o", temperature=0)
structured_llm = llm.with_structured_output(RouterDecision)
def llm_router(state: "LLMRoutedState") -> dict:
    """Ask the LLM to classify the task and record its decision in state.

    Returns partial updates for ``routing_decision`` and
    ``routing_confidence``, plus the model's reasoning appended to
    ``results``.  (Annotation corrected to LLMRoutedState — that is the
    schema whose keys this node reads and writes.)
    """
    messages = [
        SystemMessage(content="""Analyze the task and decide which handler should process it:
- technical: coding, debugging, system design
- creative: writing, brainstorming, design
- analytical: data analysis, research, comparison
- general: simple questions, greetings, unclear tasks"""),
        HumanMessage(content=f"Task: {state['content']}"),
    ]

    decision = structured_llm.invoke(messages)

    return {
        "routing_decision": decision.route,
        "routing_confidence": decision.confidence,
        "results": [f"Router reasoning: {decision.reasoning}"],
    }
class LLMRoutedState(TypedDict):
    """State carried through the LLM-routed graph."""
    content: str               # task text the router classifies
    routing_decision: str      # route chosen by the LLM ("technical", ...)
    routing_confidence: float  # model's self-reported confidence, 0-1
    results: Annotated[list[str], operator.add]  # accumulated log lines


def get_llm_route(state: LLMRoutedState) -> str:
    """Conditional-edge selector: forward the LLM's stored decision."""
    return state["routing_decision"]
def build_llm_routed_graph():
    """Build a graph where an LLM node chooses the downstream handler."""
    graph = StateGraph(LLMRoutedState)

    graph.add_node("router", llm_router)

    categories = ("technical", "creative", "analytical", "general")

    def make_stub(message: str):
        # Factory binds `message` per node (avoids late-binding closures).
        def stub(state: LLMRoutedState) -> dict:
            return {"results": [message]}
        return stub

    # One stub handler per category; each just records that it ran.
    for name in categories:
        graph.add_node(name, make_stub(f"{name.capitalize()} processing"))

    graph.add_edge(START, "router")

    # The router's stored decision selects exactly one category node.
    graph.add_conditional_edges(
        "router",
        get_llm_route,
        {name: name for name in categories},
    )

    for name in categories:
        graph.add_edge(name, END)

    return graph.compile()

Parallel Branching

LangGraph supports parallel execution through fan-out patterns:

🐍python
from typing import TypedDict, Annotated
import asyncio  # NOTE(review): imported by the original example but unused here
import operator


class ParallelState(TypedDict):
    """State shared by the fan-out/fan-in example.

    Each branch writes only its own result key, so concurrent branch
    updates never collide and those keys need no reducer.
    """
    input: str            # the text every branch processes
    branch_a_result: str  # written solely by branch_a
    branch_b_result: str  # written solely by branch_b
    branch_c_result: str  # written solely by branch_c
    final_result: str     # written by the aggregate (fan-in) node
def branch_a(state: "ParallelState") -> dict:
    """Branch A: return its partial update for the shared state."""
    return {"branch_a_result": f"Branch A processed: {state['input']}"}


def branch_b(state: "ParallelState") -> dict:
    """Branch B: return its partial update for the shared state."""
    return {"branch_b_result": f"Branch B processed: {state['input']}"}


def branch_c(state: "ParallelState") -> dict:
    """Branch C: return its partial update for the shared state."""
    return {"branch_c_result": f"Branch C processed: {state['input']}"}
def aggregate_results(state: "ParallelState") -> dict:
    """Fan-in node: merge the three branch results into ``final_result``."""
    combined = f"""
    A: {state['branch_a_result']}
    B: {state['branch_b_result']}
    C: {state['branch_c_result']}
    """
    return {"final_result": combined}
def build_parallel_graph():
    """Build a fan-out/fan-in graph: three parallel branches, one join."""
    graph = StateGraph(ParallelState)

    graph.add_node("branch_a", branch_a)
    graph.add_node("branch_b", branch_b)
    graph.add_node("branch_c", branch_c)
    graph.add_node("aggregate", aggregate_results)

    for branch in ("branch_a", "branch_b", "branch_c"):
        # Fan-out: every branch starts directly from START.
        graph.add_edge(START, branch)
        # Fan-in: aggregate runs once all branches have completed.
        graph.add_edge(branch, "aggregate")

    graph.add_edge("aggregate", END)

    return graph.compile()
# Conditional parallel execution
def route_to_branches(state: "ParallelState") -> list[str]:
    """Dynamically select which branches to execute from input keywords.

    Always returns at least one branch (branch_a) so the graph progresses
    even when no keyword matches.
    """
    content = state["input"].lower()

    keyword_to_branch = [
        ("technical", "branch_a"),
        ("creative", "branch_b"),
        ("analytical", "branch_c"),
    ]
    branches = [branch for keyword, branch in keyword_to_branch if keyword in content]

    return branches or ["branch_a"]  # Default

Subgraphs and Composition

Complex workflows can be built by composing smaller graphs:

🐍python
from langgraph.graph import StateGraph, START, END


# Define a reusable subgraph
def create_research_subgraph():
    """Compile a two-step research subgraph: search, then analyze.

    The subgraph owns its private state schema; callers interact with it
    only through ``invoke({"query": ...})`` on the compiled graph.
    """

    class ResearchState(TypedDict):
        query: str
        sources: Annotated[list[str], operator.add]   # accumulated search hits
        findings: Annotated[list[str], operator.add]  # accumulated analysis notes

    def search(state: ResearchState) -> dict:
        # Stub: a real implementation would call a search tool here.
        return {"sources": [f"Source for: {state['query']}"]}

    def analyze(state: ResearchState) -> dict:
        return {"findings": ["Analysis complete"]}

    graph = StateGraph(ResearchState)
    graph.add_node("search", search)
    graph.add_node("analyze", analyze)
    graph.add_edge(START, "search")
    graph.add_edge("search", "analyze")
    graph.add_edge("analyze", END)

    return graph.compile()
def create_writing_subgraph():
    """Compile a two-step writing subgraph: outline, then draft.

    Callers interact with it through ``invoke({"topic": ...})`` on the
    compiled graph.
    """

    class WritingState(TypedDict):
        topic: str
        outline: str  # produced by the outline node, consumed by draft
        draft: str

    def create_outline(state: WritingState) -> dict:
        return {"outline": f"Outline for: {state['topic']}"}

    def write_draft(state: WritingState) -> dict:
        return {"draft": f"Draft based on: {state['outline']}"}

    graph = StateGraph(WritingState)
    graph.add_node("outline", create_outline)
    graph.add_node("draft", write_draft)
    graph.add_edge(START, "outline")
    graph.add_edge("outline", "draft")
    graph.add_edge("draft", END)

    return graph.compile()
# Compose subgraphs into main graph
def create_composed_graph():
    """Build a main graph that dispatches to compiled subgraphs.

    Each subgraph is wrapped in a node that adapts MainState to the
    subgraph's own input schema and stringifies the subgraph's result
    into ``output``.
    """

    class MainState(TypedDict):
        task_type: str  # "research" selects the research subgraph; anything else -> writing
        input: str
        output: str

    # Compile each subgraph once, up front; the wrapper nodes close over them.
    research_graph = create_research_subgraph()
    writing_graph = create_writing_subgraph()

    def research_node(state: MainState) -> dict:
        result = research_graph.invoke({"query": state["input"]})
        return {"output": str(result)}

    def writing_node(state: MainState) -> dict:
        result = writing_graph.invoke({"topic": state["input"]})
        return {"output": str(result)}

    def route_to_subgraph(state: MainState) -> str:
        # Only "research" is special-cased; every other task type writes.
        return "research" if state["task_type"] == "research" else "writing"

    graph = StateGraph(MainState)
    graph.add_node("research", research_node)
    graph.add_node("writing", writing_node)

    graph.add_conditional_edges(
        START,
        route_to_subgraph,
        {"research": "research", "writing": "writing"},
    )

    graph.add_edge("research", END)
    graph.add_edge("writing", END)

    return graph.compile()

Key Takeaways

  • State-based routing uses explicit state fields to make deterministic routing decisions.
  • Content-based routing analyzes the actual content to determine the appropriate handler.
  • LLM routing enables intelligent, context-aware decisions for complex categorization.
  • Parallel branching with fan-out/fan-in patterns enables concurrent processing.
  • Subgraph composition allows building complex workflows from reusable components.
Next Section Preview: We'll explore human-in-the-loop patterns for workflows that require human oversight or approval.