Introduction
One of LangGraph's most powerful features is native support for cyclic graphs - workflows that can loop back on themselves. This enables iterative refinement, retry logic, and agent loops that continue until a goal is achieved.
Section Overview: We'll explore loop patterns, iteration control mechanisms, refinement workflows, and the classic ReAct agent loop.
Loop Patterns
Basic Loop Structure
🐍python
1from typing import TypedDict, Annotated, Literal
2from langgraph.graph import StateGraph, START, END
3import operator
4
5
class LoopState(TypedDict):
    """Shared state threaded through the loop-based workflows.

    The `results` field is annotated with operator.add, so each node's
    returned list is appended to the accumulated list instead of
    replacing it.
    """
    input: str                  # original user input
    iteration: int              # passes completed so far
    max_iterations: int         # hard cap on loop passes
    results: Annotated[list[str], operator.add]  # accumulated per-pass output
    is_complete: bool           # set by the check node to exit the loop
13
14
def process_step(state: "LoopState") -> dict:
    """Run one loop iteration.

    Returns a partial state update: the incremented iteration counter
    and a one-element list that the operator.add reducer appends to
    `results`.

    Bug fix: the message used square brackets (`[iteration + 1]`), which
    an f-string treats as literal text; switched to curly braces so the
    value actually interpolates.
    """
    iteration = state["iteration"]
    # {iteration + 1} is the 1-based number of the pass just completed.
    result = f"Processed iteration {iteration + 1}"

    return {
        "iteration": iteration + 1,
        "results": [result]
    }
24
25
def check_complete(state: LoopState) -> dict:
    """Decide whether the loop has run its allotted passes.

    Writes `is_complete` back into state; the routing function reads it.
    """
    # Completion here is purely count-based; richer conditions (quality,
    # timeout) could be added without changing this node's interface.
    done = state["iteration"] >= state["max_iterations"]
    return {"is_complete": done}
35
36
def should_continue(state: LoopState) -> Literal["process", "end"]:
    """Route back into the loop until the check node flags completion."""
    return "end" if state["is_complete"] else "process"
42
43
def build_basic_loop():
    """Compile a two-node graph whose conditional edge forms the loop."""
    graph = StateGraph(LoopState)

    # Worker node plus a separate completion-check node.
    graph.add_node("process", process_step)
    graph.add_node("check", check_complete)

    # Linear path in: START -> process -> check.
    graph.add_edge(START, "process")
    graph.add_edge("process", "check")

    # Mapping "process" back to itself is what makes the graph cyclic.
    graph.add_conditional_edges(
        "check",
        should_continue,
        {"process": "process", "end": END},
    )

    return graph.compile()
67
68
69# Usage
def run_loop():
    """Demo driver: run the basic loop for three iterations.

    Bug fix: the print statements used square brackets, so the f-strings
    printed the literal text instead of the result values; switched to
    curly braces.
    """
    agent = build_basic_loop()

    # Seed every state field; `results` starts empty and accumulates.
    result = agent.invoke({
        "input": "test",
        "iteration": 0,
        "max_iterations": 3,
        "results": [],
        "is_complete": False
    })

    print(f"Iterations: {result['iteration']}")
    print(f"Results: {result['results']}")
Self-Loop Pattern
🐍python
def build_self_loop():
    """Build a graph where a single node loops back onto itself.

    Combines the process and check steps into one node, then uses a
    conditional edge that can target the same node ("step" -> "step").

    Bug fix: the step message used `[iteration]`, which an f-string
    leaves as literal text; switched to `{iteration}` so the step number
    interpolates.
    """
    graph = StateGraph(LoopState)

    def combined_step(state: LoopState) -> dict:
        """Do the work and the completion check in a single node."""
        iteration = state["iteration"] + 1
        result = f"Step {iteration}"
        is_complete = iteration >= state["max_iterations"]

        return {
            "iteration": iteration,
            "results": [result],
            "is_complete": is_complete
        }

    def should_loop(state: LoopState) -> Literal["loop", "end"]:
        if state["is_complete"]:
            return "end"
        return "loop"

    graph.add_node("step", combined_step)

    graph.add_edge(START, "step")

    graph.add_conditional_edges(
        "step",
        should_loop,
        {
            "loop": "step",  # Self-loop
            "end": END
        }
    )
    return graph.compile()
Iteration Control
Controlling when loops terminate is critical for preventing infinite loops and ensuring quality outputs:
🐍python
1from typing import TypedDict, Annotated, Literal, Optional
2import operator
3import time
4
5
class ControlledLoopState(TypedDict):
    """Loop state carrying three independent stop conditions."""
    query: str
    iteration: int              # passes completed
    max_iterations: int         # stop condition 1: iteration cap
    start_time: float           # wall-clock start, seconds since epoch
    timeout_seconds: float      # stop condition 2: time budget
    quality_score: float        # latest measured quality
    quality_threshold: float    # stop condition 3: good-enough bar
    results: Annotated[list[str], operator.add]  # accumulated outputs
    termination_reason: Optional[str]  # which condition fired, set at exit
17
18
def create_controlled_loop():
    """Build a loop that stops on iteration cap, timeout, or quality.

    Each termination condition routes to its own terminal node so the
    final state records *why* the loop ended in `termination_reason`.

    Bug fix: the result message used square brackets, so the f-string
    never interpolated the iteration number or quality; switched to
    curly braces (including the `:.2f` format spec).
    """

    def process_with_quality(state: ControlledLoopState) -> dict:
        """Run one pass and report a (simulated) quality score."""
        iteration = state["iteration"] + 1

        # Simulated quality improves 0.2 per pass, capped at 0.95.
        quality = min(0.2 * iteration, 0.95)
        result = f"Result {iteration} (quality: {quality:.2f})"

        return {
            "iteration": iteration,
            "quality_score": quality,
            "results": [result]
        }

    def check_termination(state: ControlledLoopState) -> Literal["continue", "max_iter", "timeout", "quality"]:
        """Evaluate the stop conditions in priority order."""

        # 1. Iteration cap.
        if state["iteration"] >= state["max_iterations"]:
            return "max_iter"

        # 2. Wall-clock budget.
        elapsed = time.time() - state["start_time"]
        if elapsed > state["timeout_seconds"]:
            return "timeout"

        # 3. Good-enough quality.
        if state["quality_score"] >= state["quality_threshold"]:
            return "quality"

        return "continue"

    def set_termination(reason: str):
        """Factory for a terminal node that records why the loop stopped."""
        def node(state: ControlledLoopState) -> dict:
            return {"termination_reason": reason}
        return node

    # Build graph: one worker node, one terminal node per stop condition.
    graph = StateGraph(ControlledLoopState)

    graph.add_node("process", process_with_quality)
    graph.add_node("term_max_iter", set_termination("max_iterations_reached"))
    graph.add_node("term_timeout", set_termination("timeout"))
    graph.add_node("term_quality", set_termination("quality_threshold_met"))

    graph.add_edge(START, "process")

    graph.add_conditional_edges(
        "process",
        check_termination,
        {
            "continue": "process",
            "max_iter": "term_max_iter",
            "timeout": "term_timeout",
            "quality": "term_quality"
        }
    )

    graph.add_edge("term_max_iter", END)
    graph.add_edge("term_timeout", END)
    graph.add_edge("term_quality", END)
    return graph.compile()
Refinement Loops
Refinement loops iteratively improve outputs based on feedback:
🐍python
1from langchain_openai import ChatOpenAI
2from langchain_core.messages import HumanMessage, SystemMessage
3
4
class RefinementState(TypedDict):
    """State for a generate -> critique -> refine cycle."""
    task: str               # what to write
    current_draft: str      # latest draft text
    feedback: str           # critic's most recent notes
    iteration: int          # drafts produced so far
    max_iterations: int     # refinement budget
    is_approved: bool       # critic signed off
    history: Annotated[list[dict], operator.add]  # every draft, in order
14
15
# Writer model: non-zero temperature for varied drafts; critic model:
# temperature 0 so evaluations are deterministic.
llm = ChatOpenAI(model="gpt-4o", temperature=0.7)
critic_llm = ChatOpenAI(model="gpt-4o", temperature=0.0)
18
19
def generate_draft(state: RefinementState) -> dict:
    """Produce the first draft, or revise the current one using feedback.

    On iteration 0 this generates from scratch; afterwards it rewrites
    `current_draft` to address the critic's `feedback`. Each draft is
    also appended to `history` via the operator.add reducer.

    Bug fix: every prompt placeholder used square brackets, so the task,
    draft, and feedback were sent as literal bracket text instead of
    being interpolated; switched to curly braces.
    """
    iteration = state["iteration"]

    if iteration == 0:
        # Initial generation
        messages = [
            SystemMessage(content="You are a skilled writer."),
            HumanMessage(content=f"Write a first draft for: {state['task']}")
        ]
    else:
        # Refinement based on feedback
        messages = [
            SystemMessage(content="You are a skilled writer improving your work."),
            HumanMessage(content=f"""
Task: {state['task']}

Current draft:
{state['current_draft']}

Feedback to address:
{state['feedback']}

Please improve the draft based on this feedback.
""")
        ]

    response = llm.invoke(messages)

    return {
        "current_draft": response.content,
        "iteration": iteration + 1,
        "history": [{"iteration": iteration + 1, "draft": response.content}]
    }
54
55
def evaluate_draft(state: RefinementState) -> dict:
    """Have the critic model review the current draft and decide approval.

    Bug fix: the prompt placeholders used square brackets, so the critic
    never saw the actual task or draft text; switched to curly braces.
    """
    messages = [
        SystemMessage(content="""You are a critical editor. Evaluate the draft and provide feedback.
If the draft is excellent, respond with "APPROVED" at the start.
Otherwise, provide specific improvement suggestions."""),
        HumanMessage(content=f"""
Task: {state['task']}

Draft to evaluate:
{state['current_draft']}
""")
    ]

    response = critic_llm.invoke(messages)
    feedback = response.content

    # Approval protocol: the critic starts its reply with the literal
    # string "APPROVED" (see the system prompt above).
    is_approved = feedback.strip().startswith("APPROVED")

    return {
        "feedback": feedback,
        "is_approved": is_approved
    }
79
80
def should_refine(state: RefinementState) -> Literal["refine", "done"]:
    """Stop once the critic approves or the refinement budget runs out."""
    budget_spent = state["iteration"] >= state["max_iterations"]
    return "done" if state["is_approved"] or budget_spent else "refine"
88
89
def build_refinement_loop():
    """Wire generate -> evaluate with a feedback edge back to generate."""
    graph = StateGraph(RefinementState)

    # Writer and critic nodes.
    graph.add_node("generate", generate_draft)
    graph.add_node("evaluate", evaluate_draft)

    graph.add_edge(START, "generate")
    graph.add_edge("generate", "evaluate")

    # "refine" closes the cycle; "done" exits on approval or budget.
    graph.add_conditional_edges(
        "evaluate",
        should_refine,
        {"refine": "generate", "done": END},
    )
    return graph.compile()
Agent Loops
The ReAct (Reasoning + Acting) pattern is a common agent loop in LangGraph:
🐍python
1from typing import TypedDict, Annotated, Literal, Union
2from langchain_core.messages import BaseMessage, HumanMessage, AIMessage, ToolMessage
3from langchain_openai import ChatOpenAI
4from langchain_core.tools import tool
5import operator
6import json
7
8
9# Define tools
@tool
def search(query: str) -> str:
    """Search for information."""
    # Stub implementation for the demo. Bug fix: the f-string used
    # [query], which is literal text; switched to {query}.
    return f"Search results for: {query}"
14
15
@tool
def calculator(expression: str) -> str:
    """Evaluate a math expression."""
    # SECURITY: eval() on model-supplied text can execute arbitrary code.
    # Restrict to a real expression parser (e.g. an ast-based evaluator)
    # before using outside a demo.
    try:
        result = eval(expression)
        return str(result)
    except Exception as e:
        # Bug fix: the error message used [e], dropping the exception
        # text; switched to {e} so it interpolates.
        return f"Error: {e}"
24
25
# Tool registry: bound to the LLM and searched by name in tool_node.
tools = [search, calculator]
27
28
class AgentState(TypedDict):
    """State for the ReAct agent loop."""
    messages: Annotated[list[BaseMessage], operator.add]  # conversation; appended, never replaced
    iteration: int          # agent-node invocations so far
    max_iterations: int     # safety cap on the reason/act cycle
34
35
def create_react_agent():
    """Build a ReAct agent: reason (LLM) -> act (tools) -> repeat.

    The agent node asks the model for the next step; if the reply carries
    tool calls, the tool node executes them and control loops back to the
    agent. The loop ends when the model answers directly or the
    iteration cap is reached.
    """

    # Attach the tool schemas so the model can emit structured tool calls.
    model = ChatOpenAI(model="gpt-4o", temperature=0)
    model_with_tools = model.bind_tools(tools)

    def agent_node(state: AgentState) -> dict:
        """One reasoning step: let the model respond or request tools."""
        reply = model_with_tools.invoke(state["messages"])
        return {
            "messages": [reply],
            "iteration": state["iteration"] + 1,
        }

    def tool_node(state: AgentState) -> dict:
        """Execute every tool call from the agent's latest message."""
        request = state["messages"][-1]

        outputs = []
        for call in request.tool_calls:
            # Look the tool up by name and run it with the given args.
            selected = next(t for t in tools if t.name == call["name"])
            outcome = selected.invoke(call["args"])
            outputs.append(
                ToolMessage(
                    content=str(outcome),
                    tool_call_id=call["id"],
                )
            )

        return {"messages": outputs}

    def should_continue(state: AgentState) -> Literal["tools", "end"]:
        """Keep looping while the model asks for tools and budget remains."""
        latest = state["messages"][-1]

        # Hard stop once the iteration budget is spent.
        if state["iteration"] >= state["max_iterations"]:
            return "end"

        # Tool calls present -> take the acting branch.
        if getattr(latest, "tool_calls", None):
            return "tools"

        return "end"

    # Assemble the cyclic graph: agent -> (tools -> agent)* -> END.
    graph = StateGraph(AgentState)
    graph.add_node("agent", agent_node)
    graph.add_node("tools", tool_node)

    graph.add_edge(START, "agent")
    graph.add_conditional_edges(
        "agent",
        should_continue,
        {"tools": "tools", "end": END},
    )
    graph.add_edge("tools", "agent")  # close the loop after acting

    return graph.compile()
109
110
111# Usage
def run_react_agent():
    """Demo driver: ask the agent a math question."""
    agent = create_react_agent()

    # Seed the conversation and give the loop a budget of 5 iterations.
    initial_state = {
        "messages": [HumanMessage(content="What is 25 * 4 + 10?")],
        "iteration": 0,
        "max_iterations": 5,
    }
    result = agent.invoke(initial_state)
    print("Final answer:", result["messages"][-1].content)
Key Takeaways
- Cyclic graphs enable iteration through conditional edges that route back to earlier nodes.
- Multiple termination conditions prevent infinite loops - use max iterations, timeouts, and quality thresholds.
- Refinement loops improve quality through generate-evaluate-refine cycles.
- ReAct is a fundamental pattern for agents that reason and act in a loop.
- Track iteration state to control loop behavior and debug issues.
Next Section Preview: We'll explore conditional routing patterns for building dynamic, branching workflows.