What is LangGraph?
LangGraph is a framework for building stateful, multi-step agent workflows. It models agents as graphs where:
- Nodes = Processing steps (LLM calls, tool use, logic)
- Edges = Transitions between steps
- State = Data passed between nodes
Why LangGraph? Simple agent loops break down with complex logic. LangGraph gives you explicit control over agent flow, branching, and state management.
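To make the three concepts concrete, here is a minimal one-node graph (the state fields and node name are illustrative):

from typing import TypedDict
from langgraph.graph import StateGraph, END

# State: the data passed between nodes
class GreetState(TypedDict):
    name: str
    greeting: str

# Node: a processing step that returns a partial state update
def greet(state: GreetState) -> dict:
    return {"greeting": f"Hello, {state['name']}!"}

graph = StateGraph(GreetState)
graph.add_node("greet", greet)
graph.set_entry_point("greet")  # edge: START -> greet
graph.add_edge("greet", END)    # edge: greet -> END

app = graph.compile()
print(app.invoke({"name": "LangGraph"})["greeting"])  # Hello, LangGraph!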
Core Concepts
The canonical agent loop: an agent node calls the LLM, a conditional edge checks whether the response requests tools, and tool results feed back to the agent until it can answer.
┌─────────────┐
│ START │
└──────┬──────┘
│
┌──────▼──────┐
│ Agent │◄────────────┐
└──────┬──────┘ │
│ │
┌──────▼──────┐ │
│ Should Use │ │
│ Tools? │ │
└──────┬──────┘ │
│ │
┌────────┼────────┐ │
│ Yes │ │ No │
▼ │ ▼ │
┌─────────┐ │ ┌─────────┐ │
│ Tools │────┘ │ END │ │
└────┬────┘ └─────────┘ │
│ │
└──────────────────────────────┘
Installation
pip install langgraph langchain-openai
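The examples that follow assume an OpenAI API key is available in the environment; a common setup (the placeholder key is illustrative):

import os

# ChatOpenAI picks up the key from the environment
os.environ.setdefault("OPENAI_API_KEY", "sk-...")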
Basic Agent Graph
from typing import TypedDict, Annotated, Sequence
import operator

from langchain_openai import ChatOpenAI
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_core.tools import tool
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode

# Define state schema
class AgentState(TypedDict):
    # operator.add appends new messages instead of overwriting the list
    messages: Annotated[Sequence[BaseMessage], operator.add]

# Initialize LLM
llm = ChatOpenAI(model="gpt-4o")

# Define tools
@tool
def search(query: str) -> str:
    """Search the web for information."""
    return f"Results for '{query}': [mock search results]"

@tool
def calculator(expression: str) -> str:
    """Calculate a mathematical expression."""
    # NOTE: eval is fine for a demo but unsafe on untrusted input;
    # use a proper expression parser in production.
    return str(eval(expression))

tools = [search, calculator]
llm_with_tools = llm.bind_tools(tools)

# Define nodes
def agent(state: AgentState) -> dict:
    """Main agent node - decides what to do"""
    response = llm_with_tools.invoke(state["messages"])
    return {"messages": [response]}

def should_continue(state: AgentState) -> str:
    """Determine next step based on last message"""
    last_message = state["messages"][-1]
    if last_message.tool_calls:
        return "tools"
    return END

# Build graph
workflow = StateGraph(AgentState)

# Add nodes
workflow.add_node("agent", agent)
workflow.add_node("tools", ToolNode(tools))

# Add edges
workflow.set_entry_point("agent")
workflow.add_conditional_edges(
    "agent",
    should_continue,
    {
        "tools": "tools",
        END: END,
    },
)
workflow.add_edge("tools", "agent")

# Compile
app = workflow.compile()

# Run
result = app.invoke({
    "messages": [HumanMessage(content="What is 25 * 4 + 10?")]
})
print(result["messages"][-1].content)
Multi-Step Workflow
from typing import TypedDict, Literal

from langgraph.graph import StateGraph, END

class WorkflowState(TypedDict):
    task: str
    plan: list[str]
    current_step: int
    results: list[str]
    final_output: str

def planner(state: WorkflowState) -> dict:
    """Create a plan for the task"""
    response = llm.invoke(f"""
    Create a step-by-step plan for: {state['task']}
    Return as a numbered list.
    """)
    # Parse steps
    lines = response.content.strip().split('\n')
    steps = [line.strip() for line in lines if line.strip()]
    return {"plan": steps, "current_step": 0, "results": []}

def executor(state: WorkflowState) -> dict:
    """Execute current step"""
    step = state["plan"][state["current_step"]]
    response = llm.invoke(f"""
    Execute this step: {step}
    Previous results: {state['results']}
    """)
    return {
        "results": state["results"] + [response.content],
        "current_step": state["current_step"] + 1,
    }

def should_continue_execution(state: WorkflowState) -> Literal["executor", "synthesizer"]:
    """Check if more steps remain"""
    if state["current_step"] < len(state["plan"]):
        return "executor"
    return "synthesizer"

def synthesizer(state: WorkflowState) -> dict:
    """Combine results into final output"""
    response = llm.invoke(f"""
    Task: {state['task']}
    Steps completed: {state['plan']}
    Results: {state['results']}
    Synthesize these into a final comprehensive answer.
    """)
    return {"final_output": response.content}

# Build workflow (llm is the ChatOpenAI instance from the previous example)
workflow = StateGraph(WorkflowState)
workflow.add_node("planner", planner)
workflow.add_node("executor", executor)
workflow.add_node("synthesizer", synthesizer)
workflow.set_entry_point("planner")
workflow.add_edge("planner", "executor")
# The condition function returns node names, so no path map is needed
workflow.add_conditional_edges("executor", should_continue_execution)
workflow.add_edge("synthesizer", END)
app = workflow.compile()

# Run
result = app.invoke({"task": "Research and summarize the latest AI trends"})
print(result["final_output"])
Human-in-the-Loop
from langgraph.checkpoint.memory import MemorySaver

class ApprovalState(TypedDict):
    request: str
    analysis: str
    approved: bool | None
    final_response: str

def analyze(state: ApprovalState) -> dict:
    response = llm.invoke(f"Analyze this request: {state['request']}")
    return {"analysis": response.content}

def human_approval(state: ApprovalState) -> dict:
    """Pause here for human approval"""
    # This node just passes through - approval happens externally
    return {}

def execute_approved(state: ApprovalState) -> dict:
    response = llm.invoke(f"""
    Execute this approved request: {state['request']}
    Analysis: {state['analysis']}
    """)
    return {"final_response": response.content}

def check_approval(state: ApprovalState) -> Literal["execute", "reject"]:
    if state.get("approved"):
        return "execute"
    return "reject"

def reject(state: ApprovalState) -> dict:
    return {"final_response": "Request was not approved."}

# Build with checkpointing
workflow = StateGraph(ApprovalState)
workflow.add_node("analyze", analyze)
workflow.add_node("human_approval", human_approval)
workflow.add_node("execute", execute_approved)
workflow.add_node("reject", reject)
workflow.set_entry_point("analyze")
workflow.add_edge("analyze", "human_approval")
workflow.add_conditional_edges("human_approval", check_approval)
workflow.add_edge("execute", END)
workflow.add_edge("reject", END)

# Compile with memory for persistence
memory = MemorySaver()
app = workflow.compile(checkpointer=memory, interrupt_before=["human_approval"])

# Start workflow
config = {"configurable": {"thread_id": "request-123"}}
result = app.invoke({"request": "Delete all user data"}, config)

# At this point, the run is paused before human_approval
print("Analysis:", result["analysis"])
print("Waiting for approval...")

# Later: write the decision into state, then resume by invoking with None
app.update_state(config, {"approved": True})
final_result = app.invoke(None, config)
print("Final:", final_result["final_response"])
Parallel Execution
from typing import TypedDict

from langgraph.graph import StateGraph, START, END

class ParallelState(TypedDict):
    query: str
    web_results: str
    db_results: str
    combined: str

def search_web(state: ParallelState) -> dict:
    # Simulate web search
    return {"web_results": f"Web results for: {state['query']}"}

def search_database(state: ParallelState) -> dict:
    # Simulate database search
    return {"db_results": f"Database results for: {state['query']}"}

def combine_results(state: ParallelState) -> dict:
    combined = f"""
    Web: {state['web_results']}
    Database: {state['db_results']}
    """
    return {"combined": combined}

workflow = StateGraph(ParallelState)
workflow.add_node("web_search", search_web)
workflow.add_node("db_search", search_database)
workflow.add_node("combine", combine_results)

# Fan-out: two edges from START run both searches in parallel
workflow.add_edge(START, "web_search")
workflow.add_edge(START, "db_search")

# Fan-in: combine runs once both branches have finished
workflow.add_edge("web_search", "combine")
workflow.add_edge("db_search", "combine")
workflow.add_edge("combine", END)

app = workflow.compile()
result = app.invoke({"query": "LangGraph tutorials"})
print(result["combined"])
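The two branches above write to different state keys, so their updates never collide. If parallel branches wrote to the same key, that key would need a reducer; a minimal sketch assuming both branches append to a shared list:

import operator
from typing import Annotated, TypedDict

class FanInState(TypedDict):
    query: str
    # operator.add merges concurrent writes instead of raising a conflict
    results: Annotated[list[str], operator.add]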
Subgraphs
# Define a reusable subgraph
def create_research_subgraph():
    class ResearchState(TypedDict):
        topic: str
        sources: list[str]
        summary: str

    def gather_sources(state):
        return {"sources": [f"Source about {state['topic']}"]}

    def summarize(state):
        return {"summary": f"Summary of {len(state['sources'])} sources"}

    subgraph = StateGraph(ResearchState)
    subgraph.add_node("gather", gather_sources)
    subgraph.add_node("summarize", summarize)
    subgraph.set_entry_point("gather")
    subgraph.add_edge("gather", "summarize")
    subgraph.add_edge("summarize", END)
    return subgraph.compile()

# Use in parent graph
class MainState(TypedDict):
    question: str
    research: str
    answer: str

research_graph = create_research_subgraph()

def do_research(state: MainState) -> dict:
    # The parent and subgraph have different schemas, so a wrapper
    # node translates between them
    result = research_graph.invoke({"topic": state["question"]})
    return {"research": result["summary"]}

def generate_answer(state: MainState) -> dict:
    return {"answer": f"Based on {state['research']}: [answer]"}

main_workflow = StateGraph(MainState)
main_workflow.add_node("research", do_research)
main_workflow.add_node("answer", generate_answer)
main_workflow.set_entry_point("research")
main_workflow.add_edge("research", "answer")
main_workflow.add_edge("answer", END)

app = main_workflow.compile()
result = app.invoke({"question": "How do LangGraph subgraphs work?"})
print(result["answer"])
Visualization
# Generate graph visualization
from IPython.display import Image, display
display(Image(app.get_graph().draw_mermaid_png()))
# Or as Mermaid text
print(app.get_graph().draw_mermaid())
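Outside a notebook, a text rendering works too (print_ascii needs the grandalf package installed):

# Terminal-friendly rendering
app.get_graph().print_ascii()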
Common Patterns
Agent Executor
LLM decides actions, tools execute, loop until done
Plan-Execute
Create plan first, then execute each step
Reflection
Execute, evaluate, improve, repeat (see the sketch after this list)
Multi-Agent
Multiple specialized agents in a workflow
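The agent-executor and plan-execute patterns appear in full above; the reflection pattern can be sketched as a generate/critique loop (node names, prompts, and the revision cap are illustrative; llm is the ChatOpenAI instance defined earlier):

from typing import TypedDict
from langgraph.graph import StateGraph, END

class ReflectionState(TypedDict):
    task: str
    draft: str
    critique: str
    revisions: int

def generate(state: ReflectionState) -> dict:
    prompt = f"Task: {state['task']}\nCritique of previous draft: {state.get('critique', 'none')}"
    return {
        "draft": llm.invoke(prompt).content,
        "revisions": state.get("revisions", 0) + 1,
    }

def reflect(state: ReflectionState) -> dict:
    response = llm.invoke(f"Critique this draft:\n{state['draft']}")
    return {"critique": response.content}

def should_revise(state: ReflectionState) -> str:
    # Cap revisions so the loop always terminates
    return "generate" if state["revisions"] < 3 else END

reflection = StateGraph(ReflectionState)
reflection.add_node("generate", generate)
reflection.add_node("reflect", reflect)
reflection.set_entry_point("generate")
reflection.add_edge("generate", "reflect")
reflection.add_conditional_edges("reflect", should_revise)

result = reflection.compile().invoke({"task": "Write a haiku about graphs"})
print(result["draft"])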
Best Practices
Keep State Minimal
Only store what’s needed between nodes. Large state = slower execution.
Use Checkpointing
Enable persistence for long-running workflows and human-in-the-loop.
Handle Errors in Nodes
Each node should handle its own errors gracefully; one approach is sketched after this list.
Test Individual Nodes
Unit test nodes before assembling the graph.
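One way to handle errors inside a node, as a sketch (risky_operation is a hypothetical helper standing in for real work):

from typing import TypedDict

class SafeState(TypedDict):
    query: str
    result: str
    error: str

def safe_node(state: SafeState) -> dict:
    """Record failures in state instead of crashing the whole run."""
    try:
        return {"result": risky_operation(state["query"])}  # hypothetical helper
    except Exception as exc:
        # Downstream nodes or a conditional edge can branch on "error"
        return {"error": str(exc)}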
Next Steps
MCP Protocol
Learn the Model Context Protocol for tool integration