LangChain with LangGraph: 5 Core Modules
Your Name
September 16, 2025
Abstract
This blog shows how to build AI workflows using LangChain and its
graph-based orchestration library LangGraph. We will work through five modules covering
LLM nodes, tool nodes, memory, conditional branching, and a full execution graph.
Contents
1 Module 1: LLM Node
2 Module 2: Tool Node
3 Module 3: Memory Node
4 Module 4: Conditional Branching
5 Module 5: Full Graph Execution
1 Module 1: LLM Node
The simplest node wraps an LLM call: it reads the user's input from the graph state and writes the model's reply back into it.
from typing import TypedDict

from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, END

# LangGraph expects a state schema with annotated fields, so we define a
# TypedDict instead of passing a plain dict.
class State(TypedDict, total=False):
    input: str
    answer: str

llm = ChatOpenAI(model="gpt-3.5-turbo")

def llm_node(state: State):
    query = state["input"]
    result = llm.invoke(query)
    # Returning a partial dict merges only the "answer" key into the state.
    return {"answer": result.content}

graph = StateGraph(State)
graph.add_node("llm", llm_node)
graph.set_entry_point("llm")
graph.add_edge("llm", END)
wf = graph.compile()
print(wf.invoke({"input": "Hello, who are you?"}))
2 Module 2: Tool Node
Nodes are plain Python functions, so they can also call external tools or APIs. Here we simulate a weather lookup.
from typing import TypedDict

from langgraph.graph import StateGraph, END

class WeatherState(TypedDict, total=False):
    city: str
    weather: str

def weather_node(state: WeatherState):
    city = state["city"]
    # Simulate a weather API call with canned data.
    return {"weather": f"{city}: 29°C, Cloudy"}

graph = StateGraph(WeatherState)
graph.add_node("weather", weather_node)
graph.set_entry_point("weather")
graph.add_edge("weather", END)
wf = graph.compile()
print(wf.invoke({"city": "Delhi"}))
3 Module 3: Memory Node
We can persist data beyond a single run. Here we simulate memory with a module-level dict that outlives the graph invocation; see the checkpointer sketch after the example for LangGraph's built-in persistence.
from typing import TypedDict

from langgraph.graph import StateGraph, END

class MemoryState(TypedDict, total=False):
    user: str
    message: str
    saved: str

# Module-level store that outlives a single graph invocation.
memory_store = {}

def memory_node(state: MemoryState):
    user = state["user"]
    memory_store[user] = state["message"]
    return {"saved": f"Stored message for {user}"}

graph = StateGraph(MemoryState)
graph.add_node("memory", memory_node)
graph.set_entry_point("memory")
graph.add_edge("memory", END)
wf = graph.compile()
print(wf.invoke({"user": "Alice", "message": "Hi there"}))
4 Module 4: Conditional Branching
LangGraph supports conditional edges, which pick the next node at runtime based on the current state.
from typing import TypedDict

from langgraph.graph import StateGraph, END

class RouteState(TypedDict, total=False):
    text: str
    route: str
    result: str

def classify_node(state: RouteState):
    text = state["text"]
    if "weather" in text.lower():
        return {"route": "weather"}
    return {"route": "llm"}

def weather(state: RouteState):
    return {"result": "It's sunny."}

def llm(state: RouteState):
    return {"result": "General response."}

graph = StateGraph(RouteState)
graph.add_node("classify", classify_node)
graph.add_node("weather", weather)
graph.add_node("llm", llm)
graph.set_entry_point("classify")
# The router reads the "route" key set by classify and picks the next node.
graph.add_conditional_edges("classify", lambda s: s["route"], {
    "weather": "weather",
    "llm": "llm",
})
graph.add_edge("weather", END)
graph.add_edge("llm", END)
wf = graph.compile()
print(wf.invoke({"text": "Tell me the weather"}))
5 Module 5: Full Graph Execution
Finally, we chain several nodes into one complete workflow: greet the user, fetch the weather, then ask the LLM for a haiku.
from typing import TypedDict

from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, END

class FlowState(TypedDict, total=False):
    msg: str
    weather: str
    poem: str

llm = ChatOpenAI(model="gpt-3.5-turbo")

def greet(state: FlowState):
    return {"msg": "Hello, let's begin."}

def get_weather(state: FlowState):
    return {"weather": "32°C and sunny."}

def ask_llm(state: FlowState):
    res = llm.invoke("Write a haiku about the sun.")
    return {"poem": res.content}

graph = StateGraph(FlowState)
graph.add_node("start", greet)
graph.add_node("weather", get_weather)
graph.add_node("haiku", ask_llm)
graph.set_entry_point("start")
graph.add_edge("start", "weather")
graph.add_edge("weather", "haiku")
graph.add_edge("haiku", END)
wf = graph.compile()
# Each node merges its partial update; the final state has msg, weather, poem.
print(wf.invoke({}))
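To watch the three nodes fire in order, you can stream the run instead of invoking it once. A sketch (payload shapes can vary by LangGraph version):
for step in wf.stream({}):
    print(step)
# One update per node, e.g.:
# {"start": {"msg": "Hello, let's begin."}}
# {"weather": {"weather": "32°C and sunny."}}
# {"haiku": {"poem": "..."}}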