Lesson 11 of 22

Building Applications with Ollama

LangGraph with Ollama

3 min read

LangGraph enables building stateful, multi-step AI workflows with cycles and conditional logic. Combined with Ollama, you can create sophisticated local AI agents.

Installation

pip install langgraph langchain-ollama

LangGraph Concepts

┌─────────────────────────────────────────────────────────────────┐
│                    LangGraph Core Concepts                       │
├─────────────────────────────────────────────────────────────────┤
│                                                                 │
│  State: Shared data that persists across nodes                  │
│  Nodes: Functions that process and update state                 │
│  Edges: Connections between nodes (can be conditional)          │
│  Graph: The complete workflow definition                        │
│                                                                 │
│  Key advantage: Cycles! Nodes can loop back for iteration      │
│                                                                 │
└─────────────────────────────────────────────────────────────────┘

Simple Linear Graph

from typing import TypedDict
from langgraph.graph import StateGraph, END
from langchain_ollama import ChatOllama

# Define the state
class State(TypedDict):
    """Shared workflow state passed between graph nodes."""
    # The user's input question.
    question: str
    # The model-generated answer, written by the "think" node.
    answer: str

# Initialize the local chat model served by Ollama.
llm = ChatOllama(model="llama3.2")

# Define nodes
def think(state: State) -> State:
    """Ask the model for a concise answer to the stored question."""
    prompt = f"Answer concisely: {state['question']}"
    reply = llm.invoke(prompt)
    # Returning a partial dict updates only the "answer" key of the state.
    return {"answer": reply.content}

# Build the graph: a single "think" node wired straight to END.
graph = StateGraph(State)
graph.add_node("think", think)
graph.set_entry_point("think")
graph.add_edge("think", END)

# Compile into a runnable app, then execute one invocation.
app = graph.compile()
result = app.invoke({"question": "What is Python?"})
print(result["answer"])

Conditional Routing

from typing import TypedDict, Literal
from langgraph.graph import StateGraph, END
from langchain_ollama import ChatOllama

class State(TypedDict):
    """State for the conditional-routing workflow."""
    # Raw user query.
    query: str
    # Label produced by classify_query ("code" or "general").
    query_type: str
    # Final answer produced by whichever handler node ran.
    response: str

# Local chat model used by every node below.
llm = ChatOllama(model="llama3.2")

def classify_query(state: State) -> State:
    """Label the incoming query so the router can pick a handler."""
    prompt = (
        f"Classify this query as 'code' or 'general'. "
        f"Reply with just the word.\nQuery: {state['query']}"
    )
    reply = llm.invoke(prompt)
    # Normalize the label before it is stored for route_query.
    return {"query_type": reply.content.strip().lower()}

def handle_code_query(state: State) -> State:
    """Answer the query with a coding-expert persona."""
    reply = llm.invoke(f"As a coding expert, answer: {state['query']}")
    return {"response": reply.content}

def handle_general_query(state: State) -> State:
    """Answer the query as a general-knowledge question."""
    reply = llm.invoke(f"Answer this general question: {state['query']}")
    return {"response": reply.content}

def route_query(state: State) -> Literal["code", "general"]:
    """Choose a branch from the classifier's (lowercased) label."""
    # Substring match tolerates extra words in the model's reply.
    return "code" if "code" in state["query_type"] else "general"

# Build graph with conditional routing
graph = StateGraph(State)

graph.add_node("classify", classify_query)
graph.add_node("code", handle_code_query)
graph.add_node("general", handle_general_query)

# Every run starts at the classifier...
graph.set_entry_point("classify")
# ...then route_query's return value selects the next node via this map.
graph.add_conditional_edges(
    "classify",
    route_query,
    {"code": "code", "general": "general"}
)
# Both handlers terminate the workflow.
graph.add_edge("code", END)
graph.add_edge("general", END)

app = graph.compile()

# Test
result = app.invoke({"query": "How do I write a for loop in Python?"})
print(result["response"])

Iterative Refinement (Cycles)

from typing import TypedDict
from langgraph.graph import StateGraph, END
from langchain_ollama import ChatOllama

class State(TypedDict):
    """State for the iterative-refinement loop."""
    # What to write about.
    task: str
    # Current draft text, rewritten on each pass.
    draft: str
    # Critique of the latest draft, consumed by the next write pass.
    feedback: str
    # Number of completed write passes; should_continue stops at 3.
    iteration: int

# Local chat model shared by the writer and critic nodes.
llm = ChatOllama(model="llama3.2")

def write_draft(state: State) -> State:
    """Write a first draft, or revise the current one using the critique."""
    if state.get("draft"):
        # Revision pass: feed the previous draft and its feedback back in.
        prompt = f"""Improve this draft based on feedback:
Draft: {state['draft']}
Feedback: {state['feedback']}
Write an improved version:"""
    else:
        # First pass: no draft exists yet.
        prompt = f"Write a short paragraph about: {state['task']}"

    reply = llm.invoke(prompt)
    passes_done = state.get("iteration", 0)
    return {"draft": reply.content, "iteration": passes_done + 1}

def critique(state: State) -> State:
    """Ask the model for brief feedback on the current draft."""
    prompt = f"Critique this draft briefly. What could be better?\n\n{state['draft']}"
    reply = llm.invoke(prompt)
    return {"feedback": reply.content}

def should_continue(state: State) -> str:
    """Keep refining until three write passes have completed."""
    # Hard cap of 3 iterations guarantees the cycle terminates.
    return "end" if state["iteration"] >= 3 else "continue"

# Build graph with a cycle: write -> critique -> (write again | END)
graph = StateGraph(State)

graph.add_node("write", write_draft)
graph.add_node("critique", critique)

graph.set_entry_point("write")
graph.add_edge("write", "critique")
# should_continue loops back to "write" until 3 iterations have run.
graph.add_conditional_edges(
    "critique",
    should_continue,
    {"continue": "write", "end": END}
)

app = graph.compile()

result = app.invoke({"task": "Explain machine learning to a child"})
print(f"Final draft (after {result['iteration']} iterations):")
print(result["draft"])

ReAct Agent Pattern

from typing import TypedDict, Annotated
from langgraph.graph import StateGraph, END
from langchain_ollama import ChatOllama
import operator

class AgentState(TypedDict):
    """State for the ReAct-style reasoning loop."""
    # The user's question.
    question: str
    # Accumulated reasoning steps; the operator.add reducer makes
    # LangGraph append each node's returned list instead of replacing it.
    thoughts: Annotated[list[str], operator.add]
    # Holds decide's YES/NO verdict, then the final answer from answer().
    answer: str
    # Number of completed think steps; should_answer forces an answer at 3.
    step: int

# Local chat model driving every node in the agent loop.
llm = ChatOllama(model="llama3.2")

def think(state: AgentState) -> AgentState:
    """Add one reasoning step, building on any previous thoughts."""
    reply = llm.invoke(
        f"Question: {state['question']}\n"
        f"Previous thoughts: {state['thoughts']}\n"
        f"Think step by step. What's your next thought?"
    )
    steps_so_far = state.get("step", 0)
    # The thoughts reducer (operator.add) appends this one-element list.
    return {"thoughts": [reply.content], "step": steps_so_far + 1}

def decide(state: AgentState) -> AgentState:
    """Ask whether the accumulated thoughts suffice to answer."""
    reply = llm.invoke(
        f"Question: {state['question']}\n"
        f"Thoughts so far: {state['thoughts']}\n"
        f"Can you answer now? Reply YES or NO."
    )
    # The YES/NO verdict is stored in "answer" for should_answer to read;
    # the answer node overwrites it with the real answer later.
    return {"answer": reply.content}

def answer(state: AgentState) -> AgentState:
    """Compose the final answer from the accumulated thoughts."""
    reply = llm.invoke(
        f"Question: {state['question']}\n"
        f"Based on these thoughts: {state['thoughts']}\n"
        f"Give a clear, final answer:"
    )
    return {"answer": reply.content}

def should_answer(state: AgentState) -> str:
    """Route to the final answer after 3 think steps or a YES verdict."""
    verdict = state.get("answer", "").upper()
    # The step cap forces an answer even if the model never says YES.
    if state["step"] >= 3 or "YES" in verdict:
        return "answer"
    return "think"

graph = StateGraph(AgentState)

graph.add_node("think", think)
graph.add_node("decide", decide)
graph.add_node("answer", answer)

# Loop: think -> decide -> (think again | final answer)
graph.set_entry_point("think")
graph.add_edge("think", "decide")
graph.add_conditional_edges(
    "decide",
    should_answer,
    {"think": "think", "answer": "answer"}
)
graph.add_edge("answer", END)

app = graph.compile()

# thoughts and step must be seeded so the reducer and counter start cleanly.
result = app.invoke({
    "question": "What's the best way to learn programming?",
    "thoughts": [],
    "step": 0
})
print(f"Thought process: {len(result['thoughts'])} steps")
print(f"Answer: {result['answer']}")

Visualizing the Graph

# Generate graph visualization (requires graphviz)
from IPython.display import Image, display

# If running inside Jupyter, render the Mermaid diagram inline.
display(Image(app.get_graph().draw_mermaid_png()))

# Or save the rendered PNG to a file instead.
with open("graph.png", "wb") as f:
    f.write(app.get_graph().draw_mermaid_png())

Key Patterns Summary

| Pattern     | Use Case                              |
|-------------|---------------------------------------|
| Linear      | Simple sequential processing          |
| Conditional | Route based on content/classification |
| Cyclic      | Iterative refinement, self-correction |
| ReAct       | Reasoning + action loops              |

LangGraph enables complex, stateful AI workflows entirely on local hardware. In the next module, we'll explore advanced patterns including local RAG and function calling.

Quiz

Module 3: Building Applications with Ollama

Take Quiz