Guardrails AI Framework
Guardrails AI Setup and Architecture
3 min read
Guardrails AI is an open-source framework focused on structured output validation and automatic correction for LLM applications. It excels at ensuring LLM responses conform to specific schemas and business rules.
Installation
pip install guardrails-ai
# Install specific validators from Guardrails Hub
guardrails hub install hub://guardrails/regex_match
guardrails hub install hub://guardrails/valid_length
guardrails hub install hub://guardrails/toxic_language
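The hub commands may prompt for credentials on first use; if so, authenticate with a free Guardrails Hub API token first:
guardrails configure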
Core Architecture
┌─────────────────────────────────────────────────────────┐
│                      Guardrails AI                      │
├─────────────────────────────────────────────────────────┤
│  ┌─────────────┐    ┌─────────────┐    ┌─────────────┐  │
│  │    Guard    │───▶│ Validators  │───▶│   Output    │  │
│  │  (Schema)   │    │   (Chain)   │    │   (Typed)   │  │
│  └─────────────┘    └─────────────┘    └─────────────┘  │
├─────────────────────────────────────────────────────────┤
│              ┌───────────────────────────┐              │
│              │    Reask / Correction     │              │
│              │     (Automatic Retry)     │              │
│              └───────────────────────────┘              │
├─────────────────────────────────────────────────────────┤
│  ┌─────────────┐    ┌─────────────┐    ┌─────────────┐  │
│  │   LiteLLM   │    │   OpenAI    │    │  Anthropic  │  │
│  │   Backend   │    │   Backend   │    │   Backend   │  │
│  └─────────────┘    └─────────────┘    └─────────────┘  │
└─────────────────────────────────────────────────────────┘
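The backend row reflects that a guard is model-agnostic: the model string passed at call time selects the provider, with Guardrails routing calls through LiteLLM under the hood. A minimal sketch, assuming API keys for both providers are set in the environment:
from guardrails import Guard
from guardrails.hub import ValidLength

# Schemaless guard: validates the plain-text output of any backend
guard = Guard().use(ValidLength(min=1, max=200))

# Same guard, different backends; the model identifier picks the provider
openai_result = guard(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
anthropic_result = guard(
    model="claude-3-5-sonnet-20240620",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)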
Basic Usage
from guardrails import Guard
from guardrails.hub import ValidLength, ToxicLanguage
from pydantic import BaseModel, Field
from typing import List
# Define output schema
class ProductReview(BaseModel):
    summary: str = Field(
        description="Brief summary of the review",
        json_schema_extra={"validators": [ValidLength(min=10, max=200)]}
    )
    sentiment: str = Field(
        description="Overall sentiment: positive, negative, or neutral"
    )
    key_points: List[str] = Field(
        description="Main points from the review",
        json_schema_extra={"validators": [ValidLength(min=1, max=5, on_fail="fix")]}
    )
# Create guard
guard = Guard.for_pydantic(ProductReview)
# Generate with validation
result = guard(
    model="gpt-4o",
    messages=[{
        "role": "user",
        "content": "Analyze this review: 'Great product, fast shipping, excellent quality. Would buy again!'"
    }]
)
# Access validated output
print(result.validated_output)
# ProductReview(summary="...", sentiment="positive", key_points=["..."])
Guard Configuration Options
from guardrails import Guard, OnFailAction
guard = Guard.for_pydantic(ProductReview)
# Configure behavior on validation failure
guard = guard.use_many(
    ValidLength(min=10, max=200, on_fail=OnFailAction.FIX),
    ToxicLanguage(on_fail=OnFailAction.EXCEPTION)
)
# Configure reask behavior
result = guard(
    model="gpt-4o",
    messages=[...],
    num_reasks=3,  # Retry up to 3 times on validation failure
    reask_prompt="The output was invalid. Please fix: {error_message}"
)
OnFail Actions
| Action | Behavior |
|---|---|
| `EXCEPTION` | Raise `ValidationError` |
| `FIX` | Attempt automatic correction |
| `REASK` | Ask the LLM to regenerate |
| `FILTER` | Remove the invalid field |
| `REFRAIN` | Return `None` for the field |
| `NOOP` | Log a warning and continue |
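To see how the actions differ in practice, here is a minimal sketch contrasting FIX and EXCEPTION on a schemaless guard; guard.validate runs the validator chain against a plain string:
from guardrails import Guard, OnFailAction
from guardrails.hub import ValidLength
from guardrails.errors import ValidationError

# FIX: the validator repairs the value where it knows how
# (here, by truncating the over-long value)
fix_guard = Guard().use(ValidLength(min=5, max=20, on_fail=OnFailAction.FIX))
print(fix_guard.validate("This string is well past twenty characters").validated_output)

# EXCEPTION: the same failure is raised instead of repaired
strict_guard = Guard().use(ValidLength(min=5, max=20, on_fail=OnFailAction.EXCEPTION))
try:
    strict_guard.validate("This string is well past twenty characters")
except ValidationError as e:
    print(f"Validation failed: {e}")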
Guard with Multiple Validators
from guardrails import Guard
from guardrails.hub import (
    ValidLength,
    RegexMatch,
    ToxicLanguage,
    ValidChoices
)
from pydantic import BaseModel, Field
class CustomerResponse(BaseModel):
    greeting: str = Field(
        json_schema_extra={
            "validators": [
                ValidLength(min=5, max=50),
                ToxicLanguage(threshold=0.5, on_fail="exception")
            ]
        }
    )
    response_type: str = Field(
        json_schema_extra={
            "validators": [
                ValidChoices(choices=["answer", "clarification", "escalation"])
            ]
        }
    )
    ticket_id: str = Field(
        json_schema_extra={
            "validators": [
                RegexMatch(regex=r"^TKT-\d{6}$", on_fail="fix")
            ]
        }
    )
guard = Guard.for_pydantic(CustomerResponse)
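A hedged usage sketch for this guard (the prompt and ticket id are illustrative):
result = guard(
    model="gpt-4o",
    messages=[{
        "role": "user",
        "content": "Draft a greeting and response type for ticket TKT-001234."
    }]
)
print(result.validated_output)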
Streaming Support
from guardrails import Guard
guard = Guard.for_pydantic(ProductReview)
# Streaming generation with on-the-fly validation
for chunk in guard(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Analyze this review..."}],
    stream=True,
):
    if chunk.validated_output:
        print(chunk.validated_output)
Integration with Existing LLM Calls
from guardrails import Guard
from openai import OpenAI
client = OpenAI()
# Validate the output of an existing client call with guard.parse
guard = Guard.for_pydantic(ProductReview)

def analyze_review(review_text: str):
    """Call the LLM directly, then validate the raw output."""
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "user", "content": f"Analyze: {review_text}"}
        ]
    )
    # guard.parse validates (and fixes, per on_fail) the raw LLM output
    return guard.parse(response.choices[0].message.content)
# Call with automatic validation
result = analyze_review("Great product!")
Validation History
result = guard(
    model="gpt-4o",
    messages=[...],
    num_reasks=3
)
# Access validation history: one Call per guard execution,
# one Iteration per attempt (the initial call plus any reasks)
for index, iteration in enumerate(guard.history.last.iterations):
    print(f"Attempt: {index + 1}")
    print(f"Raw output: {iteration.raw_output}")
    print(f"Validation passed: {iteration.validated_output is not None}")
    if iteration.error:
        print(f"Error: {iteration.error}")
Error Handling
from guardrails import Guard
from guardrails.errors import ValidationError
guard = Guard.for_pydantic(ProductReview)
try:
    result = guard(
        model="gpt-4o",
        messages=[...],
        num_reasks=3
    )
    if result.validated_output:
        process_review(result.validated_output)
    else:
        # All reasks failed; fall back to the raw LLM text
        handle_validation_failure(result.raw_llm_output)
except ValidationError as e:
    # A validator raised (on_fail="exception")
    log_validation_error(e)
Setup Tip: Start with on_fail="fix" for most validators during development, then tighten to "exception" in production, where data quality is critical.
Next: Building complex schemas with Pydantic validators.