إضافة ميزات مدعومة بالـ AI
تكامل AI SDK
Project Goal: Build a production-ready AI chat interface with streaming responses using the Vercel AI SDK.
Why Vercel AI SDK?
The Vercel AI SDK provides:
- Unified API across OpenAI, Anthropic, Google, and others
- Streaming-first - Built for real-time responses
- React hooks - Easy frontend integration
- Edge-ready - Works on Vercel Edge, Cloudflare Workers
- Type-safe - Full TypeScript support
Project Setup Prompt
Add AI chat functionality to my Next.js application:
## Tech Stack
- Next.js 15 with App Router
- Vercel AI SDK v4
- Anthropic Claude 4 (claude-sonnet-4-20250514)
- Tailwind CSS for styling
## Features
1. Chat interface with message history
2. Streaming responses
3. Model switching (Claude, GPT-4o)
4. System prompt configuration
5. Token usage tracking
6. Rate limiting per user
## Project Structure
/app
  /api/chat
    route.ts            # Chat API endpoint
  /chat
    page.tsx            # Chat interface
/components
  /chat
    chat-interface.tsx
    message-list.tsx
    chat-input.tsx
/lib
  /ai
    providers.ts        # AI provider configuration
    prompts.ts          # System prompts
Installation
# Install AI SDK and providers
npm install ai @ai-sdk/anthropic @ai-sdk/openai
# Install UI dependencies
npm install lucide-react
Provider Configuration
// lib/ai/providers.ts
import { anthropic } from '@ai-sdk/anthropic';
import { openai } from '@ai-sdk/openai';
// Provider families supported by this app; each maps to an @ai-sdk package.
export type ModelProvider = 'anthropic' | 'openai';
// Every model id the UI can offer; must stay in sync with the keys of `models`.
export type ModelId =
| 'claude-sonnet-4-20250514'
| 'claude-3-5-haiku-20241022'
| 'gpt-4o'
| 'gpt-4o-mini';
// Static metadata describing one selectable model.
interface ModelConfig {
// Which provider SDK serves this model.
provider: ModelProvider;
// Provider-side model identifier (same string as the registry key).
model: ModelId;
// Human-readable label shown in the model selector dropdown.
displayName: string;
// Maximum output tokens to request for this model.
maxTokens: number;
// Per-1K-token cost metadata (presumably USD — confirm units); used for
// local usage/cost tracking only, never sent to the provider.
costPer1kInput: number;
costPer1kOutput: number;
}
// Registry of every selectable model, keyed by ModelId. `maxTokens` is the
// per-response output cap; the cost fields are per-1K-token metadata
// (presumably USD — confirm against current provider pricing) used only for
// client-side usage tracking.
export const models: Record<ModelId, ModelConfig> = {
'claude-sonnet-4-20250514': {
provider: 'anthropic',
model: 'claude-sonnet-4-20250514',
displayName: 'Claude Sonnet 4',
maxTokens: 8192,
costPer1kInput: 0.003,
costPer1kOutput: 0.015,
},
'claude-3-5-haiku-20241022': {
provider: 'anthropic',
model: 'claude-3-5-haiku-20241022',
displayName: 'Claude 3.5 Haiku',
maxTokens: 8192,
costPer1kInput: 0.0008,
costPer1kOutput: 0.004,
},
'gpt-4o': {
provider: 'openai',
model: 'gpt-4o',
displayName: 'GPT-4o',
maxTokens: 16384,
costPer1kInput: 0.005,
costPer1kOutput: 0.015,
},
'gpt-4o-mini': {
provider: 'openai',
model: 'gpt-4o-mini',
displayName: 'GPT-4o Mini',
maxTokens: 16384,
costPer1kInput: 0.00015,
costPer1kOutput: 0.0006,
},
};
export function getModel(modelId: ModelId) {
const config = models[modelId];
switch (config.provider) {
case 'anthropic':
return anthropic(config.model);
case 'openai':
return openai(config.model);
default:
throw new Error(`Unknown provider: ${config.provider}`);
}
}
System Prompts
// lib/ai/prompts.ts
// Named system prompts selectable from the UI. The keys double as the
// `promptType` values accepted by the chat API route.
export const systemPrompts = {
// General-purpose assistant.
default: `You are a helpful AI assistant. Be concise and direct in your responses.`,
// Software-development persona.
coder: `You are an expert software developer. When answering questions:
- Provide working code examples
- Explain your reasoning
- Consider edge cases
- Follow best practices for the language/framework
- Use modern syntax and patterns`,
// Writing/editing persona.
writer: `You are a professional writer and editor. Help users:
- Improve their writing clarity
- Fix grammar and punctuation
- Enhance tone and style
- Structure content effectively`,
// Data-analysis persona.
analyst: `You are a data analyst. Help users:
- Analyze data and trends
- Create visualizations suggestions
- Explain statistical concepts
- Provide actionable insights`,
};
// Union of the prompt keys above ('default' | 'coder' | 'writer' | 'analyst').
export type PromptType = keyof typeof systemPrompts;
Chat API Route
// app/api/chat/route.ts
import { streamText, convertToCoreMessages } from 'ai';
import { getModel, type ModelId } from '@/lib/ai/providers';
import { systemPrompts, type PromptType } from '@/lib/ai/prompts';
export const maxDuration = 30; // Allow streaming for up to 30 seconds

/**
 * POST /api/chat — streams a chat completion back to the client.
 *
 * Expected JSON body:
 *   messages   - chat history (required, must be an array)
 *   modelId    - optional model id, defaults to claude-sonnet-4-20250514
 *   promptType - optional system-prompt key, defaults to 'default'
 *
 * Returns a data-stream Response (consumed by `useChat`), 400 on invalid
 * input, or 500 on unexpected failures.
 */
export async function POST(request: Request) {
  try {
    const {
      messages,
      modelId = 'claude-sonnet-4-20250514',
      promptType = 'default',
    } = await request.json();

    // Validate inputs
    if (!messages || !Array.isArray(messages)) {
      return new Response('Invalid messages', { status: 400 });
    }

    const model = getModel(modelId as ModelId);
    // Fall back to the default prompt rather than passing
    // `system: undefined` when the client sends an unknown promptType.
    const systemPrompt =
      systemPrompts[promptType as PromptType] ?? systemPrompts.default;

    const result = streamText({
      model,
      system: systemPrompt,
      messages: convertToCoreMessages(messages),
      maxTokens: 4096,
      temperature: 0.7,
      // Tool calling can be added here via the `tools` option (zod-described
      // parameters + an `execute` callback — see the AI SDK streamText docs).
    });

    return result.toDataStreamResponse();
  } catch (error) {
    // getModel throws on unknown model ids; those surface here as a 500.
    console.error('Chat API error:', error);
    return new Response('Internal Server Error', { status: 500 });
  }
}
Chat Interface Component
// components/chat/chat-interface.tsx
'use client';
import { useChat } from 'ai/react';
import { useState } from 'react';
import { MessageList } from './message-list';
import { ChatInput } from './chat-input';
import { models, type ModelId } from '@/lib/ai/providers';
import { type PromptType } from '@/lib/ai/prompts';
/**
 * Top-level chat UI: wires the `useChat` hook to the model/prompt selectors,
 * the scrolling message list, and the input box. Client component.
 */
export function ChatInterface() {
// Selected model and system-prompt persona; sent with every API request.
const [modelId, setModelId] = useState<ModelId>('claude-sonnet-4-20250514');
const [promptType, setPromptType] = useState<PromptType>('default');
// useChat manages message state and streams responses from the API route.
const {
messages,
input,
setInput,
handleSubmit,
isLoading,
stop,
reload,
error,
} = useChat({
api: '/api/chat',
// Extra fields merged into each request body alongside `messages`.
body: {
modelId,
promptType,
},
onError: (error) => {
console.error('Chat error:', error);
},
});
return (
<div className="flex h-screen flex-col">
{/* Header */}
<header className="border-b bg-background px-4 py-3">
<div className="flex items-center justify-between">
<h1 className="text-lg font-semibold">AI Chat</h1>
<div className="flex items-center gap-4">
{/* Model Selector */}
<select
value={modelId}
onChange={(e) => setModelId(e.target.value as ModelId)}
className="rounded-md border bg-background px-3 py-1.5 text-sm"
>
{Object.entries(models).map(([id, config]) => (
<option key={id} value={id}>
{config.displayName}
</option>
))}
</select>
{/* Prompt Type Selector */}
<select
value={promptType}
onChange={(e) => setPromptType(e.target.value as PromptType)}
className="rounded-md border bg-background px-3 py-1.5 text-sm"
>
<option value="default">General</option>
<option value="coder">Coder</option>
<option value="writer">Writer</option>
<option value="analyst">Analyst</option>
</select>
</div>
</div>
</header>
{/* Messages */}
<MessageList
messages={messages}
isLoading={isLoading}
error={error}
onRetry={reload}
/>
{/* Input */}
<ChatInput
input={input}
setInput={setInput}
handleSubmit={handleSubmit}
isLoading={isLoading}
onStop={stop}
/>
</div>
);
}
Message List Component
// components/chat/message-list.tsx
'use client';
import { type Message } from 'ai';
import { useRef, useEffect } from 'react';
import { User, Bot, AlertCircle, RefreshCw } from 'lucide-react';
import { cn } from '@/lib/utils';
interface MessageListProps {
// Conversation history from useChat (user + assistant turns).
messages: Message[];
// True while a response is streaming; shows the typing indicator.
isLoading: boolean;
// Last request error, if any; renders an inline retry banner.
error: Error | undefined;
// Re-sends the last message (useChat's `reload`).
onRetry: () => void;
}
/**
 * Scrollable conversation view: empty state, message bubbles with role
 * avatars, a bouncing-dots loading indicator, and an error banner with retry.
 */
export function MessageList({
messages,
isLoading,
error,
onRetry,
}: MessageListProps) {
const scrollRef = useRef<HTMLDivElement>(null);
// Auto-scroll to bottom on new messages
useEffect(() => {
if (scrollRef.current) {
scrollRef.current.scrollTop = scrollRef.current.scrollHeight;
}
}, [messages]);
return (
<div
ref={scrollRef}
className="flex-1 overflow-y-auto px-4 py-6"
>
<div className="mx-auto max-w-3xl space-y-6">
{messages.length === 0 && (
<div className="text-center text-muted-foreground">
<p className="text-lg font-medium">Start a conversation</p>
<p className="text-sm">Send a message to begin chatting with AI</p>
</div>
)}
{messages.map((message) => (
<div
key={message.id}
className={cn(
'flex gap-3',
message.role === 'user' ? 'flex-row-reverse' : ''
)}
>
{/* Role avatar: user vs. bot icon with matching bubble colors. */}
<div
className={cn(
'flex h-8 w-8 shrink-0 items-center justify-center rounded-full',
message.role === 'user'
? 'bg-primary text-primary-foreground'
: 'bg-muted'
)}
>
{message.role === 'user' ? (
<User className="h-4 w-4" />
) : (
<Bot className="h-4 w-4" />
)}
</div>
<div
className={cn(
'rounded-lg px-4 py-2',
message.role === 'user'
? 'bg-primary text-primary-foreground'
: 'bg-muted'
)}
>
<div className="prose prose-sm dark:prose-invert max-w-none">
{message.content}
</div>
</div>
</div>
))}
{/* Streaming indicator: three staggered bouncing dots. */}
{isLoading && (
<div className="flex gap-3">
<div className="flex h-8 w-8 items-center justify-center rounded-full bg-muted">
<Bot className="h-4 w-4" />
</div>
<div className="rounded-lg bg-muted px-4 py-2">
<div className="flex items-center gap-2">
<div className="h-2 w-2 animate-bounce rounded-full bg-foreground/50" />
<div className="h-2 w-2 animate-bounce rounded-full bg-foreground/50 [animation-delay:0.2s]" />
<div className="h-2 w-2 animate-bounce rounded-full bg-foreground/50 [animation-delay:0.4s]" />
</div>
</div>
</div>
)}
{/* Error banner with a retry action wired to useChat's reload(). */}
{error && (
<div className="flex items-center gap-2 rounded-lg bg-destructive/10 px-4 py-3 text-destructive">
<AlertCircle className="h-4 w-4" />
<span className="text-sm">{error.message}</span>
<button
onClick={onRetry}
className="ml-auto flex items-center gap-1 text-sm hover:underline"
>
<RefreshCw className="h-3 w-3" />
Retry
</button>
</div>
)}
</div>
</div>
);
}
Chat Input Component
// components/chat/chat-input.tsx
'use client';
import { FormEvent, KeyboardEvent } from 'react';
import { Send, Square } from 'lucide-react';
interface ChatInputProps {
  // Current draft text (controlled by useChat).
  input: string;
  // Updates the draft text.
  setInput: (input: string) => void;
  // Submits the draft as a new message.
  handleSubmit: (e: FormEvent<HTMLFormElement>) => void;
  // True while a response streams; swaps the send button for a stop button.
  isLoading: boolean;
  // Aborts the in-flight response (useChat's `stop`).
  onStop: () => void;
}

/**
 * Message composer: a textarea with Enter-to-send (Shift+Enter for a new
 * line) and a send/stop button that toggles with streaming state.
 */
export function ChatInput({
  input,
  setInput,
  handleSubmit,
  isLoading,
  onStop,
}: ChatInputProps) {
  const handleKeyDown = (e: KeyboardEvent<HTMLTextAreaElement>) => {
    // Ignore Enter while an IME composition is in progress (e.g. CJK input);
    // otherwise confirming a candidate would submit the message prematurely.
    if (e.nativeEvent.isComposing) {
      return;
    }
    if (e.key === 'Enter' && !e.shiftKey) {
      e.preventDefault();
      const form = e.currentTarget.form;
      if (form && input.trim()) {
        form.requestSubmit();
      }
    }
  };
  return (
    <div className="border-t bg-background px-4 py-4">
      <form
        onSubmit={handleSubmit}
        className="mx-auto flex max-w-3xl items-end gap-2"
      >
        <div className="relative flex-1">
          <textarea
            value={input}
            onChange={(e) => setInput(e.target.value)}
            onKeyDown={handleKeyDown}
            placeholder="Type a message..."
            rows={1}
            className="w-full resize-none rounded-lg border bg-background px-4 py-3 pr-12 focus:outline-none focus:ring-2 focus:ring-primary"
            style={{
              minHeight: '48px',
              maxHeight: '200px',
            }}
          />
        </div>
        {isLoading ? (
          <button
            type="button"
            onClick={onStop}
            className="flex h-12 w-12 items-center justify-center rounded-lg bg-destructive text-destructive-foreground hover:bg-destructive/90"
          >
            <Square className="h-4 w-4" />
          </button>
        ) : (
          <button
            type="submit"
            disabled={!input.trim()}
            className="flex h-12 w-12 items-center justify-center rounded-lg bg-primary text-primary-foreground hover:bg-primary/90 disabled:opacity-50"
          >
            <Send className="h-4 w-4" />
          </button>
        )}
      </form>
      <p className="mt-2 text-center text-xs text-muted-foreground">
        Press Enter to send, Shift+Enter for new line
      </p>
    </div>
  );
}
Chat Page
// app/chat/page.tsx
import { ChatInterface } from '@/components/chat/chat-interface';
/** Route: /chat — renders the full-screen chat interface. */
export default function ChatPage() {
return <ChatInterface />;
}
Environment Variables
# .env.local
ANTHROPIC_API_KEY=sk-ant-...
OPENAI_API_KEY=sk-...
Key Takeaways
- Vercel AI SDK provides unified streaming across providers
- useChat hook handles message state and streaming automatically
- System prompts customize AI behavior for different use cases
- Model switching lets users choose cost/capability tradeoffs
- Error handling and stop functionality improve UX
تكامل AI SDK
هدف المشروع: بناء واجهة دردشة AI جاهزة للإنتاج مع استجابات البث باستخدام Vercel AI SDK.
لماذا Vercel AI SDK؟
Vercel AI SDK يوفر:
- API موحد عبر OpenAI وAnthropic وGoogle وغيرها
- البث أولاً - مبني للاستجابات في الوقت الفعلي
- هوكس React - تكامل سهل للواجهة الأمامية
- جاهز للـ Edge - يعمل على Vercel Edge وCloudflare Workers
- آمن الأنواع - دعم كامل لـ TypeScript
برومبت إعداد المشروع
أضف وظيفة دردشة AI لتطبيق Next.js:
## التقنيات
- Next.js 15 مع App Router
- Vercel AI SDK v4
- Anthropic Claude 4 (claude-sonnet-4-20250514)
- Tailwind CSS للتنسيق
## الميزات
1. واجهة دردشة مع سجل الرسائل
2. استجابات البث
3. تبديل النماذج (Claude، GPT-4o)
4. تكوين برومبت النظام
5. تتبع استخدام الرموز
6. تحديد المعدل لكل مستخدم
## هيكل المشروع
/app
  /api/chat
    route.ts            # نقطة نهاية API الدردشة
  /chat
    page.tsx            # واجهة الدردشة
/components
  /chat
    chat-interface.tsx
    message-list.tsx
    chat-input.tsx
/lib
  /ai
    providers.ts        # تكوين مزودي AI
    prompts.ts          # برومبتات النظام
التثبيت
# تثبيت AI SDK والمزودين
npm install ai @ai-sdk/anthropic @ai-sdk/openai
# تثبيت تبعيات الواجهة
npm install lucide-react
تكوين المزودين
// lib/ai/providers.ts
import { anthropic } from '@ai-sdk/anthropic';
import { openai } from '@ai-sdk/openai';
// Provider families supported by this app; each maps to an @ai-sdk package.
export type ModelProvider = 'anthropic' | 'openai';
// Every model id the UI can offer; must stay in sync with the keys of `models`.
export type ModelId =
| 'claude-sonnet-4-20250514'
| 'claude-3-5-haiku-20241022'
| 'gpt-4o'
| 'gpt-4o-mini';
// Static metadata describing one selectable model.
interface ModelConfig {
// Which provider SDK serves this model.
provider: ModelProvider;
// Provider-side model identifier (same string as the registry key).
model: ModelId;
// Human-readable label shown in the model selector dropdown.
displayName: string;
// Maximum output tokens to request for this model.
maxTokens: number;
// Per-1K-token cost metadata (presumably USD — confirm units); used for
// local usage/cost tracking only, never sent to the provider.
costPer1kInput: number;
costPer1kOutput: number;
}
// Registry of every selectable model, keyed by ModelId. `maxTokens` is the
// per-response output cap; the cost fields are per-1K-token metadata
// (presumably USD — confirm against current provider pricing) used only for
// client-side usage tracking.
export const models: Record<ModelId, ModelConfig> = {
'claude-sonnet-4-20250514': {
provider: 'anthropic',
model: 'claude-sonnet-4-20250514',
displayName: 'Claude Sonnet 4',
maxTokens: 8192,
costPer1kInput: 0.003,
costPer1kOutput: 0.015,
},
'claude-3-5-haiku-20241022': {
provider: 'anthropic',
model: 'claude-3-5-haiku-20241022',
displayName: 'Claude 3.5 Haiku',
maxTokens: 8192,
costPer1kInput: 0.0008,
costPer1kOutput: 0.004,
},
'gpt-4o': {
provider: 'openai',
model: 'gpt-4o',
displayName: 'GPT-4o',
maxTokens: 16384,
costPer1kInput: 0.005,
costPer1kOutput: 0.015,
},
'gpt-4o-mini': {
provider: 'openai',
model: 'gpt-4o-mini',
displayName: 'GPT-4o Mini',
maxTokens: 16384,
costPer1kInput: 0.00015,
costPer1kOutput: 0.0006,
},
};
export function getModel(modelId: ModelId) {
const config = models[modelId];
switch (config.provider) {
case 'anthropic':
return anthropic(config.model);
case 'openai':
return openai(config.model);
default:
throw new Error(`مزود غير معروف: ${config.provider}`);
}
}
برومبتات النظام
// lib/ai/prompts.ts
// Named system prompts selectable from the UI (Arabic-language personas).
// The keys double as the `promptType` values accepted by the chat API route.
export const systemPrompts = {
// General-purpose assistant.
default: `أنت مساعد AI مفيد. كن موجزاً ومباشراً في ردودك.`,
// Software-development persona.
coder: `أنت مطور برمجيات خبير. عند الإجابة على الأسئلة:
- قدم أمثلة كود عاملة
- اشرح منطقك
- ضع في الاعتبار الحالات الحدية
- اتبع أفضل الممارسات للغة/الإطار
- استخدم صيغة وأنماط حديثة`,
// Writing/editing persona.
writer: `أنت كاتب ومحرر محترف. ساعد المستخدمين في:
- تحسين وضوح كتاباتهم
- إصلاح القواعد والترقيم
- تعزيز النبرة والأسلوب
- هيكلة المحتوى بفعالية`,
// Data-analysis persona.
analyst: `أنت محلل بيانات. ساعد المستخدمين في:
- تحليل البيانات والاتجاهات
- إنشاء اقتراحات التصور
- شرح المفاهيم الإحصائية
- تقديم رؤى قابلة للتنفيذ`,
};
// Union of the prompt keys above ('default' | 'coder' | 'writer' | 'analyst').
export type PromptType = keyof typeof systemPrompts;
مسار API الدردشة
// app/api/chat/route.ts
import { streamText, convertToCoreMessages } from 'ai';
import { getModel, type ModelId } from '@/lib/ai/providers';
import { systemPrompts, type PromptType } from '@/lib/ai/prompts';
export const maxDuration = 30; // Allow streaming for up to 30 seconds

/**
 * POST /api/chat — streams a chat completion back to the client.
 *
 * Expected JSON body:
 *   messages   - chat history (required, must be an array)
 *   modelId    - optional model id, defaults to claude-sonnet-4-20250514
 *   promptType - optional system-prompt key, defaults to 'default'
 *
 * Returns a data-stream Response (consumed by `useChat`), 400 on invalid
 * input, or 500 on unexpected failures.
 */
export async function POST(request: Request) {
  try {
    const {
      messages,
      modelId = 'claude-sonnet-4-20250514',
      promptType = 'default',
    } = await request.json();

    // Validate inputs before touching any provider.
    if (!messages || !Array.isArray(messages)) {
      return new Response('رسائل غير صالحة', { status: 400 });
    }

    const model = getModel(modelId as ModelId);
    // Fall back to the default prompt rather than passing
    // `system: undefined` when the client sends an unknown promptType.
    const systemPrompt =
      systemPrompts[promptType as PromptType] ?? systemPrompts.default;

    const result = streamText({
      model,
      system: systemPrompt,
      messages: convertToCoreMessages(messages),
      maxTokens: 4096,
      temperature: 0.7,
    });

    return result.toDataStreamResponse();
  } catch (error) {
    // getModel throws on unknown model ids; those surface here as a 500.
    console.error('خطأ API الدردشة:', error);
    return new Response('خطأ داخلي في الخادم', { status: 500 });
  }
}
مكون واجهة الدردشة
// components/chat/chat-interface.tsx
'use client';
import { useChat } from 'ai/react';
import { useState } from 'react';
import { MessageList } from './message-list';
import { ChatInput } from './chat-input';
import { models, type ModelId } from '@/lib/ai/providers';
import { type PromptType } from '@/lib/ai/prompts';
/**
 * Top-level chat UI (Arabic labels): wires the `useChat` hook to the
 * model/prompt selectors, the message list, and the input box.
 */
export function ChatInterface() {
// Selected model and system-prompt persona; sent with every API request.
const [modelId, setModelId] = useState<ModelId>('claude-sonnet-4-20250514');
const [promptType, setPromptType] = useState<PromptType>('default');
// useChat manages message state and streams responses from the API route.
const {
messages,
input,
setInput,
handleSubmit,
isLoading,
stop,
reload,
error,
} = useChat({
api: '/api/chat',
// Extra fields merged into each request body alongside `messages`.
body: {
modelId,
promptType,
},
onError: (error) => {
console.error('خطأ الدردشة:', error);
},
});
return (
<div className="flex h-screen flex-col">
{/* Header */}
<header className="border-b bg-background px-4 py-3">
<div className="flex items-center justify-between">
<h1 className="text-lg font-semibold">دردشة AI</h1>
<div className="flex items-center gap-4">
{/* Model selector */}
<select
value={modelId}
onChange={(e) => setModelId(e.target.value as ModelId)}
className="rounded-md border bg-background px-3 py-1.5 text-sm"
>
{Object.entries(models).map(([id, config]) => (
<option key={id} value={id}>
{config.displayName}
</option>
))}
</select>
{/* Prompt type selector */}
<select
value={promptType}
onChange={(e) => setPromptType(e.target.value as PromptType)}
className="rounded-md border bg-background px-3 py-1.5 text-sm"
>
<option value="default">عام</option>
<option value="coder">مبرمج</option>
<option value="writer">كاتب</option>
<option value="analyst">محلل</option>
</select>
</div>
</div>
</header>
{/* Messages */}
<MessageList
messages={messages}
isLoading={isLoading}
error={error}
onRetry={reload}
/>
{/* Input */}
<ChatInput
input={input}
setInput={setInput}
handleSubmit={handleSubmit}
isLoading={isLoading}
onStop={stop}
/>
</div>
);
}
متغيرات البيئة
# .env.local
ANTHROPIC_API_KEY=sk-ant-...
OPENAI_API_KEY=sk-...
النقاط الرئيسية
- Vercel AI SDK يوفر بثاً موحداً عبر المزودين
- هوك useChat يتعامل مع حالة الرسائل والبث تلقائياً
- برومبتات النظام تخصص سلوك AI لحالات استخدام مختلفة
- تبديل النماذج يتيح للمستخدمين اختيار مقايضات التكلفة/القدرة
- معالجة الأخطاء ووظيفة الإيقاف تحسن تجربة المستخدم