Loading...
Loading...
Builds AI chat interfaces and conversational UI with streaming responses, context management, and multi-modal support. Use when creating ChatGPT-style interfaces, AI assistants, code copilots, or conversational agents. Handles streaming text, token limits, regeneration, feedback loops, tool usage visualization, and AI-specific error patterns. Provides battle-tested components from leading AI products with accessibility and performance built in.
npx skill4agent add ancoleman/ai-design-components building-ai-chat

import { useChat } from 'ai/react';
export function MinimalAIChat() {
const { messages, input, handleInputChange, handleSubmit, isLoading, stop } = useChat();
return (
<div className="chat-container">
<div className="messages">
{messages.map(m => (
<div key={m.id} className={`message ${m.role}`}>
<div className="content">{m.content}</div>
</div>
))}
{isLoading && <div className="thinking">AI is thinking...</div>}
</div>
<form onSubmit={handleSubmit} className="input-form">
<input
value={input}
onChange={handleInputChange}
placeholder="Ask anything..."
disabled={isLoading}
/>
{isLoading ? (
<button type="button" onClick={stop}>Stop</button>
) : (
<button type="submit">Send</button>
)}
</form>
</div>
);
}examples/basic-chat.tsx// User message
// User-authored bubble: plain text content plus a formatted timestamp.
<div className="message user">
<div className="content">{message.content}</div>
<time className="timestamp">{formatTime(message.timestamp)}</time>
</div>
// AI message with streaming: Streamdown tolerates incomplete markdown while
// tokens arrive; the block cursor marks a response that is still in progress.
<div className="message ai">
<Streamdown className="content">{message.content}</Streamdown>
{message.isStreaming && <span className="cursor">▊</span>}
</div>
// System message: informational notice rendered with an info icon.
<div className="message system">
<Icon type="info" />
<span>{message.content}</span>
</div>
</div>references/message-components.md<div className="input-container">
{/* Icon-only attachment trigger; aria-label supplies the accessible name. */}
<button onClick={attachFile} aria-label="Attach file">
<PaperclipIcon />
</button>
{/* Single-row textarea whose height is managed externally via textareaHeight;
    onKeyDown presumably implements Enter-to-send — confirm in handleKeyDown. */}
<textarea
value={input}
onChange={handleChange}
onKeyDown={handleKeyDown}
placeholder="Type a message..."
rows={1}
style={{ height: textareaHeight }}
/>
{/* Voice input toggle; icon-only, named via aria-label. */}
<button onClick={toggleVoice} aria-label="Voice input">
<MicIcon />
</button>
{/* Submit is disabled for whitespace-only input or while a response streams. */}
<button type="submit" disabled={!input.trim() || isLoading}>
<SendIcon />
</button>
</div><div className="response-controls">
{/* While streaming, the only affordance is stopping generation. */}
{isStreaming && (
<button onClick={stop} className="stop-btn">
Stop generating
</button>
)}
{/* After the stream completes: regenerate, continue, or edit the message. */}
{!isStreaming && (
<>
<button onClick={regenerate} aria-label="Regenerate response">
<RefreshIcon /> Regenerate
</button>
<button onClick={continueGeneration} aria-label="Continue">
Continue
</button>
<button onClick={editMessage} aria-label="Edit message">
<EditIcon /> Edit
</button>
</>
)}
</div><div className="feedback-controls">
{/* Thumbs up/down feedback; the chosen option is highlighted via the
    'selected' class so the user can see their prior rating. */}
<button
onClick={() => sendFeedback('positive')}
aria-label="Good response"
className={feedback === 'positive' ? 'selected' : ''}
>
<ThumbsUpIcon />
</button>
<button
onClick={() => sendFeedback('negative')}
aria-label="Bad response"
className={feedback === 'negative' ? 'selected' : ''}
>
<ThumbsDownIcon />
</button>
{/* Utility actions: copy the response text, or share it. */}
<button onClick={copyToClipboard} aria-label="Copy">
<CopyIcon />
</button>
<button onClick={share} aria-label="Share">
<ShareIcon />
</button>
</div>// Use Streamdown for AI streaming (handles incomplete markdown)
import { Streamdown } from '@vercel/streamdown';
// Auto-scroll management
// Scroll to the newest message on every transcript change, but only when
// shouldAutoScroll() says the user is actually following the stream — this
// avoids yanking the viewport while they read back or select text.
useEffect(() => {
if (shouldAutoScroll()) {
messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
}
}, [messages]);
// Smart auto-scroll heuristic
function shouldAutoScroll() {
const threshold = 100; // px from bottom
const isNearBottom =
container.scrollHeight - container.scrollTop - container.clientHeight < threshold;
const userNotReading = !hasUserScrolledUp && !isTextSelected;
return isNearBottom && userNotReading;
}references/streaming-ux.md// User-friendly token display
function TokenIndicator({ used, total }) {
const percentage = (used / total) * 100;
const remaining = total - used;
return (
<div className="token-indicator">
<div className="progress-bar">
<div className="progress-fill" style={{ width: `${percentage}%` }} />
</div>
<span className="token-text">
{percentage > 80
? `⚠️ About ${Math.floor(remaining / 250)} messages left`
: `${Math.floor(remaining / 250)} pages of conversation remaining`}
</span>
</div>
);
}references/context-management.md// Image upload with preview
function ImageUpload({ onUpload }) {
return (
<div
className="upload-zone"
onDrop={handleDrop}
onDragOver={preventDefault}
>
<input
type="file"
accept="image/*"
onChange={handleFileSelect}
multiple
hidden
ref={fileInputRef}
/>
{previews.map(preview => (
<img key={preview.id} src={preview.url} alt="Upload preview" />
))}
</div>
);
}references/multi-modal.md// Refusal handling
// Refusal handling: surface the model's refusal as an informational card,
// keep the reason behind a disclosure so it doesn't dominate the UI, and
// always offer an alternative prompt the user can try instead.
if (response.type === 'refusal') {
return (
<div className="error refusal">
<Icon type="info" />
<p>I cannot help with that request.</p>
<details>
<summary>Why?</summary>
<p>{response.reason}</p>
</details>
<p>Try asking: {response.suggestion}</p>
</div>
);
}
// Rate limit communication: tell the user exactly how long to wait and
// retry automatically when the countdown completes. (error.retryAfter is
// presumably seconds — confirm against the API client's error shape.)
if (error.code === 'RATE_LIMIT') {
return (
<div className="error rate-limit">
<p>Please wait {error.retryAfter} seconds</p>
<CountdownTimer seconds={error.retryAfter} onComplete={retry} />
</div>
);
}references/error-handling.mdfunction ToolUsage({ tool }) {
// Visualize a single tool invocation: the header names the tool and shows
// a spinner while status === 'running'; once status === 'complete', the
// raw result is available behind a disclosure as pretty-printed JSON.
return (
<div className="tool-usage">
<div className="tool-header">
<Icon type={tool.type} />
<span>{tool.name}</span>
{tool.status === 'running' && <Spinner />}
</div>
{tool.status === 'complete' && (
<details>
<summary>View details</summary>
<pre>{JSON.stringify(tool.result, null, 2)}</pre>
</details>
)}
</div>
);
}references/tool-usage.md# Core AI chat functionality
npm install ai @ai-sdk/react @ai-sdk/openai
# Streaming markdown rendering
npm install @vercel/streamdown
# Syntax highlighting
npm install react-syntax-highlighter
# Security for LLM outputs
npm install dompurify// Memoize message rendering
// Memoized message: re-render only when the visible content or the
// streaming flag changes (the comparator returning true SKIPS re-render).
const MemoizedMessage = memo(Message, (prev, next) =>
prev.content === next.content && prev.isStreaming === next.isStreaming
);
// Debounce streaming updates so each incoming token doesn't force a render;
// 50ms caps updates at ~20/s. Empty deps: the debounced fn is created once
// per mount (note: it closes over the initial updateMessage).
const debouncedUpdate = useMemo(
() => debounce(updateMessage, 50),
[]
);
// Virtual scrolling for long conversations (render only the visible rows)
import { VariableSizeList } from 'react-window';references/streaming-ux.mdimport DOMPurify from 'dompurify';
function SafeAIContent({ content }) {
const sanitized = DOMPurify.sanitize(content, {
ALLOWED_TAGS: ['p', 'br', 'strong', 'em', 'code', 'pre', 'blockquote', 'ul', 'ol', 'li'],
ALLOWED_ATTR: ['class']
});
return <Streamdown>{sanitized}</Streamdown>;
}// ARIA live regions for screen readers
// Transcript as an ARIA live log: aria-live="polite" announces newly
// appended messages without interrupting the screen reader mid-sentence;
// aria-relevant="additions" limits announcements to newly added nodes.
<div role="log" aria-live="polite" aria-relevant="additions">
{messages.map(msg => (
<article key={msg.id} role="article" aria-label={`${msg.role} message`}>
{msg.content}
</article>
))}
</div>
// Visually hidden (sr-only) status region that announces loading changes.
<div role="status" aria-live="polite" className="sr-only">
{isLoading ? 'AI is responding' : ''}
</div>
</div>references/accessibility.mdscripts/parse_stream.jsscripts/calculate_tokens.pyscripts/format_messages.jsreferences/streaming-patterns.mdreferences/context-management.mdreferences/multimodal-input.mdreferences/feedback-loops.mdreferences/error-handling.mdreferences/tool-usage.mdreferences/accessibility-chat.mdreferences/library-guide.mdreferences/performance-optimization.mdexamples/basic-chat.tsxexamples/streaming-chat.tsxexamples/multimodal-chat.tsxexamples/code-assistant.tsxexamples/tool-calling-chat.tsxassets/system-prompts.jsonassets/message-templates.jsonassets/error-messages.jsonassets/themes.json/* Message bubbles use design tokens */
/* User bubble: each property reads a message-specific token first and falls
   back to a global design token, so themes can override at either level. */
.message.user {
background: var(--message-user-bg, var(--color-primary));
color: var(--message-user-text, var(--color-white));
padding: var(--message-padding, var(--spacing-md));
border-radius: var(--message-border-radius, var(--radius-lg));
}
/* AI bubble: neutral surface; padding/radius presumably come from a shared
   .message rule defined elsewhere — confirm. */
.message.ai {
background: var(--message-ai-bg, var(--color-gray-100));
color: var(--message-ai-text, var(--color-text-primary));
}skills/design-tokens/