Loading...
Loading...
Expert skill for building AI systems with Weft, a Rust-based programming language where LLMs, humans, APIs, and infrastructure are first-class primitives with typed connections and durable execution.
npx skill4agent add aradotso/trending-skills weft-ai-language

Skill by ara.so — Daily 2026 Skills collection.
brew install bash

git clone https://github.com/WeaveMindAI/weft.git
cd weft
cp .env.example .env
# Edit .env — add your API keys

OPENROUTER_API_KEY= # Required for LLM nodes
TAVILY_API_KEY= # Required for Web Search nodes
ELEVENLABS_API_KEY= # Required for Speech-to-Text nodes
APOLLO_API_KEY= # Required for Apollo enrichment nodes
DISCORD_BOT_TOKEN= # Required for Discord nodes

# Terminal 1 — backend (PostgreSQL, Restate, all services)
./dev.sh server
# Terminal 2 — dashboard (SvelteKit at http://localhost:5173)
./dev.sh dashboard
# Or both at once
./dev.sh all

./dev.sh server # Start backend services
./dev.sh dashboard # Start frontend
./dev.sh all # Start everything
./dev.sh extension # Build browser extension
./cleanup.sh # Stop everything, wipe Restate + DB
./cleanup.sh --no-db # Stop services, keep database
./cleanup.sh --services # Stop services only
./cleanup.sh --db-destroy # Remove PostgreSQL container entirely
cargo build # Build without running PostgreSQL (uses .sqlx snapshots)
cargo test # Test without running PostgreSQL

curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.31.0/kind-$(uname -s | tr '[:upper:]' '[:lower:]')-amd64
chmod +x ./kind && sudo mv ./kind /usr/local/bin/kind
INFRASTRUCTURE_TARGET=local ./dev.sh server

node_name = NodeType -> (output_port: OutputType) {
label: "Human-readable name"
config_key: "value"
}
node_name.input_port = other_node.output_port

# Project: Poem Generator
# Description: Writes a short poem about any topic
topic = Text {
label: "Topic"
value: "the silence between stars"
}
llm_config = LlmConfig {
label: "Config"
model: "anthropic/claude-sonnet-4.6"
systemPrompt: "Write a short, beautiful poem (4-6 lines) about the given topic."
temperature: "0.8"
}
poet = LlmInference -> (response: String) {
label: "Poet"
}
poet.prompt = topic.value
poet.config = llm_config.config
output = Debug { label: "Poem" }
output.data = poet.response

| Node | Purpose |
|---|---|
| `LlmConfig` | Configure model, system prompt, temperature |
| `LlmInference` | Call an LLM, returns `response: String` |
| Node | Purpose |
|---|---|
| `Text` | Static or dynamic text value |
| `Number` | Numeric value |
| `Dict` | Key-value map |
| `List` | Ordered list |
| `Pack` / `Unpack` | Bundle/unbundle multiple values |
| Node | Purpose |
|---|---|
| `Gate` | Conditional branching |
| `HumanQuery` | Pause execution, send form to human, resume on response |
| `HumanTrigger` | Start a program from a human action |
Other available nodes: Discord, Slack, Telegram, WhatsApp, Email, X, Postgres, Memory, Apollo, WebSearch, SpeechToText, Cron, Debug, Template, HTTP, Code.

# Project: Content Summarizer
# Description: Summarizes a webpage given a URL
url_input = Text {
label: "URL"
value: "https://example.com/article"
}
search = WebSearch -> (results: String) {
label: "Fetch Content"
}
search.query = url_input.value
summarizer_config = LlmConfig {
label: "Summarizer Config"
model: "anthropic/claude-sonnet-4.6"
systemPrompt: "Summarize the following content in 3 bullet points."
temperature: "0.3"
}
summarizer = LlmInference -> (response: String) {
label: "Summarizer"
}
summarizer.prompt = search.results
summarizer.config = summarizer_config.config
output = Debug { label: "Summary" }
output.data = summarizer.response

# Project: Content Approval Pipeline
# Description: AI drafts content, human approves before publishing
draft_config = LlmConfig {
label: "Drafter Config"
model: "openai/gpt-4o"
systemPrompt: "Write a Twitter thread about the given topic. Be engaging."
temperature: "0.7"
}
topic = Text {
label: "Topic"
value: "distributed systems"
}
drafter = LlmInference -> (response: String) {
label: "Content Drafter"
}
drafter.prompt = topic.value
drafter.config = draft_config.config
# Pauses execution indefinitely until a human responds
approval = HumanQuery -> (approved: Boolean, feedback: String) {
label: "Human Approval"
question: "Do you approve this draft for publishing?"
}
approval.content = drafter.response
gate = Gate -> (passed: String) {
label: "Approval Gate"
}
gate.condition = approval.approved
gate.value = drafter.response
publisher = Discord {
label: "Publish to Discord"
channel: "announcements"
}
publisher.message = gate.passed

# Project: Sentiment Router
# Description: Routes messages based on sentiment analysis
message = Text {
label: "Input Message"
value: "This product is absolutely terrible!"
}
sentiment_config = LlmConfig {
label: "Sentiment Config"
model: "anthropic/claude-haiku-3.5"
systemPrompt: "Classify sentiment as 'positive' or 'negative'. Respond with one word only."
temperature: "0.0"
}
classifier = LlmInference -> (response: String) {
label: "Sentiment Classifier"
}
classifier.prompt = message.value
classifier.config = sentiment_config.config
is_negative = Gate -> (passed: String) {
label: "Is Negative?"
}
is_negative.condition = classifier.response
is_negative.value = message.value
alert = Slack {
label: "Alert Team"
channel: "customer-issues"
}
alert.message = is_negative.passed

# Project: Daily Digest
# Description: Sends a daily news digest every morning
schedule = Cron {
label: "Daily Trigger"
expression: "0 8 * * *"
}
news = WebSearch -> (results: String) {
label: "Fetch News"
}
news.query = "AI and technology news today"
digest_config = LlmConfig {
label: "Digest Config"
model: "openai/gpt-4o-mini"
systemPrompt: "Summarize these news items into a concise morning digest."
temperature: "0.4"
}
digest = LlmInference -> (response: String) {
label: "Digest Writer"
}
digest.prompt = news.results
digest.config = digest_config.config
send = Email {
label: "Send Digest"
to: "team@example.com"
subject: "Your Daily AI Digest"
}
send.body = digest.response

# Project: Research Agent
# Description: Researches a topic and produces a structured report
query = Text {
label: "Research Query"
value: "latest advances in protein folding"
}
search = WebSearch -> (results: String) {
label: "Search"
}
search.query = query.value
# Enrich with professional data
enrichment = Apollo -> (data: String) {
label: "Enrichment"
}
analyst_config = LlmConfig {
label: "Analyst Config"
model: "anthropic/claude-sonnet-4.6"
systemPrompt: "You are a research analyst. Given search results, produce a structured report with: Executive Summary, Key Findings, Implications, and Further Reading."
temperature: "0.2"
}
pack_inputs = Pack -> (bundle: Dict) {
label: "Combine Sources"
}
pack_inputs.search_results = search.results
analyst = LlmInference -> (response: String) {
label: "Research Analyst"
}
analyst.prompt = pack_inputs.bundle
analyst.config = analyst_config.config
store = Postgres {
label: "Store Report"
table: "research_reports"
}
store.data = analyst.response
notify = Slack {
label: "Notify Team"
channel: "research"
}
notify.message = analyst.response

New nodes are added to the catalog/ inventory:

catalog/
└── my_category/
    └── my_node/
        ├── backend.rs
        └── frontend.ts

backend.rs:

use weft_nodes::prelude::*;
use serde::{Deserialize, Serialize};
/// Static configuration for `MyNode`, edited by the user (not wired from
/// other nodes). Serialized to/from the dashboard via serde.
#[derive(Debug, Serialize, Deserialize)]
pub struct MyNodeConfig {
    /// Human-readable node label shown in the graph editor.
    pub label: String,
    /// Example setting; prepended to the input text in `run` ("<my_setting>: <text>").
    pub my_setting: String,
}
/// Typed input ports of `MyNode`; each field corresponds to one port that
/// upstream nodes can be wired into.
#[derive(Debug, Serialize, Deserialize)]
pub struct MyNodeInputs {
    /// Text received from an upstream node's output port.
    pub text: String,
}
/// Typed output ports of `MyNode`; each field corresponds to one port that
/// downstream nodes can read from.
#[derive(Debug, Serialize, Deserialize)]
pub struct MyNodeOutputs {
    /// The formatted result produced by `run`.
    pub result: String,
}
/// Marker type on which the `Node` trait is implemented; carries no state.
pub struct MyNode;

#[async_trait]
impl Node for MyNode {
    // Associated types tie this node to its config and port structs above.
    type Config = MyNodeConfig;
    type Inputs = MyNodeInputs;
    type Outputs = MyNodeOutputs;

    /// Executes the node once: formats the wired-in text as
    /// "<my_setting>: <text>" and emits it on the `result` port.
    async fn run(
        config: Self::Config,
        inputs: Self::Inputs,
    ) -> Result<Self::Outputs, NodeError> {
        let result = format!("{}: {}", config.my_setting, inputs.text);
        Ok(MyNodeOutputs { result })
    }
}

frontend.ts:

import type { NodeDefinition } from "@/types/nodes";
/**
 * Frontend registration for `MyNode`: declares how the dashboard renders
 * the node and which ports/config fields it exposes. Port and config names
 * must match the structs in backend.rs.
 */
export const MyNode: NodeDefinition = {
  type: "MyNode", // unique node type identifier
  label: "My Node",
  icon: "sparkles", // Lucide icon name
  category: "my_category", // folder under catalog/
  inputs: [
    // Mirrors MyNodeInputs in backend.rs
    { name: "text", type: "String", required: true },
  ],
  outputs: [
    // Mirrors MyNodeOutputs in backend.rs
    { name: "result", type: "String" },
  ],
  config: [
    // Mirrors MyNodeConfig in backend.rs
    { name: "label", type: "string", default: "My Node" },
    { name: "my_setting", type: "string", default: "prefix" },
  ],
};

./dev.sh server

| Type | Description |
|---|---|
| `String` | Text value |
| `Number` | Numeric value |
| `Boolean` | True/false |
| `Dict` | Key-value map |
| `List` | Ordered collection |
| `T?` | Nullable type — null propagates through the graph |
| `A \| B` | Union type |
weft/
├── catalog/ # Node definitions — source of truth
│ ├── ai/ # LlmConfig, LlmInference
│ ├── code/ # Python execution
│ ├── communication/ # Discord, Slack, Telegram, WhatsApp, Email, X
│ ├── data/ # Text, Number, Dict, List, Pack, Unpack
│ ├── enrichment/ # Apollo, WebSearch, SpeechToText
│ ├── flow/ # Gate, HumanQuery, HumanTrigger
│ ├── storage/ # Postgres, Memory
│ └── triggers/ # Cron, webhooks, polling
├── crates/
│ ├── weft-core/ # Type system, compiler, executor, Restate objects
│ ├── weft-nodes/ # Node trait, registry, sandbox, node runner
│ ├── weft-api/ # REST API (triggers, files, infra, usage)
│ └── weft-orchestrator/ # Restate services + Axum project executor
├── dashboard/ # Web UI (SvelteKit + Svelte 5)
├── extension/ # Browser extension for human-in-the-loop (WXT)
└── scripts/
└── catalog-link.sh # Symlinks catalog into crates + dashboard

<!-- NOTE(review): the FAQ/troubleshooting section below was collapsed into
     two lines during extraction; fragments separated here, original
     question text may be missing. -->

./dev.sh server
docker ps
./cleanup.sh
.env
./cleanup.sh --services && ./dev.sh server
Pack, Dict, Dict, T?, Gate
./cleanup.sh # Wipes Restate journal and DB, full reset
cargo build, .sqlx
cargo build # Works without a running database
cargo test # Works without a running database
backend.rs, frontend.ts, catalog/<category>/<node>/, ./scripts/catalog-link.sh
DESIGN.md, ROADMAP.md, CONTRIBUTING.md