# Honcho Integration
Integrate Honcho memory and social cognition into existing Python or TypeScript codebases. Use when adding Honcho SDK, setting up peers, configuring sessions, or implementing the dialectic chat endpoint for AI agents.
npx skill4agent add plastic-labs/honcho honcho-integration**/*.py**/*.tsobserve_me=Falsehoncho-ai@honcho-ai/sdkuv add honcho-aibun add @honcho-ai/sdkfrom honcho import Honcho
import os
honcho = Honcho(
workspace_id="your-app-name",
api_key=os.environ["HONCHO_API_KEY"],
environment="production"
)import { Honcho } from '@honcho-ai/sdk';
// Initialize the Honcho client (TypeScript equivalent of the Python snippet above).
const honcho = new Honcho({
  workspaceId: "your-app-name",
  apiKey: process.env.HONCHO_API_KEY,   // keep the key out of source control
  environment: "production"
});

# Human users
user = honcho.peer("user-123")
# AI assistants - set observe_me=False so Honcho doesn't model the AI
assistant = honcho.peer("assistant", config={"observe_me": False})
support_bot = honcho.peer("support-bot", config={"observe_me": False})// Human users
// TypeScript peers (heading "// Human users" is on the line above).
const user = await honcho.peer("user-123");

// AI assistants - set observe_me=False
const assistant = await honcho.peer("assistant", { config: { observe_me: false } });
const supportBot = await honcho.peer("support-bot", { config: { observe_me: false } });

from honcho import SessionPeerConfig
session = honcho.session("conversation-123")
# User is observed (Honcho builds a model of them)
user_config = SessionPeerConfig(observe_me=True, observe_others=True)
# AI is NOT observed (no model built of the AI)
ai_config = SessionPeerConfig(observe_me=False, observe_others=True)
session.add_peers([
(user, user_config),
(assistant, ai_config)
])const session = await honcho.session("conversation-123");
// TypeScript equivalent: addPeers takes [peer, config] tuples.
await session.addPeers([
  [user, { observeMe: true, observeOthers: true }],
  [assistant, { observeMe: false, observeOthers: true }]
]);

session.add_messages([
user.message("I'm having trouble with my account"),
assistant.message("I'd be happy to help. What seems to be the issue?"),
user.message("I can't reset my password")
])await session.addMessages([
user.message("I'm having trouble with my account"),
assistant.message("I'd be happy to help. What seems to be the issue?"),
user.message("I can't reset my password")
]);import openai
from honcho import Honcho
honcho = Honcho(workspace_id="my-app", api_key=os.environ["HONCHO_API_KEY"])
# Define the tool for your agent
honcho_tool = {
"type": "function",
"function": {
"name": "query_user_context",
"description": "Query Honcho to retrieve relevant context about the user based on their history and preferences. Use this when you need to understand the user's background, preferences, past interactions, or goals.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "A natural language question about the user, e.g. 'What are this user's main goals?' or 'What communication style does this user prefer?'"
}
},
"required": ["query"]
}
}
}
def handle_honcho_tool_call(user_id: str, query: str) -> str:
"""Execute the Honcho chat tool call."""
peer = honcho.peer(user_id)
return peer.chat(query)
# Use in your agent loop
def run_agent(user_id: str, user_message: str):
messages = [{"role": "user", "content": user_message}]
response = openai.chat.completions.create(
model="gpt-4",
messages=messages,
tools=[honcho_tool]
)
# Handle tool calls
if response.choices[0].message.tool_calls:
for tool_call in response.choices[0].message.tool_calls:
if tool_call.function.name == "query_user_context":
import json
args = json.loads(tool_call.function.arguments)
result = handle_honcho_tool_call(user_id, args["query"])
# Continue conversation with tool result...import OpenAI from 'openai';
import { Honcho } from '@honcho-ai/sdk';

const honcho = new Honcho({
  workspaceId: "my-app",
  apiKey: process.env.HONCHO_API_KEY
});

// Tool schema exposed to the model (OpenAI function-calling format).
const honchoTool: OpenAI.ChatCompletionTool = {
  type: "function",
  function: {
    name: "query_user_context",
    description: "Query Honcho to retrieve relevant context about the user based on their history and preferences.",
    parameters: {
      type: "object",
      properties: {
        query: {
          type: "string",
          description: "A natural language question about the user"
        }
      },
      required: ["query"]
    }
  }
};

// Execute the tool call: forward the question to Honcho's dialectic endpoint.
async function handleHonchoToolCall(userId: string, query: string): Promise<string> {
  const peer = await honcho.peer(userId);
  return await peer.chat(query);
}

def get_user_context_for_prompt(user_id: str) -> dict:
"""Fetch key user attributes via targeted Honcho queries."""
peer = honcho.peer(user_id)
return {
"communication_style": peer.chat("What communication style does this user prefer? Be concise."),
"expertise_level": peer.chat("What is this user's technical expertise level? Be concise."),
"current_goals": peer.chat("What are this user's current goals or priorities? Be concise."),
"preferences": peer.chat("What key preferences should I know about this user? Be concise.")
}
def build_system_prompt(user_context: dict) -> str:
return f"""You are a helpful assistant. Here's what you know about this user:
Communication style: {user_context['communication_style']}
Expertise level: {user_context['expertise_level']}
Current goals: {user_context['current_goals']}
Key preferences: {user_context['preferences']}
Tailor your responses accordingly."""async function getUserContextForPrompt(userId: string): Promise<Record<string, string>> {
const peer = await honcho.peer(userId);
const [style, expertise, goals, preferences] = await Promise.all([
peer.chat("What communication style does this user prefer? Be concise."),
peer.chat("What is this user's technical expertise level? Be concise."),
peer.chat("What are this user's current goals or priorities? Be concise."),
peer.chat("What key preferences should I know about this user? Be concise.")
]);
return {
communicationStyle: style,
expertiseLevel: expertise,
currentGoals: goals,
preferences: preferences
};
}context()import openai
# Build LLM-ready context from a session (import openai is on the line above).
session = honcho.session("conversation-123")
user = honcho.peer("user-123")
assistant = honcho.peer("assistant", config={"observe_me": False})

# Get context formatted for your LLM
context = session.context(
    tokens=2000,           # token budget for the assembled context
    peer_target=user.id,   # Include representation of this user
    summary=True           # Include conversation summaries
)

# Convert to OpenAI format
messages = context.to_openai(assistant=assistant)
# Or Anthropic format
# messages = context.to_anthropic(assistant=assistant)

# Add the new user message
messages.append({"role": "user", "content": "What should I focus on today?"})

response = openai.chat.completions.create(
    model="gpt-4",
    messages=messages
)

# Store the exchange
session.add_messages([
    user.message("What should I focus on today?"),
    assistant.message(response.choices[0].message.content)
])

# Streaming example — NOTE(review): 'peer' is not defined in this snippet;
# presumably a honcho.peer(...) from an earlier example — verify against the docs.
response_stream = peer.chat("What do we know about this user?", stream=True)
# Consume the streamed response chunk by chunk (response_stream is created
# on the line above with stream=True).
for chunk in response_stream.iter_text():
    print(chunk, end="", flush=True)

# Page-footer inline-code tokens recovered from the extraction:
#   uv add honcho-ai · bun add @honcho-ai/sdk · HONCHO_API_KEY
#   observe_me=False · add_messages() · get_deriver_status()