Loading...
Loading...
Compare original and translation side by side
Setup: See Installation Guide. For JavaScript, use `@elevenlabs/*` packages only. For deeper SDK details, read JavaScript SDK Reference or Python SDK Reference.
设置说明: 查看安装指南。JavaScript环境下仅使用 `@elevenlabs/*` 包。如需深入了解SDK细节,请阅读JavaScript SDK参考文档或Python SDK参考文档。
@elevenlabs/react@elevenlabs/clientagents@elevenlabs/react@elevenlabs/clientagentssendResponse()send_response()sendResponse()send_response()ELEVENLABS_API_KEYngrok http 3001ws_urlwsUrl/wsELEVENLABS_SPEECH_ENGINE_IDengine.serve(...)speechEngine.attach(...)ELEVENLABS_API_KEYconversationTokenoverrides.agent.firstMessageELEVENLABS_API_KEYngrok http 3001ws_urlwsUrl/wsELEVENLABS_SPEECH_ENGINE_IDengine.serve(...)speechEngine.attach(...)ELEVENLABS_API_KEYconversationTokenoverrides.agent.firstMessageimport asyncio
import os

from dotenv import load_dotenv
from elevenlabs import AsyncElevenLabs

# Load ELEVENLABS_API_KEY / PUBLIC_WS_URL from a local .env file.
load_dotenv()

elevenlabs = AsyncElevenLabs(api_key=os.getenv("ELEVENLABS_API_KEY"))


async def main():
    # Register a speech engine that points at our publicly reachable
    # WebSocket URL (e.g. an ngrok tunnel to the local server).
    engine = await elevenlabs.speech_engine.create(
        name="My Speech Engine",
        speech_engine={"ws_url": os.environ["PUBLIC_WS_URL"]},
    )
    # Save this ID: later snippets look it up via ELEVENLABS_SPEECH_ENGINE_ID.
    print(engine.engine_id)
asyncio.run(main())import asyncio
import os

from dotenv import load_dotenv
from elevenlabs import AsyncElevenLabs

# Load ELEVENLABS_API_KEY / PUBLIC_WS_URL from a local .env file.
load_dotenv()

elevenlabs = AsyncElevenLabs(api_key=os.getenv("ELEVENLABS_API_KEY"))


async def main():
    # Register a speech engine that points at our publicly reachable
    # WebSocket URL (e.g. an ngrok tunnel to the local server).
    engine = await elevenlabs.speech_engine.create(
        name="My Speech Engine",
        speech_engine={"ws_url": os.environ["PUBLIC_WS_URL"]},
    )
    # Save this ID: later snippets look it up via ELEVENLABS_SPEECH_ENGINE_ID.
    print(engine.engine_id)
asyncio.run(main())import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";
// Load .env variables before the client is constructed.
import "dotenv/config";

const elevenlabs = new ElevenLabsClient({
  apiKey: process.env.ELEVENLABS_API_KEY,
});

// Register a speech engine that ElevenLabs reaches over our public WebSocket URL.
const engine = await elevenlabs.speechEngine.create({
  name: "My Speech Engine",
  speechEngine: { wsUrl: process.env.PUBLIC_WS_URL! },
});
console.log(engine.engineId);PUBLIC_WS_URLhttps://example.ngrok.app/wsttsasrturnspeech_engine.request_headersspeechEngine.requestHeadersprivacyimport { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";
// Load .env variables before the client is constructed.
import "dotenv/config";

const elevenlabs = new ElevenLabsClient({
  apiKey: process.env.ELEVENLABS_API_KEY,
});

// Register a speech engine that ElevenLabs reaches over our public WebSocket URL.
const engine = await elevenlabs.speechEngine.create({
  name: "My Speech Engine",
  speechEngine: { wsUrl: process.env.PUBLIC_WS_URL! },
});
console.log(engine.engineId);PUBLIC_WS_URLhttps://example.ngrok.app/wsttsasrturnspeech_engine.request_headersspeechEngine.requestHeadersprivacyimport asyncio
import os

from dotenv import load_dotenv
from elevenlabs import AsyncElevenLabs
from openai import AsyncOpenAI

# Load API keys and engine/model IDs from a local .env file.
load_dotenv()

elevenlabs = AsyncElevenLabs(api_key=os.getenv("ELEVENLABS_API_KEY"))
openai = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))


async def on_transcript(transcript, session):
    """Generate a streamed LLM reply for the current turn and send it back.

    `transcript` is the full conversation history for the turn; `session`
    accepts the streaming response via `send_response`.
    """
    stream = await openai.responses.create(
        model=os.environ["OPENAI_MODEL"],
        instructions="You are a concise, conversational voice assistant.",
        # ElevenLabs labels assistant turns "agent"; the OpenAI Responses API
        # expects "assistant", so remap that one role.
        input=[
            {
                "role": "assistant" if message.role == "agent" else message.role,
                "content": message.content,
            }
            for message in transcript
        ],
        stream=True,
    )
    await session.send_response(stream)


async def main():
    # Look up the engine registered earlier and serve it over WebSocket
    # at ws://localhost:3001/ws (exposed publicly via e.g. ngrok).
    engine = await elevenlabs.speech_engine.get(os.environ["ELEVENLABS_SPEECH_ENGINE_ID"])
    await engine.serve(
        port=3001,
        path="/ws",
        debug=True,
        on_transcript=on_transcript,
    )
asyncio.run(main())import asyncio
import os

from dotenv import load_dotenv
from elevenlabs import AsyncElevenLabs
from openai import AsyncOpenAI

# Load API keys and engine/model IDs from a local .env file.
load_dotenv()

elevenlabs = AsyncElevenLabs(api_key=os.getenv("ELEVENLABS_API_KEY"))
openai = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))


async def on_transcript(transcript, session):
    """Generate a streamed LLM reply for the current turn and send it back.

    `transcript` is the full conversation history for the turn; `session`
    accepts the streaming response via `send_response`.
    """
    stream = await openai.responses.create(
        model=os.environ["OPENAI_MODEL"],
        instructions="You are a concise, conversational voice assistant.",
        # ElevenLabs labels assistant turns "agent"; the OpenAI Responses API
        # expects "assistant", so remap that one role.
        input=[
            {
                "role": "assistant" if message.role == "agent" else message.role,
                "content": message.content,
            }
            for message in transcript
        ],
        stream=True,
    )
    await session.send_response(stream)


async def main():
    # Look up the engine registered earlier and serve it over WebSocket
    # at ws://localhost:3001/ws (exposed publicly via e.g. ngrok).
    engine = await elevenlabs.speech_engine.get(os.environ["ELEVENLABS_SPEECH_ENGINE_ID"])
    await engine.serve(
        port=3001,
        path="/ws",
        debug=True,
        on_transcript=on_transcript,
    )
asyncio.run(main())import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";
import { createServer } from "node:http";
import OpenAI from "openai";
import "dotenv/config";

const elevenlabs = new ElevenLabsClient({
  apiKey: process.env.ELEVENLABS_API_KEY,
});
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

const httpServer = createServer();

// Serve the registered speech engine over WebSocket at /ws on this server.
await elevenlabs.speechEngine.attach(
  process.env.ELEVENLABS_SPEECH_ENGINE_ID!,
  httpServer,
  "/ws",
  {
    debug: true,
    // Invoked with the full transcript each turn; pass `signal` through so
    // a superseded turn aborts its in-flight OpenAI request.
    async onTranscript(transcript, signal, session) {
      const response = await openai.responses.create(
        {
          model: process.env.OPENAI_MODEL!,
          instructions: "You are a concise, conversational voice assistant.",
          // ElevenLabs labels assistant turns "agent"; OpenAI expects "assistant".
          input: transcript.map((message) => ({
            role: message.role === "agent" ? "assistant" : message.role,
            content: message.content,
          })),
          stream: true,
        },
        { signal },
      );
      session.sendResponse(response);
    },
  },
);
httpServer.listen(3001);AbortSignalonTranscriptimport { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";
import { createServer } from "node:http";
import OpenAI from "openai";
import "dotenv/config";

const elevenlabs = new ElevenLabsClient({
  apiKey: process.env.ELEVENLABS_API_KEY,
});
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

const httpServer = createServer();

// Serve the registered speech engine over WebSocket at /ws on this server.
await elevenlabs.speechEngine.attach(
  process.env.ELEVENLABS_SPEECH_ENGINE_ID!,
  httpServer,
  "/ws",
  {
    debug: true,
    // Invoked with the full transcript each turn; pass `signal` through so
    // a superseded turn aborts its in-flight OpenAI request.
    async onTranscript(transcript, signal, session) {
      const response = await openai.responses.create(
        {
          model: process.env.OPENAI_MODEL!,
          instructions: "You are a concise, conversational voice assistant.",
          // ElevenLabs labels assistant turns "agent"; OpenAI expects "assistant".
          input: transcript.map((message) => ({
            role: message.role === "agent" ? "assistant" : message.role,
            content: message.content,
          })),
          stream: true,
        },
        { signal },
      );
      session.sendResponse(response);
    },
  },
);
httpServer.listen(3001);onTranscriptAbortSignalimport express from "express";
import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";
import "dotenv/config";
const app = express();
// NOTE(review): constructed without an apiKey — presumably the client reads
// ELEVENLABS_API_KEY from the environment; confirm against SDK docs.
const elevenlabs = new ElevenLabsClient();
// Mint a short-lived WebRTC token server-side so the API key never reaches the browser.
app.get("/api/token", async (_req, res) => {
const response = await elevenlabs.conversationalAi.conversations.getWebrtcToken({
agentId: process.env.ELEVENLABS_SPEECH_ENGINE_ID!,
});
res.json({ token: response.token });
});@elevenlabs/reactimport { useConversation } from "@elevenlabs/react";
// React control that starts a voice conversation against the token endpoint above.
export function VoiceControls() {
// Lifecycle hooks for the conversation session.
const conversation = useConversation({
onConnect: () => console.log("connected"),
onDisconnect: () => console.log("disconnected"),
onError: (error) => console.error(error),
});
async function startConversation() {
// Request microphone permission up front so the session can capture audio.
await navigator.mediaDevices.getUserMedia({ audio: true });
// Fetch a short-lived WebRTC token from our backend.
const { token } = await fetch("/api/token").then((res) => res.json());
await conversation.startSession({
conversationToken: token,
overrides: {
agent: { firstMessage: "Hello! How can I help you today?" },
},
});
}
return <button onClick={startConversation}>Start conversation</button>;
}/rtc/v1v1 RTC path not foundcould not establish pc connectionlivekit-client2.16.1package.json{
"overrides": {
"livekit-client": "2.16.1"
}
}import express from "express";
import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";
import "dotenv/config";
const app = express();
// NOTE(review): constructed without an apiKey — presumably the client reads
// ELEVENLABS_API_KEY from the environment; confirm against SDK docs.
const elevenlabs = new ElevenLabsClient();
// Mint a short-lived WebRTC token server-side so the API key never reaches the browser.
app.get("/api/token", async (_req, res) => {
const response = await elevenlabs.conversationalAi.conversations.getWebrtcToken({
agentId: process.env.ELEVENLABS_SPEECH_ENGINE_ID!,
});
res.json({ token: response.token });
});@elevenlabs/reactimport { useConversation } from "@elevenlabs/react";
// React control that starts a voice conversation against the token endpoint above.
export function VoiceControls() {
// Lifecycle hooks for the conversation session.
const conversation = useConversation({
onConnect: () => console.log("connected"),
onDisconnect: () => console.log("disconnected"),
onError: (error) => console.error(error),
});
async function startConversation() {
// Request microphone permission up front so the session can capture audio.
await navigator.mediaDevices.getUserMedia({ audio: true });
// Fetch a short-lived WebRTC token from our backend.
const { token } = await fetch("/api/token").then((res) => res.json());
await conversation.startSession({
conversationToken: token,
overrides: {
agent: { firstMessage: "Hello! How can I help you today?" },
},
});
}
return <button onClick={startConversation}>Start conversation</button>;
}/rtc/v1v1 RTC path not foundcould not establish pc connectionpackage.jsonlivekit-client2.16.1{
"overrides": {
"livekit-client": "2.16.1"
}
}| Event | TypeScript callback | Python callback | Notes |
|---|---|---|---|
| | | Full conversation history for the current turn |
| | | Conversation ID becomes available |
| | | Clean disconnect |
| | | Unexpected WebSocket drop |
| | | Protocol or WebSocket error |
"user""agent""agent""assistant"| 事件 | TypeScript回调 | Python回调 | 说明 |
|---|---|---|---|
| | | 当前话轮的完整对话历史 |
| | | 对话ID可用 |
| | | 正常断开连接 |
| | | WebSocket意外断开 |
| | | 协议或WebSocket错误 |
"user""agent""agent""assistant"