# Tavily Best Practices
Build production-ready Tavily integrations with best practices baked in. Reference documentation for developers using coding assistants (Claude Code, Cursor, etc.) to implement web search, content extraction, crawling, and research in agentic workflows, RAG systems, or autonomous agents.
Install the skill:

npx skill4agent add tavily-ai/skills tavily-best-practices

Add your API key to `~/.claude/settings.json`:

{
  "env": {
    "TAVILY_API_KEY": "tvly-YOUR_API_KEY"
  }
}

Install the SDK (Python / Node.js):

pip install tavily-python
npm install @tavily/core

from tavily import TavilyClient
# Option 1: Uses TAVILY_API_KEY env var (recommended)
client = TavilyClient()
# Option 2: Explicit API key
client = TavilyClient(api_key="tvly-YOUR_API_KEY")
# Option 3: With project tracking (for usage organization)
client = TavilyClient(api_key="tvly-YOUR_API_KEY", project_id="your-project-id")
# Async client for parallel queries
from tavily import AsyncTavilyClient
async_client = AsyncTavilyClient()

Choosing a method:

| Need | Method |
|---|---|
| Web search results | `search` |
| Content from specific URLs | `extract` |
| Content from entire site | `crawl` |
| URL discovery from site | `map` |
| Need | Method |
|---|---|
| End-to-end research with AI synthesis | `research` |
response = client.search(
query="quantum computing breakthroughs", # Keep under 400 chars
max_results=10,
search_depth="advanced", # 2 credits, highest relevance
topic="general" # or "news", "finance"
)
for result in response["results"]:
print(f"{result['title']}: {result['score']}")

Key `search` parameters: `query`, `max_results`, `search_depth`, `topic`, `include_domains`, `exclude_domains`, `time_range`.

# Two-step pattern (recommended for control)
search_results = client.search(query="Python async best practices")
urls = [r["url"] for r in search_results["results"] if r["score"] > 0.5]
extracted = client.extract(
urls=urls[:20],
query="async patterns", # Reranks chunks by relevance
chunks_per_source=3 # Prevents context explosion
)

Key `extract` parameters: `urls`, `extract_depth`, `query`, `chunks_per_source`.

response = client.crawl(
url="https://docs.example.com",
max_depth=2,
instructions="Find API documentation pages", # Semantic focus
chunks_per_source=3, # Token optimization
select_paths=["/docs/.*", "/api/.*"]
)

Key `crawl` parameters: `url`, `max_depth`, `max_breadth`, `limit`, `instructions`, `chunks_per_source`, `select_paths`, `exclude_paths`.

response = client.map(
url="https://docs.example.com",
max_depth=2,
instructions="Find all API and guide pages"
)
api_docs = [url for url in response["results"] if "/api/" in url]

For comprehensive multi-topic research, use the asynchronous `research` endpoint:

import time
# For comprehensive multi-topic research
result = client.research(
input="Analyze competitive landscape for X in SMB market",
model="pro" # or "mini" for focused queries, "auto" when unsure
)
request_id = result["request_id"]
# Poll until completed
response = client.get_research(request_id)
while response["status"] not in ["completed", "failed"]:
time.sleep(10)
response = client.get_research(request_id)
print(response["content"])  # The research report

Key `research` parameters: `input`, `model`, `stream`, `output_schema`, `citation_format`.