Loading...
Loading...
Cognee is a Python AI memory engine that transforms documents into knowledge graphs with vector and graph storage for semantic search and reasoning. Use this skill when writing code that calls cognee's Python API (add, cognify, search, memify, config, datasets, prune, session) or integrating cognee-mcp. Covers the full public API, SearchType modes, DataPoint custom models, pipeline tasks, and configuration for LLM/embedding/vector/graph providers. Do NOT use for general knowledge graph theory or unrelated Python libraries.
npx skill4agent add armelhbobdad/oh-my-skills cognee

import cognee
import asyncio
async def main():
    """Minimal cognee quickstart: reset state, ingest text, build the graph, search it."""
    # Reset previously stored data and system state (metadata included)
    await cognee.prune.prune_data()
    await cognee.prune.prune_system(metadata=True)

    # 1. Ingest data
    await cognee.add("Cognee turns documents into AI memory.")  # [AST:cognee/api/v1/add/add.py:L22]

    # 2. Build knowledge graph
    await cognee.cognify()  # [AST:cognee/api/v1/cognify/cognify.py:L47]

    # 3. Search
    results = await cognee.search(  # [AST:cognee/api/v1/search/search.py:L26]
        query_text="What does Cognee do?"
    )
    for r in results:
        print(r)
asyncio.run(main())

[EXT:docs.cognee.ai/getting-started/quickstart]

Install with uv:
uv pip install -e "."                    # minimal (SQLite + LanceDB + Kuzu)
uv pip install -e ".[postgres,neo4j]"    # with PostgreSQL + Neo4j

Available extras: postgres, postgres-binary, neo4j, neptune, chromadb, qdrant, redis, ollama, anthropic, gemini, mistral, groq, huggingface, llama-cpp, aws, langchain, llama-index, graphiti, baml, dlt, docling, codegraph, scraping, docs, monitoring, distributed, dev, debug

LLM_API_KEY="your_openai_api_key"
LLM_MODEL="openai/gpt-4o-mini"

Use a `.venv`; storage roots are overridden with DATA_ROOT_DIRECTORY and SYSTEM_ROOT_DIRECTORY.

Core flow: await cognee.add(data) → await cognee.cognify() → await cognee.search(query_text)

Ingest multiple sources into a named dataset:
await cognee.add(["/path/to/file.pdf", "raw text", open("doc.txt","rb")], dataset_name="my_data")

Ontology support [EXT:docs.cognee.ai/guides/ontology-support]:
config = {"ontology_config": {"ontology_resolver": RDFLibOntologyResolver(ontology_file=path)}}
await cognee.cognify(config=config)

Sessions [EXT:docs.cognee.ai/guides/sessions]:
await cognee.search(query_text="Q1", session_id="conv_1")
await cognee.search(query_text="Follow-up", session_id="conv_1")

Custom data models [EXT:docs.cognee.ai/guides/custom-data-models]:
class MyEntity(DataPoint): name: str; metadata = {"index_fields": ["name"]}
await add_data_points([entity])

| Function | Purpose | Key Params |
|---|---|---|
| `add` | Ingest text, files, binary data | |
| `cognify` | Build knowledge graph from ingested data | |
| `search` | Query knowledge graph | |
| `memify` | Enrich existing graph with custom tasks | |
| `config` | Runtime configuration (LLM, DB, vectors) | static methods |
| `datasets` | List, inspect, delete datasets | static methods |
| `prune` | Clean up data and system resources | |
| `update` | Update existing data items | |
| `session` | Session history and feedback | |
| | Execute custom task pipelines | |
| `SearchType` | Enum of 14 search modes | |
| `visualize_graph` | Render knowledge graph to HTML | |
| `enable_tracing` | Enable OpenTelemetry tracing | |
| | Run Alembic database migrations | — |
| `start_ui` | Launch local Cognee UI (frontend + backend + MCP servers) | |
| `visualize_graph` | Render knowledge graph to interactive HTML | |
| `pipelines` | Module re-export: Task, run_tasks, run_tasks_parallel, run_pipeline | — |
.env# Azure OpenAI
LLM_PROVIDER="azure"
LLM_MODEL="azure/gpt-4o-mini"
LLM_ENDPOINT="https://YOUR-RESOURCE.openai.azure.com/openai/deployments/gpt-4o-mini"
LLM_API_KEY="your_key"
LLM_API_VERSION="2024-12-01-preview"
# Anthropic (requires: pip install cognee[anthropic])
LLM_PROVIDER="anthropic"
LLM_MODEL="claude-3-5-sonnet-20241022"
LLM_API_KEY="your_key"
# Ollama (requires: pip install cognee[ollama])
LLM_PROVIDER="ollama"
LLM_MODEL="llama3.1:8b"
LLM_ENDPOINT="http://localhost:11434/v1"
LLM_API_KEY="ollama"
EMBEDDING_PROVIDER="ollama"
EMBEDDING_MODEL="nomic-embed-text:latest"
EMBEDDING_ENDPOINT="http://localhost:11434/api/embed"
HUGGINGFACE_TOKENIZER="nomic-ai/nomic-embed-text-v1.5"
# AWS Bedrock (requires: pip install cognee[aws])
LLM_PROVIDER="bedrock"
LLM_MODEL="anthropic.claude-3-sonnet-20240229-v1:0"
AWS_REGION="us-east-1"
# Custom / OpenRouter / vLLM
LLM_PROVIDER="custom"
LLM_MODEL="openrouter/google/gemini-2.0-flash-lite-preview-02-05:free"
LLM_ENDPOINT="https://openrouter.ai/api/v1"

# Rate limiting
LLM_RATE_LIMIT_ENABLED=true
LLM_RATE_LIMIT_REQUESTS=60
LLM_RATE_LIMIT_INTERVAL=60

# Structured output
STRUCTURED_OUTPUT_FRAMEWORK="instructor"   # or "baml" (requires cognee[baml])
LLM_INSTRUCTOR_MODE="json_schema_mode"

# PostgreSQL (requires: pip install cognee[postgres])
DB_PROVIDER=postgres
DB_HOST=localhost DB_PORT=5432 DB_USERNAME=cognee DB_PASSWORD=cognee DB_NAME=cognee_db
# PGVector (requires: pip install cognee[postgres])
VECTOR_DB_PROVIDER=pgvector
VECTOR_DB_URL=postgresql://cognee:cognee@localhost:5432/cognee_db
# Neo4j (requires: pip install cognee[neo4j])
GRAPH_DATABASE_PROVIDER=neo4j
GRAPH_DATABASE_URL=bolt://localhost:7687
GRAPH_DATABASE_USERNAME=neo4j GRAPH_DATABASE_PASSWORD=yourpassword
# S3 storage (requires: pip install cognee[aws])
STORAGE_BACKEND="s3"
STORAGE_BUCKET_NAME="your-bucket"
DATA_ROOT_DIRECTORY="s3://your-bucket/cognee/data"

ACCEPT_LOCAL_FILE_PATH=True # Allow local file paths in add()
ALLOW_HTTP_REQUESTS=True # Allow HTTP fetches
ALLOW_CYPHER_QUERY=True # Allow raw Cypher in SearchType.CYPHER
REQUIRE_AUTHENTICATION=False # Enable API auth
ENABLE_BACKEND_ACCESS_CONTROL=True # Multi-tenant dataset isolation

from cognee.modules.pipelines.tasks.Task import Task
async def my_task(data):
    return process(data)

task = Task(my_task)

from cognee.infrastructure.databases.graph import get_graph_engine
from cognee.infrastructure.databases.vector import get_vector_engine
graph_engine = await get_graph_engine()
vector_engine = await get_vector_engine()

from cognee.infrastructure.llm.get_llm_client import get_llm_client
llm_client = get_llm_client()
response = await llm_client.acreate_structured_output(
text_input="prompt", system_prompt="instructions", response_model=YourPydanticModel
)

python src/server.py                     # stdio (default)
python src/server.py --transport sse # SSE
python src/server.py --transport http --host 127.0.0.1 --port 8000 --path /mcp
# API mode (connect to running Cognee API):
python src/server.py --transport sse --api-url http://localhost:8000 --api-token TOKEN

docker run -e TRANSPORT_MODE=sse --env-file .env -p 8000:8000 cognee/cognee-mcp:main

Docker notes: set HUGGINGFACE_TOKENIZER and LLM_INSTRUCTOR_MODE="json_schema_mode"
where needed; use DB_HOST=host.docker.internal to reach a database on the host;
for debugging set LITELLM_LOG="DEBUG", ENV="development", TELEMETRY_DISABLED=1.

Data removal: delete() / datasets.delete_data() [SRC:cognee/api/v1/delete/__init__.py:L13]
Also: memify() [QMD:cognee-temporal:prs.md]; update(), visualize_graph(), start_ui() [QMD:cognee-temporal:issues.md]

SearchType modes [AST:cognee/modules/search/types/SearchType.py:L4]:
GRAPH_COMPLETION, RAG_COMPLETION, CHUNKS, SUMMARIES, TRIPLET_COMPLETION,
GRAPH_SUMMARY_COMPLETION, CYPHER, NATURAL_LANGUAGE, GRAPH_COMPLETION_COT,
GRAPH_COMPLETION_CONTEXT_EXTENSION, FEELING_LUCKY, TEMPORAL, CODING_RULES,
CHUNKS_LEXICAL

Tasks [AST:cognee/modules/pipelines/tasks/task.py]; custom data models
[EXT:docs.cognee.ai/guides/custom-data-models]; chunking strategies: PARAGRAPH,
SENTENCE, LANGCHAIN_CHARACTER [AST:cognee/shared/data_models.py:L83]; tracing:
enable_tracing() / disable_tracing(); MCP server [AST:cognee-mcp/src/server.py]

cognee --add "data"      # Ingest data
cognee --cognify # Build knowledge graph
cognee --search "query" # Search
cognee --debug # Enable debug logging
cognee --ui        # Launch local UI

[AST:cognee/cli/_cognee.py:L32]

[AST:cognee/modules/search/types/SearchType.py:L4]
class SearchType(str, Enum):
    SUMMARIES = "SUMMARIES"                      # Vector similarity on TextSummary nodes
    CHUNKS = "CHUNKS"                            # Vector similarity on DocumentChunk nodes
    RAG_COMPLETION = "RAG_COMPLETION"            # LLM-backed with chunk context
    TRIPLET_COMPLETION = "TRIPLET_COMPLETION"    # Graph triplet-based retrieval
    GRAPH_COMPLETION = "GRAPH_COMPLETION"        # Default — LLM + graph traversal
    GRAPH_SUMMARY_COMPLETION = "GRAPH_SUMMARY_COMPLETION"
    CYPHER = "CYPHER"                            # Raw Cypher query
    NATURAL_LANGUAGE = "NATURAL_LANGUAGE"        # NL → Cypher translation
    GRAPH_COMPLETION_COT = "GRAPH_COMPLETION_COT"  # Chain-of-thought graph
    GRAPH_COMPLETION_CONTEXT_EXTENSION = "GRAPH_COMPLETION_CONTEXT_EXTENSION"
    FEELING_LUCKY = "FEELING_LUCKY"              # Single best result
    TEMPORAL = "TEMPORAL"                        # Time-aware search
    CODING_RULES = "CODING_RULES"                # Code rule retrieval
    CHUNKS_LEXICAL = "CHUNKS_LEXICAL"            # BM25 keyword search on chunks

Custom DataPoint models [EXT:docs.cognee.ai/guides/custom-data-models]: declare
metadata = {"index_fields": ["field"]} on the model and connect nodes with
Edge(weight, relationship_type). Built-in DataPoint types: DocumentChunk,
TextSummary, CodeSummary, DatabaseSchema, SchemaTable, TranslatedContent,
GraphitiNode, WebPage.

Task API [AST:cognee/modules/pipelines/tasks/task.py]:
class Task:
    def __init__(self, executable, *args, task_config=None, **kwargs)

Batching via task_config={"batch_size": N}; summaries via @task_summary("Processed {n} items").

Pipelines module re-exports [SRC:cognee/modules/pipelines/__init__.py:L1]:
Task, run_tasks, run_tasks_parallel, run_pipeline (plus BaseModel / BaseSettings).

MCP server tools [AST:cognee-mcp/src/server.py] (FastMCP("Cognee")):
cognify(data, graph_model_file, graph_model_name, custom_prompt)
search(search_query, search_type, top_k)
save_interaction(...)
list_data(dataset_id)
delete(data_id, dataset_id, mode)
prune()
cognify_status()

Configuration [EXT:docs.cognee.ai/setup-configuration/overview]: via `.env` or
cognee.config.* — key variables: LLM_API_KEY, LLM_MODEL, LLM_PROVIDER,
EMBEDDING_PROVIDER, EMBEDDING_MODEL, VECTOR_DB_PROVIDER, GRAPH_DB_PROVIDER,
LOG_LEVEL=DEBUG, TELEMETRY_DISABLED=true.