LangGraph conditional edges — original and translated snippets shown side by side.
from langgraph.graph import StateGraph, END
def route_based_on_quality(state: WorkflowState) -> str:
    """Decide next step based on quality score."""
    # High-confidence output ships immediately; otherwise retry until
    # the budget of 3 attempts is spent, then escalate to a human.
    if state["quality_score"] >= 0.8:
        return "publish"
    return "retry" if state["retry_count"] < 3 else "manual_review"
# Wire the quality_check node to its three successors; the router's
# returned label is mapped to the destination node name.
workflow.add_conditional_edges(
    "quality_check",
    route_based_on_quality,
    {
        "publish": "publish_node",       # high quality -> ship it
        "retry": "generator",            # loop back for another attempt
        "manual_review": "review_queue"  # retry budget spent -> human review
    }
)

# NOTE(review): duplicated import below comes from the page's translated column.
from langgraph.graph import StateGraph, END
def route_based_on_quality(state: WorkflowState) -> str:
    """Decide the next step from the quality score."""
    score = state["quality_score"]
    attempts = state["retry_count"]
    if score >= 0.8:
        return "publish"
    if attempts < 3:
        return "retry"
    return "manual_review"
# (Duplicate of the quality_check wiring above, from the translated column.)
workflow.add_conditional_edges(
    "quality_check",
    route_based_on_quality,
    {
        "publish": "publish_node",
        "retry": "generator",
        "manual_review": "review_queue"
    }
)

def route_after_quality_gate(state: AnalysisState) -> str:
    """Route based on quality gate result."""
    # Pass -> compress findings; fail -> retry via the supervisor up to
    # 2 times; once retries are exhausted, end the run with partial results.
    if state["quality_passed"]:
        return "compress_findings"
    elif state["retry_count"] < 2:
        return "supervisor"  # Retry
    else:
        return END  # Return partial results
# Wire the quality_gate node; returning END terminates the graph early.
workflow.add_conditional_edges(
    "quality_gate",
    route_after_quality_gate,
    {
        "compress_findings": "compress_findings",
        "supervisor": "supervisor",
        END: END  # router may return END to finish with partial results
    }
)

# (Translated duplicate from the page's side-by-side view.)
def route_after_quality_gate(state: AnalysisState) -> str:
    """Route based on the quality-gate result."""
    if state["quality_passed"]:
        return "compress_findings"
    elif state["retry_count"] < 2:
        return "supervisor"  # Retry
    else:
        return END  # Return partial results
# (Duplicate of the quality_gate wiring above, from the translated column.)
workflow.add_conditional_edges(
    "quality_gate",
    route_after_quality_gate,
    {
        "compress_findings": "compress_findings",
        "supervisor": "supervisor",
        END: END
    }
)

def llm_call_with_retry(state):
    """Retry failed LLM calls."""
    # Success resets the retry counter; any failure bumps it and records
    # the error so should_retry can route to retry/error handling.
    try:
        result = call_llm(state["input"])
        state["output"] = result
        state["retry_count"] = 0
        return state
    # NOTE(review): broad catch is deliberate here — failures are routed
    # through the graph (should_retry) rather than raised to the caller.
    except Exception as e:
        state["retry_count"] += 1
        state["error"] = str(e)
        return state
def should_retry(state) -> str:
    """Classify the last LLM call: success, retry, or failed."""
    # A truthy output means the call succeeded; otherwise retry while
    # fewer than 3 attempts have been recorded.
    if state.get("output"):
        return "success"
    return "retry" if state["retry_count"] < 3 else "failed"
# Route the llm_call node based on should_retry's verdict.
workflow.add_conditional_edges(
    "llm_call",
    should_retry,
    {
        "success": "next_step",
        "retry": "llm_call",  # Loop back
        "failed": "error_handler"
    }
)

# (Translated duplicate from the page's side-by-side view.)
def llm_call_with_retry(state):
    """Retry failed LLM calls."""
    try:
        result = call_llm(state["input"])
        state["output"] = result
        state["retry_count"] = 0
        return state
    except Exception as e:
        state["retry_count"] += 1
        state["error"] = str(e)
        return state
def should_retry(state) -> str:
    """Decide whether the LLM call succeeded, should retry, or failed."""
    has_output = bool(state.get("output"))
    budget_left = state["retry_count"] < 3
    if has_output:
        return "success"
    if budget_left:
        return "retry"
    return "failed"
# (Duplicate of the llm_call wiring above, from the translated column.)
workflow.add_conditional_edges(
    "llm_call",
    should_retry,
    {
        "success": "next_step",
        "retry": "llm_call",  # Loop back
        "failed": "error_handler"
    }
)

Sequential: A → B → C (simple edges)
Branching: A → (B or C) (conditional edges)
Looping: A → B → A (retry logic)
Convergence: (A or B) → C (multiple inputs)
Diamond: A → (B, C) → D (parallel then merge)

顺序执行: A → B → C (简单边)
分支执行: A → (B 或 C) (条件边)
循环执行: A → B → A (重试逻辑)
汇聚执行: (A 或 B) → C (多输入)
菱形模式: A → (B, C) → D (并行后合并)

def dynamic_router(state: WorkflowState) -> str:
    """Route based on multiple state conditions."""
    # Guard clauses in priority order: hard errors first, then missing
    # validation, then low-confidence enhancement; the default path
    # finalizes the workflow.
    if state.get("error"):
        return "error_handler"
    if not state.get("validated"):
        return "validator"
    if state["confidence"] < 0.5:
        return "enhance"
    return "finalize"

# (Translated duplicate from the page's side-by-side view.)
def dynamic_router(state: WorkflowState) -> str:
    """Route based on multiple state conditions."""
    if state.get("error"):
        return "error_handler"
    if not state.get("validated"):
        return "validator"
    if state["confidence"] < 0.5:
        return "enhance"
    return "finalize"

from langgraph.types import Command
# NOTE(review): the repeated imports below come from the page's translated
# column; stray "undefined" scrape residue removed.
from typing import Literal
from langgraph.types import Command
from typing import Literal

from sentence_transformers import SentenceTransformer
import numpy as np

# Shared sentence-embedding model used for semantic routing.
embedder = SentenceTransformer("all-MiniLM-L6-v2")

from sentence_transformers import SentenceTransformer
import numpy as np
embedder = SentenceTransformer("all-MiniLM-L6-v2")

# Calculate cosine similarities
# Fragment of a routing function: query_embedding and ROUTE_EMBEDDINGS
# (pre-computed per-route embeddings) are defined elsewhere — TODO confirm.
similarities = {}
for route, route_embedding in ROUTE_EMBEDDINGS.items():
    # cosine(a, b) = a·b / (|a| * |b|)
    similarity = np.dot(query_embedding, route_embedding) / (
        np.linalg.norm(query_embedding) * np.linalg.norm(route_embedding)
    )
    similarities[route] = similarity

# Return highest similarity route
best_route = max(similarities, key=similarities.get)

# Optional: threshold check — fall back when no route is a decent match
if similarities[best_route] < 0.3:
    return "general"  # Fallback
return best_route
# Calculate cosine similarities (translated duplicate of the snippet above).
similarities = {}
for route, route_embedding in ROUTE_EMBEDDINGS.items():
    similarity = np.dot(query_embedding, route_embedding) / (
        np.linalg.norm(query_embedding) * np.linalg.norm(route_embedding)
    )
    similarities[route] = similarity

# Return the route with the highest similarity
best_route = max(similarities, key=similarities.get)

# Optional: threshold check
if similarities[best_route] < 0.3:
    return "general"  # Fallback route
return best_route

| Decision | Recommendation |
|---|---|
| Max retries | 2-3 for LLM calls |
| Fallback | Always have END fallback |
| Routing function | Keep pure (no side effects) |
| Edge mapping | Explicit mapping for clarity |
| Command vs Conditional | Command when updating state + routing |
| Semantic routing | Pre-compute embeddings, use cosine similarity |
| 决策项 | 建议方案 |
|---|---|
| 最大重试次数 | LLM调用建议设置2-3次 |
| 兜底路由 | 始终设置END作为兜底 |
| 路由函数 | 保持纯函数(无副作用) |
| 边映射 | 使用显式映射以提升可读性 |
| Command vs 条件边 | 同时更新状态+路由时使用Command |
| 语义路由 | 预计算嵌入向量,使用余弦相似度 |
Related: langgraph-state · langgraph-supervisor · langgraph-parallel · langgraph-human-in-loop · langgraph-tools · agent-loops