|
|
@@ -0,0 +1,67 @@
|
|
|
+import os
|
|
|
+from langgraph.graph import StateGraph, START, END
|
|
|
+from langgraph.graph.message import add_messages
|
|
|
+from langchain_openai import ChatOpenAI
|
|
|
+
|
|
|
# --------------- Basic configuration ----------------

# 1) API key & base URL.
# Assumes you obtained an OpenAI-compatible key & endpoint from DeepSeek QPI.
# Use setdefault so an already-exported real key in the environment is not
# clobbered by the placeholder below.
os.environ.setdefault("OPENAI_API_KEY", "YOUR_QPI_API_KEY")
# If DeepSeek QPI requires a custom base URL, set it here:
# os.environ["OPENAI_API_BASE"] = "https://your-provider-url/v1"

# 2) Initialize the LLM.
# deepseek/deepseek-r1 is typically callable via QPI/OpenRouter-compatible
# OpenAI-style APIs.
llm = ChatOpenAI(model="deepseek/deepseek-r1:latest", temperature=0.7)
|
|
|
+
|
|
|
+
|
|
|
+# --------------- LangGraph 节点 ----------------
|
|
|
+
|
|
|
def call_deepseek(state):
    """Graph node: run the LLM over the conversation and append its reply.

    Args:
        state: The graph state; ``state["messages"]`` holds the conversation
            history (message dicts or LangChain ``BaseMessage`` objects).

    Returns:
        A partial state update ``{"messages": [ai_message]}``. Because
        ``MessagesState`` uses the ``add_messages`` reducer, returning only
        the new message appends it to the history — returning the full
        history here would duplicate every prior message.
    """
    # ChatOpenAI is a Runnable: call it via .invoke() with the message list.
    # (Calling the instance directly with a `messages=` kwarg is not a
    # supported API.) The return value is a single AIMessage, not a raw
    # OpenAI completion dict, so no ["choices"][0]["message"] indexing.
    response = llm.invoke(state["messages"])

    # Let the add_messages reducer merge the new reply into the state.
    return {"messages": [response]}
|
|
|
+
|
|
|
+
|
|
|
# --------------- Build the state graph ----------------

# MessagesState is LangGraph's prebuilt state schema: its "messages" key
# uses the add_messages reducer, so node updates are appended rather than
# overwriting the history.
from langgraph.graph import MessagesState

graph = StateGraph(MessagesState)

# Register the node with an explicit name so the string edges below refer
# to it unambiguously (name inference from the bare callable only works on
# newer langgraph versions).
graph.add_node("call_deepseek", call_deepseek)

# Wire the linear flow: START -> call_deepseek -> END.
graph.add_edge(START, "call_deepseek")
graph.add_edge("call_deepseek", END)

# Compile the graph into a runnable.
compiled_graph = graph.compile()
|
|
|
+
|
|
|
# --------------- Run ----------------

result = compiled_graph.invoke({
    "messages": [
        {"role": "user", "content": "你好,帮我写一段 LangGraph 入门示例说明"}
    ]
})

# MessagesState stores LangChain BaseMessage objects, so the reply text is
# the .content attribute — subscripting with ["content"] would raise
# TypeError on a message object.
print(result["messages"][-1].content)
|