Troubleshooting Null Destination in "Migrating from MultiPromptChain": Issue with LangChain Code Implementation #27064
-
Checked other resources
Commit to Help
Example Code:
from operator import itemgetter
from typing import Literal
import os
import asyncio
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableConfig
from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, StateGraph
from typing_extensions import TypedDict
from langchain.globals import set_debug
# Enable verbose LangChain tracing so every chain/LLM step is logged.
set_debug(True)
# Chat model served through SambaNova's OpenAI-compatible endpoint.
# NOTE(review): requires SAMBANOVA_API_KEY in the environment — if unset,
# api_key is None and requests will fail at call time.
llm = ChatOpenAI(
    model='Meta-Llama-3.1-405B-Instruct',
    base_url="https://api.sambanova.ai/v1",
    api_key=os.environ.get("SAMBANOVA_API_KEY"),
)
# Define the prompts we will route to
# Animal-expert persona; the user's text is injected as {input}.
prompt_1 = ChatPromptTemplate.from_messages(
    [
        ("system", "You are an expert on animals."),
        ("human", "{input}"),
    ]
)
# Vegetable-expert persona.
prompt_2 = ChatPromptTemplate.from_messages(
    [
        ("system", "You are an expert on vegetables."),
        ("human", "{input}"),
    ]
)
# Construct the chains we will route to. These format the input query
# into the respective prompt, run it through a chat model, and cast
# the result to a string.
chain_1 = prompt_1 | llm | StrOutputParser()
chain_2 = prompt_2 | llm | StrOutputParser()
# Next: define the chain that selects which branch to route to.
# Here we will take advantage of tool-calling features to force
# the output to select one of two desired branches.
# NOTE(review): tool-calling support varies by backend — confirm the
# SambaNova-served model actually supports it, otherwise the structured
# router below may return None.
route_system = "Route the user's query to either the animal or vegetable expert."
route_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", route_system),
        ("human", "{input}"),
    ]
)
# Define schema for output:
class RouteQuery(TypedDict):
    """Structured routing decision produced by the router LLM.

    The router returns a mapping with a single key, ``destination``,
    whose value names the expert that should answer.
    """
    # "animal" routes to the animal expert, "vegetable" to the vegetable one.
    destination: Literal["animal", "vegetable"]
# Router: classify the query into a RouteQuery mapping via structured output.
route_chain = route_prompt | llm.with_structured_output(RouteQuery)
# For LangGraph, we will define the state of the graph to hold the query,
# destination, and final answer.
class State(TypedDict):
    """Shared graph state threaded through every node."""
    # The raw user question.
    query: str
    # The router's structured decision (a RouteQuery mapping).
    destination: RouteQuery
    # The selected expert's final reply.
    answer: str
# We define functions for each node, including routing the query:
async def route_query(state: State, config: RunnableConfig):
    """Classify the query via the router chain and record the result."""
    # The structured-output router picks between the two experts.
    chosen = await route_chain.ainvoke(state["query"], config)
    return {"destination": chosen}
# And one node for each prompt
async def prompt_1(state: State, config: RunnableConfig):
    """Answer the query through the animal-expert chain."""
    # NOTE(review): this function shadows the earlier ChatPromptTemplate
    # also named prompt_1; chain_1 already captured the template, so
    # behavior is unaffected, but the reuse of the name is confusing.
    reply = await chain_1.ainvoke(state["query"], config)
    return {"answer": reply}
async def prompt_2(state: State, config: RunnableConfig):
    """Answer the query through the vegetable-expert chain."""
    # Shadows the template of the same name; chain_2 holds the template.
    reply = await chain_2.ainvoke(state["query"], config)
    return {"answer": reply}
# We then define logic that selects the prompt based on the classification
def select_node(state: "State") -> Literal["prompt_1", "prompt_2"]:
    """Pick the expert node based on the routing classification.

    Bug fix: ``state["destination"]`` holds a ``RouteQuery`` mapping
    (e.g. ``{"destination": "animal"}``), not a bare string, so the
    original ``== "animal"`` comparison was always False and every query
    fell through to the vegetable expert. Unwrap the mapping first; a
    plain string is still accepted for robustness, and None (a router
    failure, as reported in this thread) falls back to prompt_2.
    """
    dest = state["destination"]
    # Unwrap the structured-output mapping produced by route_chain.
    if isinstance(dest, dict):
        dest = dest.get("destination")
    return "prompt_1" if dest == "animal" else "prompt_2"
# Finally, assemble the multi-prompt chain. This is a sequence of two steps:
# 1) Select "animal" or "vegetable" via the route_chain, and collect the answer
# alongside the input query.
# 2) Route the input query to chain_1 or chain_2, based on the
# selection.
# Wire the graph: START -> route_query -> (prompt_1 | prompt_2) -> END.
graph = StateGraph(State)
graph.add_node("route_query", route_query)
graph.add_node("prompt_1", prompt_1)
graph.add_node("prompt_2", prompt_2)
graph.add_edge(START, "route_query")
# select_node's return value ("prompt_1"/"prompt_2") names the next node.
graph.add_conditional_edges("route_query", select_node)
graph.add_edge("prompt_1", END)
graph.add_edge("prompt_2", END)
# Compile the graph into a runnable application.
app = graph.compile()
async def main():
    """Drive one sample query through the compiled graph and print it."""
    final_state = await app.ainvoke(
        {"query": "what color are carrots", "destination": None, "answer": None}
    )
    print(final_state["destination"])
    print(final_state["answer"])
# Run the main async function
asyncio.run(main())

Description: I have tried the exact code from "Migrating from MultiPromptChain" but I did not get the expected result. The destination part returns null.
And here is the log.
System Info
|
Beta Was this translation helpful? Give feedback.
Replies: 2 comments 1 reply
-
Hey there, @104-wonohfor! 👋 I'm here to help you with any bugs, questions, or contributions while we wait for a human maintainer. To troubleshoot the issue of a null destination in your LangChain code implementation when migrating from MultiPromptChain, check that the routing model actually supports structured output (tool calling).
By ensuring that the routing logic is correctly determining the destination and that the output parsing is correctly extracting it, the null destination should be resolved.
Beta Was this translation helpful? Give feedback.
-
This is my solution. I removed RouteQuery and modified route_system.

import os
import asyncio
import json
from typing import Literal
from typing_extensions import TypedDict
from langchain.globals import set_debug
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, StateGraph
from langchain_core.runnables import RunnableConfig
# Enable verbose LangChain tracing for debugging.
set_debug(True)
# Chat model on SambaNova's OpenAI-compatible API (key from the environment).
llm = ChatOpenAI(
    model='Meta-Llama-3.1-405B-Instruct',
    base_url="https://api.sambanova.ai/v1",
    api_key=os.environ.get("SAMBANOVA_API_KEY"),
)
# Define the prompts we will route to
# Animal-expert persona; the user's text is injected as {input}.
prompt_1 = ChatPromptTemplate.from_messages(
    [
        ("system", "You are an expert on animals."),
        ("human", "{input}"),
    ]
)
# Vegetable-expert persona.
prompt_2 = ChatPromptTemplate.from_messages(
    [
        ("system", "You are an expert on vegetables."),
        ("human", "{input}"),
    ]
)
# Construct the chains we will route to. These format the input query
# into the respective prompt, run it through a chat model, and cast
# the result to a string.
chain_1 = prompt_1 | llm | StrOutputParser()
chain_2 = prompt_2 | llm | StrOutputParser()
# Define the chain that selects which branch to route to.
# Instead of tool-calling / structured output, the router is asked in
# plain text to emit a JSON object — this works on models that lack
# reliable tool-call support (the cause of the null destination above).
route_system = """
Given a raw text input to a language model, select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for.
<< CANDIDATE PROMPTS >>
animals: prompt for animal expert
vegetables: prompt for a vegetable expert
<< INPUT >>
{input}
<< OUTPUT >>
Return a JSON object with the key "destination" and the value being either "animals" or "vegetables".
"""
route_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", route_system),
        ("human", "{input}"),
    ]
)
# Router chain: returns a raw AIMessage whose content should be JSON text.
chain = route_prompt | llm
# For LangGraph, we will define the state of the graph to hold the query,
# destination, and final answer.
class State(TypedDict):
    """Shared graph state for this JSON-routing variant."""
    # The raw user question.
    query: str
    # "animals" or "vegetables", parsed from the router's JSON reply.
    destination: str
    # The chosen expert's reply.
    answer: str
# We define functions for each node, including routing the query:
async def route_query(state: State, config: RunnableConfig):
    """Ask the router LLM for a destination and store it in the state.

    Robustness fix: chat models frequently wrap JSON answers in markdown
    code fences (```json ... ```); calling json.loads on the raw content
    would then raise JSONDecodeError. Strip any surrounding fence before
    parsing.
    """
    result = await chain.ainvoke({"input": state["query"]}, config)
    text = result.content.strip()
    # Remove a surrounding markdown code fence, if present.
    if text.startswith("```"):
        text = text.strip("`").strip()
        if text.startswith("json"):
            text = text[len("json"):].strip()
    result_json = json.loads(text)
    return {"destination": result_json["destination"]}
# And one node for each prompt
async def prompt_1(state: State, config: RunnableConfig):
    """Answer the query through the animal-expert chain."""
    reply = await chain_1.ainvoke(state["query"], config)
    return {"answer": reply}
async def prompt_2(state: State, config: RunnableConfig):
    """Answer the query through the vegetable-expert chain."""
    reply = await chain_2.ainvoke(state["query"], config)
    return {"answer": reply}
# We then define logic that selects the prompt based on the classification
def select_node(state: State) -> Literal["prompt_1", "prompt_2"]:
    """Map the routed destination string onto a graph node name."""
    # "animals" goes to the animal expert; anything else to vegetables.
    return "prompt_1" if state["destination"] == "animals" else "prompt_2"
# Finally, assemble the multi-prompt chain. This is a sequence of two steps:
# 1) Select "animals" or "vegetables" via the route_chain, and collect the answer
# alongside the input query.
# Wire the graph: START -> route_query -> (prompt_1 | prompt_2) -> END.
graph = StateGraph(State)
graph.add_node("route_query", route_query)
graph.add_node("prompt_1", prompt_1)
graph.add_node("prompt_2", prompt_2)
graph.add_edge(START, "route_query")
# select_node returns the name of the next node to run.
graph.add_conditional_edges("route_query", select_node)
graph.add_edge("prompt_1", END)
graph.add_edge("prompt_2", END)
# Compile the graph into a runnable application.
app = graph.compile()
async def main():
    """Drive one sample query through the compiled graph and print it."""
    final_state = await app.ainvoke(
        {"query": "What color are carrots?", "destination": None, "answer": None}
    )
    print(final_state["destination"])
    print(final_state["answer"])
# Run the main async function
asyncio.run(main())
Beta Was this translation helpful? Give feedback.
This is my solution. I removed RouteQuery and modified route_system.