Skip to content

Prebuilt

ToolNode

from langgraph.prebuilt import ToolNode

Bases: RunnableCallable

A node that runs the tools requested in the last AIMessage. It can be used either in StateGraph with a "messages" key or in MessageGraph. If multiple tool calls are requested, they will be run in parallel. The output will be a list of ToolMessages, one for each tool call.

Source code in langgraph/prebuilt/tool_node.py
class ToolNode(RunnableCallable):
    """A node that runs the tools requested in the last AIMessage.

    It can be used either in StateGraph with a "messages" key or in
    MessageGraph. If multiple tool calls are requested, they will be run in
    parallel. The output will be a list of ToolMessages, one for each tool
    call.
    """

    def __init__(
        self,
        tools: Sequence[BaseTool],
        *,
        name: str = "tools",
        tags: Optional[list[str]] = None,
    ) -> None:
        """Initialize the node.

        Args:
            tools: The tools this node may execute, indexed by ``tool.name``.
            name: Name of the node in the graph. Defaults to "tools".
            tags: Optional tags forwarded to the underlying RunnableCallable.
        """
        super().__init__(self._func, self._afunc, name=name, tags=tags, trace=False)
        self.tools_by_name = {tool.name: tool for tool in tools}

    @staticmethod
    def _extract_message(
        input: Union[list[AnyMessage], dict[str, Any]],
    ) -> tuple[str, AIMessage]:
        """Pull the last message out of either supported input shape.

        Shared by the sync and async entry points so the validation logic
        lives in exactly one place.

        Returns:
            A ``(output_type, message)`` pair where ``output_type`` is
            ``"list"`` (MessageGraph) or ``"dict"`` (StateGraph).

        Raises:
            ValueError: If no messages are present, or the last message is
                not an AIMessage.
        """
        if isinstance(input, list):
            output_type = "list"
            message: AnyMessage = input[-1]
        elif messages := input.get("messages", []):
            output_type = "dict"
            message = messages[-1]
        else:
            raise ValueError("No message found in input")

        if not isinstance(message, AIMessage):
            raise ValueError("Last message is not an AIMessage")
        return output_type, message

    def _func(
        self, input: Union[list[AnyMessage], dict[str, Any]], config: RunnableConfig
    ) -> Any:
        """Run all requested tool calls in parallel (sync path)."""
        output_type, message = self._extract_message(input)

        def run_one(call: ToolCall) -> ToolMessage:
            output = self.tools_by_name[call["name"]].invoke(call["args"], config)
            return ToolMessage(
                content=str_output(output), name=call["name"], tool_call_id=call["id"]
            )

        # Fan out over the config-aware executor so callbacks/tracing propagate.
        with get_executor_for_config(config) as executor:
            outputs = [*executor.map(run_one, message.tool_calls)]
            if output_type == "list":
                return outputs
            else:
                return {"messages": outputs}

    async def _afunc(
        self, input: Union[list[AnyMessage], dict[str, Any]], config: RunnableConfig
    ) -> Any:
        """Run all requested tool calls concurrently (async path)."""
        output_type, message = self._extract_message(input)

        async def run_one(call: ToolCall) -> ToolMessage:
            output = await self.tools_by_name[call["name"]].ainvoke(
                call["args"], config
            )
            return ToolMessage(
                content=str_output(output), name=call["name"], tool_call_id=call["id"]
            )

        outputs = await asyncio.gather(*(run_one(call) for call in message.tool_calls))
        if output_type == "list":
            return outputs
        else:
            return {"messages": outputs}

ToolExecutor

from langgraph.prebuilt import ToolExecutor

Bases: RunnableCallable

Executes a tool invocation.

Parameters:

  • tools (Sequence[BaseTool]) –

    A sequence of tools that can be invoked.

  • invalid_tool_msg_template (str, default: INVALID_TOOL_MSG_TEMPLATE ) –

    The template for the error message when an invalid tool is requested. Defaults to INVALID_TOOL_MSG_TEMPLATE.

Examples:

    from langchain_core.tools import tool
    from langgraph.prebuilt.tool_executor import ToolExecutor, ToolInvocation


    @tool
    def search(query: str) -> str:
        """Search engine."""
        return f"Searching for: {query}"


    tools = [search]
    executor = ToolExecutor(tools)

    invocation = ToolInvocation(tool="search", tool_input="What is the capital of France?")
    result = executor.invoke(invocation)
    print(result)  # Output: "Searching for: What is the capital of France?"

    invocation = ToolInvocation(
        tool="nonexistent", tool_input="What is the capital of France?"
    )
    result = executor.invoke(invocation)
    print(result)  # Output: "nonexistent is not a valid tool, try one of [search]."
Source code in langgraph/prebuilt/tool_executor.py
class ToolExecutor(RunnableCallable):
    """Executes a tool invocation.

    Args:
        tools (Sequence[BaseTool]): A sequence of tools that can be invoked.
        invalid_tool_msg_template (str, optional): The template for the error message
            when an invalid tool is requested. Defaults to INVALID_TOOL_MSG_TEMPLATE.

    Examples:

            from langchain_core.tools import tool
            from langgraph.prebuilt.tool_executor import ToolExecutor, ToolInvocation


            @tool
            def search(query: str) -> str:
                \"\"\"Search engine.\"\"\"
                return f"Searching for: {query}"


            tools = [search]
            executor = ToolExecutor(tools)

            invocation = ToolInvocation(tool="search", tool_input="What is the capital of France?")
            result = executor.invoke(invocation)
            print(result)  # Output: "Searching for: What is the capital of France?"

            invocation = ToolInvocation(
                tool="nonexistent", tool_input="What is the capital of France?"
            )
            result = executor.invoke(invocation)
            print(result)  # Output: "nonexistent is not a valid tool, try one of [search]."
    """

    def __init__(
        self,
        tools: Sequence[BaseTool],
        *,
        invalid_tool_msg_template: str = INVALID_TOOL_MSG_TEMPLATE,
    ) -> None:
        """Initialize the executor with its tool registry."""
        super().__init__(self._execute, afunc=self._aexecute, trace=False)
        self.tools = tools
        self.tool_map = {t.name: t for t in tools}
        self.invalid_tool_msg_template = invalid_tool_msg_template

    def _invalid_tool_message(self, requested_tool_name: str) -> str:
        """Render the error string for an unknown tool name.

        Shared by the sync and async paths so the message stays consistent.
        """
        return self.invalid_tool_msg_template.format(
            requested_tool_name=requested_tool_name,
            available_tool_names_str=", ".join([t.name for t in self.tools]),
        )

    def _execute(
        self, tool_invocation: ToolInvocationInterface, config: RunnableConfig
    ) -> Any:
        """Invoke the requested tool synchronously.

        Returns the tool's output, or the invalid-tool error string (rather
        than raising) when the tool name is unknown.
        """
        if tool_invocation.tool not in self.tool_map:
            return self._invalid_tool_message(tool_invocation.tool)
        tool = self.tool_map[tool_invocation.tool]
        return tool.invoke(tool_invocation.tool_input, config)

    async def _aexecute(
        self, tool_invocation: ToolInvocationInterface, config: RunnableConfig
    ) -> Any:
        """Invoke the requested tool asynchronously.

        Returns the tool's output, or the invalid-tool error string (rather
        than raising) when the tool name is unknown.
        """
        if tool_invocation.tool not in self.tool_map:
            return self._invalid_tool_message(tool_invocation.tool)
        tool = self.tool_map[tool_invocation.tool]
        return await tool.ainvoke(tool_invocation.tool_input, config)

ToolInvocation

from langgraph.prebuilt import ToolInvocation

Bases: Serializable

Information about how to invoke a tool.

Attributes:

  • tool (str) –

    The name of the Tool to execute.

  • tool_input (Union[str, dict]) –

    The input to pass in to the Tool.

Examples:

    invocation = ToolInvocation(
        tool="search",
        tool_input="What is the capital of France?"
    )
Source code in langgraph/prebuilt/tool_executor.py
class ToolInvocation(Serializable):
    """A serializable description of a single tool call.

    Attributes:
        tool (str): The name of the Tool to execute.
        tool_input (Union[str, dict]): The input to pass in to the Tool.

    Examples:

            invocation = ToolInvocation(
                tool="search",
                tool_input="What is the capital of France?"
            )
    """

    # Name of the tool to run, as registered on the executor.
    tool: str
    # Payload for the tool: a raw string or a keyword-argument dict.
    tool_input: Union[str, dict]

chat_agent_executor.create_tool_calling_executor

from langgraph.prebuilt.chat_agent_executor import create_tool_calling_executor

tools_condition

from langgraph.prebuilt import tools_condition

Use in the conditional_edge to route to the ToolNode if the last message has tool calls. Otherwise, route to the end.

Parameters:

  • state (Union[list[AnyMessage], dict[str, Any]]) –

    The state to check for tool calls. Must have a list of messages (MessageGraph) or have the "messages" key (StateGraph).

Returns:

  • Literal['action', '__end__']

    Literal["action", "__end__"]: The next node to route to.

Examples:

from langchain_anthropic import ChatAnthropic
from langchain_core.tools import tool

from langgraph.graph import MessageGraph
from langgraph.prebuilt import ToolNode, tools_condition


@tool
def divide(a: float, b: float) -> int:
    """Return a / b."""
    return a / b


llm = ChatAnthropic(model="claude-3-haiku-20240307")
tools = [divide]

graph_builder = MessageGraph()
graph_builder.add_node("tools", ToolNode(tools))
graph_builder.add_node("chatbot", llm.bind_tools(tools))
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_conditional_edges(
    "chatbot",
    # highlight-next-line
    tools_condition,
    {
        # If it returns 'action', route to the 'tools' node
        "action": "tools",
        # If it returns '__end__', route to the end
        "__end__": "__end__",
    },
)
graph_builder.set_entry_point("chatbot")
graph = graph_builder.compile()
graph.invoke([("user", "What's 329993 divided by 13662?")])
Source code in langgraph/prebuilt/tool_node.py
def tools_condition(
    state: Union[list[AnyMessage], dict[str, Any]],
) -> Literal["action", "__end__"]:
    """Use in the conditional_edge to route to the ToolNode if the last message

    has tool calls. Otherwise, route to the end.

    Args:
        state (Union[list[AnyMessage], dict[str, Any]]): The state to check for
            tool calls. Must have a list of messages (MessageGraph) or have the
            "messages" key (StateGraph).

    Returns:
        Literal["action", "__end__"]: "action" if the last message has tool
            calls, "__end__" otherwise.


    Examples:

        from langchain_anthropic import ChatAnthropic
        from langchain_core.tools import tool

        from langgraph.graph import MessageGraph
        from langgraph.prebuilt import ToolNode, tools_condition


        @tool
        def divide(a: float, b: float) -> int:
            \"\"\"Return a / b.\"\"\"
            return a / b


        llm = ChatAnthropic(model="claude-3-haiku-20240307")
        tools = [divide]

        graph_builder = MessageGraph()
        graph_builder.add_node("tools", ToolNode(tools))
        graph_builder.add_node("chatbot", llm.bind_tools(tools))
        graph_builder.add_edge("tools", "chatbot")
        graph_builder.add_conditional_edges(
            "chatbot",
            # highlight-next-line
            tools_condition,
            {
                # If it returns 'action', route to the 'tools' node
                "action": "tools",
                # If it returns '__end__', route to the end
                "__end__": "__end__",
            },
        )
        graph_builder.set_entry_point("chatbot")
        graph = graph_builder.compile()
        graph.invoke([("user", "What's 329993 divided by 13662?")])
    """
    if isinstance(state, list):
        ai_message = state[-1]
    elif messages := state.get("messages", []):
        ai_message = messages[-1]
    else:
        raise ValueError(f"No messages found in input state to tool_edge: {state}")
    # getattr also tolerates messages where tool_calls exists but is None,
    # which the previous hasattr + len() check would crash on.
    if getattr(ai_message, "tool_calls", None):
        return "action"
    return "__end__"