model.py — langchain Source File
Architecture documentation for model.py, a Python file in the langchain codebase. 12 imports, 0 dependents.
Entity Profile
Dependency Diagram
graph LR
    cb4161b5_9941_d15a_c442_0038360558d0["model.py"]
    7025b240_fdc3_cf68_b72f_f41dac94566b["json"]
    cb4161b5_9941_d15a_c442_0038360558d0 --> 7025b240_fdc3_cf68_b72f_f41dac94566b
    cfe2bde5_180e_e3b0_df2b_55b3ebaca8e7["collections.abc"]
    cb4161b5_9941_d15a_c442_0038360558d0 --> cfe2bde5_180e_e3b0_df2b_55b3ebaca8e7
    aac5f8ad_7f2a_3a8e_3b4b_b07d681cbdcf["dataclasses"]
    cb4161b5_9941_d15a_c442_0038360558d0 --> aac5f8ad_7f2a_3a8e_3b4b_b07d681cbdcf
    8e2034b7_ceb8_963f_29fc_2ea6b50ef9b3["typing"]
    cb4161b5_9941_d15a_c442_0038360558d0 --> 8e2034b7_ceb8_963f_29fc_2ea6b50ef9b3
    f3bc7443_c889_119d_0744_aacc3620d8d2["langchain_core.callbacks"]
    cb4161b5_9941_d15a_c442_0038360558d0 --> f3bc7443_c889_119d_0744_aacc3620d8d2
    ba43b74d_3099_7e1c_aac3_cf594720469e["langchain_core.language_models"]
    cb4161b5_9941_d15a_c442_0038360558d0 --> ba43b74d_3099_7e1c_aac3_cf594720469e
    d758344f_537f_649e_f467_b9d7442e86df["langchain_core.messages"]
    cb4161b5_9941_d15a_c442_0038360558d0 --> d758344f_537f_649e_f467_b9d7442e86df
    ac2a9b92_4484_491e_1b48_ec85e71e1d58["langchain_core.outputs"]
    cb4161b5_9941_d15a_c442_0038360558d0 --> ac2a9b92_4484_491e_1b48_ec85e71e1d58
    2ceb1686_0f8c_8ae0_36d1_7c0b702fda1c["langchain_core.runnables"]
    cb4161b5_9941_d15a_c442_0038360558d0 --> 2ceb1686_0f8c_8ae0_36d1_7c0b702fda1c
    43d88577_548b_2248_b01b_7987bae85dcc["langchain_core.tools"]
    cb4161b5_9941_d15a_c442_0038360558d0 --> 43d88577_548b_2248_b01b_7987bae85dcc
    6e58aaea_f08e_c099_3cc7_f9567bfb1ae7["pydantic"]
    cb4161b5_9941_d15a_c442_0038360558d0 --> 6e58aaea_f08e_c099_3cc7_f9567bfb1ae7
    91721f45_4909_e489_8c1f_084f8bd87145["typing_extensions"]
    cb4161b5_9941_d15a_c442_0038360558d0 --> 91721f45_4909_e489_8c1f_084f8bd87145
    style cb4161b5_9941_d15a_c442_0038360558d0 fill:#6366f1,stroke:#818cf8,color:#fff
Source Code
import json
from collections.abc import Callable, Sequence
from dataclasses import asdict, is_dataclass
from typing import (
    Any,
    Literal,
)

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseChatModel, LanguageModelInput
from langchain_core.messages import (
    AIMessage,
    BaseMessage,
    ToolCall,
)
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.runnables import Runnable
from langchain_core.tools import BaseTool
from pydantic import BaseModel
from typing_extensions import override


class FakeToolCallingModel(BaseChatModel):
    tool_calls: list[list[ToolCall]] | list[list[dict[str, Any]]] | None = None
    structured_response: Any | None = None
    index: int = 0
    tool_style: Literal["openai", "anthropic"] = "openai"

    def _generate(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: CallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Top Level call."""
        # A truthy response_format kwarg signals a native structured-output request.
        is_native = kwargs.get("response_format")
        if self.tool_calls:
            if is_native:
                tool_calls = (
                    self.tool_calls[self.index] if self.index < len(self.tool_calls) else []
                )
            else:
                # Cycle through the scripted tool calls, one batch per invocation.
                tool_calls = self.tool_calls[self.index % len(self.tool_calls)]
        else:
            tool_calls = []
        if is_native and not tool_calls:
            # Serialize the configured structured response as the message content.
            if isinstance(self.structured_response, BaseModel):
                content_obj = self.structured_response.model_dump()
            elif is_dataclass(self.structured_response) and not isinstance(
                self.structured_response, type
            ):
                content_obj = asdict(self.structured_response)
            elif isinstance(self.structured_response, dict):
                content_obj = self.structured_response
            message = AIMessage(content=json.dumps(content_obj), id=str(self.index))
        else:
            # Echo the conversation back, attaching this turn's scripted tool calls.
            messages_string = "-".join([m.text for m in messages])
            message = AIMessage(
                content=messages_string,
                id=str(self.index),
                tool_calls=tool_calls.copy(),
            )
        self.index += 1
        return ChatResult(generations=[ChatGeneration(message=message)])

    @property
    def _llm_type(self) -> str:
        return "fake-tool-call-model"

    @override
    def bind_tools(
        self,
        tools: Sequence[dict[str, Any] | type | Callable[..., Any] | BaseTool],
        *,
        tool_choice: str | None = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, AIMessage]:
        if len(tools) == 0:
            msg = "Must provide at least one tool"
            raise ValueError(msg)

        tool_dicts = []
        for tool in tools:
            if isinstance(tool, dict):
                tool_dicts.append(tool)
                continue
            if not isinstance(tool, BaseTool):
                msg = "Only BaseTool and dict is supported by FakeToolCallingModel.bind_tools"
                raise TypeError(msg)

            # NOTE: this is a simplified tool spec for testing purposes only
            if self.tool_style == "openai":
                tool_dicts.append(
                    {
                        "type": "function",
                        "function": {
                            "name": tool.name,
                        },
                    }
                )
            elif self.tool_style == "anthropic":
                tool_dicts.append(
                    {
                        "name": tool.name,
                    }
                )
        return self.bind(tools=tool_dicts, **kwargs)
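The class above is a test double: each _generate call replays the next batch of scripted tool calls, and index advances on every invocation. A minimal usage sketch, assuming the class is importable from this test module (the import path and the "search" tool name are illustrative, not part of the file):

from langchain_core.messages import HumanMessage, ToolCall

from model import FakeToolCallingModel  # hypothetical import path for this test module

# Script two turns: first a tool call, then a plain answer.
fake = FakeToolCallingModel(
    tool_calls=[
        [ToolCall(name="search", args={"query": "weather"}, id="call_1")],
        [],
    ]
)
# Dict specs pass through bind_tools unchanged; BaseTool instances are
# reduced to a name-only spec in the configured tool_style.
bound = fake.bind_tools([{"type": "function", "function": {"name": "search"}}])

first = bound.invoke([HumanMessage(content="hi")])
print(first.tool_calls)   # -> the scripted "search" call

second = bound.invoke([HumanMessage(content="hi")])
print(second.tool_calls)  # -> [] (index advanced to the second scripted batch)

Because the non-native path indexes with self.index % len(self.tool_calls), the script wraps around once exhausted, which keeps multi-turn agent loops deterministic without any network calls.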
Domain
CoreAbstractions
Subdomains
MessageSchema
Classes
- FakeToolCallingModel
Dependencies
- collections.abc
- dataclasses
- json
- langchain_core.callbacks
- langchain_core.language_models
- langchain_core.messages
- langchain_core.outputs
- langchain_core.runnables
- langchain_core.tools
- pydantic
- typing
- typing_extensions
Source
libs/langchain_v1/tests/unit_tests/agents/model.py
Frequently Asked Questions
What does model.py do?
model.py is a Python source file in the langchain codebase. It defines FakeToolCallingModel, a fake chat model that replays scripted tool calls and structured responses so agent unit tests can run without a real provider. It belongs to the CoreAbstractions domain, MessageSchema subdomain.
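When a truthy response_format kwarg reaches _generate (for example, passed through invoke), the model serializes structured_response to JSON instead of emitting tool calls. A short sketch of that path, where the Weather schema is an illustrative stand-in rather than part of the file:

from langchain_core.messages import HumanMessage
from pydantic import BaseModel

class Weather(BaseModel):  # illustrative schema, not defined in model.py
    city: str
    temp_c: float

fake = FakeToolCallingModel(structured_response=Weather(city="Paris", temp_c=21.0))
result = fake.invoke([HumanMessage(content="weather?")], response_format=Weather)
print(result.content)  # {"city": "Paris", "temp_c": 21.0}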
What does model.py depend on?
model.py imports 12 modules: collections.abc, dataclasses, json, langchain_core.callbacks, langchain_core.language_models, langchain_core.messages, langchain_core.outputs, langchain_core.runnables, langchain_core.tools, pydantic, typing, and typing_extensions.
Where is model.py in the architecture?
model.py is located at libs/langchain_v1/tests/unit_tests/agents/model.py (domain: CoreAbstractions, subdomain: MessageSchema, directory: libs/langchain_v1/tests/unit_tests/agents).