test_astream() — langchain Function Reference
Architecture documentation for the test_astream() function in test_base.py from the langchain codebase.
Entity Profile
Dependency Diagram
graph TD
    6eb1019d_7e08_bd8f_3bf4_efe9592a0318["test_astream()"]
    bd382a4e_442c_13ae_530c_6e34bc43623d["test_base.py"]
    6eb1019d_7e08_bd8f_3bf4_efe9592a0318 -->|defined in| bd382a4e_442c_13ae_530c_6e34bc43623d
    style 6eb1019d_7e08_bd8f_3bf4_efe9592a0318 fill:#6366f1,stroke:#818cf8,color:#fff
Relationship Graph
Source Code
libs/partners/openai/tests/integration_tests/chat_models/test_base.py lines 324–391
async def test_astream() -> None:
    """Test streaming tokens from OpenAI."""

    async def _consume_and_check(stream: AsyncIterator, expect_usage: bool) -> None:
        """Drain `stream`, aggregating chunks, then verify metadata invariants.

        Counts how many chunks carried usage metadata and how many carried
        "real" response metadata (beyond keys that aggregation may inject),
        and asserts each appears exactly once when expected.
        """
        aggregated: BaseMessageChunk | None = None
        usage_chunk_count = 0
        metadata_chunk_count = 0
        # Keys that chunk aggregation itself is allowed to add; a chunk whose
        # response_metadata contains only these does not count as "the"
        # metadata-bearing chunk.
        ignorable_keys = {"model_provider", "output_version"}
        async for piece in stream:
            assert isinstance(piece.content, str)
            # Fold chunks together via AIMessageChunk.__add__.
            aggregated = piece if aggregated is None else aggregated + piece
            assert isinstance(piece, AIMessageChunk)
            if piece.usage_metadata is not None:
                usage_chunk_count += 1
            if piece.response_metadata and set(piece.response_metadata) - ignorable_keys:
                metadata_chunk_count += 1
        assert isinstance(aggregated, AIMessageChunk)
        if metadata_chunk_count != 1:
            msg = (
                "Expected exactly one chunk with metadata. "
                "AIMessageChunk aggregation can add these metadata. Check that "
                "this is behaving properly."
            )
            raise AssertionError(msg)
        assert aggregated.response_metadata.get("finish_reason") is not None
        assert aggregated.response_metadata.get("model_name") is not None
        if expect_usage:
            if usage_chunk_count != 1:
                msg = (
                    "Expected exactly one chunk with token counts. "
                    "AIMessageChunk aggregation adds counts. Check that "
                    "this is behaving properly."
                )
                raise AssertionError(msg)
            assert aggregated.usage_metadata is not None
            assert aggregated.usage_metadata["input_tokens"] > 0
            assert aggregated.usage_metadata["output_tokens"] > 0
            assert aggregated.usage_metadata["total_tokens"] > 0
        else:
            assert usage_chunk_count == 0
            assert aggregated.usage_metadata is None

    # Case 1: per-call flags on a default-configured model.
    llm = ChatOpenAI(model="gpt-4.1-mini", temperature=0, max_tokens=MAX_TOKEN_COUNT)  # type: ignore[call-arg]
    await _consume_and_check(llm.astream("Hello", stream_usage=False), expect_usage=False)
    await _consume_and_check(
        llm.astream("Hello", stream_options={"include_usage": True}), expect_usage=True
    )
    await _consume_and_check(llm.astream("Hello", stream_usage=True), expect_usage=True)

    # Case 2: stream_options set at construction time via model_kwargs,
    # then overridden per call.
    llm = ChatOpenAI(
        model="gpt-4.1-mini",
        temperature=0,
        max_tokens=MAX_TOKEN_COUNT,  # type: ignore[call-arg]
        model_kwargs={"stream_options": {"include_usage": True}},
    )
    await _consume_and_check(llm.astream("Hello"), expect_usage=True)
    await _consume_and_check(
        llm.astream("Hello", stream_options={"include_usage": False}),
        expect_usage=False,
    )

    # Case 3: stream_usage set at construction time, then overridden per call.
    llm = ChatOpenAI(
        model="gpt-4.1-mini",
        temperature=0,
        max_tokens=MAX_TOKEN_COUNT,  # type: ignore[call-arg]
        stream_usage=True,
    )
    await _consume_and_check(llm.astream("Hello"), expect_usage=True)
    await _consume_and_check(llm.astream("Hello", stream_usage=False), expect_usage=False)
Domain
Subdomains
Source
Frequently Asked Questions
What does test_astream() do?
test_astream() is an async integration test in the langchain codebase, defined in libs/partners/openai/tests/integration_tests/chat_models/test_base.py. It streams a prompt through ChatOpenAI and asserts that every chunk is an AIMessageChunk with string content, that exactly one chunk carries response metadata, and that usage metadata (input/output/total token counts) appears in exactly one chunk — but only when streaming usage is enabled via the stream_usage flag or stream_options={"include_usage": True}, whether set per call or at model construction.
Where is test_astream() defined?
test_astream() is defined in libs/partners/openai/tests/integration_tests/chat_models/test_base.py at line 324.
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free