test_cache.py — langchain Source File
Architecture documentation for test_cache.py, a Python file in the langchain codebase. 5 imports, 0 dependents.
Entity Profile
Dependency Diagram
graph LR
    4448d00a_7fa0_afd0_1877_b0eb9e910890["test_cache.py"]
    8e2034b7_ceb8_963f_29fc_2ea6b50ef9b3["typing"]
    4448d00a_7fa0_afd0_1877_b0eb9e910890 --> 8e2034b7_ceb8_963f_29fc_2ea6b50ef9b3
    91721f45_4909_e489_8c1f_084f8bd87145["typing_extensions"]
    4448d00a_7fa0_afd0_1877_b0eb9e910890 --> 91721f45_4909_e489_8c1f_084f8bd87145
    e51e78c8_f355_3edd_309e_1aec4323616a["langchain_core.caches"]
    4448d00a_7fa0_afd0_1877_b0eb9e910890 --> e51e78c8_f355_3edd_309e_1aec4323616a
    85390fd0_d51c_6478_9be2_2d6a9c15d720["langchain_core.globals"]
    4448d00a_7fa0_afd0_1877_b0eb9e910890 --> 85390fd0_d51c_6478_9be2_2d6a9c15d720
    ba43b74d_3099_7e1c_aac3_cf594720469e["langchain_core.language_models"]
    4448d00a_7fa0_afd0_1877_b0eb9e910890 --> ba43b74d_3099_7e1c_aac3_cf594720469e
    style 4448d00a_7fa0_afd0_1877_b0eb9e910890 fill:#6366f1,stroke:#818cf8,color:#fff
Source Code
from typing import Any

from typing_extensions import override

from langchain_core.caches import RETURN_VAL_TYPE, BaseCache
from langchain_core.globals import set_llm_cache
from langchain_core.language_models import FakeListLLM


class InMemoryCache(BaseCache):
    """In-memory cache used for testing purposes."""

    def __init__(self) -> None:
        """Initialize with empty cache."""
        self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}

    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
        """Look up based on `prompt` and `llm_string`."""
        return self._cache.get((prompt, llm_string), None)

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on `prompt` and `llm_string`."""
        self._cache[prompt, llm_string] = return_val

    @override
    def clear(self, **kwargs: Any) -> None:
        """Clear cache."""
        self._cache = {}


async def test_local_cache_generate_async() -> None:
    global_cache = InMemoryCache()
    local_cache = InMemoryCache()
    try:
        set_llm_cache(global_cache)
        llm = FakeListLLM(cache=local_cache, responses=["foo", "bar"])
        output = await llm.agenerate(["foo"])
        assert output.generations[0][0].text == "foo"
        output = await llm.agenerate(["foo"])
        assert output.generations[0][0].text == "foo"
        assert global_cache._cache == {}
        assert len(local_cache._cache) == 1
    finally:
        set_llm_cache(None)


def test_local_cache_generate_sync() -> None:
    global_cache = InMemoryCache()
    local_cache = InMemoryCache()
    try:
        set_llm_cache(global_cache)
        llm = FakeListLLM(cache=local_cache, responses=["foo", "bar"])
        output = llm.generate(["foo"])
        assert output.generations[0][0].text == "foo"
        output = llm.generate(["foo"])
        assert output.generations[0][0].text == "foo"
        assert global_cache._cache == {}
        assert len(local_cache._cache) == 1
    finally:
        set_llm_cache(None)


class InMemoryCacheBad(BaseCache):
    """In-memory cache used for testing purposes."""

    def __init__(self) -> None:
        """Initialize with empty cache."""
        self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}

    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
        """Look up based on `prompt` and `llm_string`."""
        msg = "This code should not be triggered"
        raise NotImplementedError(msg)

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on `prompt` and `llm_string`."""
        msg = "This code should not be triggered"
        raise NotImplementedError(msg)

    @override
    def clear(self, **kwargs: Any) -> None:
        """Clear cache."""
        self._cache = {}


def test_no_cache_generate_sync() -> None:
    global_cache = InMemoryCacheBad()
    try:
        set_llm_cache(global_cache)
        llm = FakeListLLM(cache=False, responses=["foo", "bar"])
        output = llm.generate(["foo"])
        assert output.generations[0][0].text == "foo"
        output = llm.generate(["foo"])
        assert output.generations[0][0].text == "bar"
        assert global_cache._cache == {}
    finally:
        set_llm_cache(None)


async def test_no_cache_generate_async() -> None:
    global_cache = InMemoryCacheBad()
    try:
        set_llm_cache(global_cache)
        llm = FakeListLLM(cache=False, responses=["foo", "bar"])
        output = await llm.agenerate(["foo"])
        assert output.generations[0][0].text == "foo"
        output = await llm.agenerate(["foo"])
        assert output.generations[0][0].text == "bar"
        assert global_cache._cache == {}
    finally:
        set_llm_cache(None)
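The listing above is what the tests assert; the following sketch is not part of test_cache.py but summarizes the behavior in one place, assuming the InMemoryCache class defined above is in scope. It shows that a cache passed directly to a model takes precedence over the global cache registered with set_llm_cache, and that cache=False disables caching so repeated calls advance through the canned response list.

from langchain_core.globals import set_llm_cache
from langchain_core.language_models import FakeListLLM

global_cache = InMemoryCache()  # class defined in test_cache.py above
local_cache = InMemoryCache()

set_llm_cache(global_cache)
try:
    # A model-level cache takes precedence: hits land in local_cache,
    # while the global cache is never written to.
    cached_llm = FakeListLLM(cache=local_cache, responses=["foo", "bar"])
    cached_llm.generate(["foo"])
    cached_llm.generate(["foo"])  # served from local_cache, still "foo"
    assert len(local_cache._cache) == 1
    assert global_cache._cache == {}

    # cache=False bypasses caching entirely, so the second call moves on
    # to the next canned response instead of replaying the first one.
    uncached_llm = FakeListLLM(cache=False, responses=["foo", "bar"])
    first = uncached_llm.generate(["foo"]).generations[0][0].text
    second = uncached_llm.generate(["foo"]).generations[0][0].text
    assert (first, second) == ("foo", "bar")
finally:
    set_llm_cache(None)  # restore the default global cache state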
Domain
- CoreAbstractions
Subdomains
- RunnableInterface
Functions
- test_local_cache_generate_async
- test_local_cache_generate_sync
- test_no_cache_generate_async
- test_no_cache_generate_sync
Classes
- InMemoryCache
- InMemoryCacheBad
Dependencies
- langchain_core.caches
- langchain_core.globals
- langchain_core.language_models
- typing
- typing_extensions
Source
- libs/core/tests/unit_tests/language_models/llms/test_cache.py
Frequently Asked Questions
What does test_cache.py do?
test_cache.py is a unit-test file in the langchain codebase, written in Python. Its tests verify LLM cache selection: a cache passed directly to a model takes precedence over the global cache registered with set_llm_cache, and cache=False bypasses caching entirely. It belongs to the CoreAbstractions domain, RunnableInterface subdomain.
What functions are defined in test_cache.py?
test_cache.py defines four test functions: test_local_cache_generate_async, test_local_cache_generate_sync, test_no_cache_generate_async, and test_no_cache_generate_sync. It also defines two helper cache classes, InMemoryCache and InMemoryCacheBad.
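To run just these functions locally, one possible invocation (a hypothetical sketch, not taken from the repository's own tooling) uses pytest's Python API; the async tests are coroutine functions, so an async-capable pytest setup such as pytest-asyncio is assumed.

import pytest

# Select the four cache tests by keyword; the file path is the one given
# later in this document.
pytest.main([
    "libs/core/tests/unit_tests/language_models/llms/test_cache.py",
    "-k", "local_cache or no_cache",
    "-q",
])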
What does test_cache.py depend on?
test_cache.py imports five modules: langchain_core.caches, langchain_core.globals, langchain_core.language_models, typing, and typing_extensions.
Where is test_cache.py in the architecture?
test_cache.py is located at libs/core/tests/unit_tests/language_models/llms/test_cache.py (domain: CoreAbstractions, subdomain: RunnableInterface, directory: libs/core/tests/unit_tests/language_models/llms).