ConversationSummaryBufferMemory Class — langchain Architecture
Architecture documentation for the ConversationSummaryBufferMemory class in summary_buffer.py from the langchain codebase.
Entity Profile
Dependency Diagram
graph TD
    CSBM["ConversationSummaryBufferMemory"]
    BCM["BaseChatMemory"]
    SM["SummarizerMixin"]
    FILE["summary_buffer.py"]
    CSBM -->|extends| BCM
    CSBM -->|extends| SM
    CSBM -->|defined in| FILE
    CSBM -->|method| m_buffer["buffer()"]
    CSBM -->|method| m_abuffer["abuffer()"]
    CSBM -->|method| m_memory_variables["memory_variables()"]
    CSBM -->|method| m_load_memory_variables["load_memory_variables()"]
    CSBM -->|method| m_aload_memory_variables["aload_memory_variables()"]
    CSBM -->|method| m_validate_prompt_input_variables["validate_prompt_input_variables()"]
    CSBM -->|method| m_save_context["save_context()"]
    CSBM -->|method| m_asave_context["asave_context()"]
    CSBM -->|method| m_prune["prune()"]
    CSBM -->|method| m_aprune["aprune()"]
    CSBM -->|method| m_clear["clear()"]
Source Code
libs/langchain/langchain_classic/memory/summary_buffer.py lines 20–148
class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
    """Buffer with summarizer for storing conversation memory.

    Provides a running summary of the conversation together with the most recent
    messages in the conversation under the constraint that the total number of
    tokens in the conversation does not exceed a certain limit.
    """

    max_token_limit: int = 2000
    moving_summary_buffer: str = ""
    memory_key: str = "history"

    @property
    def buffer(self) -> str | list[BaseMessage]:
        """String buffer of memory."""
        return self.load_memory_variables({})[self.memory_key]

    async def abuffer(self) -> str | list[BaseMessage]:
        """Async memory buffer."""
        memory_variables = await self.aload_memory_variables({})
        return memory_variables[self.memory_key]

    @property
    def memory_variables(self) -> list[str]:
        """Will always return list of memory variables."""
        return [self.memory_key]

    @override
    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
        """Return history buffer."""
        buffer = self.chat_memory.messages
        if self.moving_summary_buffer != "":
            first_messages: list[BaseMessage] = [
                self.summary_message_cls(content=self.moving_summary_buffer),
            ]
            buffer = first_messages + buffer
        if self.return_messages:
            final_buffer: Any = buffer
        else:
            final_buffer = get_buffer_string(
                buffer,
                human_prefix=self.human_prefix,
                ai_prefix=self.ai_prefix,
            )
        return {self.memory_key: final_buffer}

    @override
    async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
        """Asynchronously return key-value pairs given the text input to the chain."""
        buffer = await self.chat_memory.aget_messages()
        if self.moving_summary_buffer != "":
            first_messages: list[BaseMessage] = [
                self.summary_message_cls(content=self.moving_summary_buffer),
            ]
            buffer = first_messages + buffer
        if self.return_messages:
            final_buffer: Any = buffer
        else:
            final_buffer = get_buffer_string(
                buffer,
                human_prefix=self.human_prefix,
                ai_prefix=self.ai_prefix,
            )
        return {self.memory_key: final_buffer}

    @pre_init
    def validate_prompt_input_variables(cls, values: dict) -> dict:
        """Validate that prompt input variables are consistent."""
        prompt_variables = values["prompt"].input_variables
        expected_keys = {"summary", "new_lines"}
        if expected_keys != set(prompt_variables):
            msg = (
                "Got unexpected prompt input variables. The prompt expects "
                f"{prompt_variables}, but it should have {expected_keys}."
            )
            raise ValueError(msg)
        return values

    def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
        """Save context from this conversation to buffer."""
        super().save_context(inputs, outputs)
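The listing above is an excerpt; the file also defines asave_context(), prune(), aprune(), and clear(), which appear in the dependency diagram but are omitted here. As a quick orientation, the following is a minimal usage sketch, not code from the repository. It assumes the classic langchain.memory import path and ChatOpenAI as the model; adjust the imports to the version you have installed. The custom prompt is optional and only needs to expose the "summary" and "new_lines" variables that validate_prompt_input_variables() checks for.

# Minimal usage sketch (assumed imports; paths vary across langchain versions).
from langchain.memory import ConversationSummaryBufferMemory
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI  # any chat model / BaseLanguageModel works

llm = ChatOpenAI(temperature=0)

# Optional custom summarization prompt. It must use exactly the
# "summary" and "new_lines" input variables, or validation raises ValueError.
summary_prompt = PromptTemplate(
    input_variables=["summary", "new_lines"],
    template=(
        "Current summary:\n{summary}\n\n"
        "New conversation lines:\n{new_lines}\n\n"
        "Return an updated summary."
    ),
)

memory = ConversationSummaryBufferMemory(
    llm=llm,                 # used for token counting and for writing the summary
    prompt=summary_prompt,   # omit to use the built-in summarization prompt
    max_token_limit=100,     # recent messages beyond this budget get summarized
    return_messages=False,   # True returns BaseMessage objects instead of a string
)

# Each save_context() call appends a human/AI turn to chat_memory.
memory.save_context({"input": "Hi, I'm comparing vector databases."},
                    {"output": "Sure, what would you like to know?"})
memory.save_context({"input": "How do they differ from relational databases?"},
                    {"output": "They index embeddings for similarity search rather than rows."})

# The "history" key holds the running summary (if any) followed by the most recent turns.
print(memory.load_memory_variables({})["history"])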
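The token-limit constraint described in the class docstring is enforced by prune() and its async counterpart aprune(), which are not shown in the excerpt. The sketch below illustrates the idea only and is not the library's source: once the buffered messages exceed max_token_limit, the oldest messages are popped off and folded into moving_summary_buffer via SummarizerMixin's predict_new_summary().

# Simplified sketch of the pruning idea (not the actual summary_buffer.py source).
def prune(self) -> None:
    buffer = self.chat_memory.messages
    curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
    if curr_buffer_length > self.max_token_limit:
        pruned_memory: list[BaseMessage] = []
        # Pop the oldest messages until the remaining buffer fits the limit...
        while curr_buffer_length > self.max_token_limit:
            pruned_memory.append(buffer.pop(0))
            curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
        # ...then fold the removed messages into the running summary.
        self.moving_summary_buffer = self.predict_new_summary(
            pruned_memory, self.moving_summary_buffer
        )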
Frequently Asked Questions
What is the ConversationSummaryBufferMemory class?
ConversationSummaryBufferMemory is a conversation-memory class in the langchain codebase that keeps a running summary of older turns alongside the most recent messages, subject to a token limit. It is defined in libs/langchain/langchain_classic/memory/summary_buffer.py.
Where is ConversationSummaryBufferMemory defined?
ConversationSummaryBufferMemory is defined in libs/langchain/langchain_classic/memory/summary_buffer.py at line 20.
What does ConversationSummaryBufferMemory extend?
ConversationSummaryBufferMemory extends BaseChatMemory and SummarizerMixin.
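For orientation, the two bases roughly split the responsibilities as sketched below. This is an approximation of the real langchain definitions, not their exact source: BaseChatMemory owns the raw message store and persistence hooks, while SummarizerMixin owns the model, prompt, and summarization helper that prune() relies on.

# Rough sketch of what each base contributes (field names approximate).
class BaseChatMemory(BaseMemory):
    chat_memory: BaseChatMessageHistory   # raw message history backing the buffer
    return_messages: bool = False         # return message objects instead of a string

    def save_context(self, inputs, outputs) -> None: ...  # appends the new human/AI turn
    def clear(self) -> None: ...                          # wipes chat_memory

class SummarizerMixin(BaseModel):
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    llm: BaseLanguageModel                     # model used to write the running summary
    prompt: BasePromptTemplate = SUMMARY_PROMPT
    summary_message_cls: type[BaseMessage] = SystemMessage

    def predict_new_summary(self, messages, existing_summary) -> str: ...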