
test_text_splitter.py — langchain Source File

Architecture documentation for test_text_splitter.py, a Python file in the langchain codebase. 5 imports, 0 dependents.

Entity Profile

File · Python · Domain: LangChainCore · Subdomain: MessageInterface · 5 imports · 8 functions

Dependency Diagram

graph LR
  test_text_splitter_py["test_text_splitter.py"]
  pytest["pytest"]
  test_text_splitter_py --> pytest
  transformers["transformers"]
  test_text_splitter_py --> transformers
  langchain_text_splitters["langchain_text_splitters"]
  test_text_splitter_py --> langchain_text_splitters
  langchain_text_splitters_character["langchain_text_splitters.character"]
  test_text_splitter_py --> langchain_text_splitters_character
  langchain_text_splitters_sentence_transformers["langchain_text_splitters.sentence_transformers"]
  test_text_splitter_py --> langchain_text_splitters_sentence_transformers
  style test_text_splitter_py fill:#6366f1,stroke:#818cf8,color:#fff

Source Code

"""Test text splitters that require an integration."""

import pytest
from transformers import AutoTokenizer

from langchain_text_splitters import (
    TokenTextSplitter,
)
from langchain_text_splitters.character import CharacterTextSplitter
from langchain_text_splitters.sentence_transformers import (
    SentenceTransformersTokenTextSplitter,
)


def test_huggingface_type_check() -> None:
    """Test that type checks are done properly on input."""
    with pytest.raises(
        ValueError,
        match="Tokenizer received was not an instance of PreTrainedTokenizerBase",
    ):
        CharacterTextSplitter.from_huggingface_tokenizer("foo")  # type: ignore[arg-type]


def test_huggingface_tokenizer() -> None:
    """Test text splitter that uses a HuggingFace tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    text_splitter = CharacterTextSplitter.from_huggingface_tokenizer(
        tokenizer, separator=" ", chunk_size=1, chunk_overlap=0
    )
    output = text_splitter.split_text("foo bar")
    assert output == ["foo", "bar"]


def test_token_text_splitter() -> None:
    """Test no overlap."""
    splitter = TokenTextSplitter(chunk_size=5, chunk_overlap=0)
    output = splitter.split_text("abcdef" * 5)  # 10 token string
    expected_output = ["abcdefabcdefabc", "defabcdefabcdef"]
    assert output == expected_output


def test_token_text_splitter_overlap() -> None:
    """Test with overlap."""
    splitter = TokenTextSplitter(chunk_size=5, chunk_overlap=1)
    output = splitter.split_text("abcdef" * 5)  # 10 token string
    expected_output = ["abcdefabcdefabc", "abcdefabcdefabc", "abcdef"]
    assert output == expected_output


def test_token_text_splitter_from_tiktoken() -> None:
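    """Test that from_tiktoken_encoder resolves gpt-3.5-turbo to cl100k_base."""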
    splitter = TokenTextSplitter.from_tiktoken_encoder(model_name="gpt-3.5-turbo")
    expected_tokenizer = "cl100k_base"
    actual_tokenizer = splitter._tokenizer.name
    assert expected_tokenizer == actual_tokenizer


@pytest.mark.requires("sentence_transformers")
def test_sentence_transformers_count_tokens() -> None:
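    """Test token counting, including the model's start/stop tokens."""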
    splitter = SentenceTransformersTokenTextSplitter(
        model_name="sentence-transformers/paraphrase-albert-small-v2"
    )
    text = "Lorem ipsum"

    token_count = splitter.count_tokens(text=text)
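    # The ALBERT tokenizer wraps input in start/stop special tokens
    # ([CLS] ... [SEP]), and count_tokens includes them in its total.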

    expected_start_stop_token_count = 2
    expected_text_token_count = 5
    expected_token_count = expected_start_stop_token_count + expected_text_token_count

    assert expected_token_count == token_count


@pytest.mark.requires("sentence_transformers")
def test_sentence_transformers_split_text() -> None:
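    """Test that a text shorter than one chunk comes back as a single chunk."""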
    splitter = SentenceTransformersTokenTextSplitter(
        model_name="sentence-transformers/paraphrase-albert-small-v2"
    )
    text = "lorem ipsum"
    text_chunks = splitter.split_text(text=text)
    expected_text_chunks = [text]
    assert expected_text_chunks == text_chunks


@pytest.mark.requires("sentence_transformers")
def test_sentence_transformers_multiple_tokens() -> None:
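    """Test splitting a text that spans more than one chunk."""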
    splitter = SentenceTransformersTokenTextSplitter(chunk_overlap=0)
    text = "Lorem "

    text_token_count_including_start_and_stop_tokens = splitter.count_tokens(text=text)
    count_start_and_end_tokens = 2
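    # Repeat `text` enough times that the combined token count exceeds one
    # chunk's budget (maximum_tokens_per_chunk minus the start/stop tokens),
    # forcing split_text to emit a second, smaller chunk.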
    token_multiplier = (
        count_start_and_end_tokens
        + (splitter.maximum_tokens_per_chunk - count_start_and_end_tokens)
        // (
            text_token_count_including_start_and_stop_tokens
            - count_start_and_end_tokens
        )
        + 1
    )

    # `text_to_embed` does not fit in a single chunk
    text_to_embed = text * token_multiplier

    text_chunks = splitter.split_text(text=text_to_embed)

    expected_number_of_chunks = 2

    assert expected_number_of_chunks == len(text_chunks)
    actual = splitter.count_tokens(text=text_chunks[1]) - count_start_and_end_tokens
    expected = (
        token_multiplier * (text_token_count_including_start_and_stop_tokens - 2)
        - splitter.maximum_tokens_per_chunk
    )
    assert expected == actual
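
For orientation, the splitters exercised above are used in application code roughly as follows. This is a minimal sketch, not part of the test file; it assumes langchain_text_splitters and tiktoken are installed, and the chunk sizes are illustrative.

# Illustrative sketch (not from the file above): split a long document into
# token-bounded chunks with the same tiktoken-backed splitter the tests cover.
from langchain_text_splitters import TokenTextSplitter

splitter = TokenTextSplitter.from_tiktoken_encoder(
    model_name="gpt-3.5-turbo",  # resolves to the cl100k_base encoding
    chunk_size=100,              # illustrative per-chunk token budget
    chunk_overlap=10,            # illustrative overlap between chunks
)
chunks = splitter.split_text("some long document text ... " * 50)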

Domain

LangChainCore

Subdomains

MessageInterface

Dependencies

  • langchain_text_splitters
  • langchain_text_splitters.character
  • langchain_text_splitters.sentence_transformers
  • pytest
  • transformers

Frequently Asked Questions

What does test_text_splitter.py do?
test_text_splitter.py contains integration tests for LangChain's text splitters that depend on external tokenizers: HuggingFace transformers, tiktoken, and sentence-transformers. It is a Python source file in the langchain codebase, in the LangChainCore domain, MessageInterface subdomain. A runnable sketch of invoking it follows below.
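
As a rough sketch, the module can be run on its own with pytest from the repository root (path taken from the FAQ entry further down); note that the sentence-transformers tests are gated behind @pytest.mark.requires:

# Hedged sketch: run only this integration-test module, verbosely.
import pytest

raise SystemExit(pytest.main([
    "libs/text-splitters/tests/integration_tests/test_text_splitter.py",
    "-v",
]))
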
What functions are defined in test_text_splitter.py?
test_text_splitter.py defines 8 functions: test_huggingface_tokenizer, test_huggingface_type_check, test_sentence_transformers_count_tokens, test_sentence_transformers_multiple_tokens, test_sentence_transformers_split_text, test_token_text_splitter, test_token_text_splitter_from_tiktoken, test_token_text_splitter_overlap.
What does test_text_splitter.py depend on?
test_text_splitter.py imports 5 modules: langchain_text_splitters, langchain_text_splitters.character, langchain_text_splitters.sentence_transformers, pytest, transformers.
Where is test_text_splitter.py in the architecture?
test_text_splitter.py is located at libs/text-splitters/tests/integration_tests/test_text_splitter.py (domain: LangChainCore, subdomain: MessageInterface, directory: libs/text-splitters/tests/integration_tests).
