import asyncio
import bs4,os
from langchain_community.document_loaders import WebBaseLoader,UnstructuredURLLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
page_url = "https://python.langchain.com/docs/how_to/chatbots_memory/"
import tiktoken
async def load_partial_web():
    """Asynchronously load the target docs page, parsing only the main
    markdown content area.

    Uses a ``bs4.SoupStrainer`` so BeautifulSoup only parses elements with
    the ``theme-doc-markdown markdown`` class, and joins extracted text
    fragments with ``" | "``.

    Returns:
        list: The loaded ``Document`` objects (expected to contain exactly one).

    Raises:
        ValueError: If the loader does not yield exactly one document.
    """
    loader = WebBaseLoader(
        web_paths=[page_url],
        bs_kwargs={
            "parse_only": bs4.SoupStrainer(class_="theme-doc-markdown markdown"),
        },
        bs_get_text_kwargs={"separator": " | ", "strip": True},
    )
    docs = []
    async for doc in loader.alazy_load():
        docs.append(doc)
    # Explicit check instead of `assert`: asserts are stripped under `python -O`,
    # so validation must not rely on them.
    if len(docs) != 1:
        raise ValueError(f"Expected exactly 1 document, got {len(docs)}")
    doc = docs[0]
    print(f"{doc.metadata}\n")
    return docs

async def split_web_texts():
    """Load the docs page, split the first 2000 characters into token-based
    chunks, and print chunk/token statistics.

    Uses a tiktoken-backed ``RecursiveCharacterTextSplitter`` with the
    ``cl100k_base`` encoding, a 40-token chunk size, and no overlap.
    """
    docs = await load_partial_web()
    text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
        encoding_name="cl100k_base",
        chunk_size=40,
        chunk_overlap=0,
    )
    # Hoist the repeated slice: the same 2000-char prefix is used for
    # splitting, token counting, and character counting.
    content = docs[0].page_content[:2000]
    texts = text_splitter.split_text(content)
    # Get the encoder
    encoding = tiktoken.get_encoding("cl100k_base")
    # Show the token count
    tokens = encoding.encode(content)
    print(f"Total tokens: {len(tokens)}")
    print(f"Total characters: {len(content)}")

    # Iterate over and print the split text chunks
    for i, text_chunk in enumerate(texts):
        print(f"=== Chunk {i+1} ===")
        print(text_chunk)
        print(f"Length: {len(text_chunk)} characters\n")
        print("-" * 50 + "\n")

# Guard the script entry point so importing this module does not
# immediately trigger a network fetch.
if __name__ == "__main__":
    asyncio.run(split_web_texts())
