import asyncio
import bs4,os
from langchain_community.document_loaders import WebBaseLoader,UnstructuredURLLoader
from langchain_unstructured import UnstructuredLoader
from typing import List
from langchain_core.documents import Document
from langchain_ollama.embeddings import OllamaEmbeddings
from langchain_core.vectorstores import InMemoryVectorStore

page_url = "https://python.langchain.com/docs/how_to/chatbots_memory/"
# Set USER_AGENT to suppress the langchain_community warning about a missing agent string.
os.environ["USER_AGENT"] = "MyApp/1.0"

# Embedding model served by a local Ollama instance (replaces an OpenAIEmbeddings setup).
embeddings = OllamaEmbeddings(
    model="nomic-embed-text",  # any locally pulled embedding model works, e.g. "mistral"
    base_url="http://192.168.2.208:11434"  # address of the Ollama server (default port 11434)
)

async def load_whole_web():
    """Asynchronously load the entire page at ``page_url`` and print a preview.

    Prints the first loaded document's metadata, then the first 500
    characters of its page content (stripped of surrounding whitespace).
    """
    loader = WebBaseLoader(web_paths=[page_url])
    pages = [page async for page in loader.alazy_load()]
    first = pages[0]
    print(f"{first.metadata}\n")
    print(first.page_content[:500].strip())

async def load_partial_web():
    """Load only the markdown article body of ``page_url`` and print previews.

    Restricts BeautifulSoup parsing to the ``theme-doc-markdown markdown``
    container, joins text nodes with ``" | "``, and prints the document's
    metadata plus the first and last 500 characters of its content.

    Raises:
        ValueError: If the loader does not yield exactly one document.
    """
    loader = WebBaseLoader(
        web_paths=[page_url],
        bs_kwargs={
            # Parse only the main article container, skipping nav/footer chrome.
            "parse_only": bs4.SoupStrainer(class_="theme-doc-markdown markdown"),
        },
        bs_get_text_kwargs={"separator": " | ", "strip": True},
        )
    docs = []
    async for doc in loader.alazy_load():
        docs.append(doc)
    # Explicit check instead of `assert`, which is silently stripped under `python -O`.
    if len(docs) != 1:
        raise ValueError(f"Expected exactly one document, got {len(docs)}")
    doc = docs[0]
    print(f"{doc.metadata}\n")
    print(doc.page_content[:500])
    print(doc.page_content[-500:])

os.environ["USER_AGENT"] = "MyApp/1.0"

async def load_web_with_unstructured():
    """Load ``page_url`` with Unstructured in element mode and preview results.

    Prints ``category: content`` for the first nine elements, then
    returns the complete list of loaded documents.
    """
    loader = UnstructuredURLLoader(
        urls=[page_url],  # replace with the actual target URL(s)
        mode="elements",
        strategy='fast'
    )
    docs = [doc async for doc in loader.alazy_load()]
    for doc in docs[:9]:
        print(f'{doc.metadata["category"]}: {doc.page_content}')
    return docs

# Default pages to scan for "Setup" sections; an immutable tuple is safe as a
# function default (never mutated across calls).
_DEFAULT_SETUP_URLS = (
    "https://python.langchain.com/docs/how_to/chatbots_memory/",
    "https://python.langchain.com/docs/how_to/chatbots_tools/",
)

async def _get_setup_docs(page_urls=_DEFAULT_SETUP_URLS) -> List[Document]:
    """Collect every "Setup"-section document from the given pages.

    Args:
        page_urls: Iterable of page URLs to scan. Defaults to the chatbot
            memory/tools how-to guides (the previously hard-coded list),
            so existing zero-argument callers behave identically.

    Returns:
        The concatenated "Setup" docs from all pages, in page order.
    """
    setup_docs: List[Document] = []
    for url in page_urls:
        setup_docs.extend(await _get_setup_docs_from_url(url))
    return setup_docs

async def _get_setup_docs_from_url(url: str) -> List[Document]:
    """Return the documents belonging to the "Setup" section of *url*.

    Streams elements from ``UnstructuredLoader``; once a ``Title`` element
    whose text starts with "Setup" is seen, every subsequent element whose
    ``parent_id`` equals that title's ``element_id`` is collected.

    Args:
        url: The web page to load.

    Returns:
        All elements nested under the page's "Setup" title (empty if no
        such title is found).
    """
    loader = UnstructuredLoader(web_url=url)
    setup_docs: List[Document] = []
    # Unique sentinel: unlike -1 it can never collide with a real id value,
    # and unlike None it can never equal a missing `parent_id` key
    # (metadata.get returns None for absent keys).
    parent_id = object()
    async for doc in loader.alazy_load():
        metadata = doc.metadata
        if metadata.get("category") == "Title" and doc.page_content.startswith("Setup"):
            parent_id = metadata.get("element_id")
        if metadata.get("parent_id") == parent_id:
            setup_docs.append(doc)
    return setup_docs

async def search_vector_store():
    """Embed the "Setup" docs into an in-memory store and run a sample query.

    Builds an ``InMemoryVectorStore`` from the collected setup documents,
    searches for "Install Tavily", and prints each of the top-2 hits'
    source URL with a 300-character content preview.
    """
    setup_docs = await _get_setup_docs()
    store = InMemoryVectorStore.from_documents(setup_docs, embeddings)
    for doc in store.similarity_search("Install Tavily", k=2):
        print(f'Page {doc.metadata["url"]}: {doc.page_content[:300]}\n')

asyncio.run(search_vector_store())