import os
import shutil
from dotenv import load_dotenv
from langchain_chroma import Chroma
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.documents import Document
from prompts import USER_PROMPTS
from langchain_core.runnables import RunnablePassthrough
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Load environment variables (API keys, endpoints, model names) from a local .env file.
load_dotenv()

class RagSys:
    def __init__(self):
        self.llm = None
        self.embedding = None
        self.vectorstore = None
        self.batch_size: int = 32
        self.retriever = None

        self._init_llm()
        self._init_embedding()
        self._init_db()

    def _init_llm(self):
        """初始化LLM"""
        llm = ChatOpenAI(
            base_url=os.getenv('OPENAI_API_BASE'),
            api_key=os.getenv('OPENAI_API_KEY'),
            model=os.getenv('OPENAI_MODEL'),
            temperature=0.1
        )
        self.llm = llm

    def _init_embedding(self):
        """初始化Embedding模型"""
        embeddings_model = OpenAIEmbeddings(
            base_url=os.getenv('EMBEDDING_API_BASE'),
            api_key=os.getenv('EMBEDDING_API_KEY'),
            model=os.getenv('EMBEDDING_MODEL')
        )
        self.embedding = embeddings_model

    def _init_db(self):
        """初始化Chroma"""
        if self.embedding is None:
            raise ValueError("请先初始化Embedding模型...")
        vectorstore = Chroma(
            collection_name="my_docs",
            embedding_function=self.embedding,
            persist_directory="./chroma_data"  # 🔹 指定保存路径
        )
        self.vectorstore = vectorstore

    @staticmethod
    def batched(iterable, n: int = 32):
        """将 iterable 分批，每批大小为 n"""
        for i in range(0, len(iterable), n):
            yield iterable[i:i + n]

    @staticmethod
    def format_docs(docs):
        return "\n\n".join(doc.page_content for doc in docs)

    def run_interactive(self):
        """运行交互式问答"""
        # Prompt提示词
        prompt = PromptTemplate(
            input_variables=["context", "question"],
            template=USER_PROMPTS
        )

        while True:
            user_search = input("\n您的问题: ").strip()

            if not user_search:
                continue

            if user_search.lower() == 'quit':
                break

            # 进行检索
            rag_chain = (
                    {"context": self.retriever | self.format_docs, "question": RunnablePassthrough()}
                    | prompt
                    | self.llm
                    | StrOutputParser()
            )

            resp = rag_chain.invoke(user_search)
            print(resp)

    def search_data(self, urls: list[str]) -> list[Document]:
        loader = WebBaseLoader(
            web_paths=tuple(urls),
            requests_kwargs={
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
                'Accept-Encoding': 'gzip, deflate, br, zstd',
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7'
            },
            # bs_kwargs=dict(
            #     parse_only=bs4.SoupStrainer(
            #         class_=("post-content", "post-title", "post-header")
            #     )
            # ),
        )
        loader.requests_kwargs = {'verify': False}
        return loader.load()

    def init_data_module(self, urls: list[str]):
        """初始化Chroma"""
        # 1. 在加在线文件
        docs = self.search_data(urls=urls)

        # 2. 进行文档切分
        # 递归字符分块
        text_splitter = RecursiveCharacterTextSplitter(
            separators=["\n\n", "\n", "。", "，", " ", ""],  # 分隔符优先级
            chunk_size=200,
            chunk_overlap=20
        )

        paragraphs = []
        for doc in docs:
            paragraphs.extend(text_splitter.create_documents([doc.page_content]))

        # 3. 数据写入
        for batch in list(self.batched(paragraphs, self.batch_size)):
            self.vectorstore.add_documents(batch)

        print(f"写入{len(paragraphs)}条数据")
        for i in paragraphs:
            print(i)

        # 4. 构建索引
        self.retriever = self.vectorstore.as_retriever(search_kwargs={"k": 3})

    def __del__(self):
        """删除数据"""
        if os.path.exists("./chroma_data"):
            shutil.rmtree("./chroma_data")

def main():
    """CLI entry point: build the index from user-supplied URLs, then chat."""
    rag_sys = RagSys()

    search_uri_input = input("\n请输入需要查询的网址: ").strip()
    # BUG FIX: "".split(",") returns [""], which is truthy, so the original
    # empty-input check could never fire. Also accept the full-width comma
    # "，" that the error message itself tells the user to use, and drop
    # empty fragments from stray/trailing commas.
    search_urls = [
        u.strip()
        for u in search_uri_input.replace("，", ",").split(",")
        if u.strip()
    ]
    if not search_urls:
        raise ValueError("请输入urls，用，分隔")
    rag_sys.init_data_module(urls=search_urls)
    rag_sys.run_interactive()


# Script entry point: launch the interactive RAG CLI.
if __name__ == '__main__':
    main()

