import re
import os
import numpy as np
import hashlib
import time
from langchain_core.documents import Document
from langchain_community.vectorstores import FAISS
from langchain_ollama import OllamaEmbeddings, ChatOllama
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_core.prompts import ChatPromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
import faiss
import json

class SmartBikeRAGSystem:
    """RAG question-answering system over local ``.txt`` documents.

    Pipeline: load documents from ``data_dir`` -> chunk them into
    sentence-bounded Chinese paragraphs -> embed and index them in a FAISS
    vector store (Ollama embeddings) -> answer questions through a
    ``ConversationalRetrievalChain`` whose conversation history is persisted
    per ``chat_id`` as JSON files under ``HISTORY_DIR``.
    """

    # Directory where per-chat JSON history files live (relative to CWD).
    HISTORY_DIR = "../docs/history_chat"

    def __init__(self, data_dir="../docs", model_name="llama3:8b"):
        self.MODEL_NAME = model_name      # Ollama model used for embeddings AND chat
        self.data_dir = data_dir          # directory scanned for .txt source documents
        self.vector_store = None          # FAISS store, built by initialize_system()
        self.qa_chain = None              # ConversationalRetrievalChain, built by _create_qa_chain()
        self.is_initialized = False       # ask_question() refuses to run until this is True
        self.chat_id_set = set()          # chat ids whose persisted history was already restored

    # ===============================
    # 1. Document loading
    # ===============================
    def load_documents(self):
        """Load every ``.txt`` file in ``self.data_dir`` as a Document.

        Returns:
            list[Document]: possibly empty; ``source`` metadata is the file
            name without its ``.txt`` suffix. The directory is created if
            missing. Unreadable files are skipped (best-effort).
        """
        documents = []
        if not os.path.exists(self.data_dir):
            os.makedirs(self.data_dir)
            print(f"⚠️ 文档目录 {self.data_dir} 不存在，已创建空目录")
            return documents

        txt_files = [f for f in os.listdir(self.data_dir) if f.endswith(".txt")]
        if not txt_files:
            print(f"⚠️ 在 {self.data_dir} 中未找到.txt文件")
            return documents

        for file_name in txt_files:
            file_path = os.path.join(self.data_dir, file_name)
            try:
                with open(file_path, "r", encoding="utf-8") as f:
                    content = f.read()
                documents.append(
                    Document(
                        page_content=content,
                        # Strip only the trailing ".txt" suffix; str.replace
                        # would also remove interior ".txt" occurrences.
                        metadata={"source": file_name[:-len(".txt")]}
                    )
                )
                print(f"✅ 已加载文档: {file_name}")
            except Exception as e:
                # Best-effort: skip unreadable files, keep loading the rest.
                print(f"❌ 加载文档 {file_name} 时出错: {e}")
        return documents

    # ===============================
    # 2. Chinese natural-paragraph chunking
    # ===============================
    def chinese_paragraph_chunker(self, text, chunk_size=250, chunk_overlap=100):
        """Split Chinese text into overlapping, sentence-bounded chunks.

        Sentences are delimited by 。！？ or newlines; the capturing group in
        the regex keeps each delimiter as its own token, so punctuation
        survives in the output. When the buffer would exceed ``chunk_size``
        it is flushed as a chunk and its last ``chunk_overlap`` characters
        are carried over to preserve context (if the buffer is shorter than
        the overlap, the whole buffer is kept).

        Returns:
            list[str]: the chunks, in reading order.
        """
        paragraphs = [p.strip() for p in text.split('\n') if p.strip()]
        chunks = []
        buffer = ""

        for para in paragraphs:
            # Capturing group keeps the sentence delimiters as tokens.
            sentences = re.split(r'(。|！|？|\n)', para)
            sentences = [s for s in sentences if s.strip()]
            for s in sentences:
                if len(buffer) + len(s) <= chunk_size:
                    buffer += s
                else:
                    if buffer:
                        chunks.append(buffer)
                    # Carry the overlap tail forward; keep the whole buffer
                    # when it is shorter than the requested overlap.
                    buffer = buffer[-chunk_overlap:] + s if chunk_overlap < len(buffer) else buffer + s

        if buffer:
            chunks.append(buffer)

        return chunks

    # ===============================
    # 3. System initialization
    # ===============================
    def initialize_system(self):
        """Initialize the whole RAG system: load, chunk, index, build chain."""
        start_time = time.time()

        documents = self.load_documents()
        print(f"📚 加载文档数量：{len(documents)}")

        if not documents:
            print("⚠️ 没有加载到文档，系统将以空数据库运行")
            # Run with an empty vector database so the API stays usable.
            self._create_empty_vector_db()
            self.is_initialized = True
            return

        # Chunk each document and deduplicate identical chunks by hash.
        chunked_documents = []
        seen_hashes = set()
        for doc in documents:
            chunks = self.chinese_paragraph_chunker(doc.page_content, chunk_size=250, chunk_overlap=100)
            for i, chunk in enumerate(chunks):
                chunk_hash = hashlib.sha256(chunk.encode("utf-8")).hexdigest()
                if chunk_hash in seen_hashes:
                    continue
                seen_hashes.add(chunk_hash)
                chunked_documents.append(
                    Document(
                        page_content=chunk,
                        metadata={**doc.metadata, "chunk_index": i}
                    )
                )
        print(f"✂️ 切分后有效 chunk 数量（去重后）：{len(chunked_documents)}")

        self._create_vector_db(chunked_documents)
        self._create_qa_chain()

        self.is_initialized = True
        initialization_time = time.time() - start_time
        print(f"✅ 系统初始化完成，耗时: {initialization_time:.2f}秒")

    def _create_empty_vector_db(self):
        """Create an empty FAISS vector store (used when no documents load)."""
        embedding_model = OllamaEmbeddings(model=self.MODEL_NAME)
        # Embed a probe string once just to discover the embedding dimension.
        test_vector = embedding_model.embed_query("test")
        dim = len(test_vector)

        index = faiss.IndexFlatL2(dim)
        docstore = InMemoryDocstore({})

        self.vector_store = FAISS(
            index=index,
            embedding_function=embedding_model,
            docstore=docstore,
            index_to_docstore_id={}
        )

    def _create_vector_db(self, chunked_documents):
        """Embed all chunks and build the FAISS index over them.

        Args:
            chunked_documents: non-empty list of Documents to index.
        """
        embedding_model = OllamaEmbeddings(model=self.MODEL_NAME)
        # embed_documents batches the embedding calls instead of issuing one
        # embed_query request per chunk.
        texts = [doc.page_content for doc in chunked_documents]
        vectors = embedding_model.embed_documents(texts)
        vectors_np = np.array(vectors).astype("float32")
        dim = vectors_np.shape[1]

        index = faiss.IndexFlatL2(dim)
        index.add(vectors_np)

        docstore = InMemoryDocstore({str(i): doc for i, doc in enumerate(chunked_documents)})

        self.vector_store = FAISS(
            index=index,
            embedding_function=embedding_model,
            docstore=docstore,
            index_to_docstore_id={i: str(i) for i in range(len(chunked_documents))}
        )
        print("✅ FAISS 向量库创建完成")

    def _create_qa_chain(self):
        """Build the conversational retrieval chain with history memory."""
        prompt_template = """
        以下是你和用户之间的历史对话：
        {chat_history}

        请在理解上下文的基础上回答当前问题，仅在必要时参考以下文档内容。
        要求：
        - 用简体中文回答
        - 尽量准确完整
        - 若文档信息不足，可结合已有知识补充

        文档内容：
        {context}

        当前问题：
        {question}
        """
        prompt = ChatPromptTemplate.from_template(prompt_template)
        llm = ChatOllama(model=self.MODEL_NAME)

        retriever = self.vector_store.as_retriever(search_type="similarity", search_kwargs={"k": 5})

        # Conversation history support; output_key tells the memory which
        # result field to store (the chain returns several).
        memory = ConversationBufferMemory(
            memory_key="chat_history",
            return_messages=True,
            output_key="answer"
        )

        # The custom prompt must go to the combine-docs chain, not the memory.
        self.qa_chain = ConversationalRetrievalChain.from_llm(
            llm=llm,
            retriever=retriever,
            memory=memory,
            combine_docs_chain_kwargs={"prompt": prompt},
            return_source_documents=True,
            output_key="answer"
        )

    # ===============================
    # 4. Question-answering interface
    # ===============================
    def _history_file(self, chat_id):
        """Return the JSON history file path for the given chat session."""
        return os.path.join(self.HISTORY_DIR, f"{chat_id}.json")

    def _restore_history(self, chat_id):
        """Replay a chat's persisted history into the chain memory, once.

        Messages are stored as "human: ..." / "ai: ..." strings; anything
        else is ignored. A corrupt history file is treated as empty rather
        than crashing the request (matches the persist path's tolerance).
        """
        if chat_id in self.chat_id_set:
            return
        self.chat_id_set.add(chat_id)

        file_path = self._history_file(chat_id)
        if not os.path.exists(file_path):
            return
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                data = json.load(f)
        except json.JSONDecodeError:
            return

        memory = self.qa_chain.memory
        for msg in data.get("context", []):
            msg_lower = msg.lower()
            if msg_lower.startswith("human:"):
                memory.chat_memory.add_user_message(msg[len("human:"):].strip())
            elif msg_lower.startswith("ai:"):
                memory.chat_memory.add_ai_message(msg[len("ai:"):].strip())

    def _persist_turn(self, chat_id, question, answer):
        """Append one question/answer turn to the chat's JSON history file."""
        # Bug fix: the history directory may not exist on first write.
        os.makedirs(self.HISTORY_DIR, exist_ok=True)
        file_path = self._history_file(chat_id)

        if os.path.exists(file_path):
            try:
                with open(file_path, "r", encoding="utf-8") as f:
                    data = json.load(f)
            except json.JSONDecodeError:
                # Corrupt file: start a fresh history rather than crash.
                data = {"chat_id": chat_id, "context": []}
        else:
            data = {"chat_id": chat_id, "context": []}

        data["context"].append(f"human: {question}")
        data["context"].append(f"ai: {answer}")

        # Write back pretty-printed, keeping non-ASCII characters readable.
        with open(file_path, "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    def ask_question(self, question, chat_id='zzx is so handsome'):
        """Answer a question within a per-chat conversation.

        Args:
            question: the user's question text.
            chat_id: key identifying the conversation whose history is
                restored/persisted.

        Returns:
            dict with keys ``answer``, ``source_documents`` (list of
            {source, chunk_index, content_preview}) and ``processing_time``
            (seconds, rounded to 2 decimals).

        Raises:
            Exception: if the system is not initialized, or if the chain or
            persistence fails (original cause chained).
        """
        if not self.is_initialized:
            raise Exception("系统未初始化")

        self._restore_history(chat_id)

        start_time = time.time()
        try:
            result = self.qa_chain.invoke({"question": question})
            processing_time = time.time() - start_time

            # Format the retrieved source documents for the caller.
            source_docs = []
            for doc in result['source_documents']:
                preview = doc.page_content[:200] + "..." if len(doc.page_content) > 200 else doc.page_content
                source_docs.append({
                    "source": doc.metadata.get('source', '未知'),
                    "chunk_index": doc.metadata.get('chunk_index', 'N/A'),
                    "content_preview": preview
                })

            self._persist_turn(chat_id, question, result['answer'])

            return {
                "answer": result['answer'],
                "source_documents": source_docs,
                "processing_time": round(processing_time, 2)
            }

        except Exception as e:
            # Chain the original cause so the real failure is debuggable.
            raise Exception(f"问答处理失败: {str(e)}") from e


# Module-level shared instance for importers. Construction only assigns
# attributes; initialize_system() must still be called before ask_question().
system_instance = SmartBikeRAGSystem()


# def check_memory_state():
#     system = SmartBikeRAGSystem()
#     system.initialize_system()
#
#     # 进行几轮对话后检查内存
#     system.ask_question("你好")
#     system.ask_question("介绍一下智能自行车")
#
#     # 检查内存中的对话历史
#     if hasattr(system.qa_chain, 'memory'):
#         memory = system.qa_chain.memory
#
#         print("当前对话历史:")
#         for i, msg in enumerate(memory.chat_memory.messages):
#             print(f"{i + 1}. {msg.type}: {msg.content[:100]}...")
#
#
# # 验证多轮对话是否生效
# check_memory_state()
