#coding:utf-8

import os
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core.schema import NodeWithScore, TextNode
from llama_index.core import QueryBundle, SimpleDirectoryReader
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.vector_stores import VectorStoreQuery
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.retrievers import BM25Retriever
import chromadb
from typing import Any, List, Optional
import sys
import time
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
if project_root not in sys.path:
    sys.path.append(project_root)
import re
import uuid
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import sys
sys.stdout.reconfigure(encoding='utf-8')
import sqlite3
from typing import List
import time
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain.chains.retrieval import create_retrieval_chain
from langchain.embeddings.base import Embeddings
from langchain_core.documents import Document
from langchain_core.messages import HumanMessage, AIMessage
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter, CharacterTextSplitter
from langchain.chains.combine_documents import create_stuff_documents_chain
# from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.prompts.chat import ChatPromptTemplate
from langchain.prompts import MessagesPlaceholder
from langchain_core.output_parsers import StrOutputParser
from langchain_chroma import Chroma
from langchain_core.runnables import RunnablePassthrough, RunnableWithMessageHistory
from langchain_community.chat_models import ChatZhipuAI
from zhipuai import ZhipuAI
from dotenv import load_dotenv
from langchain.chains.history_aware_retriever import create_history_aware_retriever
from langchain_community.document_loaders import TextLoader

load_dotenv()

from config.config import Config
from RAG.langchain_chroma_sqlite3_rag import store_chat_history,get_contextualize_question_prompt,setup_database

def clean_text(text: str) -> str:
    """Normalize whitespace in *text* for indexing.

    Collapses every run of whitespace (spaces, tabs, newlines, and the
    full-width U+3000 space — all matched by ``\\s`` in Unicode mode) into
    a single ASCII space, and strips leading/trailing whitespace.

    The previous implementation additionally ran ``replace("\\u3000", " ")``
    and ``re.sub(r"[^\\S\\r\\n]", " ", ...)`` after the ``\\s+`` collapse,
    but both were dead steps: no full-width space survives the collapse,
    and the final substitution only replaced single spaces with spaces.

    Args:
        text: Raw text extracted from a document.

    Returns:
        The whitespace-normalized text.
    """
    return re.sub(r"\s+", " ", text).strip()


def count_pdf_files(data_path: str) -> int:
    """Count the PDF files located directly inside *data_path*.

    Only the top level of the directory is inspected (no recursion).

    Args:
        data_path: Directory to scan.

    Returns:
        Number of entries whose name ends with ``.pdf``.
    """
    total = 0
    for entry in os.listdir(data_path):
        if entry.endswith(".pdf"):
            total += 1
    return total


def load_data(data_path: str) -> List[TextNode]:
    """Load all PDFs under *data_path*, split them into ~256-char chunks,
    and wrap each cleaned chunk in a ``TextNode`` with a unique id.

    Args:
        data_path: Directory scanned recursively for ``.pdf`` files.

    Returns:
        One ``TextNode`` per chunk. Each node's metadata is a *copy* of
        the source document's metadata plus a fresh ``"id"`` (uuid4 str).
    """
    print(f"正在加载数据目录: {data_path}")

    # Report how many PDFs sit at the top level of the directory.
    num_files = count_pdf_files(data_path)
    print(f"发现 {num_files} 个 PDF 文件")

    # Load every PDF (recursively) via llama_index.
    reader = SimpleDirectoryReader(
        input_dir=data_path,
        recursive=True,
        required_exts=[".pdf"],
    )
    documents = reader.load_data()

    text_parser = SentenceSplitter(chunk_size=256)  # sentence-aware chunking
    text_chunks = []
    doc_idxs = []  # parallel list: index of the source document per chunk

    for doc_idx, doc in enumerate(documents):
        cur_text_chunks = text_parser.split_text(doc.text)
        text_chunks.extend(cur_text_chunks)
        doc_idxs.extend([doc_idx] * len(cur_text_chunks))

    nodes = []
    for idx, text_chunk in enumerate(text_chunks):
        clean_chunk = clean_text(text_chunk)  # normalize whitespace

        node = TextNode(text=clean_chunk)
        # BUG FIX: the original assigned the document's metadata dict by
        # reference, so every node derived from the same document shared
        # ONE dict, and the "id" written below was overwritten by each
        # later sibling — all of a document's nodes ended up carrying the
        # LAST generated uuid. Copying the dict gives each node its own
        # metadata and keeps its unique id intact.
        node.metadata = dict(documents[doc_idxs[idx]].metadata)
        node.metadata["id"] = str(uuid.uuid4())
        nodes.append(node)

    print(f"加载完成，共 {len(nodes)} 个文本块")
    return nodes


# **加载 & 存储向量数据库**
def load_vector_database(config: Config, persist_dir: str, data_path: str) -> ChromaVectorStore:
    """Open (or create) the persistent Chroma collection ``"travel"``.

    When the collection is empty, every PDF chunk under *data_path* is
    embedded with ``config.embed_model`` and ingested.

    Args:
        config: Project config object providing ``embed_model``.
        persist_dir: Directory where ChromaDB persists its data.
        data_path: Directory of source PDFs; only read on first fill.

    Returns:
        A ``ChromaVectorStore`` wrapping the ``"travel"`` collection.
    """
    print('启动向量数据库')

    # Make sure the persistence directory exists before opening the client.
    os.makedirs(persist_dir, exist_ok=True)

    client = chromadb.PersistentClient(path=persist_dir)
    existing = {col.name for col in client.list_collections()}

    if "travel" not in existing:
        print(f"集合 'travel' 不存在，正在创建...")
        collection = client.create_collection("travel")
    else:
        print(f"发现现有集合 'travel'")
        collection = client.get_collection("travel")

    store = ChromaVectorStore(chroma_collection=collection)

    # Already populated: nothing to ingest.
    if collection.count() != 0:
        print(f"向量数据库已存在，当前文档数: {collection.count()}")
        return store

    # Empty collection: load, embed and store all chunks now.
    print(f"向量数据库为空，正在加载数据...")
    nodes = load_data(data_path)
    embed_model = config.embed_model

    for node in nodes:
        vector = embed_model.get_text_embedding(node.text)
        collection.add(
            ids=[node.metadata["id"]],     # unique per-chunk id
            embeddings=[vector],
            metadatas=[node.metadata],
            documents=[node.text],
        )

    print(f"数据存入成功，当前文档数: {collection.count()}")
    return store


# rerank_path = './model/rerank_model'
# rerank_model_name = 'BAAI/bge-reranker-large'


def extract_cities_from_text(text):
    """Extract place names from *text* using jieba POS tagging.

    Args:
        text: Free-form input text (e.g. a user question).

    Returns:
        list[str]: Tokens whose jieba POS tag is ``"ns"`` (place name).
    """
    # Import locally so jieba stays optional for callers that never
    # reach this code path.
    import jieba.posseg as pseg
    return [token for token, tag in pseg.cut(text) if tag == "ns"]


def find_pdfs_with_city(cities, pdf_directory):
    """Map each city to the PDF files under *pdf_directory* whose
    filename contains that city name.

    Args:
        cities: Iterable of city-name strings.
        pdf_directory: Root directory, searched recursively.

    Returns:
        dict[str, list[str]]: city -> full paths of matching PDFs
        (empty list when a city matches nothing).
    """
    matched_pdfs = {}
    for city in cities:
        matched_pdfs[city] = []
        for root, _, files in os.walk(pdf_directory):
            for file in files:
                if file.endswith(".pdf") and city in file:
                    # BUG FIX: the original joined the filename onto a
                    # hard-coded absolute path (D:\code\...\data) instead
                    # of `root`, the directory os.walk actually found the
                    # file in — producing wrong paths for any other data
                    # directory and for files inside subdirectories.
                    matched_pdfs[city].append(os.path.join(root, file))
    return matched_pdfs


def get_embedding_pdf(text, pdf_directory):
    """Find the PDF guides relevant to the cities mentioned in *text*.

    Args:
        text: User input that may contain city names.
        pdf_directory: Root directory holding the PDF guides.

    Returns:
        dict[str, list[str]]: city -> matching PDF paths.
    """
    # Pull candidate city names out of the text, then match them
    # against PDF filenames under pdf_directory.
    mentioned_cities = extract_cities_from_text(text)
    return find_pdfs_with_city(mentioned_cities, pdf_directory)


# def load_rerank_model(model_name=rerank_model_name):
#     """
#     加载重排名模型。
#
#     参数:
#     - model_name (str): 模型的名称。默认为 'BAAI/bge-reranker-large'。
#
#     返回:
#     - FlagReranker 实例。
#
#     异常:
#     - ValueError: 如果模型名称不在批准的模型列表中。
#     - Exception: 如果模型加载过程中发生任何其他错误。
#     """
#     if not os.path.exists(rerank_path):
#         os.makedirs(rerank_path, exist_ok=True)
#     rerank_model_path = os.path.join(rerank_path, model_name.split('/')[1] + '.pkl')
#     # print(rerank_model_path)
#     logger.info('Loading rerank model...')
#     if os.path.exists(rerank_model_path):
#         try:
#             with open(rerank_model_path, 'rb') as f:
#                 reranker_model = pickle.load(f)
#                 logger.info('Rerank model loaded.')
#                 return reranker_model
#         except Exception as e:
#             logger.error(f'Failed to load embedding model from {rerank_model_path}')
#     else:
#         try:
#             os.system('apt install git')
#             os.system('apt install git-lfs')
#             os.system(f'git clone https://code.openxlab.org.cn/answer-qzd/bge_rerank.git {rerank_path}')
#             os.system(f'cd {rerank_path} && git lfs pull')
#
#             with open(rerank_model_path, 'rb') as f:
#                 reranker_model = pickle.load(f)
#                 logger.info('Rerank model loaded.')
#                 return reranker_model
#
#         except Exception as e:
#             logger.error(f'Failed to load rerank model: {e}')


# def rerank(reranker, query, contexts, select_num):
#     merge = [[query, context] for context in contexts]
#     scores = reranker.compute_score(merge)
#     sorted_indices = np.argsort(scores)[::-1]
#
#     return [contexts[i] for i in sorted_indices[:select_num]]


def embedding_make(text_input, pdf_directory=r"D:\code\BERT-NER-Pytorch\wanderlust_companion\data"):
    """Build a retrieval-augmented prompt for a travel question.

    Pipeline:
      1. Extract city names from ``text_input``; collect matching PDF
         guides under ``pdf_directory``.
      2. Read the PDFs, strip PDF line-break artifacts, split into
         256-char chunks (50 overlap).
      3. BM25-retrieve the top-10 chunks for the question.
      4. Rewrite the question against stored chat history via ZhipuAI.
      5. Re-rank the BM25 candidates by embedding cosine similarity and
         keep the top 3.
      6. Return a prompt stuffing those chunks plus the question.

    Args:
        text_input: The user's question; should mention at least one city.
        pdf_directory: Root folder containing per-city PDF guides.

    Returns:
        The assembled model prompt string, or a Chinese hint asking the
        user to mention a city when none was detected.
    """
    from dwspark.config import Config
    from dwspark.models import ChatModel, EmbeddingModel
    # NOTE(review): hard-coded credentials — should come from env vars /
    # config, not source control.
    config = Config('36f6a1f2','a9b7b68d8bc752e79c4e5dfa7e802a9c','N2IzZDk0NjYzZDRjNmY3ZGUxY2U4MDA4')
    city_to_pdfs = get_embedding_pdf(text_input, pdf_directory)
    city_list = []
    for city, pdfs in city_to_pdfs.items():
        print(f"City: {city}")
        for pdf in pdfs:
            city_list.append(pdf)

    if len(city_list) != 0:
        all_text = ''
        for city in city_list:
            from src.pdf_read import FileOperation
            file_opr = FileOperation()
            try:
                text, error = file_opr.read(city)
            except Exception:
                # Best-effort: skip unreadable PDFs rather than aborting.
                # (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                continue
            all_text += text

        # Drop line breaks that fall between two non-CJK characters —
        # typical PDF extraction artifacts — while keeping breaks that
        # sit inside Chinese prose.
        pattern = re.compile(r'[^\u4e00-\u9fff](\n)[^\u4e00-\u9fff]', re.DOTALL)
        all_text = re.sub(pattern, lambda match: match.group(0).replace('\n', ''), all_text)

        text_spliter = RecursiveCharacterTextSplitter(chunk_size=256, chunk_overlap=50)
        docs = text_spliter.create_documents([all_text])
        splits = text_spliter.split_documents(docs)
        question = text_input

        # First-stage retrieval: lexical BM25, top 10 candidate chunks.
        retriever = BM25Retriever.from_documents(splits)
        retriever.k = 10
        bm25_result = retriever.invoke(question)

        em = EmbeddingModel(config)
        # NOTE(review): another hard-coded API key — move to configuration.
        chat = ChatZhipuAI(
            api_key="59bc6df0d11e4969b0b58825e4dd98b5.mn27i6G3VcLgdz2d",
            model="GLM-4-Flash",
            temperature=0.8,
        )

        from langchain_community.chat_message_histories import ChatMessageHistory

        def retrieve_chat_history(conn):
            # Rebuild the LangChain message list from the sqlite chat log.
            cursor = conn.cursor()
            cursor.execute("SELECT user_message, model_response FROM chat_history")
            rows = cursor.fetchall()
            history = []
            for user_message, model_response in rows:
                history.append(HumanMessage(content=user_message))
                history.append(AIMessage(content=model_response))
            return history

        def get_session_history() -> ChatMessageHistory:
            # NOTE(review): relative DB path — resolves against the CWD,
            # not this file; confirm callers run from the expected dir.
            conn = sqlite3.connect("../web/chat_history.db")
            chat_history = retrieve_chat_history(conn)
            return ChatMessageHistory(messages=chat_history)

        # Rewrite the question so it is self-contained given chat history.
        question_prompt = get_contextualize_question_prompt()
        contextualize_question_chain = RunnableWithMessageHistory(
            question_prompt | chat,
            get_session_history,
            input_messages_key="input",
            history_messages_key="chat_history"
        )
        res = contextualize_question_chain.invoke({
            "input": question
        }, config={
            "configurable": {"session_id": "test456"}
        })
        question1 = res.content
        print("改写后内容：\n" + res.content)

        # Second-stage re-ranking: embed the rewritten question and each
        # BM25 candidate, then keep the most similar chunks.
        question_vector = em.get_embedding(question1)
        pdf_vector_list = []
        for i in range(len(bm25_result)):
            x = em.get_embedding(bm25_result[i].page_content)
            pdf_vector_list.append(x)
            time.sleep(0.65)  # stay under the embedding API rate limit

        query_embedding = np.array(question_vector).reshape(1, -1)
        similarities = cosine_similarity(query_embedding, pdf_vector_list)

        top_k = 3
        top_k_indices = np.argsort(similarities[0])[-top_k:][::-1]

        emb_list = []
        for idx in top_k_indices:
            # BUG FIX: `similarities` was computed over `bm25_result`, so
            # the argsort indices address THAT list — the original indexed
            # `splits[idx]`, pulling unrelated chunks out of the full
            # corpus instead of the re-ranked BM25 candidates.
            emb_list.append(bm25_result[idx].page_content)
        print(len(emb_list))
        reranked = ''.join(emb_list)

        model_input = f'你是一个旅游攻略小助手，你的任务是，根据收集到的信息：\n{reranked}.\n来精准回答用户所提出的问题：{question}。'

        print(reranked)
        print(model_input)

        return model_input
    else:
        return "请在输入中提及想要咨询的城市！"


if __name__ == "__main__":
    # Manual smoke test: ask about Shanghai attractions and build the
    # retrieval-augmented prompt (intermediate output is printed inside
    # embedding_make).
    sample_question = "请问上海有哪些好玩的地方？"
    generated_prompt = embedding_make(sample_question)
