from langchain_community.document_loaders import UnstructuredWordDocumentLoader, TextLoader, PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
import re
from openai import OpenAI
from tqdm import tqdm
from typing import List
import numpy as np

import sqlite3
import json


class TextFragment:
    """One chunk of a source document plus its embedding vector.

    Instances live in the in-memory store of :class:`RAG` and are ranked by
    cosine similarity at recall time.
    """

    def __init__(self, file_name, text, page, embedding, similarity=None):
        """
        :param file_name: str, name of the file this fragment came from.
        :param text: str, content of the fragment.
        :param page: int, 1-based page number the fragment belongs to.
        :param embedding: list, embedding vector of the fragment.
        :param similarity: float or None, similarity score filled in at
            recall time (None until the fragment has been ranked).
        """
        self.file_name = file_name
        self.text = text
        self.page = page
        self.embedding = embedding
        self.similarity = similarity

    def to_dict(self):
        """Serialize for the API/UI layer; the (large) embedding is omitted."""
        return dict(
            file_name=self.file_name,
            text=self.text,
            page=self.page,
            similarity=self.similarity,
        )


def len_cacu(text):
    """Length measure used by the text splitter.

    Counts one unit per run of ASCII letters (an English word) and one unit
    per individual CJK character, so Chinese and English text are chunked to
    comparable sizes.
    """
    return sum(1 for _ in re.finditer(r'[a-zA-Z]+|[\u4e00-\u9fff]', text))

def cosine_similarity(v1, v2):
    """Return the cosine similarity of vectors *v1* and *v2*.

    :param v1: array-like of numbers.
    :param v2: array-like of numbers, same length as *v1*.
    :return: float in [-1, 1]; 0.0 if either vector has zero norm
        (the original code divided by zero and produced NaN).
    """
    norm_v1 = np.linalg.norm(v1)
    norm_v2 = np.linalg.norm(v2)
    denom = norm_v1 * norm_v2
    if denom == 0.0:
        # A zero vector has no direction; treat it as maximally dissimilar
        # instead of emitting NaN and a RuntimeWarning.
        return 0.0
    return np.dot(v1, v2) / denom

class RAG:
    """Minimal retrieval-augmented-generation pipeline.

    Documents are split into :class:`TextFragment` chunks, embedded via a
    local OpenAI-compatible server (LM Studio), kept in an in-memory store
    (``self.db``) and ranked by cosine similarity at query time.  User
    feedback is persisted to a local SQLite file (``feedback.db``).
    """

    def __init__(self, embedding_model, llm_model) -> None:
        """
        :param embedding_model: model name passed to the embeddings endpoint.
        :param llm_model: model name passed to the chat-completions endpoint.
        """
        self.text_splitter = RecursiveCharacterTextSplitter(
            separators=["\n\n", "\n", "。", ".", " ", ",", "，", ""],
            chunk_size=200,
            chunk_overlap=10,
            length_function=len_cacu,  # word/CJK-char aware length, not len()
            is_separator_regex=False
        )
        self.ai_client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio")
        self.db: List[TextFragment] = []  # in-memory vector store
        self.embedding_model = embedding_model
        self.llm_model = llm_model

        self.init_sqlite()

    def add_document(self, file_name):
        """Load ``uploads/<file_name>``, split it, embed every fragment and
        add the fragments to the store.  Re-adding a file replaces its
        previous fragments instead of duplicating them.

        :param file_name: file name (not a path) inside the ``uploads/`` dir.
        """
        # Lowercase so "X.DOCX" is routed like "x.docx" instead of falling
        # through to the plain-text loader.
        extension = file_name.split(".")[-1].lower()
        file_path = f"uploads/{file_name}"
        if extension in ("doc", "docx"):
            loader = UnstructuredWordDocumentLoader(file_path)
        elif extension == "pdf":
            loader = PyPDFLoader(file_path)
        else:
            loader = TextLoader(file_path, encoding='utf-8')

        document = loader.load()

        # Drop fragments from any earlier upload of the same file so the new
        # content overwrites it.
        self.clear_file_name_records(file_name)

        fragments = []
        for doc in tqdm(self.text_splitter.split_documents(document)):
            emb = self.embedding(doc.page_content)
            # PDF loader pages are 0-based; loaders without page metadata
            # get page 1.
            page = doc.metadata['page'] + 1 if "page" in doc.metadata else 1
            fragments.append(TextFragment(text=doc.page_content, file_name=file_name,
                                          page=page, embedding=emb))
        self.db += fragments

    def embedding(self, query):
        """Return the embedding vector (list of floats) for *query*."""
        response = self.ai_client.embeddings.create(input=query, model=self.embedding_model)
        return response.data[0].embedding

    def db_discribe(self):
        """Return ``{file_name: fragment_count}`` for the current store."""
        file_fragments_count = {}
        for doc in self.db:
            file_fragments_count[doc.file_name] = file_fragments_count.get(doc.file_name, 0) + 1
        return file_fragments_count

    def clear_file_name_records(self, file_name):
        """Remove every stored fragment belonging to *file_name*."""
        self.db = [x for x in self.db if x.file_name != file_name]

    def recall_records(self, query, file_names, top_n=3):
        """Return up to *top_n* fragments from *file_names*, most similar to
        *query* first; each returned fragment has ``.similarity`` filled in.

        :param query: user question to embed and match against the store.
        :param file_names: iterable of file names to restrict the search to.
        :param top_n: maximum number of fragments to return.
        """
        target_db = [x for x in self.db if x.file_name in file_names]
        if not target_db:
            # Nothing to rank — skip the (remote) embedding call entirely.
            return []

        query_emb = np.array(self.embedding(query))
        similarities = np.array([cosine_similarity(query_emb, np.array(x.embedding))
                                 for x in target_db])
        # Indices of the Top-N highest similarities, descending.
        top_indices = np.argsort(-similarities)[:top_n]

        top_similarities = []
        for i in top_indices.tolist():
            target_db[i].similarity = float(similarities[i])
            top_similarities.append(target_db[i])

        return top_similarities

    def _build_messages(self, system_prompt, query, history):
        """Assemble the OpenAI messages list: system prompt, alternating
        user/assistant turns from *history*, then the new user *query*.

        :raises ValueError: if *history* has an odd number of entries
            (it must be complete user/assistant pairs).
        """
        if len(history) % 2 != 0:
            raise ValueError("history must contain complete user/assistant pairs")
        messages = [{"role": "system", "content": system_prompt}]
        messages += [{"role": "user" if i % 2 == 0 else "assistant", "content": x}
                     for i, x in enumerate(history)]
        messages.append({"role": "user", "content": query})
        return messages

    def _stream_completion(self, messages):
        """Stream a chat completion, yielding
        ``{"finish_reason": ..., "content": ...}`` per chunk."""
        completion = self.ai_client.chat.completions.create(
            model=self.llm_model,
            messages=messages,
            temperature=0.5,
            stream=True
        )
        for chunk in completion:
            yield {"finish_reason": chunk.choices[0].finish_reason,
                   "content": chunk.choices[0].delta.content}

    def chat(self, query, history, targets_content):
        """Answer *query* grounded in the recalled *targets_content*;
        yields streamed response chunks."""
        SYSTEM_PROMPT = f"你是RAG系统，根据以下召回的信息，详细准确地回答用户的问题： \n{targets_content}"
        yield from self._stream_completion(self._build_messages(SYSTEM_PROMPT, query, history))

    def chat_no_files(self, query, history):
        """Plain chat without any retrieval context; yields streamed chunks."""
        SYSTEM_PROMPT = "Perform the task to the best of your ability."
        yield from self._stream_completion(self._build_messages(SYSTEM_PROMPT, query, history))

    def init_sqlite(self):
        """Create the feedback database/table if it does not exist yet."""
        # sqlite3.connect creates the file on first use.
        conn = sqlite3.connect('feedback.db')
        try:
            conn.execute('''CREATE TABLE IF NOT EXISTS records
                (id INTEGER PRIMARY KEY AUTOINCREMENT,
                  history TEXT,
                  recalls TEXT,
                  result TEXT)''')
        finally:
            conn.close()  # release the file handle even if the DDL fails

    def add_feedback(self, history, recalls, result):
        """Persist one feedback row; *history* and *recalls* are stored as JSON.

        :param history: chat history (JSON-serializable).
        :param recalls: recalled fragments (JSON-serializable).
        :param result: feedback/result text.
        """
        conn = sqlite3.connect('feedback.db')
        try:
            conn.execute("INSERT INTO records (history, recalls, result) VALUES (?, ?, ?)",
                         (json.dumps(history), json.dumps(recalls), result))
            conn.commit()
        finally:
            conn.close()


# 问题例子
# 详细介绍一下CHES数据集。
# 实验的训练集、验证集和测试集划分比例是多少？