import gzip

# # os.environ['CUDA_VISIBLE_DEVICES']='7'
# # Select the CUDA device to use
# device = torch.device('cuda:7')
import json
import pickle
import time

from pathlib import Path
from typing import Union

from typing import List, Tuple
import faiss
import numpy as np
from faiss import normalize_L2
import torch
from transformers import AutoModel, AutoTokenizer
import shutil
import requests
import os
import glob


def openpkl(path: str):
    """Load and return a pickled object from a gzip-compressed file."""
    with gzip.open(path, "r") as handle:
        return pickle.load(handle)


def writepkl(path: str, document_list):
    """Serialize document_list to path as a gzip-compressed pickle."""
    with gzip.open(path, "wb") as handle:
        pickle.dump(document_list, handle)


def writeTxt(path: str, documents):
    """Write each item of documents as one line of a UTF-8 text file.

    Args:
        path: output file path.
        documents: iterable of items; each is converted with str().
    """
    # Explicit utf-8 so non-ASCII content (this project handles Chinese
    # text) does not depend on the platform's default locale encoding.
    with open(path, "w", encoding="utf-8") as file:
        file.writelines(f"{item}\n" for item in documents)


def read_qa_json(path):
    """Read QA records from a JSON file into parallel lists.

    Records missing any of the required keys are silently skipped.

    Returns:
        (questions, answers, source files, chunks) as four lists.
    """
    needed = ("file", "chunk", "question", "answer")
    with open(path, encoding="utf-8") as fp:
        records = json.load(fp)
    kept = [rec for rec in records if all(key in rec for key in needed)]
    querys = [rec["question"] for rec in kept]
    answers = [rec["answer"] for rec in kept]
    sources = [rec["file"] for rec in kept]
    chunks = [rec["chunk"] for rec in kept]
    return querys, answers, sources, chunks


def read_qa(data):
    """Split in-memory QA records into parallel lists.

    Records missing any of the required keys are silently skipped.

    Returns:
        (questions, answers, source files, chunks) as four lists.
    """
    needed = ("file", "chunk", "question", "answer")
    querys, answers, sources, chunks = [], [], [], []
    for record in data:
        if any(key not in record for key in needed):
            continue
        querys.append(record["question"])
        answers.append(record["answer"])
        sources.append(record["file"])
        chunks.append(record["chunk"])
    return querys, answers, sources, chunks


class create_save:
    """Embeds documents with a HuggingFace model and builds FAISS indexes."""

    def __init__(self, model_name: str, tokenizer_path: str, device: str):
        # Model/tokenizer locations; actual loading is deferred to first use.
        self.model_name = model_name
        self.tokenizer_path = tokenizer_path
        self.device = device
        # Lazily-initialized caches so repeated get_embedding() calls do
        # not reload the model/tokenizer from disk every batch.
        self._tokenizer = None
        self._model = None

    def load_base(self, content_file: str):
        """Load a pickled document list from a gzip-compressed file."""
        with gzip.open(content_file, "r") as file:
            documents = pickle.load(file)
        return documents

    def get_embedding(self, document_list):
        """Embed a list of texts.

        Returns:
            numpy array of mean-pooled last-hidden-state embeddings.
            NOTE(review): .squeeze() drops the batch axis when a single
            document is passed — callers appear to rely on batched input.
        """
        # BUG FIX: the model and tokenizer were reloaded from disk on every
        # call (once per batch); cache them after the first load.
        if self._tokenizer is None:
            self._tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_path)
        if self._model is None:
            model = AutoModel.from_pretrained(
                self.model_name, ignore_mismatched_sizes=True
            )
            self._model = model.to(self.device)
            self._model.eval()

        database_inputs = self._tokenizer(
            document_list,
            max_length=256,
            padding=True,
            truncation=True,
            return_tensors="pt",
        ).to(self.device)

        with torch.no_grad():
            database_outputs = self._model(**database_inputs)
            database_embeddings = (
                database_outputs.last_hidden_state.mean(dim=1).squeeze().cpu().numpy()
            )
        return database_embeddings

    def create_index(
        self, query_embeddings_shape, database_embeddings, index_file, nlist
    ):
        """Build and save an IVF-Flat index over the embeddings.

        Args:
            query_embeddings_shape: vector dimensionality.
            database_embeddings: float32 matrix of vectors (normalized in place).
            index_file: destination path for the serialized index.
            nlist: number of IVF coarse clusters.
        """
        quantizer = faiss.IndexFlatL2(query_embeddings_shape)
        # Normalize so inner product equals cosine similarity.
        normalize_L2(database_embeddings)
        index = faiss.IndexIVFFlat(
            quantizer, query_embeddings_shape, nlist, faiss.METRIC_INNER_PRODUCT
        )
        # BUG FIX: the IVF coarse quantizer must be trained exactly once,
        # before any vectors are added.  The previous code called train()
        # on every batch, re-training the quantizer after vectors had
        # already been assigned to clusters.
        index.train(database_embeddings)
        batch_size = 1000  # vectors added per batch
        for i in range(0, len(database_embeddings), batch_size):
            index.add(database_embeddings[i : i + batch_size])
        faiss.write_index(index, index_file)
        print(f"已生成 {index_file} 文件")

    def create_flat_index(
        self, query_embeddings_shape, database_embeddings, index_file
    ):
        """Build and save an exact (flat) inner-product index."""
        index = faiss.IndexFlatIP(query_embeddings_shape)
        # Normalize so inner product equals cosine similarity.
        normalize_L2(database_embeddings)
        index.add(database_embeddings)
        faiss.write_index(index, index_file)
        print(f"已生成 {index_file} 文件")


class LongListIterator:
    """Iterates a list in fixed-size batches; the last batch may be shorter."""

    def __init__(self, long_list, batch_size):
        self.long_list = long_list
        self.batch_size = batch_size
        self.current_index = 0  # position of the next batch's first element

    def __iter__(self):
        return self

    def __next__(self):
        pos = self.current_index
        if pos >= len(self.long_list):
            raise StopIteration
        # Slicing clamps at the end of the list, so no explicit min() needed.
        self.current_index = pos + self.batch_size
        return self.long_list[pos : pos + self.batch_size]


def load_data(path):
    """Extract generated QA pairs from a JSON file.

    Each record's "answer" field holds QA pairs separated by blank lines;
    each pair is a "Question: ..." line followed by an "Answer: ..." line.
    Malformed pairs are printed and skipped.

    Returns:
        (query_list, answer_list) parallel lists.
    """
    query_list, answer_list = [], []
    with open(path, encoding="utf-8") as fp:
        records = json.load(fp)
    error_count = 0
    for record in records:
        # QA pairs are separated by blank lines within the answer text.
        for block in record["answer"].split("\n\n"):
            parts = block.split("\n")
            if len(parts) < 2:
                error_count += 1
                continue
            q_line, a_line = parts[0], parts[1]
            if len(q_line.split("Question:")) < 2:
                print(q_line)
                error_count += 1
            elif len(a_line.split("Answer:")) < 2:
                print(a_line)
                error_count += 1
            else:
                query_list.append(q_line.split("Question:")[1].strip())
                answer_list.append(a_line.split("Answer:")[1].strip())

    return query_list, answer_list


def is_string_empty(s):
    """Return True when s contains nothing but whitespace (or is empty)."""
    return s.strip() == ""


def get_text_qa(path):
    """Load question/answer pairs from a JSON file.

    Accepts either lower-case ("question"/"answer") or capitalized
    ("Question"/"Answer") keys; a record missing both variants of a key
    contributes an empty string.

    Args:
        path: path to a JSON file containing a list of QA objects.

    Returns:
        (query_list, answer_list) parallel lists.
    """
    query_list, answer_list = [], []
    with open(path, encoding="utf-8") as json_file:
        data = json.load(json_file)
    for item in data:
        # Fall back to the capitalized key variant used by some sources.
        # (The original code had a dead first assignment to `answer`,
        # immediately overwritten by the fallback expression — removed.)
        question = item.get("question", "") or item.get("Question", "")
        answer = item.get("answer", "") or item.get("Answer", "")
        query_list.append(question)
        answer_list.append(answer)
    return query_list, answer_list


def split_content(content: str):
    """Split markdown-like text into retrieval chunks.

    First pass: split on "## " headings, merging a section into the next
    one when it is shorter than 150 characters.  Second pass: any chunk
    longer than 3000 characters is re-split, preferring "Page X" line
    boundaries, with a hard character cut as a last resort.

    Args:
        content: full document text.

    Returns:
        list of stripped chunk strings, each at most 3000 characters.
    """
    chunks = []
    current_chunk = ""
    lines = content.split("\n")
    for line in lines:
        if line.startswith("## ") and current_chunk:
            # Does the current chunk hold more than a bare heading?
            # (i.e. its last line is not itself a "## " heading)
            if "\n" in current_chunk.strip() and not current_chunk.strip().split("\n")[
                -1
            ].startswith("## "):
                # The current chunk has real content.
                if len(current_chunk.strip()) < 150:
                    # Too short to stand alone: merge into the next section.
                    current_chunk += line + "\n"
                else:
                    # Long enough: close this chunk and start a new section.
                    chunks.append(current_chunk.strip())
                    current_chunk = line + "\n"  # start the new section
            else:
                # Chunk holds only a heading so far; keep accumulating.
                current_chunk += line + "\n"
        else:
            current_chunk += line + "\n"  # continue the current section

    # Flush the final section, if any.
    if current_chunk:
        chunks.append(current_chunk.strip())

    final_chunks = []
    for chunk in chunks:
        if len(chunk) > 3000:
            # Re-split oversized chunks on "Page X" lines, but only once
            # the 3000-character budget is exceeded.
            sub_chunks = []
            current_sub_chunk = ""
            char_count = 0
            for line in chunk.split("\n"):
                if char_count + len(line) + 1 > 3000:  # would this line overflow the budget?
                    if line.startswith("Page ") and char_count > 0:
                        # New "Page" and the sub-chunk is non-empty: split here.
                        sub_chunks.append(current_sub_chunk.strip())
                        current_sub_chunk = line + "\n"
                        char_count = len(line) + 1
                    elif char_count > 0:
                        # Not a "Page" line: split at the current position.
                        sub_chunks.append(current_sub_chunk.strip())
                        current_sub_chunk = line + "\n"
                        char_count = len(line) + 1
                    else:
                        # Empty sub-chunk means this single line alone
                        # exceeds 3000 characters: force a hard split.
                        sub_chunks.append(line[:3000])
                        current_sub_chunk = line[3000:] + "\n"
                        char_count = len(current_sub_chunk)
                else:
                    current_sub_chunk += line + "\n"
                    char_count += len(line) + 1

            # Flush the last sub-chunk.
            if current_sub_chunk:
                sub_chunks.append(current_sub_chunk.strip())

            # Final guarantee: no sub-chunk exceeds 3000 characters.
            final_sub_chunks = []
            for sub_chunk in sub_chunks:
                while len(sub_chunk) > 3000:
                    split_index = sub_chunk.rfind("\n", 0, 3000)
                    if split_index == -1 or split_index == 0:
                        split_index = 3000
                    final_sub_chunks.append(sub_chunk[:split_index].strip())
                    sub_chunk = sub_chunk[split_index:].strip()
                if sub_chunk:
                    final_sub_chunks.append(sub_chunk)

            final_chunks.extend(final_sub_chunks)
        else:
            final_chunks.append(chunk)

    return final_chunks


def find_md_files(directory: str) -> List[str]:
    """Recursively collect the paths of all .md files under a directory.

    Args:
        directory: root directory to search.

    Returns:
        List of full paths to every Markdown file found.
    """
    return [
        os.path.join(root, name)
        for root, _dirs, names in os.walk(directory)
        for name in names
        if name.endswith(".md")
    ]


def extract_source(path: str) -> str:
    """Derive a human-readable source label from a .md file path.

    Uses the two directory levels above the file when available
    ("grandparent / parent"), falling back to "parent / filename" for a
    two-part path, or the bare filename otherwise.

    Args:
        path: full path to a Markdown file.

    Returns:
        Formatted source string.
    """
    parts = path.split(os.sep)
    if len(parts) >= 3:
        return f"{parts[-3]} / {parts[-2]}"
    if len(parts) == 2:
        return f"{parts[-2]} / {parts[-1]}"
    return parts[-1]


def find_matching_file(filename, json_file_path):
    """Look up a file name by id in a JSON mapping file.

    Args:
        filename: id to look up (compared as a string).
        json_file_path: path to a JSON list of {"id": ..., "file_name": ...}.

    Returns:
        tuple: (file_name, filename) on success, ("-1", "-1") when the id
        is not found or any error occurs.
    """
    try:
        with open(json_file_path, "r", encoding="utf-8") as f:
            data = json.load(f)

        # Map id -> file_name for O(1) lookup.
        data_dict = {str(item["id"]): item["file_name"] for item in data}

        filename_str = str(filename)
        if filename_str in data_dict:
            return (data_dict[filename_str], filename)  # (str, str)

        # BUG FIX: the original f-string had no placeholder, so the id that
        # failed to match was never reported.
        print(f"Warning: 未找到匹配的文件: {filename}")
        return ("-1", "-1")

    except Exception as e:
        print(f"Error: {e}")
        return ("-1", "-1")


import re


class SimpleRAG:
    """Minimal retrieval pipeline.

    Builds and incrementally updates a FAISS flat inner-product index over
    embedded text, with gzip-pickled chunk/source storage alongside it.
    """

    def __init__(
        self,
        results_path: Union[str, None] = None,
        embedding_model_path=None,
        device="cuda:0",
    ):
        # Directory layout: ./SimpleRAG/data/results[/<results_path>]
        self.base_path = Path("./SimpleRAG")
        self.data_path = self.base_path / "data"
        self.results_path = self.data_path / "results"
        self.device = device
        if results_path:
            self.results_path = self.results_path / results_path

        # Ensure the results directory exists.
        self.results_path.mkdir(parents=True, exist_ok=True)

        # Artifact file paths.
        self.chunk_file = self.results_path / "chunk.pkl"
        self.index_file = self.results_path / "faiss.index"
        self.source_file = self.results_path / "source.pkl"

        # Embedding model configuration (tokenizer shares the model path).
        self.model_name = embedding_model_path
        self.tokenizer_path = self.model_name

        # Index parameters.
        self.nlist = 3  # IVF cluster count (unused by the flat index path)
        self.batch_size = 256  # embedding batch size

    def build_qa_library(self, qa_file: str, answer_file: str) -> Tuple[List, List]:
        """Build the QA library from a JSON QA file.

        Args:
            qa_file: path to the QA-pair JSON file.
            answer_file: kept for backward compatibility; answers are
                written to self.chunk_file.

        Returns:
            Tuple of (question list, answer list).
        """
        print("开始构建QA库...")
        q_list, a_list = get_text_qa(qa_file)
        writepkl(self.chunk_file, a_list)
        # BUG FIX: report the file that was actually written; the original
        # message named `answer_file` although data goes to self.chunk_file.
        print(f"QA库构建完成，答案保存至: {self.chunk_file}")

        return q_list, a_list

    def generate_embeddings(self, data_list: List) -> Tuple[np.ndarray, "create_save"]:
        """Generate embeddings for a list of texts, in batches.

        Args:
            data_list: texts to embed.

        Returns:
            Tuple of (float32 embedding matrix, the create_save helper),
            so callers can reuse the helper for index creation.
            (The original annotation claimed a bare np.ndarray — fixed.)
        """
        print("开始生成嵌入向量...")
        geb = create_save(self.model_name, self.tokenizer_path, self.device)

        embeddings = []
        start_time = time.time()

        for batch_data in LongListIterator(data_list, self.batch_size):
            batch_embeddings = geb.get_embedding(batch_data)
            if batch_embeddings.size > 0:  # only extend on non-empty results
                embeddings.extend(batch_embeddings)
            else:
                print("Embedding 获取失败")

        processing_time = time.time() - start_time
        print(f"嵌入向量生成完成，耗时: {processing_time:.2f}秒")

        return np.array(embeddings).astype(np.float32), geb

    def append_to_flat_index(self, new_embeddings: np.ndarray) -> bool:
        """Append new vectors to the existing flat index on disk.

        Args:
            new_embeddings: new embedding matrix (normalized in place).

        Returns:
            bool: True when the index was extended and saved successfully.
        """
        try:
            print("开始添加新向量到现有Flat索引...")
            start_time = time.time()

            index = faiss.read_index(self.index_file.as_posix())
            original_size = index.ntotal

            # Incremental add is only valid for the flat index type.
            if not isinstance(index, faiss.IndexFlatIP):
                print("错误：现有索引不是Flat索引类型")
                return False

            # Normalize so inner product equals cosine similarity.
            normalize_L2(new_embeddings)
            index.add(new_embeddings)

            # Sanity check: the index grew by exactly the batch size.
            if index.ntotal != original_size + len(new_embeddings):
                print("错误：索引更新后的大小与预期不符")
                return False

            faiss.write_index(index, self.index_file.as_posix())

            indexing_time = time.time() - start_time
            print(f"向量添加完成，耗时: {indexing_time:.2f}秒")
            print(f"索引大小: {original_size} -> {index.ntotal}")
            return True

        except Exception as e:
            print(f"添加向量到索引时发生错误: {str(e)}")
            return False

    def update_storage_files(self, new_data: list, new_sources: list) -> bool:
        """Append new chunks/sources to the pickled storage files.

        Args:
            new_data: new chunk list to append to chunk.pkl.
            new_sources: new source list to append to source.pkl.

        Returns:
            bool: True when both files were updated successfully.
        """
        try:
            existing_data = openpkl(self.chunk_file) if self.chunk_file.exists() else []
            existing_sources = (
                openpkl(self.source_file) if self.source_file.exists() else []
            )

            updated_data = existing_data + new_data
            updated_sources = existing_sources + new_sources

            writepkl(self.chunk_file, updated_data)
            writepkl(self.source_file, updated_sources)

            print(f"存储文件更新完成，当前数据量: {len(updated_data)}")
            return True

        except Exception as e:
            print(f"更新存储文件时发生错误: {str(e)}")
            return False

    def _build_index(self, embed_list, store_list, sources) -> bool:
        """Shared build/update logic for all index flavors.

        Embeds `embed_list`; stores `store_list`/`sources` to pickle files;
        incrementally extends an existing index or creates a new one.
        (Extracted: build_qa / build_qq / build_qc were three near-identical
        copies of this code.)
        """
        if self.index_file.exists():
            print(f"发现现有索引文件{self.index_file}，进行增量更新...")

            # Update the pickle stores first so they stay aligned with
            # the index ordering.
            if not self.update_storage_files(store_list, sources):
                print("更新存储文件失败")
                return False

            embeddings, _ = self.generate_embeddings(embed_list)
            return self.append_to_flat_index(embeddings)

        print("创建新的索引文件...")
        writepkl(self.chunk_file, store_list)
        writepkl(self.source_file, sources)

        embeddings, geb = self.generate_embeddings(embed_list)
        self.create_search_index(embeddings, geb)
        return True

    def build_qa(self, a_list, sources):
        """Answer-indexed store: embed answers, retrieve answers."""
        return self._build_index(a_list, a_list, sources)

    def build_qq(self, q_list, a_list, sources):
        """Question-indexed store: embed questions, retrieve answers."""
        return self._build_index(q_list, a_list, sources)

    def build_qc(self, a_list, sources):
        """Chunk-indexed store: embed chunks, retrieve chunks."""
        return self._build_index(a_list, a_list, sources)

    def create_search_index(self, embeddings: np.ndarray, geb) -> None:
        """Create a new flat index from scratch and save it to disk."""
        print("开始创建Flat搜索索引...")
        start_time = time.time()
        vector_dim = len(embeddings[0])

        geb.create_flat_index(vector_dim, embeddings, self.index_file.as_posix())

        indexing_time = time.time() - start_time
        print(f"索引创建完成，耗时: {indexing_time:.2f}秒")

def read_json(path):
    """Deserialize a UTF-8 JSON file and return the parsed object."""
    with open(path, encoding="utf-8") as fp:
        return json.load(fp)

def create_q_a_base(processed_file_path, qac_data, embedding_model_path, device):
    """Build the q-q ("0") and q-a ("1") knowledge bases.

    Prefers in-memory qac_data; falls back to reading the processed JSON
    file when no data is supplied.
    """
    if qac_data:
        q_list, a_list, sources, chunks = read_qa(qac_data)
    else:
        q_list, a_list, sources, chunks = read_qa_json(processed_file_path)
        print(f"read from {processed_file_path}")

    print(
        "length of q_list, a_list, sources, chunks:\n",
        len(q_list),
        len(a_list),
        len(sources),
        len(chunks),
    )

    # "0": question-to-question index; "1": question-to-answer index.
    SimpleRAG("0", embedding_model_path, device).build_qq(q_list, a_list, sources)
    SimpleRAG("1", embedding_model_path, device).build_qa(a_list, sources)

def create_q_c_base(qc_file_path, qc_source_path, embedding_model_path, device):
    """Build the q-c ("2") knowledge base from chunk and source JSON files."""
    chunks = read_json(qc_file_path)
    sources = read_json(qc_source_path)

    print(
        "length of chunk_list, source_list:\n",
        len(sources),
        len(chunks),
    )

    # "2": question-to-chunk index.
    SimpleRAG("2", embedding_model_path, device).build_qc(chunks, sources)
def delete_qa_base(base_path: str) -> bool:
    """Delete every file and subdirectory under a knowledge-base path.

    When base_path is a directory, the directory itself is kept and only
    its contents are removed.  A path that does not exist counts as a
    successful deletion.

    Args:
        base_path: knowledge-base root (file or directory).

    Returns:
        bool: True when everything was removed (or nothing existed).
    """
    try:
        path = Path(base_path)

        if not path.exists():
            print(f"Path does not exist: {base_path}")
            return True  # nothing to delete counts as success

        # A single file: remove it directly.
        if path.is_file():
            path.unlink()
            return True

        # A directory: remove all files and subdirectories inside it.
        if path.is_dir():
            for item in path.iterdir():
                if item.is_file():
                    item.unlink()
                elif item.is_dir():
                    shutil.rmtree(item)

            # Verify the directory is now empty.
            if not any(path.iterdir()):
                print(f"Successfully deleted all contents in: {base_path}")
                return True
            print(f"Some files could not be deleted in: {base_path}")
            return False

        # BUG FIX: the original fell through and returned None for paths
        # that are neither regular files nor directories; report failure
        # explicitly so the annotated bool contract holds.
        print(f"Unsupported path type: {base_path}")
        return False

    except Exception as e:
        print(f"Error while deleting files: {str(e)}")
        return False


if __name__ == "__main__":
    emb_model_path = "/root/data/SimpleRAG_project/models/mixed_model_1"
    # os.CUDA_VISIBLE_DEVICES = "5"
    device = torch.device("cuda:5" if torch.cuda.is_available() else "cpu")
    # create_q_a_base(
    #     processed_file_path="/root/data/navy_poc/qac_data/part1.json",
    #     qac_data=None,
    #     embedding_model_path=emb_model_path,
    #     device=device,
    # )
    create_q_c_base(
        qc_file_path="/root/data/mindqa/qac_data/all.json",
        qc_source_path="/root/data/mindqa/qac_data/all_sources.json",
        embedding_model_path=emb_model_path,
        device=device,
    )