import faiss
import logging
from faiss import normalize_L2

# from Dataset import Dataset
import pickle
import gzip
import os
import numpy
import torch
from transformers import AutoModel, AutoTokenizer
from typing import Any


def openpkl(path: str):
    """Load and return a pickled object from a gzip-compressed file.

    NOTE: pickle deserialization can execute arbitrary code; only call
    this on trusted files.
    """
    with gzip.open(path, "rb") as fh:
        return pickle.load(fh)


class SimpleRetriever:
    """Dense retriever: embeds a query with a transformer encoder and
    searches a prebuilt FAISS index for the nearest content chunks.
    """

    def __init__(
        self, index_path: str, content_path: str, emb_model_path: str, device: str
    ):
        """
        Args:
            index_path: path to a serialized FAISS index file.
            content_path: path to a gzip-compressed pickle of chunk contents.
            emb_model_path: HuggingFace model name or path for the encoder.
            device: torch device string, e.g. "cpu" or "cuda:0".
        """
        self.device = device
        self.index = faiss.read_index(index_path)  # load the FAISS index
        self.content = openpkl(content_path)
        self.tokenizer = AutoTokenizer.from_pretrained(emb_model_path)
        self.model: torch.nn.Module = AutoModel.from_pretrained(emb_model_path).to(
            self.device
        )
        # Contents of a sibling source.pkl (or None when it doesn't exist).
        self.source = self.check_source_file_existence(content_path)
        # NOTE(review): index2content reads self.chunk_info, which is never
        # assigned here -- presumably set by a caller or subclass; verify.

    def query_to_embedding(self, query_text: str):
        """Encode query_text into a (1, hidden_dim) float32 numpy array by
        mean-pooling the encoder's last hidden state over the sequence axis.
        """
        inputs = self.tokenizer(
            query_text,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=512,
        ).to(self.device)
        # Inference only: no_grad avoids building the autograd graph
        # (the original tracked gradients and detach()-ed afterwards).
        with torch.no_grad():
            outputs = self.model(**inputs)
        embeddings: numpy.ndarray = outputs.last_hidden_state.mean(1).to("cpu").numpy()
        return embeddings

    def check_source_file_existence(self, content_path: str):
        """Load source.pkl from content_path's directory if it exists.

        Returns:
            The unpickled source data, or None when the file is absent.
            (Originally annotated ``-> bool``, which did not match the
            actual return values.)
        """
        source_file_path = os.path.join(os.path.dirname(content_path), "source.pkl")
        if os.path.exists(source_file_path):
            return openpkl(source_file_path)
        return None

    def calc_iot(self, str_a: str, str_b: str):
        """Character-set overlap of the two strings normalized by the
        length of the shorter string (intersection-over-threshold).

        Returns 0.0 when either string is empty (the original raised
        ZeroDivisionError; it also computed a Jaccard score first and
        immediately overwrote it -- dead code removed).
        """
        shorter = min(len(str_a), len(str_b))
        if shorter == 0:
            return 0.0
        return len(set(str_a) & set(str_b)) / shorter

    def search(self, query_text: str, top_k=5):
        """Embed query_text and retrieve its top_k nearest index entries.

        Returns:
            (D, I): FAISS distance and index arrays, each of shape
            (1, top_k).
        """
        query_embedding = self.query_to_embedding(query_text)
        # In-place L2 normalization so inner-product search behaves like
        # cosine similarity.
        normalize_L2(query_embedding)
        return self.index.search(query_embedding, top_k)

    def index2content(self, D, I, top_k=5):
        """Map FAISS search output to content chunks, best score first.

        Args:
            D: score/distance array, shape (1, top_k).
            I: index array, shape (1, top_k).
            top_k: number of hits to consider.

        Returns:
            Chunk texts sorted by descending score. Indices missing from
            self.chunk_info are skipped with a warning (the original then
            crashed with IndexError in its final range(top_k) loop).
        """
        dist_list = []
        chunk_list = []
        for i in range(top_k):
            best_match_idx = I[0][i]
            # Safe lookup: missing indices are logged, not fatal.
            chunk_info = self.chunk_info.get(str(best_match_idx))
            if chunk_info:
                dist_list.append(D[0][i])
                chunk_list.append(chunk_info["chunk"])
            else:
                logging.warning(f"没有找到索引为 {best_match_idx} 的chunk信息。")

        # Sort by score, highest first, over the chunks actually found.
        order = sorted(range(len(chunk_list)), key=lambda j: dist_list[j], reverse=True)
        return [chunk_list[j] for j in order]

    def recalled_data2csv(self, recalled_data, query, key="Answer"):
        """Build up to 20 [query, content] rows from recalled items.

        Each item is expected to be a mapping whose `key` field holds the
        content (None when absent). If the items are not dict-like, they
        are used as-is. Duplicates are dropped, keeping first occurrence.
        """
        try:
            content_list = [x.get(key) for x in recalled_data]
        except Exception:
            # Deliberate best-effort fallback: items are not mappings,
            # treat them as the content directly.
            content_list = list(recalled_data)
        # Order-preserving dedupe -- O(n) vs the original O(n^2)
        # sorted(set(...), key=list.index).
        content_list = list(dict.fromkeys(content_list))
        return [[query, x] for x in content_list][:20]
