import faiss
import numpy as np
import jsonlines
import pickle
import logging
import os
from sentence_transformers import SentenceTransformer

# Configure logging: INFO level, with timestamp and severity on every message
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class SemanticIndex:
    """FAISS-backed semantic search over sentence-transformer embeddings.

    Typical workflow: call build_from_jsonlines() (or load_index()) once,
    then query with search().
    """

    def __init__(self, model_name='all-MiniLM-L6-v2'):
        """Load the embedding model and prepare empty index state.

        Args:
            model_name: sentence-transformers model identifier to load.
        """
        # The model's embedding dimension determines the FAISS index dimension.
        self.model = SentenceTransformer(model_name)
        self.dimension = self.model.get_sentence_embedding_dimension()
        logging.info(f"加载模型 {model_name}，向量维度: {self.dimension}")

        # Populated by build_from_jsonlines() or load_index().
        self.index = None     # faiss.IndexFlatL2 over the unit vectors
        self.vectors = None   # float32 ndarray, shape (n_units, dimension)
        self.unit_ids = []    # unit_ids[i] corresponds to index row i

    def build_from_jsonlines(self, structured_file, index_output_path, vector_output_path,
                             content_key='content'):
        """Build the semantic index from a JSON Lines file.

        Each record must contain a "unit_id" field and a text field named
        ``content_key``. Persists the FAISS index, the embedding matrix
        (.npy) and a pickled unit-ID list derived from ``index_output_path``.

        Args:
            structured_file: Path to the input .jsonl file.
            index_output_path: Where to write the FAISS index.
            vector_output_path: Where to write the embedding matrix (.npy).
            content_key: Name of the text field to embed.

        Raises:
            FileNotFoundError: If ``structured_file`` does not exist.
            KeyError: If any record lacks "unit_id" or ``content_key``.
        """
        if not os.path.exists(structured_file):
            raise FileNotFoundError(f"找不到输入文件: {structured_file}")

        # 1. Collect unit IDs and the text content to embed.
        logging.info(f"从 {structured_file} 加载知识单元...")
        unit_ids = []
        knowledge_contents = []

        with jsonlines.open(structured_file, mode="r") as f:
            for i, unit in enumerate(f, 1):
                try:
                    unit_ids.append(unit["unit_id"])
                    knowledge_contents.append(unit[content_key])
                except KeyError as e:
                    # Report the offending record before aborting so the
                    # data problem can be located and fixed.
                    logging.error(f"第{i}条记录缺少必要字段: {e}")
                    logging.error(f"该记录内容: {unit}")
                    raise

        logging.info(f"共加载 {len(unit_ids)} 个知识单元")

        # 2. Encode content into dense vectors (float32, as FAISS requires).
        logging.info("正在生成语义向量...")
        vectors = self.model.encode(knowledge_contents, batch_size=32, show_progress_bar=True)
        self.vectors = np.asarray(vectors, dtype=np.float32)
        self.unit_ids = unit_ids

        # 3. Build a flat (exact) L2-distance FAISS index.
        logging.info("构建FAISS索引...")
        self.index = faiss.IndexFlatL2(self.dimension)
        self.index.add(self.vectors)

        # Ensure the output directory exists before writing
        # (exist_ok already makes this race-free; no separate exists check).
        output_dir = os.path.dirname(index_output_path)
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)

        # 4. Persist index, vectors and the row-to-unit_id mapping.
        faiss.write_index(self.index, index_output_path)
        np.save(vector_output_path, self.vectors)

        unit_ids_path = os.path.splitext(index_output_path)[0] + "_unit_ids.pkl"
        with open(unit_ids_path, "wb") as f:
            pickle.dump(self.unit_ids, f)

        logging.info("索引构建完成，已保存至:")
        logging.info(f"  - FAISS索引: {index_output_path}")
        logging.info(f"  - 向量文件: {vector_output_path}")
        logging.info(f"  - 单元ID文件: {unit_ids_path}")

    def load_index(self, index_path, vector_path, unit_ids_path):
        """Load a previously saved index, vector matrix and unit-ID list.

        Raises:
            FileNotFoundError: If any of the three files is missing.
        """
        for path in (index_path, vector_path, unit_ids_path):
            if not os.path.exists(path):
                raise FileNotFoundError(f"找不到索引文件: {path}")

        logging.info("加载已保存的索引...")
        self.index = faiss.read_index(index_path)
        self.vectors = np.load(vector_path)

        with open(unit_ids_path, "rb") as f:
            self.unit_ids = pickle.load(f)

        logging.info(f"索引加载完成，包含 {len(self.unit_ids)} 个知识单元")

    def search(self, query, top_k=5):
        """Return up to ``top_k`` nearest knowledge units for ``query``.

        Args:
            query: Query text.
            top_k: Maximum number of results to return.

        Returns:
            List of dicts with "unit_id" and "distance" (L2; smaller means
            more similar), at most ``top_k`` entries.

        Raises:
            ValueError: If no index has been built or loaded yet.
        """
        # BUG FIX: "not self.index" relied on the truthiness of a SWIG-wrapped
        # FAISS object; test the uninitialized state explicitly.
        if self.index is None:
            raise ValueError("索引未初始化，请先构建或加载索引")

        # BUG FIX: SentenceTransformer.encode() has no ``dtype`` parameter
        # (passing it raised TypeError); convert to float32 via NumPy instead,
        # since FAISS requires float32 input.
        query_vector = np.asarray(self.model.encode([query]), dtype=np.float32)

        # FAISS returns (1, top_k) arrays; missing slots are padded with -1.
        distances, indices = self.index.search(query_vector, top_k)

        results = []
        for i in range(top_k):
            idx = int(indices[0][i])
            # BUG FIX: also reject the -1 padding FAISS emits when the index
            # holds fewer than top_k vectors (a bare "idx < len" check let -1
            # through and silently returned the last unit).
            if 0 <= idx < len(self.unit_ids):
                results.append({
                    "unit_id": self.unit_ids[idx],
                    "distance": float(distances[0][i])  # smaller = more similar
                })

        return results


def get_project_root():
    """Locate the project root by probing for knowledge_units.jsonl.

    Checks the parent of the script's directory first, then the script's
    own directory; falls back to the script's directory with a warning.
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    parent_dir = os.path.dirname(script_dir)

    # Prefer the parent directory when it holds the data file.
    if os.path.exists(os.path.join(parent_dir, "knowledge_units.jsonl")):
        return parent_dir

    # Otherwise accept the script's own directory if the file lives there.
    if os.path.exists(os.path.join(script_dir, "knowledge_units.jsonl")):
        return script_dir

    # Last resort: warn and use the script's directory anyway.
    logging.warning("无法自动识别项目根目录，使用当前脚本所在目录")
    return script_dir


# Usage example
if __name__ == "__main__":
    project_root = get_project_root()
    logging.info(f"自动识别的项目根目录: {project_root}")

    semantic_index = SemanticIndex(model_name='all-MiniLM-L6-v2')

    # BUG FIX: the original embedded literal backslashes in the paths
    # ("data\knowledge_units.jsonl"), which are invalid escape sequences and
    # break on POSIX systems. Pass each component to os.path.join instead.
    jsonlines_file = os.path.join(project_root, "data", "knowledge_units.jsonl")
    index_path = os.path.join(project_root, "data", "semantic_index.faiss")
    vector_path = os.path.join(project_root, "data", "knowledge_vectors.npy")
    unit_ids_path = os.path.join(project_root, "data", "semantic_index_unit_ids.pkl")

    logging.info(f"JSON文件路径: {jsonlines_file}")
    logging.info(f"文件是否存在: {os.path.exists(jsonlines_file)}")

    # The content field in the data file is named 'core_content'.
    content_field_name = 'core_content'

    # Rebuild only when any of the persisted artifacts is missing;
    # otherwise load the saved index from disk.
    if not (os.path.exists(index_path) and os.path.exists(vector_path) and os.path.exists(unit_ids_path)):
        try:
            semantic_index.build_from_jsonlines(
                jsonlines_file,
                index_path,
                vector_path,
                content_key=content_field_name
            )
        except FileNotFoundError as e:
            logging.error(f"构建索引失败: {e}")
            logging.error(f"请确认knowledge_units.jsonl文件在项目根目录下: {project_root}")
        except KeyError as e:
            logging.error(f"JSON文件中缺少必要字段: {e}")
            logging.error(f"请检查JSON文件格式，确保包含'{content_field_name}'字段")
    else:
        semantic_index.load_index(index_path, vector_path, unit_ids_path)

    queries = [
        "LTE上行采用哪种多址方式以改善峰均比？",
        "LTE上行多址技术有什么特点？",
        "LTE系统中上行使用的多址技术是什么？"
    ]

    for query in queries:
        print(f"\n查询: {query}")
        try:
            results = semantic_index.search(query, top_k=3)
            print("Top-3 相关知识单元ID:")
            for i, result in enumerate(results, 1):
                print(f"{i}. 单元ID: {result['unit_id']}, 距离分数: {result['distance']:.4f}")
        except Exception as e:
            print(f"查询失败: {e}")
