from sentence_transformers import SentenceTransformer
import faiss
import numpy as np
import json


# === 修复后的检索代码 ===

def load_paragraphs_and_titles():
    """Load paragraph texts and the doc_id -> title mapping from disk.

    Reads ``wiki_paragraphs.tsv`` (tab-separated ``doc_id<TAB>content`` rows)
    and ``wiki_titles.json`` (string doc_id keys mapped to titles).

    Returns:
        tuple[dict[int, str], dict[int, str]]: ``(paragraphs, titles)``, both
        keyed by the integer doc_id that the FAISS index returns.
    """
    paragraphs = {}
    titles = {}

    # Load paragraphs (note: the key is the doc_id column, not the line number).
    print("正在加载段落...")
    with open("wiki_paragraphs.tsv", encoding="utf-8") as f:
        for line in f:
            # Only strip the line terminator: ``strip()`` would also delete
            # meaningful leading/trailing whitespace inside the paragraph text.
            parts = line.rstrip("\r\n").split("\t", 1)
            if len(parts) != 2:
                continue
            doc_id, content = parts
            try:
                paragraphs[int(doc_id)] = content
            except ValueError:
                # Skip malformed rows whose first column is not an integer id
                # instead of aborting the whole load.
                continue

    # Load the title mapping; JSON keys are strings, convert them back to int
    # so lookups by FAISS doc_id work.
    print("正在加载标题映射...")
    with open("wiki_titles.json", encoding="utf-8") as f:
        title_data = json.load(f)
        titles = {int(k): v for k, v in title_data.items()}

    print(f"成功加载 {len(paragraphs)} 个段落和 {len(titles)} 个标题")
    return paragraphs, titles


def search_paragraphs(query, model_path="/share/models/BAAI/bge-base-en-v1.5", top_k=5):
    """Embed *query*, search the FAISS index, and print/return the top hits.

    Args:
        query: Free-text search query.
        model_path: Path to the sentence-transformers encoder checkpoint.
        top_k: Number of results to request from the index.

    Returns:
        list[dict]: One dict per valid hit with keys ``rank``, ``doc_id``,
        ``score``, ``title``, ``content``. ``doc_id`` and ``score`` are cast
        to plain ``int``/``float`` so the result is JSON-serializable.
    """

    # Load the corpus and title mapping.
    paragraphs, titles = load_paragraphs_and_titles()

    # Load the encoder model.
    print("正在加载模型...")
    model = SentenceTransformer(model_path)

    # Encode the query; normalization makes inner-product scores behave as
    # cosine similarities (required for this index to rank meaningfully).
    print(f"正在编码查询: {query}")
    q_emb = model.encode([query], normalize_embeddings=True)
    print(f"查询向量维度: {q_emb.shape}")

    # Load the FAISS index.
    print("正在加载FAISS索引...")
    index = faiss.read_index("wiki_paragraphs.index")
    print(f"索引维度: {index.d}, 索引大小: {index.ntotal}")

    # Run the search.
    print("正在执行检索...")
    scores, doc_ids = index.search(q_emb, top_k)

    print(f"\n=== 检索结果 ===")
    print(f"查询: {query}")
    print(f"找到 {len(doc_ids[0])} 个结果\n")

    results = []
    for i, (score, doc_id) in enumerate(zip(scores[0], doc_ids[0])):
        if doc_id == -1:  # FAISS pads with -1 when fewer than top_k vectors exist
            continue

        # FAISS returns numpy.int64; cast to a plain int so the result dict
        # is JSON-serializable and dict lookups stay predictable.
        doc_id = int(doc_id)
        content = paragraphs.get(doc_id, "内容未找到")
        title = titles.get(doc_id, "标题未知")

        result = {
            "rank": i + 1,
            "doc_id": doc_id,
            "score": float(score),
            "title": title,
            "content": content
        }
        results.append(result)

        print(f"#{i + 1} [得分: {score:.4f}] [ID: {doc_id}]")
        print(f"标题: {title}")
        print(f"内容: {content[:200]}{'...' if len(content) > 200 else ''}")
        print("-" * 80)

    return results


def debug_index_info():
    """Print diagnostic information about the data files and the FAISS index.

    Each check is best-effort: a failure is reported and the next check runs.
    """
    print("=== 调试信息 ===")

    # Paragraph file: report its line count.
    try:
        with open("wiki_paragraphs.tsv", encoding="utf-8") as fh:
            line_total = sum(1 for _ in fh)
        print(f"段落文件行数: {line_total}")
    except Exception as err:
        print(f"读取段落文件失败: {err}")

    # Title mapping: report how many entries it holds.
    try:
        with open("wiki_titles.json", encoding="utf-8") as fh:
            title_map = json.load(fh)
        print(f"标题映射数量: {len(title_map)}")
    except Exception as err:
        print(f"读取标题文件失败: {err}")

    # FAISS index: report dimensionality and vector count.
    try:
        idx = faiss.read_index("wiki_paragraphs.index")
        print(f"FAISS索引维度: {idx.d}")
        print(f"FAISS索引向量数量: {idx.ntotal}")
    except Exception as err:
        print(f"读取FAISS索引失败: {err}")


def test_different_queries():
    """Run retrieval on the original query and several simplified variants."""

    # The original, fully-templated question.
    original_query = "[ENTITY]: Craig Virgin | [QUESTION]: Who finished second in the race where Craig Virgin set his world record?"

    # Hand-simplified rewrites of the same information need.
    simplified_queries = [
        "Craig Virgin world record race second place",
        "Craig Virgin running race record",
        "Who finished second Craig Virgin race",
        "Craig Virgin marathon running",
        "Craig Virgin athlete runner"
    ]

    print("=== 测试不同查询 ===\n")

    # The original query goes first.
    print("1. 原始查询:")
    results = search_paragraphs(original_query, top_k=3)

    # Then every simplified variant, numbered from 2 onward.
    for seq, candidate in enumerate(simplified_queries, start=2):
        print(f"\n{seq}. 简化查询: {candidate}")
        results = search_paragraphs(candidate, top_k=3)


def quick_test():
    """Sanity-check retrieval on a handful of generic queries, two hits each."""
    sample_queries = [
        "artificial intelligence machine learning",
        "Python programming language",
        "climate change global warming",
        "Craig Virgin runner athlete"
    ]

    # Load all heavyweight resources once, up front.
    paragraphs, titles = load_paragraphs_and_titles()
    encoder = SentenceTransformer("/share/models/BAAI/bge-base-en-v1.5")
    idx = faiss.read_index("wiki_paragraphs.index")

    for q in sample_queries:
        print(f"\n=== 测试查询: {q} ===")
        embedding = encoder.encode([q], normalize_embeddings=True)
        hit_scores, hit_ids = idx.search(embedding, 2)

        for hit_score, hit_id in zip(hit_scores[0], hit_ids[0]):
            # -1 marks an empty slot when the index has too few vectors.
            if hit_id == -1:
                continue
            body = paragraphs.get(hit_id, "未找到")
            heading = titles.get(hit_id, "未知")
            print(f"[{hit_score:.4f}] {heading}: {body[:100]}...")


if __name__ == "__main__":
    import sys

    args = sys.argv[1:]
    if not args:
        # No arguments: rerun the original question end-to-end.
        query = "[ENTITY]: Craig Virgin | [QUESTION]: Who finished second in the race where Craig Virgin set his world record?"
        print("使用原始查询进行测试...")
        search_paragraphs(query)
    else:
        # Known subcommands dispatch to their handler; anything else is
        # treated as a free-form query.
        commands = {
            "debug": debug_index_info,
            "test": test_different_queries,
            "quick": quick_test,
        }
        handler = commands.get(args[0])
        if handler is not None:
            handler()
        else:
            search_paragraphs(" ".join(args))