import time
import numpy as np
import requests
import os
import jieba
from rank_bm25 import BM25Okapi
from flask import Flask, request, jsonify
from flask_cors import CORS


from pymongo import MongoClient
import threading

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel

app = Flask(__name__)
# Configure CORS: allow any origin on /api/* routes, restricted to the
# listed HTTP methods and request headers.
CORS(app, resources={
    r"/api/*": {
        "origins": "*",
        "methods": ["GET", "POST", "OPTIONS"],
        "allow_headers": ["Content-Type", "Authorization"]
    }
})

# OneAPI service configuration (OpenAI-compatible gateway).
ONEAPI_BASE_URL = "http://localhost:3001/v1"  # base URL of the OneAPI service
# NOTE(review): secret committed in source — should be moved to an env var.
ONEAPI_KEY = "sk-fnlLL7v1VLyy7hEvBbC95cD6CcCb4954954dF4A1CaEb2563"  # OneAPI key

DB_NAME = "pdfchatbot"
COLLECTION_NAME = "advancedRAGParentChild"

# Module-level state shared across requests.
bm25 = None            # BM25Okapi model over the full corpus (set by create_test_structure)
all_docs = []          # cached list of all documents loaded from MongoDB
tokenized_corpus = []  # jieba-tokenized contents backing the BM25 model

# Startup probe for the OneAPI gateway.
def validate_oneapi_key():
    """Probe the OneAPI embeddings endpoint to confirm the service and key work.

    Sends one small embedding request; returns True on HTTP 200,
    False on any connection problem, non-200 status, or parse failure.
    """
    auth_headers = {
        "Authorization": f"Bearer {ONEAPI_KEY}",
        "Content-Type": "application/json"
    }
    # Minimal payload: a short text against the configured embedding model.
    probe_payload = {
        "input": "测试向量",
        "model": "Embedding-2"
    }

    try:
        print(f"验证OneAPI服务: {ONEAPI_BASE_URL}")
        response = requests.post(
            f"{ONEAPI_BASE_URL}/embeddings",
            headers=auth_headers,
            json=probe_payload,
            timeout=10
        )

        if response.status_code != 200:
            print(f"OneAPI服务验证失败: HTTP {response.status_code}")
            print(f"  错误信息: {response.text[:200]}")
            return False

        vector = response.json()["data"][0]["embedding"]
        print(f"OneAPI服务验证成功")
        print(f"  向量维度: {len(vector)}")
        return True

    except requests.exceptions.ConnectionError:
        print("无法连接到OneAPI服务，请检查地址和网络连接")
        print("  确认OneAPI服务是否在localhost:3001上运行")
        return False
    except Exception as e:
        print(f"OneAPI验证失败: {str(e)}")
        return False


# Fail fast at import time if the OneAPI service is unreachable.
if not validate_oneapi_key():
    exit(1)


# Embedding helper backed by OneAPI.
def custom_embed_query(text):
    """Return the OneAPI embedding vector for *text*.

    Retries up to three times on HTTP errors or request exceptions,
    sleeping one second between attempts; raises if all attempts fail.
    """
    request_headers = {
        "Authorization": f"Bearer {ONEAPI_KEY}",
        "Content-Type": "application/json"
    }
    payload = {
        "input": text,
        "model": "Embedding-2"
    }

    attempt = 0
    while attempt < 3:
        try:
            response = requests.post(
                f"{ONEAPI_BASE_URL}/embeddings",
                headers=request_headers,
                json=payload,
                timeout=15
            )
        except requests.exceptions.RequestException as e:
            print(f"嵌入请求异常 (尝试 {attempt + 1}/3): {str(e)}")
            time.sleep(1)
        else:
            if response.status_code == 200:
                return response.json()["data"][0]["embedding"]
            print(f"嵌入请求失败 (尝试 {attempt + 1}/3): HTTP {response.status_code}")
            print(f"  错误详情: {response.text[:200]}")
            time.sleep(1)  # back off before retrying
        attempt += 1

    raise Exception(f"无法生成嵌入向量: {text}")


# MongoDB connection (direct connection, admin-authenticated).
uri = "mongodb://root:example@localhost:27017/?directConnection=true&authSource=admin"
try:
    print("\n🔌 连接MongoDB...")
    client = MongoClient(uri, serverSelectionTimeoutMS=3000)
    client.admin.command('ping')  # fail fast if the server is unreachable
    print("MongoDB 连接成功")
except Exception as e:
    print(f"MongoDB连接失败: {str(e)}")
    exit(1)

db = client[DB_NAME]
collection = db[COLLECTION_NAME]
print(f"使用数据库: {DB_NAME}, 集合: {COLLECTION_NAME}")


# Build the sample corpus used for retrieval experiments.
def create_test_structure():
    """Rebuild the test collection from scratch.

    Clears the MongoDB collection, inserts the sample documents with
    embeddings generated via OneAPI, and initializes the global BM25
    model over the contents of the documents that were actually stored.

    Returns:
        (documents, tokenized_corpus) — the source document dicts
        (with ``embedding`` added for successful inserts) and the
        jieba-tokenized corpus aligned with the stored documents.
    """
    global bm25, all_docs, tokenized_corpus
    print("\n创建测试数据结构...")
    collection.delete_many({})
    print("🗑 已清空集合")

    # Sample documents: title groups of related content snippets.
    documents = [
        {
            "title": "OneAPI 配置指南",
            "content": "OneAPI 提供了兼容OpenAI的API接口，允许您使用不同的后端模型",
            "category": "技术文档"
        },
        {
            "title": "OneAPI 配置指南",
            "content": "认证使用Bearer令牌，路径为/v1/completions，参数与OpenAI官方API兼容",
            "category": "开发指南"
        },
        {
            "title": "OneAPI 配置指南",
            "content": "通过将文本转换为向量表示，可以在高维空间中计算文本之间的相似度",
            "category": "AI理论"
        },
        {
            "title": "混合检索优势",
            "content": "结合关键词检索和语义检索可以提高复杂查询的准确率",
            "category": "最佳实践"
        },
        {
            "title": "混合检索优势",
            "content": "BM25是基于词频和文档长度的经典信息检索算法",
            "category": "信息检索"
        },
        {
            "title": "混合检索优势",
            "content": "Dense Passage Retrieval使用双编码器进行深度语义匹配",
            "category": "深度学习"
        },
        {
            "title": "嵌入向量应用",
            "content": "对于长查询，使用分块处理和上下文融合可以显著提高相关性",
            "category": "性能优化"
        },
        {
            "title": "嵌入向量应用",
            "content": "中文检索需要有效的分词算法来处理无空格分隔的文本",
            "category": "自然语言处理"
        },
        {
            "title": "嵌入向量应用",
            "content": "嵌入向量可以用于语义搜索、文本分类和推荐系统等多种任务",
            "category": "应用场景"
        },
        {
            "title": "嵌入向量应用",
            "content": "通过批处理请求和缓存机制可以提高嵌入API的性能",
            "category": "性能优化"
        }
    ]

    # Insert documents and generate their embedding vectors.
    print("生成文档嵌入向量...")
    tokenized_corpus = []  # BM25 corpus, kept aligned with inserted docs
    for doc in documents:
        try:
            content = doc["content"]

            # Generate the embedding first: if this raises, the document is
            # neither inserted nor added to the BM25 corpus.
            vector = custom_embed_query(content)
            doc["embedding"] = vector

            collection.insert_one(doc)

            # Bug fix: only extend the BM25 corpus AFTER a successful insert,
            # so the corpus stays one-to-one with the stored documents even
            # when an embedding/insert fails partway through.
            tokenized_corpus.append(list(jieba.cut(content)))
            print(f"插入文档: {doc['title']}")

            # Throttle to avoid hitting API rate limits.
            time.sleep(0.2)
        except Exception as e:
            print(f"处理文档失败: {str(e)}")

    # Initialize the BM25 model over whatever was successfully stored.
    if tokenized_corpus:
        bm25 = BM25Okapi(tokenized_corpus)
        print("BM25模型初始化完成")
    else:
        print("没有足够的语料库数据初始化BM25模型")
        bm25 = None

    print("测试数据结构创建完成")

    # Cache all stored documents in memory.
    all_docs = list(collection.find({}))
    return documents, tokenized_corpus


# Hybrid retrieval: BM25 keyword pre-filter + DPR-style dense re-ranking.
def hybrid_search(query, article_title, k=5, bm25_top_n=8):
    """Run hybrid retrieval over documents matching *article_title*.

    Stage 1 builds a BM25 model over the filtered documents and keeps the
    top ``bm25_top_n`` candidates; stage 2 re-ranks candidates by dot
    product with the query embedding and combines the two scores
    (0.4 * BM25 + 0.6 * dot product).

    Returns:
        (results, bm25_time, dpr_time, total_time, error_msg) where
        results is a list of dicts with doc/bm25_score/dpr_score/hybrid_score.
    """
    print(f"\n混合检索查询: '{query}'，文章标题: '{article_title}'")
    start_time = time.time()

    # Restrict the search space to the requested article.
    filtered_docs = list(collection.find({"title": article_title}))
    if not filtered_docs:
        return [], 0, 0, 0, "文章标题不可用"

    # --- Stage 1: BM25 keyword pre-filter ---
    bm25_start = time.time()

    tokenized_corpus_filtered = [list(jieba.cut(doc["content"])) for doc in filtered_docs]
    if not tokenized_corpus_filtered:
        print("没有有效的语料库数据进行BM25检索")
        return [], 0, 0, 0, "语料库数据无效"

    bm25_filtered = BM25Okapi(tokenized_corpus_filtered)

    # Tokenize the query; fall back to character-level tokens if jieba fails.
    try:
        tokenized_query = list(jieba.cut(query))
    except Exception:
        tokenized_query = list(query)

    try:
        bm25_scores = bm25_filtered.get_scores(tokenized_query)
    except Exception as e:
        print(f"BM25计算失败: {str(e)}")
        # BM25 failed: fall back to treating every document as a candidate
        # with a neutral score of zero.
        candidate_docs = filtered_docs
        bm25_scores = [0.0] * len(filtered_docs)
    else:
        # Keep the top-N candidates by BM25 score.
        bm25_top_indices = np.argsort(bm25_scores)[::-1][:bm25_top_n]
        candidate_docs = [filtered_docs[i] for i in bm25_top_indices]

    bm25_time = time.time() - bm25_start
    print(f"BM25初筛完成 ({bm25_time:.2f}s): 从{len(filtered_docs)}篇文档中筛选出{len(candidate_docs)}篇候选文档")

    # --- Stage 2: DPR-style dense re-ranking ---
    dpr_start = time.time()

    try:
        query_embedding = custom_embed_query(query)
    except Exception as e:
        print(f"查询嵌入生成失败: {str(e)}")
        return [], bm25_time, 0, time.time() - start_time, ""

    # Precompute _id -> corpus index so BM25 score lookup is O(1)
    # per candidate instead of a linear scan for each one.
    id_to_index = {doc['_id']: i for i, doc in enumerate(filtered_docs)}

    results = []
    for doc in candidate_docs:
        if "embedding" not in doc:
            continue

        doc_vector = doc["embedding"]

        # Skip documents whose stored vector has a different dimensionality.
        if len(doc_vector) != len(query_embedding):
            print(
                f"维度不匹配: 文档 {doc.get('title', '无标题')} (期望: {len(query_embedding)}, 实际: {len(doc_vector)})")
            continue

        try:
            # Dot product is the similarity used by DPR.
            dot_product = np.dot(query_embedding, doc_vector)

            doc_index = id_to_index.get(doc['_id'], -1)
            bm25_score = bm25_scores[doc_index] if doc_index != -1 else 0.0

            # Weighted combination of lexical and semantic relevance.
            hybrid_score = 0.4 * bm25_score + 0.6 * dot_product
            results.append({
                "doc": doc,
                "bm25_score": bm25_score,
                "dpr_score": dot_product,
                "hybrid_score": hybrid_score
            })
        except Exception as e:
            print(f"计算相似度失败: {str(e)}")
            continue

    results.sort(key=lambda x: x["hybrid_score"], reverse=True)
    top_results = results[:k]

    dpr_time = time.time() - dpr_start
    total_time = time.time() - start_time
    print(
        f"DPR精排完成 ({dpr_time:.2f}s): 从{len(candidate_docs)}篇候选文档中筛选出{len(top_results)}篇结果 (总耗时: {total_time:.2f}s)")

    return top_results, bm25_time, dpr_time, total_time, ""


def tfidf_expansion_vector_search(query, article_title, k=5, expansion_terms=3):
    """TF-IDF query expansion followed by dense vector retrieval.

    Stage 1 expands the query with up to ``expansion_terms`` high-TF-IDF
    terms from the most similar documents; stage 2 ranks all filtered
    documents by dot product with the expanded query's embedding.

    Returns:
        (results, bm25_time, dpr_time, total_time, error_msg) — bm25_time
        and dpr_time are always 0 (kept for interface parity with
        hybrid_search); each result has doc/score/original_score.
    """
    print(f"\nTF-IDF扩展向量检索查询: '{query}'，文章标题: '{article_title}'")
    start_time = time.time()
    bm25_time = 0  # this strategy has no BM25 stage
    dpr_time = 0   # this strategy has no DPR stage

    # Restrict the search space to the requested article.
    filtered_docs = list(collection.find({"title": article_title}))
    if not filtered_docs:
        return [], bm25_time, dpr_time, time.time() - start_time, "文章标题不可用"

    # --- Stage 1: TF-IDF query expansion ---
    tfidf_start = time.time()

    corpus = [doc["content"] for doc in filtered_docs]

    # jieba handles Chinese tokenization (the default token pattern would not).
    vectorizer = TfidfVectorizer(tokenizer=jieba.cut)
    try:
        tfidf_matrix = vectorizer.fit_transform(corpus)
    except ValueError as e:
        error_msg = f"TF-IDF矩阵创建失败: {str(e)}"
        return [], bm25_time, dpr_time, time.time() - start_time, error_msg

    query_vec = vectorizer.transform([query])

    # Cosine similarity between the query and every document (vectors are
    # L2-normalized by TfidfVectorizer, so linear_kernel suffices).
    cosine_similarities = linear_kernel(query_vec, tfidf_matrix).flatten()

    # Take the 3 most similar documents as sources for expansion terms.
    top_indices = cosine_similarities.argsort()[::-1][:3]
    top_docs = [filtered_docs[i] for i in top_indices]

    try:
        feature_names = vectorizer.get_feature_names_out()
    except AttributeError:
        feature_names = vectorizer.get_feature_names()  # older sklearn

    expanded_terms = set()

    for idx in top_indices:
        doc_vec = tfidf_matrix[idx]

        # Highest-TF-IDF terms of this document, most important first.
        sorted_terms = doc_vec.toarray().argsort()[0][::-1]
        for term_idx in sorted_terms[:10]:  # top 10 terms per document
            term = feature_names[term_idx]
            # Skip digits, single characters, and terms already in the query.
            if (len(term) > 1 and
                    not term.isdigit() and
                    term not in query):
                expanded_terms.add(term)
                if len(expanded_terms) >= expansion_terms:
                    break
        if len(expanded_terms) >= expansion_terms:
            break

    expanded_query = query + " " + " ".join(expanded_terms)
    print(f"原始查询: '{query}' → 扩展查询: '{expanded_query}' (新增词汇: {', '.join(expanded_terms)})")

    tfidf_time = time.time() - tfidf_start

    # --- Stage 2: dense vector retrieval ---
    vector_start = time.time()

    try:
        query_embedding = custom_embed_query(expanded_query)
        # Bug fix: embed the ORIGINAL query once, up front. The previous
        # version called custom_embed_query(query) inside the per-document
        # loop below — one network request per document for the same text.
        original_embedding = custom_embed_query(query)
    except Exception as e:
        error_msg = f"查询嵌入生成失败: {str(e)}"
        print(error_msg)
        return [], bm25_time, dpr_time, time.time() - start_time, error_msg

    results = []
    for doc in filtered_docs:
        if "embedding" not in doc:
            continue

        doc_vector = doc["embedding"]
        if len(doc_vector) != len(query_embedding):
            continue

        results.append({
            "doc": doc,
            "score": np.dot(query_embedding, doc_vector),
            "original_score": np.dot(original_embedding, doc_vector)
        })

    results.sort(key=lambda x: x["score"], reverse=True)
    top_results = results[:k]

    vector_time = time.time() - vector_start
    total_time = time.time() - start_time

    print(f"TF-IDF扩展向量检索完成 ({total_time:.2f}s): "
          f"TF-IDF阶段{tfidf_time:.2f}s, 向量阶段{vector_time:.2f}s, "
          f"返回{len(top_results)}个结果")

    return top_results, bm25_time, dpr_time, total_time, ""


def improved_keyword_search(query, article_title, k=5):
    """Enhanced keyword retrieval: BM25 with position, title and
    term-frequency boosts.

    Final score = BM25 * position_weight * title_match * term_freq_boost.

    Returns:
        (results, bm25_time, dpr_time, total_time, error_msg) — bm25_time
        and dpr_time are always 0 (kept for interface parity); each result
        carries its own per-document boost factors.
    """
    print(f"\n改进关键词检索查询: '{query}'，文章标题: '{article_title}'")
    start_time = time.time()
    bm25_time = 0  # this strategy reports no separate BM25 stage time
    dpr_time = 0   # this strategy has no DPR stage

    # Restrict the search space to the requested article.
    filtered_docs = list(collection.find({"title": article_title}))
    if not filtered_docs:
        return [], bm25_time, dpr_time, time.time() - start_time, "文章标题不可用"

    # --- Stage 1: build the BM25 model over the filtered docs ---
    tokenized_docs = [list(jieba.cut(doc["content"])) for doc in filtered_docs]
    if not tokenized_docs:
        error_msg = "语料库数据无效"
        return [], bm25_time, dpr_time, time.time() - start_time, error_msg

    # Local model — deliberately not the module-level `bm25` global.
    bm25_model = BM25Okapi(tokenized_docs)

    # Tokenize the query; fall back to character-level tokens if jieba fails.
    try:
        tokenized_query = list(jieba.cut(query))
    except Exception:
        tokenized_query = list(query)

    bm25_scores = bm25_model.get_scores(tokenized_query)

    # --- Stage 2: per-document boost features ---
    scored = []
    for i, doc in enumerate(filtered_docs):
        content = doc["content"]

        # 1. Position boost: earliest occurrence of any query term.
        first_occurrence = float('inf')
        for term in tokenized_query:
            pos = content.find(term)
            if 0 <= pos < first_occurrence:
                first_occurrence = pos

        # Occurrence within the first 10% of the text gets the biggest boost.
        position_weight = 1.0
        if first_occurrence < len(content) * 0.1 and len(content) > 0:
            position_weight = 1.5
        elif first_occurrence < len(content) * 0.3 and len(content) > 0:
            position_weight = 1.2

        # 2. Title boost: +0.3 per query term found in the title.
        title_match = 1.0
        title = doc.get("title", "")
        for term in tokenized_query:
            if term in title:
                title_match += 0.3

        # 3. Frequency boost: terms appearing more than 3 times add up to +1.
        term_freq_boost = 1.0
        for term in tokenized_query:
            term_count = content.count(term)
            if term_count > 3:
                term_freq_boost += 0.1 * min(term_count, 10)

        enhanced_score = bm25_scores[i] * position_weight * title_match * term_freq_boost

        # Bug fix: record the boost factors PER DOCUMENT. The previous
        # version built the result dicts after the loop, so every result
        # reported the factors of the last iterated document.
        scored.append({
            "doc": doc,
            "score": enhanced_score,
            "bm25_score": bm25_scores[i],
            "position_weight": position_weight,
            "title_match": title_match,
            "term_freq_boost": term_freq_boost
        })

    # Top-k by enhanced score.
    scored.sort(key=lambda r: r["score"], reverse=True)
    results = scored[:k]

    total_time = time.time() - start_time
    print(f"增强关键词检索完成 ({total_time:.2f}s): 返回{len(results)}个结果")

    return results, bm25_time, dpr_time, total_time, ""
# Data bootstrap, invoked at application startup (or lazily on first request).
def initialize_data():
    """Populate the test collection and warm the in-memory caches."""
    global bm25, all_docs, tokenized_corpus
    try:
        documents, tokenized_corpus = create_test_structure()
        all_docs = list(collection.find({}))
    except Exception as e:
        print(f"数据初始化失败: {str(e)}")
    else:
        print("数据初始化完成")





def ask_glm4_airx(query, context):
    """Answer *query* with the GLM-4-AirX chat model, grounded in *context*.

    Returns the model's answer text, or an error-message string when the
    request fails (callers receive a string either way).
    """
    # Prompt template: the retrieved context is wrapped in <Data> tags and
    # followed by answering rules and the user's question.
    prompt = f"""使用 <Data></Data> 标记中的内容作为你的知识:

<Data>
{context}
</Data>

回答要求：
- 如果你不清楚答案，你需要澄清。
- 避免提及你是从 <Data></Data> 获取的知识。
- 保持答案与 <Data></Data> 中描述的一致。
- 使用 Markdown 语法优化回答格式。
- 使用与问题相同的语言回答。

问题:
{query}"""

    request_headers = {
        "Authorization": f"Bearer {ONEAPI_KEY}",
        "Content-Type": "application/json"
    }
    payload = {
        "model": "GLM-4-AirX",
        "messages": [
            {"role": "user", "content": prompt}
        ],
        "temperature": 0.3,
        "max_tokens": 1024
    }

    try:
        print(f"调用GLM-4-AirX模型，问题: '{query[:30]}...'")
        start_time = time.time()

        response = requests.post(
            f"{ONEAPI_BASE_URL}/chat/completions",
            headers=request_headers,
            json=payload,
            timeout=60
        )

        if response.status_code != 200:
            error_msg = f"模型请求失败: HTTP {response.status_code}, {response.text[:200]}"
            print(error_msg)
            return error_msg

        answer = response.json()['choices'][0]['message']['content']
        elapsed = time.time() - start_time
        print(f"模型响应成功 ({elapsed:.2f}s)")
        return answer

    except Exception as e:
        error_msg = f"模型调用异常: {str(e)}"
        print(error_msg)
        return error_msg


# API endpoint: question answering over retrieved context.
@app.route('/api/ask', methods=['POST'])
def ask():
    """Retrieve relevant documents, then generate an answer with GLM-4-AirX.

    Expects a JSON body with: query (required), article_title, k
    (default 3), and search_strategy (default 'hybrid_search').

    Returns a JSON payload with the query, formatted search results,
    the model answer, and timing/statistics.
    """
    try:
        data = request.get_json()
        query = data.get('query', '')
        article_title = data.get('article_title', '')
        k = data.get('k', 3)  # number of documents to retrieve
        search_strategy = data.get('search_strategy', 'hybrid_search')

        if not query:
            return jsonify({"error": "查询不能为空"}), 400

        # Lazily initialize data on the first request if startup skipped it.
        if bm25 is None:
            initialize_data()

        # Dispatch to the requested retrieval strategy; all strategies share
        # the (results, bm25_time, dpr_time, total_time, error_msg) contract.
        if search_strategy == 'hybrid_search':
            search_results, bm25_time, dpr_time, total_time, error_msg = hybrid_search(query, article_title, k)
        elif search_strategy == 'tfidf_expansion_vector_search':
            search_results, _, _, total_time, error_msg = tfidf_expansion_vector_search(query, article_title, k)
            bm25_time = 0
            dpr_time = 0
        elif search_strategy == 'improved_keyword_search':
            search_results, bm25_time, dpr_time, total_time, error_msg = improved_keyword_search(query, article_title, k)
        else:
            return jsonify({"error": "无效的检索策略"}), 400

        if error_msg:
            return jsonify({
                "query": query,
                "search_results": [],
                "model_answer": error_msg,
                "stats": {
                    "bm25_time": bm25_time,
                    "dpr_time": dpr_time,
                    "total_time": total_time,
                    "total_docs": len(all_docs) if all_docs else 0,
                    "context_length": 0
                }
            })

        # Build the knowledge context and the client-facing result list.
        context_parts = []
        formatted_results = []

        for result in search_results:
            doc = result.get('doc') if isinstance(result, dict) else result
            context_parts.append(f"标题: {doc.get('title', '')}\n内容: {doc.get('content', '')}")

            formatted_result = {
                "title": doc.get('title', ''),
                "content": doc.get('content', ''),
                "category": doc.get('category', ''),
            }
            # Bug fix: cast scores to plain float — BM25/dot-product scores
            # are numpy.float64, which Flask's JSON encoder cannot serialize.
            for score_key in ('score', 'bm25_score', 'dpr_score', 'hybrid_score'):
                if score_key in result:
                    formatted_result[score_key] = float(result[score_key])
            formatted_results.append(formatted_result)

        context = "\n\n".join(context_parts) if context_parts else "未找到相关信息"

        # Generate the final answer with the LLM.
        model_answer = ask_glm4_airx(query, context)

        return jsonify({
            "query": query,
            "search_results": formatted_results,
            "model_answer": model_answer,
            "stats": {
                "bm25_time": bm25_time,
                "dpr_time": dpr_time,
                "total_time": total_time,
                "total_docs": len(all_docs) if all_docs else 0,
                "context_length": len(context)
            }
        })

    except Exception as e:
        print(f"问答处理错误: {str(e)}")
        return jsonify({"error": str(e)}), 500


if __name__ == "__main__":
    print("启动混合检索API服务...")
    # Seed the database and in-memory BM25 state before serving requests.
    initialize_data()
    # NOTE(review): debug=True enables the Werkzeug debugger — unsafe if
    # this port is reachable from untrusted networks; confirm for deployment.
    app.run(host='0.0.0.0', port=5000, debug=True)