import re
import time
from transformers import AutoTokenizer, AutoModel, AutoModelForSequenceClassification
import torch
from pymilvus import connections, Collection, db
import gradio as gr

# Initialize the BiomedBERT model and tokenizer (used to generate the
# initial query embeddings for vector search).
tokenizer = AutoTokenizer.from_pretrained("microsoft/BiomedNLP-BiomedBERT-large-uncased-abstract")
model = AutoModel.from_pretrained("microsoft/BiomedNLP-BiomedBERT-large-uncased-abstract")

# Initialize the reranker model and tokenizer (BAAI/bge-reranker-v2-m3,
# a cross-encoder used to re-score retrieved chunks against the query).
reranker_tokenizer = AutoTokenizer.from_pretrained("BAAI/bge-reranker-v2-m3")
reranker_model = AutoModelForSequenceClassification.from_pretrained("BAAI/bge-reranker-v2-m3")
reranker_model.eval()  # inference only: disables dropout etc.

# Configuration
DATABASE_NAME = "my_medical_db"       # Milvus database to select after connecting
COLLECTION_NAME = "new_medical_docs"  # Milvus collection holding the document chunks
DIMENSION = 1024                      # embedding dimension; NOTE(review): unused in this chunk — presumably matches the collection schema, verify

# Connect to Milvus
def connect_milvus():
    """Connect to the local Milvus server and switch to DATABASE_NAME.

    Prints the error and re-raises on failure so the caller can surface it.
    """
    try:
        connections.connect(host='localhost', port='19530')
        db.using_database(DATABASE_NAME)
    except Exception as e:
        print(f"Milvus 连接失败: {e}")
        raise

# Generate the query embedding
def generate_query_embedding(query_text):
    """Embed query_text with BiomedBERT and return the [CLS] vector.

    Returns a plain Python list of floats suitable for a Milvus search.
    """
    encoded = tokenizer(query_text, return_tensors="pt", padding=True, truncation=True, max_length=512)
    with torch.no_grad():
        hidden = model(**encoded).last_hidden_state
    # The [CLS] token (position 0) is used as the sentence-level embedding.
    return hidden[:, 0, :].squeeze().numpy().tolist()

# Highlight keywords
def highlight_keywords(text, keywords):
    """Wrap every case-insensitive occurrence of each keyword in a highlight <span>.

    Each keyword is regex-escaped so literal characters such as '(' or '+'
    cannot break the pattern, and all keywords are combined into a single
    alternation so a later keyword can never re-match inside the HTML
    already inserted for an earlier one (which previously produced nested
    spans).
    """
    if not keywords:
        return text
    # Longer keywords first so overlapping keywords prefer the longest match.
    pattern = "|".join(re.escape(kw) for kw in sorted(keywords, key=len, reverse=True))
    return re.sub(
        f"(?i)({pattern})",
        r'<span style="background-color: #ffcccc; padding: 2px;">\1</span>',
        text,
    )

# Semantic search
def semantic_search(collection, query_text, top_k):
    """Vector-search the collection for the top_k chunks nearest to query_text.

    Returns a list of (id, chunk_text, score) tuples, where score maps the
    L2 distance into (0, 1] so that smaller distances rank higher.
    """
    embedding = generate_query_embedding(query_text)
    collection.load()
    params = {"metric_type": "L2", "params": {"nprobe": 10}}
    hits = collection.search(
        data=[embedding],
        anns_field="embedding",
        param=params,
        limit=top_k,
        output_fields=["id", "chunk_text"],
    )
    scored = []
    for hit in hits[0]:
        # Convert distance to a similarity-style score: 1 / (1 + distance).
        scored.append((hit.entity.get("id"), hit.entity.get("chunk_text"), 1 / (1 + hit.distance)))
    return scored

# Keyword search
def keyword_search(collection, keywords, top_k):
    """Milvus scalar query for chunks containing any keyword (substring match).

    Returns a list of (id, chunk_text, score) tuples where the score grows
    by 0.15 per matched keyword from a 0.5 base, capped at 1.0. Returns []
    (after logging) if the query fails.
    """
    collection.load()
    # Escape single quotes so a keyword cannot terminate the expr string
    # literal early (expression injection). NOTE(review): Milvus LIKE
    # wildcards '%' and '_' are still passed through unescaped, matching the
    # original behavior — confirm whether keywords may contain them.
    expr = " || ".join(
        "chunk_text like '%{}%'".format(kw.replace("'", "\\'")) for kw in keywords
    )
    try:
        results = collection.query(expr=expr, limit=top_k, output_fields=["id", "chunk_text"])
        keyword_results = []
        for res in results:
            text = res["chunk_text"]
            # re.escape prevents regex metacharacters in a keyword from
            # raising re.error or matching unintended text.
            matched_keywords = sum(1 for kw in keywords if re.search(f"(?i){re.escape(kw)}", text))
            score = min(0.5 + matched_keywords * 0.15, 1.0)
            keyword_results.append((res["id"], text, score))
        return keyword_results
    except Exception as e:
        print(f"关键词搜索失败: {e}")
        return []

# Reranking function
def rerank_results(query_text, results):
    """Re-score each result against the query with the cross-encoder reranker.

    Overwrites each result dict's "score" in place and returns a new list
    sorted by that score, descending.
    """
    pairs = [[query_text, item["text"]] for item in results]
    with torch.no_grad():
        batch = reranker_tokenizer(pairs, padding=True, truncation=True, return_tensors="pt", max_length=512)
        logits = reranker_model(**batch).logits
        # Single-logit head -> sigmoid relevance; two-class head -> P(class 1).
        # view(-1) keeps a list even for a single pair (no scalar squeeze).
        if logits.shape[1] == 1:
            scores = torch.sigmoid(logits).view(-1).tolist()
        else:
            scores = logits.softmax(dim=1)[:, 1].tolist()
        for item, score in zip(results, scores):
            item["score"] = score
    return sorted(results, key=lambda item: item["score"], reverse=True)

# Hybrid retrieval: combine semantic + keyword results and keep both the
# original ranking and the reranked ranking for comparison.
def hybrid_search(collection, query_text, keywords, top_k=50):
    """Run semantic and/or keyword search, merge by id, highlight, and rerank.

    Returns (original_results, final_results, search_time) where each result
    is a {"id", "text", "score"} dict; text is keyword-highlighted when
    keywords are given. original_results keeps the pre-rerank scores.
    """
    start_time = time.time()

    results_dict = {}
    if query_text:
        semantic_results = semantic_search(collection, query_text, top_k)
        for id_, text, score in semantic_results:
            results_dict[id_] = {"id": id_, "text": text, "score": score}

    if keywords:
        keyword_results = keyword_search(collection, keywords, top_k)
        for id_, text, score in keyword_results:
            if id_ in results_dict:
                # A chunk found by both methods keeps its best score.
                results_dict[id_]["score"] = max(results_dict[id_]["score"], score)
            else:
                results_dict[id_] = {"id": id_, "text": text, "score": score}

    original_results = []
    for item in results_dict.values():
        highlighted_text = highlight_keywords(item["text"], keywords) if keywords else item["text"]
        original_results.append({"id": item["id"], "text": highlighted_text, "score": item["score"]})
    original_results = sorted(original_results, key=lambda x: x["score"], reverse=True)

    # Copy the dicts (not just the list) before reranking: rerank_results
    # overwrites each dict's "score" in place, and the previous shallow
    # list.copy() let those writes corrupt the scores displayed for the
    # "original ranking" section.
    final_results = [dict(item) for item in original_results]
    if final_results and query_text:
        final_results = rerank_results(query_text, final_results)

    search_time = time.time() - start_time
    return original_results, final_results, search_time

# Pagination helper
def paginate_results(results, page, per_page):
    """Return (the slice of results for the 1-indexed page, total count)."""
    offset = (page - 1) * per_page
    return results[offset:offset + per_page], len(results)

# Gradio search callback
def search(query_text, keywords_input, page, per_page):
    """Run a hybrid search and render both rankings as one HTML document.

    Returns (result_html, timing_text, query_text, keywords_input); the last
    two echo the inputs back so the Gradio textboxes keep their values.
    On any failure the error is printed and returned in the result slot.
    """
    if not query_text and not keywords_input:
        return "请输入查询内容或关键词", "", "", ""

    try:
        connect_milvus()
        collection = Collection(COLLECTION_NAME)

        keywords = keywords_input.split() if keywords_input else []
        original_results, final_results, search_time = hybrid_search(collection, query_text, keywords)

        if not final_results:
            return "未找到匹配结果，请检查关键词或查询内容", f"搜索耗时: {search_time:.2f}秒", query_text, keywords_input

        original_page_results, total_results = paginate_results(original_results, page, per_page)
        final_page_results, _ = paginate_results(final_results, page, per_page)
        total_pages = (total_results + per_page - 1) // per_page

        output = f"查询: {query_text or '无'}, 关键词: {keywords or '无'}<br>"
        output += f"总结果数: {total_results}, 当前页: {page}/{total_pages}, 每页显示: {per_page}<br>"
        output += f"搜索耗时: {search_time:.2f}秒<br><br>"

        # Original ranking (light blue background)
        output += '<h3>原始排序结果</h3><div style="background-color: #e6f3ff; padding: 10px;">'
        output += _render_results(original_page_results)
        output += "</div>"

        # Reranked results (light green background)
        output += '<h3>重排后结果</h3><div style="background-color: #e6ffe6; padding: 10px;">'
        output += _render_results(final_page_results)
        output += "</div>"

        return output, f"搜索耗时: {search_time:.2f}秒", query_text, keywords_input

    except Exception as e:
        print(f"搜索过程中发生错误: {e}")
        return f"搜索失败: {str(e)}", "", query_text, keywords_input


def _render_results(page_results):
    """Render one page of result dicts as an HTML fragment."""
    fragment = ""
    for result in page_results:
        fragment += f"ID: {result['id']}<br>"
        fragment += f"Score: {result['score']:.4f}<br>"
        fragment += f"Text: {result['text']}<br>"
        fragment += "-" * 50 + "<br>"
    return fragment

# Clear-button callback
def clear_inputs():
    """Return the default value for every input and output widget."""
    defaults = ("", "", 1, 10, "", "")
    return defaults

# Gradio UI
with gr.Blocks(title="医学文献搜索") as demo:
    gr.Markdown("# 医学文献搜索系统")

    # Query text (semantic search) and space-separated keywords (scalar search).
    with gr.Row():
        query_input = gr.Textbox(label="查询内容（可选）", placeholder="请输入查询内容...")
        keywords_input = gr.Textbox(label="关键词（可选，用空格分隔）", placeholder="请输入关键词...")

    # Pagination controls.
    with gr.Row():
        page_input = gr.Slider(minimum=1, maximum=10, value=1, step=1, label="页码")
        per_page_input = gr.Dropdown(choices=[5, 10, 20], value=10, label="每页显示数量")

    with gr.Row():
        search_button = gr.Button("搜索")
        clear_button = gr.Button("清空")  # clear/reset button

    output = gr.HTML(label="搜索结果", value="")
    time_output = gr.Textbox(label="响应时间", interactive=False, value="")

    search_button.click(
        fn=search,
        inputs=[query_input, keywords_input, page_input, per_page_input],
        outputs=[output, time_output, query_input, keywords_input]  # echo the input values back to preserve textbox state
    )
    clear_button.click(
        fn=clear_inputs,
        inputs=[],
        outputs=[query_input, keywords_input, page_input, per_page_input, output, time_output]  # reset all inputs and outputs
    )

demo.launch()