import re
import time
from transformers import AutoTokenizer, AutoModel, AutoModelForSequenceClassification
import torch
from pymilvus import connections, Collection, db
import gradio as gr
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# Configure an HTTP session with automatic retries for transient server errors
# (3 attempts, exponential backoff) on HTTPS requests.
# NOTE(review): `session` is not referenced anywhere else in this file — it may
# be consumed indirectly (e.g. by model downloads) or be dead code; verify
# before removing.
session = requests.Session()
retry_strategy = Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
adapter = HTTPAdapter(max_retries=retry_strategy)
session.mount("https://", adapter)

# Initialize the BiomedBERT model and tokenizer (used to generate the initial
# document/query embeddings).
# BUG FIX: the previous code passed `use_offline=...`, which is not a
# recognized `from_pretrained` argument (the real one is `local_files_only`),
# and it gated "offline mode" on `torch.cuda.is_available()` — GPU presence
# has nothing to do with network availability. The kwarg was silently ignored,
# so removing it preserves behavior; to force offline loading, set the
# TRANSFORMERS_OFFLINE=1 environment variable or pass local_files_only=True.
try:
    tokenizer = AutoTokenizer.from_pretrained(
        "microsoft/BiomedNLP-BiomedBERT-large-uncased-abstract",
        cache_dir="./model_cache",  # local cache path for downloaded weights
    )
    model = AutoModel.from_pretrained(
        "microsoft/BiomedNLP-BiomedBERT-large-uncased-abstract",
        cache_dir="./model_cache"
    )
except Exception as e:
    print(f"加载 BiomedBERT 失败: {e}")
    raise

# Initialize the reranking model and tokenizer (BAAI/bge-reranker-v2-m3),
# a cross-encoder that re-scores (query, passage) pairs after retrieval.
try:
    reranker_tokenizer = AutoTokenizer.from_pretrained(
        "BAAI/bge-reranker-v2-m3",
        cache_dir="./model_cache"
    )
    reranker_model = AutoModelForSequenceClassification.from_pretrained(
        "BAAI/bge-reranker-v2-m3",
        cache_dir="./model_cache"
    )
    # Inference only: switch off dropout / training-mode layers.
    reranker_model.eval()
except Exception as e:
    print(f"加载重排模型失败: {e}")
    raise

# Milvus configuration
DATABASE_NAME = "my_medical_db"  # target Milvus database name
COLLECTION_NAME = "new_medical_docs"  # collection holding the document chunks
# Embedding dimension. NOTE(review): not referenced elsewhere in this file;
# presumably matches the collection schema / BiomedBERT-large hidden size — confirm.
DIMENSION = 1024


def connect_milvus():
    """Connect to the local Milvus server and select the target database.

    Logs and re-raises any connection error so callers can surface it.
    """
    try:
        connections.connect(host='localhost', port='19530')
        db.using_database(DATABASE_NAME)
    except Exception as exc:
        print(f"Milvus 连接失败: {exc}")
        raise


def generate_query_embedding(query_text):
    """Embed *query_text* with BiomedBERT and return the vector as a list.

    Uses the [CLS] token's last hidden state as the sentence representation.
    Input is truncated to 512 tokens.
    """
    encoded = tokenizer(query_text, return_tensors="pt", padding=True,
                        truncation=True, max_length=512)
    with torch.no_grad():
        hidden_states = model(**encoded).last_hidden_state
    # Position 0 is the [CLS] token; squeeze drops the batch dimension.
    return hidden_states[:, 0, :].squeeze().numpy().tolist()


def highlight_keywords(text, keywords):
    """Wrap every case-insensitive occurrence of each keyword in a highlight span.

    The original casing of the matched text is preserved.

    Args:
        text: the text to annotate (treated as HTML downstream).
        keywords: iterable of literal keyword strings.

    Returns:
        The text with each match wrapped in a styled <span>.
    """
    for keyword in keywords:
        # BUG FIX: keywords are user input and must be escaped — metacharacters
        # like "+" or "(" previously broke the pattern or changed its meaning.
        text = re.sub(
            re.escape(keyword),
            r'<span style="background-color: #ffcccc; padding: 2px;">\g<0></span>',
            text,
            flags=re.IGNORECASE,
        )
    return text


def semantic_search(collection, query_text, top_k):
    """Vector search over the collection's "embedding" field.

    Args:
        collection: loaded Milvus Collection.
        query_text: free-text query, embedded via BiomedBERT.
        top_k: number of nearest neighbours to return.

    Returns:
        List of (id, chunk_text, similarity) tuples, where similarity maps the
        L2 distance into (0, 1] via 1 / (1 + distance).
    """
    embedding = generate_query_embedding(query_text)
    collection.load()
    params = {"metric_type": "L2", "params": {"nprobe": 10}}
    hits = collection.search(
        data=[embedding],
        anns_field="embedding",
        param=params,
        limit=top_k,
        output_fields=["id", "chunk_text"],
    )
    matches = []
    for hit in hits[0]:
        similarity = 1 / (1 + hit.distance)
        matches.append((hit.entity.get("id"), hit.entity.get("chunk_text"), similarity))
    return matches


def keyword_search(collection, keywords, top_k):
    """Substring search over chunk_text for each keyword.

    Scoring is heuristic: 0.5 base + 0.15 per matched keyword, capped at 1.0.

    Args:
        collection: Milvus Collection to query.
        keywords: list of literal keyword strings (user input).
        top_k: maximum number of rows to return.

    Returns:
        List of (id, chunk_text, score) tuples; empty list on query failure.
    """
    collection.load()
    # BUG FIX: escape single quotes so a keyword cannot break out of the quoted
    # literal and corrupt (or inject into) the Milvus filter expression.
    expr = " || ".join(
        "chunk_text like '%{}%'".format(kw.replace("'", "\\'")) for kw in keywords
    )
    try:
        results = collection.query(expr=expr, limit=top_k, output_fields=["id", "chunk_text"])
        keyword_results = []
        for res in results:
            text = res["chunk_text"]
            # BUG FIX: re.escape — keywords may contain regex metacharacters.
            matched_keywords = sum(
                1 for kw in keywords if re.search(re.escape(kw), text, re.IGNORECASE)
            )
            score = min(0.5 + matched_keywords * 0.15, 1.0)
            keyword_results.append((res["id"], text, score))
        return keyword_results
    except Exception as e:
        print(f"关键词搜索失败: {e}")
        return []


def rerank_results(query_text, results):
    """Re-score results with the cross-encoder reranker and sort by score.

    Each result dict gets its "score" overwritten with the reranker's
    relevance score for (query_text, result["text"]); the list is returned
    sorted descending by that score. Mutates the result dicts in place.
    """
    pairs = [[query_text, item["text"]] for item in results]
    with torch.no_grad():
        encoded = reranker_tokenizer(pairs, padding=True, truncation=True,
                                     return_tensors="pt", max_length=512)
        logits = reranker_model(**encoded).logits
        if logits.shape[1] == 1:
            # Single-logit head: sigmoid yields a relevance probability.
            scores = torch.sigmoid(logits).squeeze().tolist()
        else:
            # Two-class head: probability of the "relevant" class.
            scores = logits.softmax(dim=1)[:, 1].tolist()
        # squeeze().tolist() returns a bare float when there is one pair.
        if not isinstance(scores, list):
            scores = [scores]
        for item, relevance in zip(results, scores):
            item["score"] = relevance
    return sorted(results, key=lambda item: item["score"], reverse=True)


# Hybrid retrieval: compare the original ranking against the reranked ranking.
def hybrid_search(collection, query_text, keywords, top_k=50):
    """Run semantic and/or keyword retrieval, rerank, and merge the results.

    Args:
        collection: Milvus Collection to search.
        query_text: free-text query for semantic search (may be empty).
        keywords: list of keywords for substring search (may be empty).
        top_k: maximum hits requested from each retriever.

    Returns:
        (original_results, final_results, search_time): two lists of result
        dicts ({"id", "text", "score"}) sorted by score descending, plus the
        elapsed wall-clock time in seconds.
    """
    start_time = time.time()

    results_dict = {}
    semantic_final_results = []
    keyword_final_results = []
    # BUG FIX: keyword_original must exist even when no keywords are given;
    # previously a pure semantic query raised NameError in the merge below.
    keyword_original = []

    # Semantic search + rerank
    if query_text:
        semantic_results = semantic_search(collection, query_text, top_k)
        for id_, text, score in semantic_results:
            results_dict[id_] = {"id": id_, "text": text, "score": score}
        semantic_original = [{"id": item["id"], "text": item["text"], "score": item["score"]}
                             for item in results_dict.values()]
        semantic_final_results = rerank_results(query_text, semantic_original)

    # Keyword search + rerank
    if keywords:
        keyword_results = keyword_search(collection, keywords, top_k)
        for id_, text, score in keyword_results:
            if id_ in results_dict:
                # Keep the better of the semantic and keyword scores.
                results_dict[id_]["score"] = max(results_dict[id_]["score"], score)
            else:
                results_dict[id_] = {"id": id_, "text": text, "score": score}
        keyword_original = [
            {"id": item["id"], "text": highlight_keywords(item["text"], keywords), "score": item["score"]}
            for item in results_dict.values()]
        keyword_query = " ".join(keywords)
        keyword_final_results = rerank_results(keyword_query, keyword_original) if keyword_original else []

    # Merge: reranked semantic hits first, then hits found only by the other path.
    original_results = semantic_final_results + [r for r in keyword_original if
                                                 r["id"] not in {x["id"] for x in semantic_final_results}]
    final_results = semantic_final_results + [r for r in keyword_final_results if
                                              r["id"] not in {x["id"] for x in semantic_final_results}]

    # Deduplicate by id (last occurrence wins) and sort by score descending.
    original_results = sorted(list({r["id"]: r for r in original_results}.values()), key=lambda x: x["score"],
                              reverse=True)
    final_results = sorted(list({r["id"]: r for r in final_results}.values()), key=lambda x: x["score"], reverse=True)

    search_time = time.time() - start_time
    return original_results, final_results, search_time


def paginate_results(results, page, per_page):
    """Slice *results* for a 1-based *page* number.

    Returns:
        (page_slice, total_count) — the items belonging to the requested page
        and the total number of results before pagination.
    """
    offset = (page - 1) * per_page
    return results[offset:offset + per_page], len(results)


# Gradio 搜索函数
def search(query_text, keywords_input, page, per_page):
    empty_output = "", ""
    if not query_text and not keywords_input:
        return "请输入查询内容或关键词", ""

    try:
        connect_milvus()
        collection = Collection(COLLECTION_NAME)

        keywords = keywords_input.split() if keywords_input else []
        original_results, final_results, search_time = hybrid_search(collection, query_text, keywords)

        if not final_results:
            return "未找到匹配结果，请检查关键词或查询内容", f"搜索耗时: {search_time:.2f}秒"

        original_page_results, total_results = paginate_results(original_results, page, per_page)
        final_page_results, _ = paginate_results(final_results, page, per_page)
        total_pages = (total_results + per_page - 1) // per_page

        output = f"查询: {query_text or '无'}, 关键词: {keywords or '无'}<br>"
        output += f"总结果数: {total_results}, 当前页: {page}/{total_pages}, 每页显示: {per_page}<br>"
        output += f"搜索耗时: {search_time:.2f}秒<br><br>"

        output += "<h3>原始排序结果</h3>"
        for result in original_page_results:
            output += f"ID: {result['id']}<br>"
            output += f"Score: {result['score']:.4f}<br>"
            output += f"Text: {result['text']}<br>"
            output += "-" * 50 + "<br>"

        output += "<h3>重排后结果</h3>"
        for result in final_page_results:
            output += f"ID: {result['id']}<br>"
            output += f"Score: {result['score']:.4f}<br>"
            output += f"Text: {result['text']}<br>"
            output += "-" * 50 + "<br>"

        return output, f"搜索耗时: {search_time:.2f}秒"

    except Exception as e:
        print(f"搜索过程中发生错误: {e}")
        return f"搜索失败: {str(e)}", ""


# Gradio UI: two text inputs (query / keywords), pagination controls, and an
# HTML output area, wired to the `search` callback.
with gr.Blocks(title="医学文献搜索") as demo:
    gr.Markdown("# 医学文献搜索系统")

    with gr.Row():
        query_input = gr.Textbox(label="查询内容（可选）", placeholder="请输入查询内容...")
        keywords_input = gr.Textbox(label="关键词（可选，用空格分隔）", placeholder="请输入关键词...")

    with gr.Row():
        # NOTE(review): the page slider is capped at 10, but result sets may
        # span more pages — confirm this limit is intended.
        page_input = gr.Slider(minimum=1, maximum=10, value=1, step=1, label="页码")
        per_page_input = gr.Dropdown(choices=[5, 10, 20], value=10, label="每页显示数量")

    search_button = gr.Button("搜索")
    output = gr.HTML(label="搜索结果", value="")
    time_output = gr.Textbox(label="响应时间", interactive=False, value="")

    # Button click runs `search` with (query, keywords, page, per_page) and
    # fills the HTML output plus the timing textbox.
    search_button.click(
        fn=search,
        inputs=[query_input, keywords_input, page_input, per_page_input],
        outputs=[output, time_output]
    )

demo.launch()