# --------------------------
# 基于GPT模型的文献检索工具
# --------------------------

# 导入必要的库
import os
import json
import warnings
warnings.filterwarnings("ignore")
from openai import OpenAI
from .papersearch import get_pubmed_papers,Semantic_Scholar,arxiv_search
from .reranker import siliconflow_reranker
from .milvus_ulits import insert_milvus,search_milvus,init_milvus
from .embedding import siliconflow_embedding

# --------------------1 患者问题的意图识别（判断患者问题是否是文献检索）------------------
def intent_recognition(question:str,token:str):
    """Classify whether a user question is a literature-search request.

    Sends the question to the internlm chat endpoint and returns True when
    the model's reply contains the token "True", otherwise False.

    Args:
        question: The user's question, in natural language.
        token: API token for the internlm endpoint (raw, no "Bearer " prefix).

    Returns:
        bool: True if the model classified the question as literature search.
    """
    prompt = f"""
    你是一名助手，需要判断用户的问题是否与文献检索相关。文献检索包括但不限于：
    如果用户问题涉及寻找学术资源、数据库搜索、文献综述，返回 True。
    任何其他问题，比如普通信息查询、非学术话题等，返回 False。
    输出格式：
    仅输出 True 或 False，无需其他说明

用户问题：{question}
    """
    llm = OpenAI(
        api_key=token,  # raw token here, without a "Bearer " prefix
        base_url="https://internlm-chat.intern-ai.org.cn/puyu/api/v1/",
    )
    reply = llm.chat.completions.create(
        model="internlm2.5-latest",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=100,
    )
    # The model is instructed to answer with a bare True/False token;
    # a substring check tolerates any surrounding whitespace or prose.
    return "True" in reply.choices[0].message.content


# ----------------------2 文献检索-------------------------
# ----------------------2.1 生成检索关键词------------------
def generate_search_keywords(question:str,token:str) -> dict:
    """
    Generate search keywords tailored to each supported retrieval backend.

    Currently supported backends: paperscraper (PubMed) and semantic_scholar.
    The arxiv and google_scholar sections below are unimplemented placeholders.

    Args:
        question: The user's literature-search question.
        token: API token for the internlm chat endpoint (raw, no "Bearer ").

    Returns:
        dict: {'paperscraper': nested keyword lists parsed from the LLM reply,
               'semantic_scholar': a single query-friendly description string}

    Raises:
        json.JSONDecodeError: if the LLM reply for the paperscraper keywords
            cannot be parsed as JSON after quote normalisation.
    """
    search_keys = {

    }
    # 1 paperscraper: few-shot conversation asking the LLM for nested keyword
    # lists in [[...]] form (species/theme, specific terms, literature type).
    messages = [
        {"role": "user", "content": "Given the following text or abstract, extract key concepts and terms that can be used to find similar articles in PubMed. The output should include both specific and related terms for the subject matter, with a strong emphasis on species information or research themes (e.g., plants, animals, humans) to ensure relevance. Also, include terms that indicate the type of literature (e.g., review, methods, genomics analysis). Format the search query in a nested list structure [[list]], where each inner list contains keywords with their synonyms.\n\nText/Abstract: \"Your input text or abstract here.\"\n\nOutput Format Example:\n[\n    [\n        ['species_info', 'related_species_info'], \n        ['specific_term1', 'related_term1'], \n        ['specific_term2', 'fuzzy_term2'], \n        ['literature_type1', 'literature_type2']\n    ],\n    [\n        ['species_info', 'related_species_info'], \n        ['specific_term3', 'related_term3'], \n        ['specific_term4', 'fuzzy_term4'], \n        ['literature_type3', 'literature_type4']\n    ],\n    [\n        ['species_info', 'related_species_info'], \n        ['specific_term5', 'related_term5'], \n        ['specific_term6', 'fuzzy_term6'], \n        ['literature_type5', 'literature_type6']\n    ]\n]\n\nReplace 'species_info' with relevant species or research theme (e.g., 'plant', 'Arabidopsis', 'human', 'cancer'), 'specific_term1', 'specific_term2', etc., with the relevant subject-specific terms, and 'literature_type1', 'literature_type2', etc., with terms identifying the type of study (e.g., 'review', 'methods', 'genomics', 'transcriptomics')."},
        {"role": "assistant", "content": "Please provide relevant sections of the document (e.g., abstract, methods, descriptions, or other text) that you would like analyzed..  I need the content to extract the key concepts and terms. \n\nOnce you provide the text, I will:\n\n1. **Identify the key concepts and terms.**\n2. **Group related terms and synonyms together.**\n3. **Categorize these terms into species/research theme, specific terms, and literature type.**\n4. **Format the output as a nested list as requested.**\n\n**Example:**\n\nLet's say you provided the following text:\n\n\"This review explores the role of microRNAs in regulating drought stress response in Arabidopsis thaliana. We discuss recent advances in understanding the complex regulatory networks involved and highlight potential applications for improving drought tolerance in crops.\"\n\nThen, the output would be:\n\n```\n[\n    [\n        ['Arabidopsis thaliana', 'Arabidopsis', 'plant', 'crop'], \n        ['microRNA', 'miRNA', 'gene regulation'], \n        ['drought stress', 'drought tolerance', 'abiotic stress'], \n        ['review', 'literature review']\n    ],\n    [\n        ['plant', 'crop'],\n        ['gene expression', 'regulatory network', 'molecular mechanism'],\n        ['drought response', 'stress response'],\n        ['review', 'research article']\n    ]\n]\n```\n\n**Looking forward to receiving your text!** \n"},
        {"role": "user", "content": "The text is（please only provide the text in plaintext format and english words）: " + question},
    ]
    client = OpenAI(    
        api_key=token,  # raw token here, without a "Bearer " prefix
        base_url="https://internlm-chat.intern-ai.org.cn/puyu/api/v1/",
    )
    
    response = client.chat.completions.create(
        model="internlm2.5-latest",
        messages=messages,
        max_tokens=4096,
        # response_format={"type": "json_object"},
    )
    res = response.choices[0].message.content
    # Keep only the text between the first '[' and the last ']' so that any
    # surrounding prose / code fences in the LLM reply is discarded.
    res = res[res.find("["):res.rfind("]")+1]
    # NOTE(review): single->double quote replacement assumes no apostrophes
    # inside the keywords themselves; json.loads will fail otherwise.
    res = res.replace("'", '"')
    res = json.loads(res)
    search_keys['paperscraper'] = res

    # 2 arxiv (not implemented yet)


    # 3 google_scholar_search (not implemented yet)



    # 4 semantic_scholar: rewrite the user question into a concise,
    # query-friendly description suitable for Semantic Scholar search.
    prompt = f"""Prompt:
You are tasked with transforming a user's question into a clear, concise, and search-friendly description suitable for querying Semantic Scholar. The result must retain the essential keywords, concepts, and context while removing any extraneous or conversational elements.

Guidelines:
Preserve core keywords and key details from the original question.
Rephrase the input into a declarative, precise sentence.
Focus on creating a query-friendly structure that works well for academic searches.
Output only the transformed description.
Example:
Input:
"Where can I find research papers about the applications of machine learning in cancer diagnosis over the past 5 years?"

Output:
"Research on machine learning applications in cancer diagnosis from the last 5 years."

User question（only output the question, no other words,with english words）: {question}
"""

    messages = [
        {"role": "user", "content": prompt},
    ]
    response = client.chat.completions.create(
        model="internlm2.5-latest",
        messages=messages,
        max_tokens=4096,
    )
    res = response.choices[0].message.content
    search_keys['semantic_scholar'] = res

    return search_keys


# ----------------------2.3 执行文献检索------------------
def execute_search(question:str,token:str,search_keys:dict):
    """
    Run the literature search against each backend and merge the results.

    Args:
        question: Original user question (unused here; kept for interface
            parity with the other pipeline steps).
        token: API token (unused here; kept for interface parity).
        search_keys: Output of generate_search_keywords(); must contain
            'semantic_scholar' (query string) and 'paperscraper'
            (nested keyword lists).

    Returns:
        list: Paper records in the unified ["title", "abstract", "authors",
        "date", "journal", "url"] shape, de-duplicated by title.
    """
    # 1.1 Semantic Scholar — the LLM sometimes wraps the query in quotes.
    semantic_scholar_text = search_keys['semantic_scholar'].strip('"')
    semantic_scholar_results = Semantic_Scholar(semantic_scholar_text, limit=100)
    # 1.2 PubMed via paperscraper keyword lists (first keyword group only).
    paperscraper_results = get_pubmed_papers(keywords=search_keys['paperscraper'][0], max_results=100)

    print(len(semantic_scholar_results), len(paperscraper_results))

    # Merge and de-duplicate by title.
    # BUG FIX: the original appended matches back onto the very list being
    # iterated, so it returned every record PLUS a duplicated copy of the
    # de-duplicated set. Build a fresh output list instead.
    merged = semantic_scholar_results + paperscraper_results
    seen_titles = set()  # O(1) membership instead of a list scan per record
    deduped = []
    for paper in merged:
        if paper['title'] not in seen_titles:
            seen_titles.add(paper['title'])
            deduped.append(paper)
    return deduped

# ----------------------2.4 计算相关性得分------------------
# 计算相似性并存储进数据库中
def cal_embedding(results, path:str="data/milvus_demo.db"):
    """
    Embed retrieved papers and store them in a fresh Milvus Lite database.

    Any pre-existing database file (and its stale lock file) at `path` is
    removed first, so each call starts from an empty collection.

    Args:
        results: Paper records; each must carry 'title', 'abstract' and
            'authors' keys. Records missing a title or abstract are skipped.
            NOTE: author lists are joined into strings in place (mutates
            the caller's records), matching the original behaviour.
        path: Filesystem path of the Milvus Lite database file.

    Returns:
        bool: True on completion.
    """
    parent = os.path.dirname(path)
    if parent:  # dirname is "" for a bare filename; makedirs("") would raise
        os.makedirs(parent, exist_ok=True)
    if os.path.exists(path):
        os.remove(path)
        # BUG FIX: Milvus Lite keeps its lock file NEXT TO the db file as
        # ".<dbname>.lock"; the original looked for it *inside* the db path
        # ("<path>/.milvus_demo.db.lock"), so stale locks were never removed.
        lock_path = os.path.join(parent, "." + os.path.basename(path) + ".lock")
        if os.path.exists(lock_path):
            os.remove(lock_path)
    init_milvus(path)

    # Keep only records with both a title and an abstract; normalise authors.
    cleaned_results = []
    for result in results:
        if result['title'] and result['abstract']:
            if isinstance(result['authors'], list):
                result['authors'] = ', '.join(result['authors'])
            # TODO(review): 'date' may still be a non-string type here;
            # the original intended to stringify it but never did.
            cleaned_results.append(result)

    if not cleaned_results:
        # Nothing embeddable — avoid calling the embedding API / insert
        # with empty payloads.
        return True

    # Embed title+abstract, truncated to stay within the model input limit.
    texts = [(result['title'] + result['abstract'])[:3000] for result in cleaned_results]

    # Batch in chunks of 50 to respect the embedding API's batch limit.
    all_embeddings = []
    for i in range(0, len(texts), 50):
        all_embeddings.extend(siliconflow_embedding(texts[i:i+50], model_name="BAAI/bge-m3"))

    # Pair each record with its vector (zip stops at the shorter sequence,
    # mirroring the original index loop over all_embeddings).
    for record, vector in zip(cleaned_results, all_embeddings):
        record['vector'] = vector

    # Store in milvus
    insert_milvus(cleaned_results, path=path)
    return True

# ----------------------2.5 排名和筛选----------------------
def rank_and_filter(question:str,token:str,path:str="data/milvus_demo.db",top_n:int=30):
    """
    Retrieve candidate papers from Milvus and rerank them against the question.

    Pulls the 50 nearest neighbours by embedding similarity, then asks the
    reranker to keep the top_n most relevant. `token` is unused here and is
    kept only for interface parity with the other pipeline steps.

    Args:
        question: The user's question, used both for retrieval and reranking.
        token: Unused.
        path: Milvus Lite database file to search.
        top_n: Number of papers the reranker should keep.

    Returns:
        tuple: (reranked_results, index_list) where index_list holds the
        positions of the kept papers within the candidate list.
    """
    hits = search_milvus(question, path, top_n=50)
    candidates = [hit['entity'] for hit in hits[0]]
    # Rerank on title+abstract, truncated to 2000 chars per document.
    docs = []
    for candidate in candidates:
        docs.append((candidate['title'] + candidate['abstract'])[:2000])
    index_list = siliconflow_reranker(question, docs, top_n=top_n)
    ranked = [candidates[i] for i in index_list]
    return ranked, index_list



# ---------------------2.6 最终推荐文献绘制和报告生成------------------
def generate_report(question:str,token:str,results:list,index_list:list,top_n:int=10):
    """
    Generate a cited answer report from the top_n retrieved papers.

    Builds a numbered reference list from the first top_n results and asks
    the LLM to answer the user's question with [n]-style citations.
    `index_list` is accepted for interface parity but not used here.

    Args:
        question: The user's question.
        token: API token for the internlm endpoint (raw, no "Bearer ").
        results: Reranked paper records with 'title', 'abstract', 'date'.
        index_list: Unused.
        top_n: How many of the leading papers to cite in the report.

    Returns:
        str: The generated report text.
    """
    # Numbered reference entries of the form "[n] title abstract date".
    numbered_refs = [
        f"[{i+1}] {paper['title']} {paper['abstract']} {paper['date']}"
        for i, paper in enumerate(results[0:top_n])
    ]
    prompt = f"""
    请根据以下编号文献列表，针对用户提出的问题生成解答报告。要求从文献列表中筛选**最相关**的内容进行回答，解答中需引用文献编号，并在相关句子后使用 文献编号[(数字)] 格式标注。

**输入格式**：
文献编号表：{numbered_refs}


用户问题：{question}


**输出格式**：
1. **解答内容**：详细回答用户问题，句子后使用 [文献编号] 标注引用出处。

例如：
用户问题：针对肺部X射线图像检测慢性感染的方法是什么？
解答内容：
一种有效的慢性感染检测方法是结合离散小波变换（DWT）和局部二值模式（LBP）技术，使用预训练的深度学习模型如 AlexNet 和 Xception 提取特征，再进行特征融合与分类。研究表明，AlexNet 与 DWT 结合后达到了 99.7% 的准确率，表现最优 [1]。

2. **结论**：总结回答的要点。

**注意事项**：
- 回答内容要基于文献列表，逻辑清晰，重点突出，避免冗余。
- 引用格式为句子后加 [文献编号]。
- 结论部分简洁归纳核心信息。
- 不用参考文献。
"""
    llm = OpenAI(
        api_key=token,  # raw token here, without a "Bearer " prefix
        base_url="https://internlm-chat.intern-ai.org.cn/puyu/api/v1/",
    )
    reply = llm.chat.completions.create(
        model="internlm2.5-latest",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=4096,
    )
    return reply.choices[0].message.content





if __name__ == "__main__":

    question = "我想查找关于脓毒症治疗指南的最新进展，包括脓毒症患者的早期识别、诊断标准、初始液体复苏策略、抗生素使用指征与选择、感染源控制措施、器官功能支持与监测、以及针对特定病原体或宿主因素的个性化治疗方案等方面的详细指导和推荐"
    # SECURITY FIX: an API bearer token was hard-coded here, leaking the
    # credential to anyone with read access to this file. Read it from the
    # environment instead; never commit secrets to source.
    token = os.environ.get("PUYU_API_TOKEN", "")
    if not token:
        raise SystemExit("Set the PUYU_API_TOKEN environment variable before running.")
    # print(intent_recognition(question,token))

    # Full pipeline: keywords -> search -> embed/store -> rerank -> report.
    search_keys = generate_search_keywords(question, token)
    # print(search_keys)
    results = execute_search(question, token, search_keys)
    cal_embedding(results, path="data/milvus_demo.db")
    results, index_list = rank_and_filter(question, token, path="data/milvus_demo.db", top_n=30)
    report = generate_report(question, token, results, index_list, top_n=10)
    print(report)