
# -*- coding: utf-8 -*-
"""
基于Neo4j、BERT和PyVis的医疗知识图谱构建与问答系统

本文件包含构建医疗知识图谱和问答系统的完整代码。

"""

import requests
from bs4 import BeautifulSoup
import spacy
from neo4j import GraphDatabase
import pandas as pd
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, TrainingArguments, Trainer
from datasets import Dataset
from transformers import pipeline
from pyvis.network import Network
import networkx as nx
import os

# Neo4j connection settings. setdefault() keeps any values already present in
# the environment, so credentials exported before launch are not clobbered
# (the original hard assignments silently overwrote them).
os.environ.setdefault('NEO4J_URI', 'bolt://localhost:7687')
os.environ.setdefault('NEO4J_USER', 'neo4j')
# NOTE(review): do not ship a real password in source — export NEO4J_PASSWORD
# in the environment instead; this default is a placeholder.
os.environ.setdefault('NEO4J_PASSWORD', 'password')


# --- 数据爬取 ---
def get_pubmed_abstract(pmid, timeout=10.0):
    """Fetch the abstract text of a paper from PubMed by its PMID.

    Args:
        pmid: PubMed ID of the paper.
        timeout: Per-request timeout in seconds. The original call had no
            timeout, so a stalled connection could hang the process forever
            (requests never times out by default).

    Returns:
        The abstract text with surrounding whitespace stripped, or None when
        the page has no abstract div or the HTTP request fails.
    """
    url = f"https://pubmed.ncbi.nlm.nih.gov/{pmid}/"
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()  # raise on 4xx/5xx responses
        soup = BeautifulSoup(response.text, 'html.parser')
        abstract_div = soup.find('div', class_='abstract-content')
        return abstract_div.text.strip() if abstract_div else None
    except requests.exceptions.RequestException as e:
        # Network failures and bad statuses are reported, not raised.
        print(f"Error fetching PMID {pmid}: {e}")
        return None


# --- 知识抽取 (示例) ---
def extract_entities_spacy(text):
    """Run named-entity recognition over *text* with the scispaCy model.

    Returns:
        A list of {"text": entity_span, "label": entity_label} dicts.
    """
    # Loading a spaCy pipeline is very expensive (reads hundreds of MB from
    # disk). The original reloaded it on every call; memoize the model on the
    # function object so the cost is paid at most once per process.
    nlp = getattr(extract_entities_spacy, "_nlp", None)
    if nlp is None:
        nlp = spacy.load("en_core_sci_lg")
        extract_entities_spacy._nlp = nlp
    doc = nlp(text)
    return [{"text": ent.text, "label": ent.label_} for ent in doc.ents]


# --- Neo4j连接 ---
def get_neo4j_driver():
    """Build a Neo4j driver from the NEO4J_URI/USER/PASSWORD env variables.

    Returns:
        The driver instance, or None when construction fails (the error is
        printed rather than raised).
    """
    settings = {key: os.environ.get(f'NEO4J_{key}')
                for key in ('URI', 'USER', 'PASSWORD')}
    try:
        return GraphDatabase.driver(settings['URI'],
                                    auth=(settings['USER'], settings['PASSWORD']))
    except Exception as e:
        print(f"Error connecting to Neo4j: {e}")
        return None


# --- 知识图谱构建 ---
def _split_values(raw):
    """Split a ';'-delimited CSV cell into a list of clean values.

    Missing data is tolerated: None and floats (pandas yields float NaN for
    empty CSV cells, which crashed the original ``.split``) give an empty
    list, and blank fragments left by stray separators are dropped after
    whitespace stripping so no empty-named nodes are created.
    """
    if raw is None or isinstance(raw, float):  # float covers pandas NaN
        return []
    return [part.strip() for part in str(raw).split(';') if part.strip()]


def create_graph(tx, disease, symptoms, drugs, side_effects, tests, surgeries, department, genes):
    """Create the nodes and relationships for one disease row in Neo4j.

    Args:
        tx: Neo4j transaction (anything exposing ``run(query, **params)``).
        disease: Disease name (single value).
        symptoms, drugs, side_effects, tests, surgeries, genes:
            ';'-separated strings; empty/NaN cells are skipped.
        department: Department name (single value).
    """
    # Disease node.
    tx.run("MERGE (d:Disease {name: $disease})", disease=disease)

    # Symptom nodes and HAS_SYMPTOM edges.
    for symptom in _split_values(symptoms):
        tx.run("MERGE (s:Symptom {name: $symptom})", symptom=symptom)
        tx.run("MATCH (d:Disease {name: $disease}), (s:Symptom {name: $symptom}) "
               "MERGE (d)-[:HAS_SYMPTOM]->(s)", disease=disease, symptom=symptom)

    # Drug nodes and TREATED_WITH edges.
    for drug in _split_values(drugs):
        tx.run("MERGE (dr:Drug {name: $drug})", drug=drug)
        tx.run("MATCH (d:Disease {name: $disease}), (dr:Drug {name: $drug}) "
               "MERGE (d)-[:TREATED_WITH]->(dr)", disease=disease, drug=drug)

        # Side effects are stored once per CSV row, not per drug, so every
        # drug in the row gets linked to every listed side effect. This
        # matches the original nesting — TODO confirm it is intended.
        for side_effect in _split_values(side_effects):
            tx.run("MERGE (se:SideEffect {name: $side_effect})", side_effect=side_effect)
            tx.run("MATCH (dr:Drug {name: $drug}), (se:SideEffect {name: $side_effect}) "
                   "MERGE (dr)-[:HAS_SIDE_EFFECT]->(se)", drug=drug, side_effect=side_effect)

    # Test nodes and INDICATES edges.
    for test in _split_values(tests):
        tx.run("MERGE (t:Test {name: $test})", test=test)
        tx.run("MATCH (d:Disease {name: $disease}), (t:Test {name: $test}) "
               "MERGE (d)-[:INDICATES]->(t)", disease=disease, test=test)

    # Surgery nodes and PERFORMED_FOR edges.
    for surgery in _split_values(surgeries):
        tx.run("MERGE (su:Surgery {name: $surgery})", surgery=surgery)
        tx.run("MATCH (d:Disease {name: $disease}), (su:Surgery {name: $surgery}) "
               "MERGE (d)-[:PERFORMED_FOR]->(su)", disease=disease, surgery=surgery)

    # Department node and BELONGS_TO edge (single value per row).
    tx.run("MERGE (dep:Department {name: $department})", department=department)
    tx.run("MATCH (d:Disease {name: $disease}), (dep:Department {name: $department}) "
               "MERGE (d)-[:BELONGS_TO]->(dep)", disease=disease, department=department)

    # Gene nodes and RELATED_TO edges (optional column; the old explicit
    # ``genes is not None`` guard is subsumed by _split_values).
    for gene in _split_values(genes):
        tx.run("MERGE (g:Gene {name: $gene})", gene=gene)
        tx.run("MATCH (d:Disease {name: $disease}), (g:Gene {name: $gene}) "
               "MERGE (d)-[:RELATED_TO]->(g)", disease=disease, gene=gene)


def build_knowledge_graph(data_file):
    """Load a CSV of disease rows into the Neo4j knowledge graph.

    Expects Chinese column headers: 疾病, 症状, 药物, 副作用, 检查, 手术,
    科室, 基因. Rows that fail are reported and skipped so one bad row does
    not abort the whole load.

    Args:
        data_file: Path to the CSV file.
    """
    driver = get_neo4j_driver()
    if driver is None:
        return

    try:
        data = pd.read_csv(data_file)
        with driver.session() as session:
            for _, row in data.iterrows():
                try:
                    # NOTE(review): write_transaction is deprecated in neo4j
                    # driver 5.x (execute_write) — confirm installed version.
                    session.write_transaction(create_graph, row['疾病'], row['症状'], row['药物'],
                                            row['副作用'], row['检查'], row['手术'], row['科室'], row['基因'])
                except Exception as e:
                    print(f"Error processing row: {row}\nError: {e}")
        print("医疗知识图谱已构建！")
    finally:
        # Always release the driver — the original leaked it whenever
        # pd.read_csv or the session raised.
        driver.close()


# --- 知识图谱查询 ---
def query_knowledge_graph(query, params=None):
    """Execute a Cypher query and return its records.

    Args:
        query: Cypher query string.
        params: Optional dict of query parameters.

    Returns:
        A list of result records; empty on connection or query failure.
    """
    driver = get_neo4j_driver()
    if driver is None:
        return []

    try:
        with driver.session() as session:
            try:
                # Materialize inside the session: records cannot be consumed
                # after the session closes.
                return [record for record in session.run(query, params)]
            except Exception as e:
                print(f"Error executing query: {query}\nError: {e}")
                return []
    finally:
        driver.close()  # the original leaked one driver per call


# --- BERT模型微调 (可选) ---
def fine_tune_bert(train_data, model_name="dmis-lab/biobert-v1.1", output_dir="./fine_tuned_biobert"):
    """Fine-tune a BioBERT model for extractive question answering.

    Args:
        train_data: List of dicts with "context", "question" and "answers"
            keys, where "answers" is {"text": [...], "answer_start": [...]}
            (SQuAD format).
        model_name: Hugging Face model identifier to start from.
        output_dir: Directory for checkpoints and the final saved model.

    Note: training needs significant time and compute; this is a framework.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForQuestionAnswering.from_pretrained(model_name)

    # Convert the raw examples into a datasets.Dataset.
    train_dataset = Dataset.from_dict({
        "context": [item["context"] for item in train_data],
        "question": [item["question"] for item in train_data],
        "answers": [item["answers"] for item in train_data]
    })

    def preprocess_function(examples):
        """Tokenize question/context pairs and map answers to token spans."""
        inputs = tokenizer(
            examples["question"],
            examples["context"],
            truncation="only_second",
            max_length=384,
            stride=128,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            padding="max_length",
        )

        offset_mapping = inputs.pop("offset_mapping")
        sample_map = inputs.pop("overflow_to_sample_mapping")
        answers = examples["answers"]
        start_positions = []
        end_positions = []

        for i, offset in enumerate(offset_mapping):
            # With overflowing tokens one example can span several features;
            # sample_map points each feature back at its source example.
            sample_idx = sample_map[i]
            answer = answers[sample_idx]
            start_char = answer["answer_start"][0]
            end_char = answer["answer_start"][0] + len(answer["text"][0])
            sequence_ids = inputs.sequence_ids(i)

            # Find the start and end of the context (sequence id 1).
            idx = 0
            while sequence_ids[idx] != 1:
                idx += 1
            context_start = idx
            while sequence_ids[idx] == 1:
                idx += 1
            context_end = idx - 1

            # If the answer is not fully inside the context, label it (0, 0).
            if offset[context_start][0] > end_char or offset[context_end][1] < start_char:
                start_positions.append(0)
                end_positions.append(0)
            else:
                # Otherwise walk inward to the start/end token positions.
                idx = context_start
                while idx <= context_end and offset[idx][0] <= start_char:
                    idx += 1
                start_positions.append(idx - 1)

                idx = context_end
                while idx >= context_start and offset[idx][1] >= end_char:
                    idx -= 1
                end_positions.append(idx + 1)

        inputs["start_positions"] = start_positions
        inputs["end_positions"] = end_positions
        return inputs

    # Preprocess the training data.
    train_dataset = train_dataset.map(preprocess_function, batched=True)

    training_args = TrainingArguments(
        output_dir=output_dir,
        # BUGFIX: the original passed evaluation_strategy="epoch" with no
        # eval_dataset, which makes Trainer raise at train time. Disable
        # evaluation until a validation set is wired in.
        evaluation_strategy="no",
        learning_rate=2e-5,
        per_device_train_batch_size=8,
        per_device_eval_batch_size=8,
        num_train_epochs=3,
        weight_decay=0.01,
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        # eval_dataset=eval_dataset,  # add one and re-enable evaluation_strategy
        tokenizer=tokenizer,
    )

    trainer.train()

    # Persist the fine-tuned model.
    trainer.save_model(output_dir)


# --- 问答系统 ---
def answer_question_with_kg(question):
    """Answer a question directly from the Neo4j knowledge graph.

    Only symptom questions of the form "X的症状..." are supported; anything
    else gets a polite refusal. All user-facing strings are Chinese and
    preserved verbatim.
    """
    driver = get_neo4j_driver()
    if not driver:
        print("无法连接到Neo4j")
        return "抱歉，知识库连接失败，请稍后再试。"
    try:
        return _symptom_answer(driver, question)
    finally:
        driver.close()  # the original leaked a driver on every call


def _symptom_answer(driver, question):
    """Resolve a symptom question against the graph using *driver*."""
    if "症状" not in question:
        return "抱歉，目前只能回答关于疾病症状的问题。"
    try:
        # Naive parse: everything before the first '的' is the disease name.
        disease_name = question.split('的')[0].strip()
        query = """
            MATCH (d:Disease {name: $disease_name})-[:HAS_SYMPTOM]->(s:Symptom)
            RETURN s.name AS symptom
            """
        with driver.session() as session:
            result = session.run(query, disease_name=disease_name)
            symptoms = [record["symptom"] for record in result]
        if symptoms:
            return f"{disease_name}的症状包括：{', '.join(symptoms)}。"
        return f"抱歉，知识库中没有找到{disease_name}的症状信息。"
    except Exception as e:
        print(f"查询出错：{e}")
        return "抱歉，查询出错，请稍后再试。"

def answer_question(question, model_path="./fine_tuned_biobert"):
    """Answer *question*, preferring the knowledge graph over the BERT model.

    The KG answer is kept unless it signals failure (contains "抱歉");
    otherwise a question-answering pipeline loaded from *model_path* is
    consulted with a generic context.
    """
    # Step 1: knowledge-graph lookup.
    kg_answer = answer_question_with_kg(question)
    kg_succeeded = bool(kg_answer) and "抱歉" not in kg_answer
    if kg_succeeded:
        return kg_answer

    # Step 2: fall back to the fine-tuned BERT model.
    try:
        qa_model = pipeline("question-answering", model=model_path)
        # Generic fallback context; a richer one could be assembled from KG
        # lookups relevant to the question.
        context = "请根据医学常识回答问题。"
        prediction = qa_model(question=question, context=context)
        return prediction['answer']
    except Exception as e:
        print(f"BERT模型加载或预测出错: {e}")
        return "抱歉，无法回答该问题。"

# --- 可视化 ---
def create_networkx_graph():
    """Mirror every Neo4j relationship into a NetworkX graph.

    Node colors encode the entity type and edge colors the relation type so
    the PyVis rendering stays readable at a glance.
    """
    query = """
    MATCH (n)-[r]->(m)
    RETURN n, r, m
    """
    records = query_knowledge_graph(query)

    # Label -> color, checked in priority order (a node keeps the color of
    # the first label that matches; unknown labels get the default color).
    node_palette = (
        ('Disease', 'red'),
        ('Symptom', 'blue'),
        ('Drug', 'green'),
        ('Test', 'orange'),
        ('Surgery', 'purple'),
        ('Department', 'yellow'),
        ('Gene', 'pink'),
    )
    # Relationship type -> edge color; anything else is black.
    edge_palette = {'HAS_SYMPTOM': 'gray', 'TREATED_WITH': 'green'}

    graph = nx.Graph()
    for record in records:
        src, rel, dst = record['n'], record['r'], record['m']

        for node in (src, dst):
            color = next((c for label, c in node_palette if label in node.labels), None)
            if color is None:
                graph.add_node(node['name'], title=str(node.labels))
            else:
                graph.add_node(node['name'], color=color, title=str(node.labels))

        graph.add_edge(src['name'], dst['name'],
                       color=edge_palette.get(rel.type, 'black'))

    return graph

def visualize_knowledge_graph(output_file="medical_knowledge_graph.html"):
    """Render the knowledge graph as an interactive PyVis HTML page.

    Args:
        output_file: Path of the HTML file to write and open.
    """
    graph = create_networkx_graph()
    viewer = Network(notebook=True, width="1000px", height="800px")
    viewer.from_nx(graph)
    viewer.show_buttons(filter_=['physics'])  # expose the physics controls
    viewer.show(output_file)

# --- 主程序 (示例) ---
if __name__ == "__main__":
    # 1. Build the knowledge graph from the extended CSV dataset.
    build_knowledge_graph('medical_data_extended.csv')

    # 2. Render it as an interactive HTML page.
    visualize_knowledge_graph()

    # 3. Question-answering demos: the first is answerable from the KG,
    #    the second has no direct KG answer and falls through to BERT.
    for demo_question in ("流感有哪些症状？", "糖尿病应该怎么治疗？"):
        demo_answer = answer_question(demo_question)
        print(f"问题: {demo_question}\n答案: {demo_answer}")

    # 4. (Optional) fine-tune the BERT model on a small SQuAD-style set:
    # train_data = [
    #     {"context": "流感是一种常见的呼吸道疾病，症状包括发热、咳嗽和肌肉疼痛。流感可以通过奥司他韦治疗。",
    #      "question": "流感的症状是什么？",
    #      "answers": {"text": ["发热、咳嗽和肌肉疼痛"], "answer_start": [23]}},
    #     {"context": "糖尿病患者需要定期监测血糖，并可能需要注射胰岛素。",
    #      "question": "糖尿病患者需要做什么？",
    #      "answers": {"text": ["定期监测血糖"], "answer_start": [7]}}
    # ]
    # fine_tune_bert(train_data)

