# Depending on your network environment, downloading the pretrained model may fail; try running simple_ner.py instead.
import warnings
warnings.filterwarnings("ignore")

import argparse
from pathlib import Path
from typing import List, Dict, Any
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import (
    AutoTokenizer, AutoModelForTokenClassification,
    TrainingArguments, Trainer, DataCollatorForTokenClassification
)
from seqeval.metrics import classification_report, f1_score
import matplotlib.pyplot as plt
import seaborn as sns


class MedicalNERDataset(Dataset):
    """PyTorch dataset producing fixed-length BERT-style inputs for token NER.

    Args:
        texts: raw input sentences.
        labels: per-text BIO tag sequences (assumed one tag per token the
            tokenizer produces — TODO confirm alignment for sub-word models).
        tokenizer: HuggingFace-style tokenizer exposing ``tokenize``,
            ``convert_tokens_to_ids`` and ``pad_token_id``.
        max_length: fixed sequence length after padding/truncation.
        label2id: optional tag-name -> id mapping. When omitted, falls back to
            a class-level ``label2id`` attribute (the legacy injection path
            used by ``main``) and finally to ``DEFAULT_LABEL2ID``.
    """

    # Default BIO mapping; mirrors the one built in main().
    DEFAULT_LABEL2ID = {
        'O': 0, 'B-疾病': 1, 'I-疾病': 2, 'B-症状': 3, 'I-症状': 4,
        'B-药物': 5, 'I-药物': 6, 'B-检查': 7, 'I-检查': 8
    }

    def __init__(self, texts: List[str], labels: List[List[str]], tokenizer,
                 max_length: int = 128, label2id: Dict[str, int] = None):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_length = max_length
        # BUGFIX: the original never initialized label2id and crashed unless a
        # class attribute was injected externally; keep that path working but
        # provide a safe default.
        if label2id is not None:
            self.label2id = label2id
        elif not hasattr(self, 'label2id'):
            self.label2id = dict(self.DEFAULT_LABEL2ID)

    def __len__(self):
        # One example per input text.
        return len(self.texts)

    def __getitem__(self, idx):
        text = self.texts[idx]
        label = self.labels[idx]

        # Tokenize and truncate, leaving room for [CLS] and [SEP].
        tokens = self.tokenizer.tokenize(text)
        if len(tokens) > self.max_length - 2:
            tokens = tokens[:self.max_length - 2]
            label = label[:self.max_length - 2]

        # Add the special markers; they carry the neutral 'O' tag.
        tokens = ['[CLS]'] + tokens + ['[SEP]']
        label = ['O'] + label + ['O']

        # Convert to ids; attention mask is 1 for every real token.
        input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
        attention_mask = [1] * len(input_ids)

        # Pad up to the fixed length.
        padding_length = self.max_length - len(input_ids)
        input_ids += [self.tokenizer.pad_token_id] * padding_length
        attention_mask += [0] * padding_length

        # BUGFIX: pad labels with -100 (cross-entropy ignore index) instead
        # of 0, so padding positions no longer contribute to the loss nor get
        # scored as the 'O' class by compute_metrics.
        label_ids = [self.label2id.get(l, 0) for l in label]
        label_ids += [-100] * padding_length

        return {
            'input_ids': torch.tensor(input_ids, dtype=torch.long),
            'attention_mask': torch.tensor(attention_mask, dtype=torch.long),
            'labels': torch.tensor(label_ids, dtype=torch.long)
        }


def load_sample_data(data_path: Path) -> tuple:
    """Return a tiny built-in medical corpus as (texts, BIO label sequences).

    Note: ``data_path`` is accepted for interface compatibility only; the
    samples are hard-coded and nothing is read from disk.
    """
    samples = [
        ("患者出现发热、咳嗽症状，诊断为肺炎，需要服用阿莫西林治疗。",
         ["O", "O", "B-症状", "I-症状", "O", "B-症状", "I-症状", "O", "O", "B-疾病", "I-疾病", "O", "O", "O", "B-药物", "I-药物", "O", "O"]),
        ("血压升高，建议进行心电图检查，服用降压药物。",
         ["B-症状", "I-症状", "O", "O", "O", "O", "O", "B-检查", "I-检查", "O", "O", "O", "B-药物", "I-药物", "O"]),
        ("患者主诉头痛、恶心，CT检查显示脑部正常。",
         ["O", "O", "B-症状", "I-症状", "O", "B-症状", "I-症状", "O", "B-检查", "I-检查", "O", "O", "O", "B-疾病", "I-疾病", "O", "O", "O"]),
        ("糖尿病患者需要定期监测血糖，服用二甲双胍。",
         ["B-疾病", "I-疾病", "O", "O", "O", "O", "O", "B-检查", "I-检查", "O", "O", "O", "B-药物", "I-药物", "O"]),
        ("患者出现胸痛症状，建议进行X光检查，排除心脏病。",
         ["O", "O", "B-症状", "I-症状", "O", "O", "O", "O", "O", "B-检查", "I-检查", "O", "O", "B-疾病", "I-疾病", "O"]),
    ]
    texts = [sentence for sentence, _ in samples]
    labels = [tags for _, tags in samples]
    return texts, labels


def compute_metrics(eval_pred):
    """Compute entity-level F1 for a token-classification eval step.

    Args:
        eval_pred: (predictions, labels) pair as handed over by ``Trainer`` —
            raw logits of shape (batch, seq, num_labels) and the gold label
            ids, with -100 marking positions to ignore.

    Returns:
        dict with a single ``'f1'`` entry (seqeval entity-level F1).
    """
    predictions, labels = eval_pred
    # Reduce the logits to predicted label ids.
    predictions = torch.argmax(torch.as_tensor(predictions), dim=-1)

    # Must stay in sync with the mapping used to build the model/dataset.
    id2label = {0: 'O', 1: 'B-疾病', 2: 'I-疾病', 3: 'B-症状', 4: 'I-症状', 5: 'B-药物', 6: 'I-药物', 7: 'B-检查', 8: 'I-检查'}

    true_predictions = []
    true_labels = []
    for prediction, label in zip(predictions, labels):
        for (p, l) in zip(prediction, label):
            # Skip ignored (padding/special-token) positions.
            if l != -100:
                # BUGFIX: cast to plain int — 0-dim torch tensors hash by
                # identity, so id2label[p] raised KeyError in the original.
                true_predictions.append(id2label[int(p)])
                true_labels.append(id2label[int(l)])

    # seqeval expects lists of sequences; wrap as one sequence each.
    return {
        'f1': f1_score([true_labels], [true_predictions])
    }


def visualize_predictions(text: str, predictions: List[str], output_path: Path):
    """Render a one-bar-per-token chart of predicted entity tags, saved as PNG.

    Args:
        text: the original input sentence.
        predictions: one BIO tag per token of ``text``.
        output_path: destination image file.
    """
    # BUGFIX: the inputs here are Chinese with no whitespace, so str.split()
    # returned a single "token" that never matched the predictions; fall back
    # to per-character tokens when the whitespace split does not line up.
    tokens = text.split()
    if len(tokens) != len(predictions):
        tokens = list(text)
    # Align both sequences defensively so zip/xticks stay consistent.
    n = min(len(tokens), len(predictions))
    tokens = tokens[:n]
    predictions = predictions[:n]

    # Entity-type -> bar color mapping (unknown tags fall back to gray).
    color_map = {
        'O': 'lightgray',
        'B-疾病': 'red',
        'I-疾病': 'lightcoral',
        'B-症状': 'blue',
        'I-症状': 'lightblue',
        'B-药物': 'green',
        'I-药物': 'lightgreen',
        'B-检查': 'orange',
        'I-检查': 'moccasin'
    }

    colors = [color_map.get(pred, 'lightgray') for pred in predictions]

    plt.figure(figsize=(12, 6))
    bars = plt.bar(range(len(tokens)), [1] * len(tokens), color=colors)

    # Annotate each bar with its token and predicted tag.
    for i, (token, pred) in enumerate(zip(tokens, predictions)):
        plt.text(i, 0.5, f'{token}\n({pred})', ha='center', va='center', fontsize=10)

    plt.title('医疗实体识别结果')
    plt.xlabel('词汇位置')
    plt.ylabel('实体类型')
    plt.xticks(range(len(tokens)), tokens, rotation=45)
    plt.tight_layout()
    plt.savefig(output_path, dpi=150, bbox_inches='tight')
    plt.close()


def main():
    """End-to-end demo: fine-tune a token-classification model on a tiny
    built-in medical corpus, save it, run one prediction and visualize it.

    NOTE(review): the default checkpoint is distilbert-base-uncased while the
    data is Chinese — presumably a placeholder; confirm a Chinese checkpoint
    (e.g. bert-base-chinese) is intended for real use.
    """
    parser = argparse.ArgumentParser(description='BERT 医疗实体识别微调')
    parser.add_argument('--model_name', type=str, default='distilbert-base-uncased', help='预训练模型名称')
    parser.add_argument('--epochs', type=int, default=3, help='训练轮数')
    parser.add_argument('--batch_size', type=int, default=16, help='批次大小')
    parser.add_argument('--learning_rate', type=float, default=2e-5, help='学习率')
    parser.add_argument('--max_length', type=int, default=128, help='最大序列长度')
    args = parser.parse_args()
    
    # All outputs are placed next to this script; data_path is passed through
    # to load_sample_data (which currently ignores it).
    project_root = Path(__file__).resolve().parent
    data_path = project_root / 'sample_medical.txt'
    output_dir = project_root / 'outputs'
    output_dir.mkdir(parents=True, exist_ok=True)
    
    # BIO tag -> id mapping (diseases, symptoms, drugs, examinations).
    label2id = {
        'O': 0, 'B-疾病': 1, 'I-疾病': 2, 'B-症状': 3, 'I-症状': 4,
        'B-药物': 5, 'I-药物': 6, 'B-检查': 7, 'I-检查': 8
    }
    # Inverse mapping used to decode model outputs back to tag names.
    id2label = {v: k for k, v in label2id.items()}
    
    print("加载数据...")
    texts, labels = load_sample_data(data_path)
    
    print("初始化模型和分词器...")
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    model = AutoModelForTokenClassification.from_pretrained(
        args.model_name,
        num_labels=len(label2id),
        id2label=id2label,
        label2id=label2id
    )
    
    # Inject the label mapping as a class attribute; MedicalNERDataset reads
    # self.label2id in __getitem__, so this must happen before items are used.
    MedicalNERDataset.label2id = label2id
    
    print("准备数据集...")
    dataset = MedicalNERDataset(texts, labels, tokenizer, args.max_length)
    
    # Training configuration; evaluation is disabled since there is no
    # separate eval split for this demo.
    training_args = TrainingArguments(
        output_dir=str(output_dir / 'checkpoints'),
        num_train_epochs=args.epochs,
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        learning_rate=args.learning_rate,
        logging_dir=str(output_dir / 'logs'),
        logging_steps=10,
        save_steps=100,
        evaluation_strategy="no",
        save_total_limit=2,
    )
    
    # Collator pads batches for token classification.
    data_collator = DataCollatorForTokenClassification(tokenizer)
    
    # Trainer drives the training loop and checkpointing.
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    
    print("开始训练...")
    trainer.train()
    
    print("保存模型...")
    model.save_pretrained(output_dir / 'model')
    tokenizer.save_pretrained(output_dir / 'model')
    
    print("进行预测...")
    test_text = "患者出现发热症状，需要服用阿莫西林治疗。"
    inputs = tokenizer(test_text, return_tensors="pt", padding=True, truncation=True)
    
    # Inference only — no gradients needed.
    with torch.no_grad():
        outputs = model(**inputs)
        predictions = torch.argmax(outputs.logits, dim=-1)
    
    # Decode predicted ids to tag names, dropping the [CLS]/[SEP] positions.
    pred_labels = [id2label[p.item()] for p in predictions[0][1:-1]]
    
    print(f"测试文本: {test_text}")
    print(f"预测标签: {pred_labels}")
    
    # Save a bar-chart visualization of the prediction.
    visualize_predictions(test_text, pred_labels, output_dir / 'prediction_result.png')
    print(f"预测结果已保存到: {output_dir / 'prediction_result.png'}")
    
    print("实验完成！")


# Script entry point: run the full fine-tune / predict / visualize pipeline.
if __name__ == '__main__':
    main()
