# Simplified version: uses a basic neural network instead of a pretrained model
import warnings
warnings.filterwarnings("ignore")

import os
# Work around the duplicate-OpenMP-runtime crash (common with torch + MKL on Windows)
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

import argparse
from pathlib import Path
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
import matplotlib
# Configure CJK-capable fonts so Chinese characters render in plots
matplotlib.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'DejaVu Sans']
matplotlib.rcParams['axes.unicode_minus'] = False
import seaborn as sns
import numpy as np


class SimpleNERModel(nn.Module):
    """Character-level tagger: embedding -> bidirectional LSTM -> per-token linear head."""

    def __init__(self, vocab_size, embedding_dim, hidden_dim, num_labels):
        super().__init__()
        # Submodules registered in the same order as before, so parameter
        # initialization under a fixed seed is unchanged.
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True, bidirectional=True)
        # BiLSTM concatenates both directions, hence hidden_dim * 2 input features.
        self.classifier = nn.Linear(hidden_dim * 2, num_labels)
        self.dropout = nn.Dropout(0.1)

    def forward(self, input_ids, attention_mask=None):
        """Return logits of shape (batch, seq_len, num_labels).

        ``attention_mask`` is accepted for interface compatibility but unused.
        """
        hidden_states, _ = self.lstm(self.embedding(input_ids))
        return self.classifier(self.dropout(hidden_states))


class SimpleNERDataset(Dataset):
    """Character-level NER dataset yielding fixed-length id tensors.

    Conventions (shared with the rest of this script):
      - vocab maps char -> id, with 0 = <PAD> and 1 = <UNK>
      - label2id maps tag string -> id, with 0 = 'O' (also used for padding)
    """

    def __init__(self, texts, labels, vocab, label2id, max_length=128):
        self.texts = texts          # list[str], one string per example
        self.labels = labels        # list[list[str]], one tag per character
        self.vocab = vocab
        self.label2id = label2id
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        text = self.texts[idx]
        label = self.labels[idx]

        # Simple character-level encoding; unseen chars -> <UNK> (1),
        # unseen tags -> 'O' (0).
        input_ids = [self.vocab.get(char, 1) for char in text]
        label_ids = [self.label2id.get(l, 0) for l in label]

        # Truncate and pad each sequence to max_length INDEPENDENTLY.
        # (The original computed one padding length from input_ids only, so a
        # label list whose length differed from the text produced tensors of
        # unequal lengths and broke batching.)
        input_ids = input_ids[:self.max_length]
        label_ids = label_ids[:self.max_length]
        input_ids += [0] * (self.max_length - len(input_ids))  # 0 = <PAD>
        label_ids += [0] * (self.max_length - len(label_ids))  # 0 = 'O' tag

        return {
            'input_ids': torch.tensor(input_ids, dtype=torch.long),
            'labels': torch.tensor(label_ids, dtype=torch.long)
        }


def create_vocab_and_labels():
    """Build the toy corpus, character-level labels, vocab, and label maps.

    Returns (texts, labels, vocab, label2id, id2label) where labels is one
    tag string per character. Simplified scheme: every character of an entity
    is tagged B-*, no I- tags are ever produced.
    """
    texts = [
        "患者出现发热、咳嗽症状，诊断为肺炎，需要服用阿莫西林治疗。",
        "血压升高，建议进行心电图检查，服用降压药物。",
        "患者主诉头痛、恶心，CT检查显示脑部正常。"
    ]

    # Any character found in the left string receives the paired tag;
    # the charsets are disjoint, so ordering does not matter.
    tagged_charsets = [
        ("发热咳嗽", "B-症状"),
        ("肺炎", "B-疾病"),
        ("阿莫西林", "B-药物"),
        ("血压", "B-症状"),
        ("心电图", "B-检查"),
        ("头痛恶心", "B-症状"),
        ("CT", "B-检查"),
        ("脑部", "B-疾病"),
    ]

    def tag_of(char):
        # Fall back to the outside-any-entity tag.
        for chars, tag in tagged_charsets:
            if char in chars:
                return tag
        return "O"

    labels = [[tag_of(char) for char in text] for text in texts]

    # Character vocabulary; ids 0 and 1 are reserved for padding / unknown.
    vocab = {'<PAD>': 0, '<UNK>': 1}
    for char in "".join(texts):
        vocab.setdefault(char, len(vocab))

    # Full BIO tag inventory (I- tags exist in the map even though the toy
    # labeling above never emits them).
    label2id = {
        'O': 0, 'B-疾病': 1, 'I-疾病': 2, 'B-症状': 3, 'I-症状': 4,
        'B-药物': 5, 'I-药物': 6, 'B-检查': 7, 'I-检查': 8
    }
    id2label = {v: k for k, v in label2id.items()}

    return texts, labels, vocab, label2id, id2label


def train_model(model, dataloader, num_epochs=3):
    """训练模型"""
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    
    model.train()
    for epoch in range(num_epochs):
        total_loss = 0
        for batch in dataloader:
            optimizer.zero_grad()
            input_ids = batch['input_ids']
            labels = batch['labels']
            
            logits = model(input_ids)
            loss = criterion(logits.view(-1, logits.size(-1)), labels.view(-1))
            loss.backward()
            optimizer.step()
            
            total_loss += loss.item()
        
        print(f"Epoch {epoch+1}/{num_epochs}, Loss: {total_loss/len(dataloader):.4f}")


def predict_and_visualize(model, text, vocab, id2label, output_path):
    """Tag ``text`` with ``model`` and save a per-character bar chart.

    Args:
        model: a module mapping (1, seq_len) int ids to (1, seq_len, num_labels) logits.
        text: string to tag; encoded whole, without padding or truncation.
        vocab: char -> id map (unknown chars fall back to 1 = <UNK>).
        id2label: label id -> tag string map.
        output_path: where the PNG figure is written.

    Returns:
        The list of predicted tag strings, one per character of ``text``.
    """
    model.eval()

    # Encode exactly like the training dataset (1 = <UNK>), add batch dim.
    input_ids = [vocab.get(char, 1) for char in text]
    input_ids = torch.tensor([input_ids], dtype=torch.long)

    with torch.no_grad():
        logits = model(input_ids)
        predictions = torch.argmax(logits, dim=-1)

    pred_labels = [id2label[p.item()] for p in predictions[0]]

    # One color per entity type; anything unexpected falls back to light gray.
    color_map = {
        'O': 'lightgray',
        'B-疾病': 'red',
        'I-疾病': 'lightcoral',
        'B-症状': 'blue',
        'I-症状': 'lightblue',
        'B-药物': 'green',
        'I-药物': 'lightgreen',
        'B-检查': 'orange',
        'I-检查': 'moccasin'
    }

    colors = [color_map.get(pred, 'lightgray') for pred in pred_labels]

    plt.figure(figsize=(12, 6))
    # Unit-height bars act as colored tiles behind each character.
    # (Dropped the unused `bars =` binding from the original.)
    plt.bar(range(len(text)), [1] * len(text), color=colors)

    for i, (char, pred) in enumerate(zip(text, pred_labels)):
        plt.text(i, 0.5, f'{char}\n({pred})', ha='center', va='center', fontsize=10)

    plt.title('医疗实体识别结果（简化模型）')
    plt.xlabel('字符位置')
    plt.ylabel('实体类型')
    plt.xticks(range(len(text)), list(text), rotation=45)
    plt.tight_layout()
    plt.savefig(output_path, dpi=150, bbox_inches='tight')
    plt.close()

    return pred_labels


def main():
    """CLI entry point: build toy data, train the tagger, visualize one prediction."""
    parser = argparse.ArgumentParser(description='简化版医疗实体识别')
    parser.add_argument('--epochs', type=int, default=3, help='训练轮数')
    parser.add_argument('--batch_size', type=int, default=16, help='批次大小')
    parser.add_argument('--embedding_dim', type=int, default=64, help='嵌入维度')
    parser.add_argument('--hidden_dim', type=int, default=128, help='隐藏层维度')
    args = parser.parse_args()

    # All artifacts land next to this script, under outputs/.
    output_dir = Path(__file__).resolve().parent / 'outputs'
    output_dir.mkdir(parents=True, exist_ok=True)

    print("创建数据和词汇表...")
    texts, labels, vocab, label2id, id2label = create_vocab_and_labels()

    print("准备数据集...")
    loader = DataLoader(
        SimpleNERDataset(texts, labels, vocab, label2id),
        batch_size=args.batch_size,
        shuffle=True,
    )

    print("初始化模型...")
    model = SimpleNERModel(
        vocab_size=len(vocab),
        embedding_dim=args.embedding_dim,
        hidden_dim=args.hidden_dim,
        num_labels=len(label2id)
    )

    print("开始训练...")
    train_model(model, loader, args.epochs)

    print("进行预测...")
    test_text = "患者出现发热症状，需要服用阿莫西林治疗。"
    result_path = output_dir / 'simple_ner_result.png'
    pred_labels = predict_and_visualize(model, test_text, vocab, id2label, result_path)

    print(f"测试文本: {test_text}")
    print(f"预测标签: {pred_labels}")
    print(f"结果已保存到: {result_path}")

    print("实验完成！")


# Run the end-to-end demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
