
import json
import re
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical
from pathlib import Path
import traceback
from collections import defaultdict
from sklearn.metrics import precision_recall_fscore_support, classification_report, confusion_matrix, roc_curve, auc
from transformers import AutoTokenizer, AutoModel, AutoModelForTokenClassification, TrainingArguments, Trainer
from datasets import Dataset
import matplotlib.pyplot as plt
import seaborn as sns

# Ensure the local model directory exists (creates parents as needed).
# NOTE: mkdir only guarantees the directory, not that checkpoint files are
# present — the old message falsely claimed the model files were verified.
model_dir = Path("/root/npl/data/biobert-v1.1")
model_dir.mkdir(parents=True, exist_ok=True)
print(f"模型目录已就绪: {model_dir.resolve()}")

# --- Model definitions ---

class Generator(nn.Module):
    """LSTM sequence generator: maps token ids to per-position vocab logits."""

    def __init__(self, vocab_size, hidden_dim, max_length):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, hidden_dim)
        self.lstm = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, vocab_size)
        self.max_length = max_length

    def forward(self, x):
        """Return logits of shape (batch, seq_len, vocab_size)."""
        hidden_states, _ = self.lstm(self.embedding(x))
        return self.fc(hidden_states)

class Discriminator(nn.Module):
    """LSTM discriminator: maps token ids to a real/fake probability."""

    def __init__(self, vocab_size, hidden_dim):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, hidden_dim)
        self.lstm = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return a (batch, 1) tensor of probabilities in (0, 1)."""
        states, _ = self.lstm(self.embedding(x))
        final_state = states[:, -1, :]  # last time step summarizes the sequence
        return self.sigmoid(self.fc(final_state))

class RelationExtractor(nn.Module):
    """Two-layer MLP scoring a relation from a pair of entity embeddings."""

    def __init__(self, input_dim, hidden_dim, num_relations):
        super().__init__()
        self.fc1 = nn.Linear(input_dim * 2, hidden_dim)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, num_relations)

    def forward(self, entity1_emb, entity2_emb):
        """Concatenate the two embeddings and return relation logits."""
        pair = torch.cat((entity1_emb, entity2_emb), dim=-1)
        return self.fc2(self.relu(self.fc1(pair)))

# --- Data processing functions ---

def clean_dataset(file_content: str, dataset_name: str):
    """Parse a tab-separated corpus into documents with entities and relations.

    Each input line is ``doc_id<TAB>annotated_text``. Entities are inline
    ``<category="...">...</category>`` tags; relations are inline
    ``<relation="...">e1|e2</relation>`` tags. A document with at least two
    entities but no relation tag receives one simulated relation so the
    relation extractor always has training signal.

    Returns:
        (documents, entity_labels_by_type, relation_type_set)
    """
    import random  # hoisted out of the per-line loop (was re-imported each line)

    documents = []
    entity_labels = defaultdict(list)
    relation_labels = []
    default_relations = ["治疗", "导致"]

    for line in file_content.strip().split('\n'):
        line = line.strip()
        if not line or '\t' not in line:
            continue

        doc_id, raw_text = line.split('\t', 1)
        doc_id, raw_text = doc_id.strip(), raw_text.strip()
        if not doc_id or not raw_text:
            continue

        # Entity spans; offsets are positions in the raw (still-tagged) text.
        entities = []
        for match in re.finditer(r'<category="([^"]+)">([^<]+)</category>', raw_text):
            entities.append({
                "label": match.group(1),
                "text": match.group(2),
                "start": match.start(),
                "end": match.end()
            })
            entity_labels[match.group(1)].append({"text": match.group(2), "start": match.start(), "end": match.end()})

        relations = []
        relation_matches = list(re.finditer(r'<relation="([^"]+)">([^|]+)\|([^<]+)</relation>', raw_text))
        if relation_matches:
            for match in relation_matches:
                relations.append({
                    "relation_type": match.group(1),
                    "entity1": match.group(2),
                    "entity2": match.group(3)
                })
                relation_labels.append(match.group(1))
        elif len(entities) >= 2:
            # No gold relation: fabricate one between two distinct entities.
            # random.sample replaces np.random.choice, which only works on a
            # list of dicts via implicit object-array conversion.
            entity_pair = random.sample(entities, 2)
            rel_type = random.choice(default_relations)
            relations.append({
                "relation_type": rel_type,
                "entity1": entity_pair[0]["text"],
                "entity2": entity_pair[1]["text"]
            })
            relation_labels.append(rel_type)
            print(f"警告: 文档 {doc_id} 无关系标注，已生成模拟关系: {rel_type}({entity_pair[0]['text']}|{entity_pair[1]['text']})")

        documents.append({
            "id": doc_id,
            "text": raw_text,
            "entities": entities,
            "relations": relations
        })

    print(f"{dataset_name}清洗完成: 有效样本={len(documents)}, 实体标签类型={set(entity_labels.keys())}, 关系类型={set(relation_labels)}")
    return documents, entity_labels, set(relation_labels)

def align_labels_with_tokens(text, entities, tokenized, label_map):
    """Project character-level entity spans onto token-level BIO label ids.

    Tokens not covered by any entity keep the "O" label; the first token of
    each entity gets "B-<label>" and all subsequent tokens "I-<label>".
    """
    offsets = tokenized["offset_mapping"]
    token_labels = [label_map["O"]] * len(offsets)

    # Map each character position to the token that covers it.
    char_owner = {}
    for tok_idx, (tok_start, tok_end) in enumerate(offsets):
        if tok_start is None or tok_end is None:
            continue
        for pos in range(int(tok_start), int(tok_end)):
            char_owner[pos] = tok_idx

    for entity in entities:
        covered = {
            char_owner[pos]
            for pos in range(entity["start"], entity["end"])
            if pos in char_owner
        }
        if not covered:
            continue
        for rank, tok_idx in enumerate(sorted(covered)):
            prefix = "B" if rank == 0 else "I"
            token_labels[tok_idx] = label_map[f"{prefix}-{entity['label']}"]

    return token_labels

def tokenize_and_align_labels(examples, tokenizer, label_map):
    """Tokenize a batch of texts and attach token-level BIO label ids.

    Special and padding tokens (zero-width offset mapping) are labelled -100
    so both the token-classification loss and the metric computation (which
    filter label == -100) ignore them, instead of counting every pad token
    as a correct "O" prediction.
    """
    tokenized_inputs = tokenizer(
        examples["text"],
        padding="max_length",
        truncation=True,
        max_length=128,
        return_offsets_mapping=True,
        return_tensors="pt"
    )

    labels = []
    for i in range(len(examples["text"])):
        tokenized = {
            "input_ids": tokenized_inputs["input_ids"][i],
            "attention_mask": tokenized_inputs["attention_mask"][i],
            "offset_mapping": tokenized_inputs["offset_mapping"][i]
        }
        aligned = align_labels_with_tokens(examples["text"][i], examples["entities"][i], tokenized, label_map)
        # Mask special/padding positions: fast tokenizers emit (0, 0) offsets
        # for them, so a zero-width span identifies a non-content token.
        aligned = [
            -100 if int(start) == int(end) else label
            for label, (start, end) in zip(aligned, tokenized["offset_mapping"])
        ]
        labels.append(aligned)

    tokenized_inputs["labels"] = labels
    print(f"tokenized_inputs shapes: input_ids={tokenized_inputs['input_ids'].shape}, labels={np.shape(labels)}")
    return tokenized_inputs

# --- Evaluation functions ---

def compute_metrics(p):
    """Compute NER metrics from a Trainer EvalPrediction.

    Relies on the module-level ``label_list`` populated by ``main``.
    Positions labelled -100 (special/padding tokens) are excluded.
    Returns overall macro precision/recall/F1/accuracy plus per-class scores.
    """
    predictions, labels = p.predictions, p.label_ids
    predictions = np.argmax(predictions, axis=2)

    true_predictions = [[label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels)]
    true_labels = [[label_list[l] for l in label if l != -100] for label in labels]

    flat_true_labels = [label for sublist in true_labels for label in sublist]
    flat_predictions = [label for sublist in true_predictions for label in sublist]

    precision, recall, f1, _ = precision_recall_fscore_support(flat_true_labels, flat_predictions, average='macro', zero_division=0)
    accuracy = np.mean([p == l for p, l in zip(flat_predictions, flat_true_labels)])

    class_report = classification_report(flat_true_labels, flat_predictions, output_dict=True, zero_division=0)

    results = {
        "overall_precision": precision,
        "overall_recall": recall,
        "overall_f1": f1,
        "overall_accuracy": accuracy
    }

    # classification_report keys are the string label names themselves (plus
    # "accuracy"/"macro avg"/"weighted avg"); the old `label.isdigit()` test
    # never matched, so per-class metrics were silently dropped.
    for label_name, metrics in class_report.items():
        if label_name in label_list:
            results[f"{label_name}_precision"] = metrics["precision"]
            results[f"{label_name}_recall"] = metrics["recall"]
            results[f"{label_name}_f1"] = metrics["f1-score"]

    return results

def compute_relation_metrics(predictions, labels, relation_list, id2relation):
    """Score relation-extraction output: macro P/R/F1, accuracy, per-class stats."""
    flat_predictions = np.argmax(predictions, axis=1).flatten()
    flat_labels = np.array(labels, dtype=np.int32)

    print(f"flat_predictions shape: {flat_predictions.shape}, values: {flat_predictions[:5]}")
    print(f"flat_labels shape: {flat_labels.shape}, values: {flat_labels[:5]}")

    # Clamp out-of-range gold labels instead of letting sklearn fail below.
    in_range = np.isin(flat_labels, range(len(relation_list)))
    if not in_range.all():
        print("警告: 标签值超出关系类别范围，自动修正")
        flat_labels = np.clip(flat_labels, 0, len(relation_list) - 1)

    precision, recall, f1, _ = precision_recall_fscore_support(
        flat_labels, flat_predictions, average='macro', zero_division=0)
    accuracy = np.mean(flat_predictions == flat_labels)

    target_names = [id2relation[i] for i in range(len(relation_list))]
    class_report = classification_report(
        flat_labels, flat_predictions, target_names=target_names,
        output_dict=True, zero_division=0)

    results = {
        "relation_precision": precision,
        "relation_recall": recall,
        "relation_f1": f1,
        "relation_accuracy": accuracy
    }

    relation_names = set(id2relation.values())
    for name, metrics in class_report.items():
        if name in relation_names:
            results[f"{name}_precision"] = metrics["precision"]
            results[f"{name}_recall"] = metrics["recall"]
            results[f"{name}_f1"] = metrics["f1-score"]

    return results

def compute_confusion_matrix_and_roc(predictions, labels, id2label, dataset_name, mode="entity"):
    """Plot and return the confusion matrix and per-class ROC AUC.

    mode="entity": `predictions` are (batch, seq, num_labels) logits and
    `labels` token-level ids with -100 marking ignored positions.
    mode="relation": `predictions` are (n, num_relations) logits and `labels`
    integer class ids. Figures are saved under ./results/.

    Returns (confusion_matrix, {class_index: auc}) or (None, {}) on failure.
    """
    try:
        # savefig below fails if the output directory does not exist yet.
        os.makedirs("./results", exist_ok=True)

        if mode == "entity":
            predictions = np.argmax(predictions, axis=2)
            true_predictions = [[id2label[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels)]
            true_labels = [[id2label[l] for l in label if l != -100] for label in labels]
            flat_true_labels = [label for sublist in true_labels for label in sublist]
            flat_predictions = [label for sublist in true_predictions for label in sublist]
            label_list = list(id2label.values())
        else:  # mode == "relation"
            # Map integer class ids to label names so they match the string
            # `labels=label_list` passed below; comparing raw ints against
            # string labels produced an empty/broken confusion matrix.
            flat_predictions = [id2label[int(p)] for p in np.argmax(predictions, axis=1).flatten()]
            flat_true_labels = [id2label[int(l)] for l in labels]
            label_list = list(id2label.values())

        cm = confusion_matrix(flat_true_labels, flat_predictions, labels=label_list)

        plt.figure(figsize=(10, 8))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=label_list, yticklabels=label_list)
        plt.xlabel('预测标签')
        plt.ylabel('真实标签')
        plt.title(f'{dataset_name} {mode.capitalize()} 混淆矩阵')
        plt.savefig(f'./results/{dataset_name}_{mode}_confusion_matrix.png')
        plt.close()

        from sklearn.preprocessing import label_binarize
        from itertools import cycle

        y_true_bin = label_binarize(flat_true_labels, classes=label_list)
        y_pred_bin = label_binarize(flat_predictions, classes=label_list)

        # One-vs-rest ROC per class from the binarized hard predictions.
        fpr, tpr, roc_auc = {}, {}, {}
        for i in range(len(label_list)):
            fpr[i], tpr[i], _ = roc_curve(y_true_bin[:, i], y_pred_bin[:, i])
            roc_auc[i] = auc(fpr[i], tpr[i])

        plt.figure(figsize=(10, 8))
        colors = cycle(['aqua', 'darkorange', 'cornflowerblue', 'green', 'red', 'purple'])
        for i, color in zip(range(len(label_list)), colors):
            plt.plot(fpr[i], tpr[i], color=color, lw=2, label=f'ROC曲线 (类别 {label_list[i]}, AUC = {roc_auc[i]:.2f})')

        plt.plot([0, 1], [0, 1], 'k--', lw=2)
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('假阳性率')
        plt.ylabel('真阳性率')
        plt.title(f'{dataset_name} {mode.capitalize()} 多类别ROC曲线')
        plt.legend(loc="lower right")
        plt.savefig(f'./results/{dataset_name}_{mode}_roc_curve.png')
        plt.close()

        return cm, roc_auc

    except Exception as e:
        print(f"计算混淆矩阵和ROC曲线时出错: {str(e)}")
        return None, {}

# --- Data generation and preparation ---

def generate_synthetic_data(generator, tokenizer, biobert_model, num_samples, max_length, vocab_size, device, target_labels=None, target_relations=None):
    """Sample synthetic annotated documents from the generator.

    For each sample: decode a generated token sequence to text, embed it with
    BioBERT, randomly promote roughly half of the word matches to entities
    (keeping only those whose mean token embedding has non-trivial norm), and,
    when at least two entities exist, add one random relation between a random
    entity pair.

    Falls back to the module-level ``all_labels`` when ``target_labels`` is
    None. Returns (texts, entities_per_text, relations_per_text).
    """
    generator.eval()
    biobert_model.eval()
    synthetic_texts, synthetic_entities, synthetic_relations = [], [], []
    default_relations = ["治疗", "导致"]

    with torch.no_grad():
        for _ in range(num_samples):
            noise = torch.randint(0, vocab_size, (1, max_length)).to(device)
            logits = generator(noise)
            probs = torch.softmax(logits, dim=-1)
            generated = torch.multinomial(probs.squeeze(0), num_samples=1).squeeze(-1)

            text = tokenizer.decode(generated, skip_special_tokens=True)

            tokenized = tokenizer(
                text,
                padding="max_length",
                truncation=True,
                max_length=128,
                return_offsets_mapping=True,
                return_tensors="pt"
            ).to(device)

            outputs = biobert_model(input_ids=tokenized["input_ids"], attention_mask=tokenized["attention_mask"])
            token_embeddings = outputs.last_hidden_state.squeeze(0)

            entities = []
            for match in re.finditer(r'(\w+)', text):
                # Promote ~half of the words to entities. (The original pair
                # of branches was equivalent to this single coin flip.)
                if np.random.rand() > 0.5:
                    label = np.random.choice(target_labels or list(all_labels))
                    start, end = match.start(), match.end()
                    token_indices = [i for i, (s, e) in enumerate(tokenized["offset_mapping"][0]) if s is not None and e is not None and start <= s < end]
                    if token_indices:
                        entity_emb = torch.mean(token_embeddings[token_indices], dim=0).cpu().numpy()
                        if np.linalg.norm(entity_emb) > 0.1:  # keep only semantically non-degenerate spans
                            entities.append({
                                "label": label,
                                "text": match.group(1),
                                "start": start,
                                "end": end
                            })

            relations = []
            if len(entities) >= 2:
                # Sample two distinct entity indices (avoids np.random.choice's
                # implicit object-array conversion of a list of dicts).
                first, second = np.random.choice(len(entities), size=2, replace=False)
                rel_type = np.random.choice(target_relations or default_relations)
                relations.append({
                    "relation_type": rel_type,
                    "entity1": entities[first]["text"],
                    "entity2": entities[second]["text"]
                })

            synthetic_texts.append(text)
            synthetic_entities.append(entities)
            synthetic_relations.append(relations)

    return synthetic_texts, synthetic_entities, synthetic_relations

def prepare_relation_data(documents, tokenizer, relation_map, biobert_model, device):
    """Build (entity1_emb, entity2_emb) pairs and relation label ids.

    Entity embeddings are the mean of the BioBERT token embeddings covering
    the entity's character span. Documents without relations, entities with
    no covering token, and relations whose entities cannot be embedded are
    skipped with a warning.

    Returns (relation_inputs, relation_labels), parallel lists.
    """
    relation_inputs, relation_labels = [], []

    biobert_model.eval()
    with torch.no_grad():
        for doc in documents:
            text, entities, relations = doc["text"], doc["entities"], doc.get("relations", [])
            if not relations:
                print(f"警告: 文档 {doc['id']} 没有关系标注")
                continue

            tokenized = tokenizer(
                text,
                padding="max_length",
                truncation=True,
                max_length=128,
                return_offsets_mapping=True,
                return_tensors="pt"
            ).to(device)

            outputs = biobert_model(input_ids=tokenized["input_ids"], attention_mask=tokenized["attention_mask"])
            token_embeddings = outputs.last_hidden_state

            # Build the char -> token map once per document (the old code
            # rescanned every token offset for every entity character).
            offsets = tokenized["offset_mapping"][0]
            char_to_token = {}
            for tok_idx, (s, e) in enumerate(offsets):
                if s is None or e is None:
                    continue
                for pos in range(int(s), int(e)):
                    char_to_token[pos] = tok_idx

            # Keep this list index-aligned with `entities` by storing None for
            # entities without a covering token. The old code skipped them,
            # shifting every later index and pairing relations with the wrong
            # embeddings.
            entity_embeddings = []
            entity_texts = [entity["text"] for entity in entities]
            for entity in entities:
                entity_tokens = sorted({
                    char_to_token[pos]
                    for pos in range(entity["start"], entity["end"])
                    if pos in char_to_token
                })
                if not entity_tokens:
                    print(f"警告: 实体 '{entity['text']}' 在文档 {doc['id']} 中未找到对应 token")
                    entity_embeddings.append(None)
                    continue
                entity_embeddings.append(torch.mean(token_embeddings[0, entity_tokens], dim=0).detach())

            for rel in relations:
                entity1_text, entity2_text, rel_type = rel["entity1"], rel["entity2"], rel["relation_type"]
                if entity1_text not in entity_texts or entity2_text not in entity_texts:
                    print(f"警告: 关系 {rel_type} 的实体 ({entity1_text}, {entity2_text}) 在文档 {doc['id']} 中未找到")
                    continue

                entity1_emb, entity2_emb = None, None
                for entity, emb in zip(entities, entity_embeddings):
                    if emb is None:
                        continue
                    if entity["text"] == entity1_text:
                        entity1_emb = emb
                    if entity["text"] == entity2_text:
                        entity2_emb = emb

                if entity1_emb is None or entity2_emb is None:
                    print(f"警告: 关系 {rel_type} 的实体嵌入 ({entity1_text}, {entity2_text}) 在文档 {doc['id']} 中无法生成")
                    continue

                relation_inputs.append((entity1_emb, entity2_emb))
                relation_labels.append(relation_map[rel_type])

    if not relation_inputs:
        print("错误: 未生成任何关系抽取数据")
    else:
        print(f"生成的关系输入数量: {len(relation_inputs)}")
        print(f"示例关系输入: entity1_emb.shape={relation_inputs[0][0].shape}, entity2_emb.shape={relation_inputs[0][1].shape}")

    return relation_inputs, relation_labels

# --- Training functions ---

def train_gan(generator, discriminator, train_texts, tokenizer, num_epochs, device):
    """Adversarially train the generator/discriminator pair on real texts.

    Because token sampling is discrete, gradients cannot flow from the
    discriminator back into the generator through sampled ids. The generator
    is therefore updated with a REINFORCE-style policy gradient: the
    discriminator's score of the sampled sequence is the reward weighting the
    sample's log-probability. (The original code back-propagated through the
    sampled integer ids, which left the generator's gradients empty so its
    weights never changed.)
    """
    g_optimizer = optim.Adam(generator.parameters(), lr=1e-3)
    d_optimizer = optim.Adam(discriminator.parameters(), lr=1e-3)
    criterion = nn.BCELoss()

    for epoch in range(num_epochs):
        d_loss = g_loss = None
        for text in train_texts:
            input_ids = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)["input_ids"].to(device)

            # --- Discriminator step: real text vs. a sampled fake sequence ---
            d_optimizer.zero_grad()
            real_labels = torch.ones(1, 1).to(device)
            real_output = discriminator(input_ids)
            d_real_loss = criterion(real_output, real_labels)

            noise = torch.randint(0, tokenizer.vocab_size, (1, 128)).to(device)
            with torch.no_grad():  # sampled ids carry no gradient anyway
                gen_probs = torch.softmax(generator(noise), dim=-1)
            gen_ids = torch.multinomial(gen_probs.squeeze(0), num_samples=1).squeeze(-1).unsqueeze(0)
            fake_labels = torch.zeros(1, 1).to(device)
            fake_output = discriminator(gen_ids)
            d_fake_loss = criterion(fake_output, fake_labels)

            d_loss = d_real_loss + d_fake_loss
            d_loss.backward()
            d_optimizer.step()

            # --- Generator step: REINFORCE with the discriminator as reward ---
            g_optimizer.zero_grad()
            gen_probs = torch.softmax(generator(noise), dim=-1)
            dist = Categorical(probs=gen_probs.squeeze(0))
            sampled_ids = dist.sample()
            with torch.no_grad():
                reward = discriminator(sampled_ids.unsqueeze(0)).squeeze()
            # Maximize the reward-weighted log-likelihood of the sample.
            g_loss = -(dist.log_prob(sampled_ids).mean() * reward)
            g_loss.backward()
            g_optimizer.step()

        # Guard against an empty corpus (the losses would be undefined).
        if d_loss is not None:
            print(f"GAN Epoch {epoch+1}/{num_epochs}, D Loss: {d_loss.item():.4f}, G Loss: {g_loss.item():.4f}")

def train_relation_extractor(relation_model, relation_inputs, relation_labels, device, num_epochs=5):
    """Train the relation classifier on (entity1_emb, entity2_emb) pairs.

    Args:
        relation_model: module mapping two 1-D embeddings to (num_relations,) logits.
        relation_inputs: list of (entity1_emb, entity2_emb) tensor pairs.
        relation_labels: list of int class ids, parallel to relation_inputs.
        device: torch device for the target tensors.
        num_epochs: number of passes over the data.

    Returns the trained model, or None when there is no training data.
    """
    if not relation_inputs:
        print("错误: 关系抽取训练数据为空")
        return None

    optimizer = optim.Adam(relation_model.parameters(), lr=1e-3)
    criterion = nn.CrossEntropyLoss()

    relation_model.train()
    for epoch in range(num_epochs):
        total_loss = 0
        for (entity1_emb, entity2_emb), label in zip(relation_inputs, relation_labels):
            optimizer.zero_grad()
            outputs = relation_model(entity1_emb, entity2_emb)
            # The model returns unbatched (num_relations,) logits; add a batch
            # axis so CrossEntropyLoss sees (1, C) logits vs. a (1,) target.
            # A (C,) input paired with a (1,) target raises a shape error.
            loss = criterion(outputs.unsqueeze(0), torch.tensor([label], device=device))
            loss.backward()
            optimizer.step()
            total_loss += loss.item()

        print(f"Relation Extractor Epoch {epoch+1}/{num_epochs}, Avg Loss: {total_loss/len(relation_inputs):.4f}")

    return relation_model

def evaluate_relation_extractor(relation_model, relation_inputs, relation_labels, relation_list, id2relation, dataset_name, device):
    """Evaluate the relation classifier; persist metrics only for the test set.

    Returns (metrics_dict, confusion_matrix, roc_auc_dict); empty/None values
    when the inputs or model are unusable.
    """
    if not relation_inputs or not relation_labels or relation_model is None:
        print(f"错误: {dataset_name} 关系抽取数据或模型无效")
        return {}, None, {}

    relation_model.eval()
    predictions = []
    with torch.no_grad():
        for entity1_emb, entity2_emb in relation_inputs:
            outputs = relation_model(entity1_emb, entity2_emb)
            predictions.append(outputs.cpu().numpy())

    predictions = np.vstack(predictions)  # (n_pairs, num_relations)
    metrics = compute_relation_metrics(predictions, relation_labels, relation_list, id2relation)

    cm, roc_auc = compute_confusion_matrix_and_roc(predictions, relation_labels, id2relation, dataset_name, mode="relation")

    if dataset_name == "测试集":
        # Make sure the output directory exists before writing the report.
        os.makedirs("./results", exist_ok=True)
        with open(f"./results/{dataset_name}_relation_metrics.json", "w") as f:
            json.dump(metrics, f, indent=4)

    return metrics, cm, roc_auc

# --- Main ---

def main():
    """Run the full pipeline: load and clean the corpora, augment with
    GAN-generated samples, fine-tune BioBERT for entity recognition, then
    train and evaluate the relation extractor.

    Populates the module-level globals that compute_metrics and the plotting
    helpers read (label_list, id2label, all_labels, relation_list,
    id2relation, all_relations).
    """
    global label_list, id2label, all_labels, relation_list, id2relation, all_relations

    try:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"使用设备: {device}")

        # Hard-coded local paths for the BioBERT checkpoint and corpus splits.
        local_model_path = "/root/npl/data/biobert-v1.1"
        train_path = "/root/npl/data/NCBI_corpus_training.txt"
        val_path = "/root/npl/data/NCBI_corpus_validation.txt"
        test_path = "/root/npl/data/NCBI_corpus_testing.txt"

        tokenizer = AutoTokenizer.from_pretrained(local_model_path)
        biobert_model = AutoModel.from_pretrained(local_model_path).to(device)

        # Load the raw datasets.
        print(f"\n读取训练数据集: {train_path}")
        with open(train_path, "r", encoding="utf-8", errors="ignore") as f:
            train_content = f.read()

        print(f"\n读取验证数据集: {val_path}")
        with open(val_path, "r", encoding="utf-8", errors="ignore") as f:
            val_content = f.read()

        print(f"\n读取测试数据集: {test_path}")
        with open(test_path, "r", encoding="utf-8", errors="ignore") as f:
            test_content = f.read()

        # Clean each split.
        print("清洗训练数据集...")
        train_docs, train_labels, train_relations = clean_dataset(train_content, "训练集")
        print("清洗验证数据集...")
        val_docs, val_labels, val_relations = clean_dataset(val_content, "验证集")
        print("清洗测试数据集...")
        test_docs, test_labels, test_relations = clean_dataset(test_content, "测试集")

        if not train_docs:
            print("错误: 训练集清洗后无有效文档")
            return

        # Keep small subsets to bound runtime.
        train_docs, val_docs, test_docs = train_docs[:50], val_docs[:20], test_docs[:20]
        print(f"使用前 {len(train_docs)} 个训练文档, {len(val_docs)} 个验证文档, {len(test_docs)} 个测试文档")

        # Debug info on the first training sample.
        sample = train_docs[0]
        print(f"\n训练集首样本ID: {sample['id']}")
        print(f"文本片段: {sample['text'][:150]}...")
        print(f"实体数量: {len(sample['entities'])}")
        if sample['entities']:
            entity = sample['entities'][0]
            print(f"示例实体: 文本='{entity['text']}', 类型='{entity['label']}', 位置={entity['start']}-{entity['end']}")
        if sample.get('relations'):
            relation = sample['relations'][0]
            print(f"示例关系: 类型='{relation['relation_type']}', 实体1='{relation['entity1']}', 实体2='{relation['entity2']}'")

        # Label statistics across all splits.
        all_labels = set(train_labels.keys()) | set(val_labels.keys()) | set(test_labels.keys())
        all_relations = train_relations | val_relations | test_relations
        print(f"\n所有实体标签类型: {all_labels}")
        print(f"所有关系类型: {all_relations}")

        label_counts = defaultdict(int)
        for docs in [train_docs, val_docs, test_docs]:
            for doc in docs:
                for entity in doc["entities"]:
                    label_counts[entity["label"]] += 1
        print(f"原始实体标签数量统计: {dict(label_counts)}")

        # Generate synthetic samples for under-represented entity labels.
        synthetic_docs = []
        for label, count in label_counts.items():
            if count < 200:
                needed = 500 - count  # top up the sample count
                synthetic_texts, synthetic_entities, synthetic_relations = generate_synthetic_data(
                    Generator(tokenizer.vocab_size, 256, 128).to(device),
                    tokenizer,
                    biobert_model,
                    num_samples=needed,
                    max_length=128,
                    vocab_size=tokenizer.vocab_size,
                    device=device,
                    target_labels=[label],
                    target_relations=list(all_relations) or ["治疗", "导致"]
                )
                for i, (text, entities, relations) in enumerate(zip(synthetic_texts, synthetic_entities, synthetic_relations)):
                    synthetic_docs.append({
                        "id": f"synthetic_{label}_{i}",
                        "text": text,
                        "entities": entities,
                        "relations": relations
                    })

        train_docs.extend(synthetic_docs)
        print(f"生成合成数据后训练集大小: {len(train_docs)}")

        # Build label/relation id mappings (BIO scheme for entities).
        label_list = ["O"] + [f"{prefix}-{label}" for prefix in ["B", "I"] for label in sorted(all_labels)]
        label_map = {label: i for i, label in enumerate(label_list)}
        id2label = {i: label for i, label in enumerate(label_list)}
        relation_list = sorted(all_relations) if all_relations else ["治疗", "导致"]
        relation_map = {rel: i for i, rel in enumerate(relation_list)}
        id2relation = {i: rel for i, rel in enumerate(relation_list)}
        print(f"实体标签映射: 共{len(label_map)}个标签")
        print(f"关系标签映射: 共{len(relation_map)}个标签")

        # Train the GAN on the (augmented) training texts.
        vocab_size, hidden_dim, max_length = tokenizer.vocab_size, 256, 128
        generator = Generator(vocab_size, hidden_dim, max_length).to(device)
        discriminator = Discriminator(vocab_size, hidden_dim).to(device)
        print("\n训练GAN模型...")
        train_gan(generator, discriminator, [doc["text"] for doc in train_docs], tokenizer, num_epochs=10, device=device)

        # Prepare the NER datasets. Stored in a plain dict instead of
        # dynamically created globals, which were fragile and hard to trace.
        tokenized_datasets = {}
        for name, docs in [("训练集", train_docs), ("验证集", val_docs), ("测试集", test_docs)]:
            print(f"\n准备{name}实体识别数据集...")
            dataset = Dataset.from_dict({"text": [doc["text"] for doc in docs], "entities": [doc["entities"] for doc in docs]})
            tokenized_datasets[name] = dataset.map(lambda x: tokenize_and_align_labels(x, tokenizer, label_map), batched=True)
            print(f"{name}实体识别数据集大小: {len(tokenized_datasets[name])}")

        # Initialize the token-classification model.
        print("\n初始化实体识别模型...")
        model = AutoModelForTokenClassification.from_pretrained(
            local_model_path,
            num_labels=len(label_map),
            ignore_mismatched_sizes=True,
            id2label=id2label,
            label2id=label_map
        ).to(device)

        training_args = TrainingArguments(
            output_dir="./results",
            num_train_epochs=5,
            per_device_train_batch_size=4,
            per_device_eval_batch_size=4,
            weight_decay=0.01,
            logging_dir='./logs',
            logging_steps=5,
            save_steps=500,
            save_total_limit=1,
            report_to="none",
            learning_rate=5e-5,
            no_cuda=not torch.cuda.is_available(),
            lr_scheduler_type='linear',
            fp16=torch.cuda.is_available(),  # fp16 requires CUDA; True on CPU-only raises
            eval_steps=500
        )

        print("\n初始化实体识别训练器...")
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=tokenized_datasets["训练集"],
            eval_dataset=tokenized_datasets["验证集"],
            compute_metrics=compute_metrics
        )

        print("\n开始实体识别模型训练...")
        trainer.train()
        model.save_pretrained("./results/final_model")
        tokenizer.save_pretrained("./results/final_model")
        print("\n实体识别模型训练完成并保存!")

        # Evaluate NER (only test-set metrics are persisted).
        print("\n在测试集上评估实体识别模型...")
        test_predictions = trainer.predict(tokenized_datasets["测试集"])
        test_metrics = test_predictions.metrics
        with open("./results/测试集_metrics.json", "w") as f:
            json.dump(test_metrics, f, indent=4)

        print("\n测试集实体识别评估结果:")
        if "eval_overall_precision" in test_metrics:
            print(f"总体准确率: {test_metrics['eval_overall_accuracy']:.4f}")
            print(f"总体精确率: {test_metrics['eval_overall_precision']:.4f}")
            print(f"总体召回率: {test_metrics['eval_overall_recall']:.4f}")
            print(f"总体F1分数: {test_metrics['eval_overall_f1']:.4f}")
            for key in test_metrics:
                if "precision" in key or "recall" in key or "f1" in key:
                    print(f"{key}: {test_metrics[key]:.4f}")

        test_cm, test_roc_auc = compute_confusion_matrix_and_roc(test_predictions.predictions, test_predictions.label_ids, id2label, "测试集")
        if test_cm is not None:
            print("\n测试集实体识别混淆矩阵:")
            print(test_cm)
            print("\n测试集实体识别每个类别的AUC:")
            for i, label in enumerate(label_list):
                print(f"{label}: {test_roc_auc.get(i, 0):.4f}")

        # Prepare the relation-extraction data per split.
        relation_data = {}
        for name, docs in [("训练集", train_docs), ("验证集", val_docs), ("测试集", test_docs)]:
            print(f"\n准备{name}关系抽取数据集...")
            relation_data[name] = prepare_relation_data(docs, tokenizer, relation_map, biobert_model, device)
            print(f"{name}关系抽取数据集大小: {len(relation_data[name][0])}")
        # Bind the splits used below. (The previous globals()-based storage
        # saved them under Chinese-prefixed names, so the
        # `train_relation_inputs` / `test_relation_inputs` names referenced
        # here were never defined and this section crashed with a NameError.)
        train_relation_inputs, train_relation_labels = relation_data["训练集"]
        test_relation_inputs, test_relation_labels = relation_data["测试集"]

        # Train the relation extractor (768 = BioBERT hidden size).
        print("\n初始化关系抽取模型...")
        relation_model = RelationExtractor(input_dim=768, hidden_dim=128, num_relations=len(relation_map)).to(device)
        print("\n开始关系抽取模型训练...")
        relation_model = train_relation_extractor(relation_model, train_relation_inputs, train_relation_labels, device)

        # Evaluate relation extraction (only test-set results are persisted).
        if relation_model:
            print("\n在测试集上评估关系抽取模型...")
            test_relation_metrics, test_relation_cm, test_relation_roc_auc = evaluate_relation_extractor(
                relation_model, test_relation_inputs, test_relation_labels, relation_list, id2relation, "测试集", device
            )

            print("\n测试集关系抽取评估结果:")
            if test_relation_metrics:
                print(f"总体准确率: {test_relation_metrics['relation_accuracy']:.4f}")
                print(f"总体精确率: {test_relation_metrics['relation_precision']:.4f}")
                print(f"总体召回率: {test_relation_metrics['relation_recall']:.4f}")
                print(f"总体F1分数: {test_relation_metrics['relation_f1']:.4f}")
                for key in test_relation_metrics:
                    if "precision" in key or "recall" in key or "f1" in key:
                        print(f"{key}: {test_relation_metrics[key]:.4f}")

            if test_relation_cm is not None:
                print("\n测试集关系抽取混淆矩阵:")
                print(test_relation_cm)
                print("\n测试集关系抽取每个类别的AUC:")
                for i, label in enumerate(relation_list):
                    print(f"{label}: {test_relation_roc_auc.get(i, 0):.4f}")

        print("\n测试集结果已保存至 ./results/")
        print("NLP处理流程完成!")

    except Exception as e:
        print(f"\n发生未预期的错误: {str(e)}")
        traceback.print_exc()

# Script entry point: run the whole pipeline when executed directly.
if __name__ == "__main__":
    main()

