import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizerFast, BertForTokenClassification
from torch.optim import AdamW
from seqeval.metrics import classification_report
import numpy as np
from tqdm import tqdm
import pandas as pd
import logging
import os
import re
from xml.etree import ElementTree as ET

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# 自定义数据集类
class MedicalNERDataset(Dataset):
    """Token-classification dataset for BIO-tagged medical NER.

    Parses NCBI-corpus-style files where each line is
    ``pubmed_id<TAB>title<TAB>abstract`` and entities are annotated inline as
    ``<category="TYPE">entity text</category>``. Yields tensors ready for
    ``BertForTokenClassification`` with subword positions labelled ``-100``
    where they should be ignored by the loss.
    """

    def __init__(self, data_file, tokenizer, max_len=128, label2id=None):
        """
        Args:
            data_file: path to the tab-separated corpus file.
            tokenizer: a *fast* tokenizer; ``word_ids()`` is required for the
                subword/label alignment done in ``__getitem__``.
            max_len: maximum tokenized sequence length (pad/truncate target).
            label2id: optional shared label->id mapping; derived from this
                file's data when None.
        """
        self.data = self.load_data(data_file)
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.label2id = label2id if label2id else self._extract_labels()
        self.id2label = {v: k for k, v in self.label2id.items()}
        logger.info(f"从 {data_file} 加载了 {len(self.data)} 个样本，标签为: {self.label2id}")

    def _extract_labels(self):
        """Build a sorted label->id mapping: 'O' plus B-/I- for every category seen."""
        label_set = set(['O'])
        for _, labels in self.data:
            for label in labels:
                if label != 'O':
                    # Extract the category from a BIO tag (e.g. B-SpecificDisease)
                    # and register both B- and I- variants so the mapping is
                    # complete even if only one variant occurs in the data.
                    _, category = label.split('-', 1) if '-' in label else ('', label)
                    label_set.add(f'B-{category}')
                    label_set.add(f'I-{category}')
        return {label: i for i, label in enumerate(sorted(label_set))}

    def load_data(self, data_file):
        """Read the corpus file and return a list of (tokens, labels) pairs.

        Raises:
            FileNotFoundError: if ``data_file`` does not exist.
            ValueError: if no valid sentence could be parsed from the file.
        """
        if not os.path.exists(data_file):
            logger.error(f"数据文件 {data_file} 不存在！")
            raise FileNotFoundError(f"数据文件 {data_file} 不存在！")

        sentences, labels = [], []
        try:
            with open(data_file, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    # Split into PubMed ID, title and abstract; maxsplit=2 so
                    # tabs inside the abstract do not break the parse.
                    parts = line.split('\t', 2)
                    if len(parts) != 3:
                        logger.warning(f"跳过无效行: {line}")
                        continue
                    pubmed_id, title, abstract = parts
                    text = title + ' ' + abstract  # merge title and abstract

                    # Convert inline XML-style entity annotations to BIO labels.
                    tokens, token_labels = self._parse_text_with_entities(text)
                    if tokens and len(tokens) == len(token_labels):
                        sentences.append(tokens)
                        labels.append(token_labels)
                    else:
                        logger.warning(f"跳过无效句子（token和label长度不匹配）: {text[:50]}...")

            if not sentences:
                logger.error(f"从 {data_file} 未加载到有效句子！")
                raise ValueError(f"从 {data_file} 未加载到有效句子！")
            return list(zip(sentences, labels))
        except Exception as e:
            logger.error(f"加载 {data_file} 时出错: {str(e)}")
            raise

    def _parse_text_with_entities(self, text):
        """Tokenize annotated text into whitespace tokens with BIO labels.

        Entity spans are marked as ``<category="TYPE">entity text</category>``;
        everything outside a span is labelled 'O'.
        """
        tokens, labels = [], []
        current_pos = 0
        # Match the inline entity markup used by the NCBI corpus files.
        pattern = r'<category="([^"]+)">([^<]+)</category>'
        matches = list(re.finditer(pattern, text))

        for match in matches:
            category = match.group(1)
            entity_text = match.group(2)
            start, end = match.span()

            # Plain text before the entity -> 'O' labels.
            pre_tokens = text[current_pos:start].split()
            tokens.extend(pre_tokens)
            labels.extend(['O'] * len(pre_tokens))

            # Entity text: B- on the first token, I- on the rest.
            entity_tokens = entity_text.split()
            if entity_tokens:
                tokens.append(entity_tokens[0])
                labels.append(f'B-{category}')
                for token in entity_tokens[1:]:
                    tokens.append(token)
                    labels.append(f'I-{category}')

            current_pos = end

        # Trailing text after the last entity -> 'O' labels.
        post_tokens = text[current_pos:].split()
        tokens.extend(post_tokens)
        labels.extend(['O'] * len(post_tokens))

        return tokens, labels

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        sentence, labels = self.data[idx]
        # Neutralize labels missing from the shared label2id instead of
        # returning None: the default DataLoader collate function cannot batch
        # None items and would raise a TypeError, so the sample is kept with
        # unknown labels mapped to 'O'.
        safe_labels = []
        for label in labels:
            if label not in self.label2id:
                logger.warning(f"句子 {sentence} 中发现无效标签: {label}")
                safe_labels.append('O')
            else:
                safe_labels.append(label)
        labels = safe_labels

        encoding = self.tokenizer(
            sentence,
            is_split_into_words=True,
            return_tensors='pt',
            padding='max_length',
            truncation=True,
            max_length=self.max_len
        )

        # Align word-level labels to subword tokens; positions with no word
        # (special tokens, padding) stay -100 so the loss ignores them.
        word_ids = encoding.word_ids()
        aligned_labels = [-100] * self.max_len
        for i, word_id in enumerate(word_ids):
            if word_id is not None and word_id < len(labels):
                aligned_labels[i] = self.label2id[labels[word_id]]

        return {
            'input_ids': encoding['input_ids'].squeeze(),
            'attention_mask': encoding['attention_mask'].squeeze(),
            'labels': torch.tensor(aligned_labels, dtype=torch.long)
        }

# 训练函数
def train_epoch(model, data_loader, optimizer, device):
    """Run one training epoch.

    Args:
        model: token-classification model that returns an object with ``.loss``
            when called with ``labels``.
        data_loader: iterable of batches with 'input_ids', 'attention_mask',
            'labels' tensors.
        optimizer: optimizer stepping over ``model.parameters()``.
        device: device the batch tensors are moved to.

    Returns:
        Mean loss over the batches actually processed (0 if none were).
    """
    model.train()
    total_loss = 0.0
    num_batches = 0
    for batch in tqdm(data_loader, desc="训练"):
        if batch is None:  # defensive: skip batches a custom collate may drop
            continue
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)

        optimizer.zero_grad()
        outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
        loss = outputs.loss
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        num_batches += 1
    # Average over processed batches; dividing by len(data_loader) would
    # under-report the loss whenever batches are skipped.
    return total_loss / num_batches if num_batches > 0 else 0

# 评估函数
def eval_epoch(model, data_loader, device, id2label):
    """Evaluate the model and return a seqeval classification report dict.

    Aligns predictions and gold labels per sequence: positions whose gold
    label is -100 (special tokens / subword continuations / padding) are
    excluded from BOTH lists. The original filtered predictions with
    ``p != -100``, but predictions are argmax class ids and never -100, so
    prediction and gold lists drifted out of sync. seqeval also expects a
    list of label *sequences*, not flat label lists, which this now provides.

    Args:
        model: token-classification model returning an object with ``.logits``.
        data_loader: iterable of batches with 'input_ids', 'attention_mask',
            'labels' tensors.
        device: device the batch tensors are moved to.
        id2label: mapping from class id to BIO label string.

    Returns:
        dict produced by ``classification_report(..., output_dict=True)``.
    """
    model.eval()
    true_labels, pred_labels = [], []
    with torch.no_grad():
        for batch in tqdm(data_loader, desc="评估"):
            if batch is None:  # defensive: skip batches a custom collate may drop
                continue
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)

            outputs = model(input_ids, attention_mask=attention_mask)
            predictions = torch.argmax(outputs.logits, dim=-1)

            for pred, true in zip(predictions.cpu().numpy(), labels.cpu().numpy()):
                seq_true, seq_pred = [], []
                for p, t in zip(pred, true):
                    if t != -100:  # gold -100 marks positions the loss ignored
                        seq_true.append(id2label[t])
                        seq_pred.append(id2label[p])
                if seq_true:
                    true_labels.append(seq_true)
                    pred_labels.append(seq_pred)
    return classification_report(true_labels, pred_labels, output_dict=True)

# 主函数
if __name__ == "__main__":
    # Configuration: BioBERT checkpoint, NCBI disease corpus splits, and
    # standard fine-tuning hyper-parameters.
    model_name = "dmis-lab/biobert-v1.1"
    data_files = {
        'train': "data/NCBI_corpus_training.txt",
        'valid': "data/NCBI_corpus_development.txt",
        'test': "data/NCBI_corpus_testing.txt"
    }
    max_len = 128
    batch_size = 16
    epochs = 5
    learning_rate = 2e-5
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")

    if torch.cuda.is_available():
        logger.info(f"使用 GPU: {torch.cuda.get_device_name(0)}")
    else:
        logger.warning("GPU 不可用，将回退到 CPU。")

    # A *fast* tokenizer is mandatory: the dataset relies on word_ids() for
    # subword/label alignment.
    try:
        tokenizer = BertTokenizerFast.from_pretrained(model_name)
        logger.info("成功加载 BertTokenizerFast")
    except Exception as e:
        logger.error(f"加载 BertTokenizerFast 失败: {str(e)}")
        # Chain the original exception so the root cause is preserved.
        raise ValueError(
            "请安装 'tokenizers' 库并确保模型支持快速分词器。运行: pip install tokenizers") from e

    # Load the training set once and derive the shared label vocabulary from it.
    train_dataset = MedicalNERDataset(data_files['train'], tokenizer, max_len)
    label2id = train_dataset.label2id

    # Build the remaining splits with the shared label2id. Reuse the training
    # dataset built above instead of parsing the file a second time.
    datasets = {'train': train_dataset}
    for split, data_file in data_files.items():
        if split == 'train':
            continue
        if os.path.exists(data_file):
            datasets[split] = MedicalNERDataset(data_file, tokenizer, max_len, label2id=label2id)
        else:
            logger.warning(f"数据文件 {data_file} 不存在，跳过 {split} 数据集。")

    # DataLoaders: shuffle only the training split.
    data_loaders = {}
    for split, dataset in datasets.items():
        data_loaders[split] = DataLoader(dataset, batch_size=batch_size, shuffle=(split == 'train'))

    # Token-classification head sized to the label vocabulary.
    model = BertForTokenClassification.from_pretrained(
        model_name,
        num_labels=len(label2id)
    ).to(device)

    optimizer = AdamW(model.parameters(), lr=learning_rate)

    # Training loop with best-model checkpointing on validation weighted F1.
    best_f1 = 0
    for epoch in range(epochs):
        logger.info(f"第 {epoch + 1}/{epochs} 轮")
        if 'train' in data_loaders:
            train_loss = train_epoch(model, data_loaders['train'], optimizer, device)
            logger.info(f"训练损失: {train_loss:.4f}")
        else:
            logger.error("训练数据集不可用！")
            break

        if 'valid' in data_loaders:
            eval_report = eval_epoch(model, data_loaders['valid'], device, train_dataset.id2label)
            f1_score = eval_report['weighted avg']['f1-score']
            logger.info("验证报告:")
            logger.info(pd.DataFrame(eval_report).T)
            if f1_score > best_f1:
                best_f1 = f1_score
                model.save_pretrained("./medical_ner_model_best")
                tokenizer.save_pretrained("./medical_ner_model_best")
                logger.info("最佳模型已保存！")
        else:
            logger.info("无验证数据，跳过评估。")

    # Final evaluation on the held-out test split (last-epoch weights).
    if 'test' in data_loaders:
        test_report = eval_epoch(model, data_loaders['test'], device, train_dataset.id2label)
        logger.info("测试报告:")
        logger.info(pd.DataFrame(test_report).T)
    else:
        logger.info("无测试数据，跳过测试评估。")

    # Save the final model; the best validation checkpoint is saved separately
    # under ./medical_ner_model_best.
    model.save_pretrained("./medical_ner_model")
    tokenizer.save_pretrained("./medical_ner_model")
    logger.info("模型已保存至 ./medical_ner_model")