
import os
os.environ["CURL_CA_BUNDLE"] = ""
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizerFast, BertForTokenClassification
from seqeval.metrics import classification_report
import numpy as np
from tqdm import tqdm
import pandas as pd
import logging
import re
import urllib3

# Suppress urllib3's InsecureRequestWarning (SSL verification is effectively
# disabled above via CURL_CA_BUNDLE="", so HTTPS downloads would warn on every request).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Configure module-wide logging at INFO level.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Custom dataset for medical named-entity recognition.
class MedicalNERDataset(Dataset):
    """Token-classification dataset over inline-annotated medical text.

    Each input line is tab-separated; the second field contains text with
    entities marked as ``<category="TYPE">entity words</category>``.  The
    markup is parsed into word-level BIO labels, which ``__getitem__`` then
    aligns to the tokenizer's subword pieces.
    """

    def __init__(self, data_file, tokenizer, max_len=512, label2id=None):
        """
        Args:
            data_file: path to the annotated, tab-separated data file.
            tokenizer: fast tokenizer supporting ``word_ids()``
                (e.g. BertTokenizerFast).
            max_len: padding/truncation length applied to every sample.
            label2id: optional pre-built label vocabulary; pass another
                split's map here so label ids stay consistent across splits.
        """
        self.data_file = data_file
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.data = self.load_data(data_file)
        # Reuse a caller-supplied vocabulary when given, otherwise derive one.
        self.label2id = label2id if label2id else self._extract_labels()
        self.id2label = {v: k for k, v in self.label2id.items()}
        logger.info(f"从 {data_file} 加载了 {len(self.data)} 个样本，标签为: {self.label2id}")
        # Log a small preview of the parsed data for sanity checking.
        for i, (sentence, labels) in enumerate(self.data[:3]):
            logger.info(f"样本 {i+1}: 句子={' '.join(sentence[:10])}..., 标签={labels[:10]}...")

    def _extract_labels(self):
        """Derive a sorted label->id map from the data; every B-X implies I-X."""
        label_set = set(['O'])
        for _, labels in self.data:
            for label in labels:
                if label != 'O' and label.startswith('B-'):
                    label_set.add(label)
                    label_set.add(f'I-{label[2:]}')
        return {label: i for i, label in enumerate(sorted(label_set))}

    def load_data(self, data_file):
        """Parse ``data_file`` into a list of (word_tokens, bio_labels) pairs.

        Raises:
            FileNotFoundError: if the file is missing.
            ValueError: if no line yields a valid sentence.
        """
        if not os.path.exists(data_file):
            logger.error(f"数据文件 {data_file} 不存在！")
            raise FileNotFoundError(f"数据文件 {data_file} 不存在！")

        sentences, labels = [], []
        try:
            with open(data_file, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    parts = line.split('\t')
                    if len(parts) < 2:
                        logger.warning(f"跳过无效行: {line}")
                        continue
                    text = parts[1]  # second column holds the annotated text
                    sentence, sentence_labels = self._parse_xml_text(text)
                    # Keep only samples whose tokens and labels line up 1:1.
                    if sentence and len(sentence) == len(sentence_labels):
                        sentences.append(sentence)
                        labels.append(sentence_labels)
                    else:
                        logger.warning(f"解析失败，句子或标签为空: {text[:50]}...")
            if not sentences:
                logger.error(f"从 {data_file} 未加载到有效句子！")
                raise ValueError(f"从 {data_file} 未加载到有效句子！")
            return list(zip(sentences, labels))
        except Exception as e:
            logger.error(f"加载 {data_file} 时出错: {str(e)}")
            raise

    def _parse_xml_text(self, text):
        """Convert inline ``<category="X">...</category>`` markup to BIO labels.

        Returns:
            (tokens, token_labels): whitespace-split word tokens and per-word
            BIO tags ('B-X'/'I-X' inside an entity, 'O' elsewhere).
        """
        tokens, token_labels = [], []
        current_pos = 0
        pattern = r'<category="([^"]+)">([^<]+)</category>'
        matches = list(re.finditer(pattern, text))
        for match in matches:
            category = match.group(1)
            entity_text = match.group(2).strip()
            start, end = match.span()
            # Plain text between the previous entity and this one is all 'O'.
            pre_text = text[current_pos:start].strip()
            if pre_text:
                pre_tokens = pre_text.split()
                tokens.extend(pre_tokens)
                token_labels.extend(['O'] * len(pre_tokens))
            # First word of the entity gets B-, the remaining words get I-.
            entity_tokens = entity_text.split()
            if entity_tokens:
                tokens.append(entity_tokens[0])
                token_labels.append(f'B-{category}')
                for token in entity_tokens[1:]:
                    tokens.append(token)
                    token_labels.append(f'I-{category}')
            current_pos = end
        # Trailing text after the last entity is also 'O'.
        post_text = text[current_pos:].strip()
        if post_text:
            post_tokens = post_text.split()
            tokens.extend(post_tokens)
            token_labels.extend(['O'] * len(post_tokens))
        return tokens, token_labels

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Tokenize sample ``idx`` and align word-level labels to subwords.

        Special tokens and padding get label -100 (ignored by the loss);
        continuation subwords inherit the word's entity, with B- turned to I-.
        """
        sentence, labels = self.data[idx]
        # Map labels missing from the vocabulary (unseen categories) to 'O'.
        labels = [lbl if lbl in self.label2id else 'O' for lbl in labels]

        encoding = self.tokenizer(
            sentence,
            is_split_into_words=True,
            return_tensors='pt',
            padding='max_length',
            truncation=True,
            max_length=self.max_len
        )

        word_ids = encoding.word_ids()
        aligned_labels = [-100] * self.max_len
        previous_word_id = None

        # BUGFIX: compare the number of *distinct* words that survived
        # tokenization against the label count.  The old check used the raw
        # subword-token count, which exceeds the word count whenever any word
        # splits into pieces, so it warned (and padded labels) spuriously.
        valid_word_ids = [wid for wid in word_ids if wid is not None]
        num_words = len(set(valid_word_ids))
        if num_words != len(labels):
            logger.warning(f"句子 {' '.join(sentence[:50])}... 的 word_ids 长度 {num_words} 不等于标签长度 {len(labels)}")
            labels = labels[:num_words] + ['O'] * (num_words - len(labels))

        for i, word_id in enumerate(word_ids):
            if word_id is None:
                # Special token / padding position: ignored by the loss.
                aligned_labels[i] = -100
            elif word_id != previous_word_id:
                # First subword of a word carries the word's label.
                aligned_labels[i] = self.label2id[labels[word_id]]
            else:
                # Continuation subword: propagate the entity, turning B- into I-.
                prev_label = self.id2label[aligned_labels[i-1]] if i > 0 and aligned_labels[i-1] != -100 else 'O'
                if prev_label.startswith('B-') and f'I-{prev_label[2:]}' in self.label2id:
                    aligned_labels[i] = self.label2id[f'I-{prev_label[2:]}']
                else:
                    aligned_labels[i] = aligned_labels[i-1]
            previous_word_id = word_id

        return {
            'input_ids': encoding['input_ids'].squeeze(),
            'attention_mask': encoding['attention_mask'].squeeze(),
            'labels': torch.tensor(aligned_labels, dtype=torch.long),
            'sentence': ' '.join(sentence[:50]),
            'word_ids': word_ids
        }

# Collate dataset items into one batch, dropping any invalid (None) samples.
def custom_collate_fn(batch):
    """Stack per-sample tensors into batch tensors.

    Non-tensor fields ('sentence', 'word_ids') are gathered into plain lists.
    Returns None when the batch contains no valid sample.
    """
    valid = [sample for sample in batch if sample is not None]
    if not valid:
        logger.warning("批次中没有有效样本")
        return None

    def _stack(key):
        # Stack one tensor field across every valid sample.
        return torch.stack([sample[key] for sample in valid])

    return {
        'input_ids': _stack('input_ids'),
        'attention_mask': _stack('attention_mask'),
        'labels': _stack('labels'),
        'sentence': [sample['sentence'] for sample in valid],
        'word_ids': [sample['word_ids'] for sample in valid],
    }

# Evaluation: collect aligned (gold, predicted) BIO sequences and score with seqeval.
def eval_epoch(model, data_loader, device, id2label):
    """Run one evaluation pass and return a seqeval classification report.

    Args:
        model: token-classification model whose output exposes ``.logits``.
        data_loader: yields batches from ``custom_collate_fn`` (may yield None).
        device: torch device to run inference on.
        id2label: id -> BIO label map; gold positions labelled -100 are ignored.

    Returns:
        ``classification_report(..., output_dict=True)`` result, or {} when no
        valid sequence was collected.
    """
    model.eval()
    true_labels, pred_labels = [], []
    with torch.no_grad():
        for batch in tqdm(data_loader, desc="评估"):
            if batch is None:
                continue
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)
            sentences = batch['sentence']

            outputs = model(input_ids, attention_mask=attention_mask)
            predictions = torch.argmax(outputs.logits, dim=-1)

            for pred, true, mask, sentence in zip(predictions, labels, attention_mask, sentences):
                pred = pred.cpu().numpy()
                true = true.cpu().numpy()
                mask = mask.cpu().numpy()
                # BUGFIX: filter pred and true *jointly* on positions that carry
                # a real gold label (mask == 1 and label != -100).  The old code
                # filtered the two sequences independently, so special/subword
                # positions (gold -100 but a valid predicted id) survived in
                # pred_seq only; the subsequent blind truncation then shifted
                # every later position and corrupted the seqeval scores.
                pred_seq, true_seq = [], []
                for p, t, m in zip(pred, true, mask):
                    if m == 1 and t in id2label:
                        true_seq.append(id2label[int(t)])
                        # Predicted ids should always be valid classes; fall
                        # back to 'O' defensively if one is somehow out of range.
                        pred_seq.append(id2label.get(int(p), 'O'))
                if pred_seq:
                    pred_labels.append(pred_seq)
                    true_labels.append(true_seq)
                else:
                    logger.warning(f"空标签序列: 句子='{sentence[:50]}...'")

    if not true_labels or not pred_labels:
        logger.error("未收集到有效的标签序列，无法生成评估报告")
        return {}
    logger.info(f"收集到的标签序列数量: true={len(true_labels)}, pred={len(pred_labels)}")
    return classification_report(true_labels, pred_labels, output_dict=True)

# Main entry point: load tokenizer/model, build the test dataset, run evaluation.
if __name__ == "__main__":
    # NOTE(review): hard-coded local paths — adjust per environment.
    model_path = "/root/npl/bert-base-uncased"
    test_data_file = "/root/npl/data/parsed_bio_dataset_test.txt"
    
    max_len = 512
    batch_size = 16
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"使用设备: {device}")

    # First pass over the test file only derives the label vocabulary, and
    # therefore num_labels for the classification head.
    # NOTE(review): labels are derived from the *test* split; confirm they
    # match the label-id mapping the checkpoint was trained with.
    try:
        tokenizer = BertTokenizerFast.from_pretrained(model_path)
        temp_dataset = MedicalNERDataset(test_data_file, tokenizer, max_len)
        num_labels = len(temp_dataset.label2id)
        logger.info(f"从 {model_path} 成功加载分词器，num_labels={num_labels}")
    except Exception as e:
        logger.error(f"加载分词器失败: {str(e)}")
        raise

    try:
        model = BertForTokenClassification.from_pretrained(model_path, num_labels=num_labels)
        model.to(device)
        logger.info("成功加载BERT模型")
    except Exception as e:
        logger.error(f"加载模型失败: {str(e)}")
        raise

    # NOTE(review): this re-reads and re-parses the same file; temp_dataset
    # could be reused directly since it already holds the same label2id.
    try:
        test_dataset = MedicalNERDataset(test_data_file, tokenizer, max_len, label2id=temp_dataset.label2id)
        logger.info("成功加载测试数据集")
    except Exception as e:
        logger.error(f"加载测试数据集失败: {str(e)}")
        raise

    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, collate_fn=custom_collate_fn)

    logger.info("开始评估测试集...")
    test_report = eval_epoch(model, test_loader, device, test_dataset.id2label)
    if not test_report:
        logger.error("评估报告为空，请检查数据和模型")
    else:
        # Pretty-print the full seqeval report as a DataFrame, then key metrics.
        logger.info("测试报告:")
        report_df = pd.DataFrame(test_report).T
        logger.info(f"\n{report_df}")
        logger.info("\n主要指标:")
        if 'accuracy' in test_report:
            logger.info(f"准确率 (Accuracy): {test_report['accuracy']:.4f}")
        logger.info(f"宏平均 F1 分数 (Macro F1): {test_report['macro avg']['f1-score']:.4f}")
        logger.info(f"加权平均 F1 分数 (Weighted F1): {test_report['weighted avg']['f1-score']:.4f}")
        logger.info("\n各实体类别指标:")
        # Per-entity precision/recall/F1/support for every non-aggregate row.
        for label, metrics in test_report.items():
            if label not in ['accuracy', 'macro avg', 'weighted avg']:
                logger.info(f"{label}:")
                logger.info(f"  精确率 (Precision): {metrics['precision']:.4f}")
                logger.info(f"  召回率 (Recall): {metrics['recall']:.4f}")
                logger.info(f"  F1 分数: {metrics['f1-score']:.4f}")
                logger.info(f"  支持数 (Support): {metrics['support']}")
