
import re
import logging
from transformers import BertTokenizerFast
from torch.utils.data import Dataset
import torch
import numpy as np

# Configure module-wide logging.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class XMLtoBIODataset(Dataset):
    """Dataset that converts inline-XML entity annotations to BIO tags.

    Each input line is ``<id>\\t<text>`` where ``text`` may contain
    ``<category="NAME">entity text</category>`` spans.  The text is split
    into word tokens, entity spans become ``B-NAME`` / ``I-NAME`` labels and
    every other token is labelled ``O``.  ``__getitem__`` aligns the
    word-level labels to BERT word pieces: the first piece of a word gets the
    word's label, continuation pieces of a ``B-`` word get the matching
    ``I-`` label, and special/padding positions get -100.
    """

    def __init__(self, data_file, tokenizer, max_len=512, label2id=None, output_file=None):
        """
        Args:
            data_file: path to the tab-separated annotated corpus.
            tokenizer: a *fast* HuggingFace tokenizer — ``word_ids()`` and
                offset mappings are required by ``__getitem__``.
            max_len: pad/truncate target for the word-piece sequence.
            label2id: optional pre-built label-to-id mapping; derived from
                the parsed data when omitted (note: an explicitly passed
                empty dict also falls back to extraction).
            output_file: when given, the parsed samples are dumped there.
        """
        self.data_file = data_file
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.output_file = output_file
        self.data = self.load_data(data_file)
        self.label2id = label2id if label2id else self._extract_labels()
        self.id2label = {v: k for k, v in self.label2id.items()}
        logger.info(f"从 {data_file} 加载了 {len(self.data)} 个样本，标签为: {self.label2id}")
        # Log a small preview so parsing problems are visible immediately.
        for i, (sentence, labels, id_) in enumerate(self.data[:3]):
            logger.info(f"样本 {i+1}: 句子={' '.join(sentence[:10])}..., 标签={labels[:10]}..., ID={id_}")
        if self.output_file:
            self.save_data()

    def _extract_labels(self):
        """Collect every BIO label seen in the data and map it to an id.

        For each ``B-X`` encountered the matching ``I-X`` is added as well,
        so the mapping is complete even if no entity spans two tokens.
        Labels are sorted for a deterministic id assignment.
        """
        label_set = set(['O'])
        for _, labels, _ in self.data:
            for label in labels:
                if label != 'O' and label.startswith('B-'):
                    label_set.add(label)
                    label_set.add(f'I-{label[2:]}')
        return {label: i for i, label in enumerate(sorted(label_set))}

    def load_data(self, data_file):
        """Parse ``data_file`` into (tokens, BIO labels, id) triples.

        Malformed lines (fewer than two tab-separated fields) and lines
        whose parse yields empty or length-mismatched token/label lists are
        skipped with a warning.

        Raises:
            ValueError: if no valid sentence could be loaded at all.
        """
        sentences, labels, ids = [], [], []
        try:
            with open(data_file, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    parts = line.split('\t')
                    if len(parts) < 2:
                        logger.warning(f"跳过无效行: {line[:50]}...")
                        continue
                    id_, text = parts[0], parts[1]
                    sentence, sentence_labels = self._parse_xml_text(text)
                    if sentence and len(sentence) == len(sentence_labels):
                        sentences.append(sentence)
                        labels.append(sentence_labels)
                        ids.append(id_)
                    else:
                        logger.warning(f"解析失败，句子或标签为空: {text[:50]}...")
            if not sentences:
                logger.error(f"从 {data_file} 未加载到有效句子！")
                raise ValueError(f"从 {data_file} 未加载到有效句子！")
            return list(zip(sentences, labels, ids))
        except Exception as e:
            logger.error(f"加载 {data_file} 时出错: {str(e)}")
            raise

    def _parse_xml_text(self, text):
        """Parse one annotated text into parallel token and BIO-label lists.

        Text outside ``<category="...">...</category>`` spans is tokenized on
        whitespace and punctuation and labelled ``O``; the first token of an
        entity span gets ``B-<category>``, later tokens ``I-<category>``
        (punctuation inside an entity is deliberately labelled ``O``).
        """
        tokens, token_labels = [], []
        current_pos = 0
        pattern = r'<category="([^"]+)">([^<]+)</category>'
        matches = list(re.finditer(pattern, text))
        for match in matches:
            category = match.group(1)
            entity_text = match.group(2).strip()
            start, end = match.span()
            # Plain text between the previous match and this one → all 'O'.
            pre_text = text[current_pos:start].strip()
            if pre_text:
                pre_tokens = [t for t in re.split(r'(\s+|[.,!?;:\-\(\)])', pre_text) if t.strip() and t not in [' ', '\t']]
                tokens.extend(pre_tokens)
                token_labels.extend(['O'] * len(pre_tokens))
            entity_tokens = [t for t in re.split(r'(\s+|[.,!?;:\-\(\)])', entity_text) if t.strip() and t not in [' ', '\t']]
            if entity_tokens:
                tokens.append(entity_tokens[0])
                token_labels.append(f'B-{category}')
                for token in entity_tokens[1:]:
                    tokens.append(token)
                    token_labels.append(f'I-{category}' if token not in [',', '.', '!', '?', ';', '-', '(', ')'] else 'O')
            current_pos = end
        # Trailing plain text after the last match → all 'O'.
        post_text = text[current_pos:].strip()
        if post_text:
            post_tokens = [t for t in re.split(r'(\s+|[.,!?;:\-\(\)])', post_text) if t.strip() and t not in [' ', '\t']]
            tokens.extend(post_tokens)
            token_labels.extend(['O'] * len(post_tokens))
        return tokens, token_labels

    def save_data(self):
        """Write the parsed samples to ``self.output_file``, one per line
        as ``id<TAB>tokens<TAB>labels`` (space-joined)."""
        try:
            with open(self.output_file, 'w', encoding='utf-8') as f:
                for sentence, labels, id_ in self.data:
                    sentence_str = ' '.join(sentence)
                    labels_str = ' '.join(labels)
                    f.write(f"{id_}\t{sentence_str}\t{labels_str}\n")
            logger.info(f"解析后的数据集已保存到 {self.output_file}")
        except Exception as e:
            logger.error(f"保存数据集到 {self.output_file} 时出错: {str(e)}")
            raise

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Encode sample ``idx`` and align word-level BIO labels to word pieces."""
        sentence, labels, _ = self.data[idx]
        # Map any label missing from the vocabulary to 'O'.
        labels = [lbl if lbl in self.label2id else 'O' for lbl in labels]

        encoding = self.tokenizer(
            sentence,
            is_split_into_words=True,
            return_tensors='pt',
            padding='max_length',
            truncation=True,
            max_length=self.max_len,
            return_offsets_mapping=True
        )

        word_ids = encoding.word_ids()
        offsets = encoding['offset_mapping'].squeeze().tolist()
        aligned_labels = [-100] * self.max_len
        label_idx = 0

        # Count distinct word ids to detect truncation / tokenizer mismatch.
        unique_word_ids = []
        seen = set()
        for wid in word_ids:
            if wid is not None and wid not in seen:
                unique_word_ids.append(wid)
                seen.add(wid)

        if len(unique_word_ids) != len(labels):
            logger.warning(f"句子 {' '.join(sentence[:50])}... 的唯一 word_ids 长度 {len(unique_word_ids)} 不等于标签长度 {len(labels)}")
            if len(unique_word_ids) < len(labels):
                labels = labels[:len(unique_word_ids)]
            else:
                labels = labels + ['O'] * (len(unique_word_ids) - len(labels))

        # BUG FIX: previous_word_id was referenced (L150 in the original)
        # before it was ever assigned; it only worked because the first token
        # is always a special token that takes the first branch.
        previous_word_id = None
        for i, (word_id, (start, end)) in enumerate(zip(word_ids, offsets)):
            if word_id is None or start == end:
                # Special token or padding position: ignored by the loss.
                aligned_labels[i] = -100
            elif word_id != previous_word_id:
                # First word piece of a new word: assign the word's label.
                if label_idx < len(labels):
                    aligned_labels[i] = self.label2id[labels[label_idx]]
                    label_idx += 1
                else:
                    aligned_labels[i] = self.label2id['O']
            else:
                # Continuation piece: inherit I- from a preceding B-, else 'O'.
                prev_label = self.id2label[aligned_labels[i-1]] if i > 0 and aligned_labels[i-1] != -100 else 'O'
                if prev_label.startswith('B-') and f'I-{prev_label[2:]}' in self.label2id:
                    aligned_labels[i] = self.label2id[f'I-{prev_label[2:]}']
                else:
                    aligned_labels[i] = self.label2id['O']
            previous_word_id = word_id

        return {
            'input_ids': encoding['input_ids'].squeeze(),
            'attention_mask': encoding['attention_mask'].squeeze(),
            'labels': torch.tensor(aligned_labels, dtype=torch.long),
            'sentence': ' '.join(sentence[:50])
        }

def custom_collate_fn(batch):
    """Collate dataset items into a batched dict of tensors.

    ``None`` items are dropped first; when nothing valid remains the
    function returns ``None`` so the evaluation loop can skip the batch.
    """
    valid = [sample for sample in batch if sample is not None]
    if not valid:
        logger.warning("批次中没有有效样本")
        return None

    collated = {
        key: torch.stack([sample[key] for sample in valid])
        for key in ('input_ids', 'attention_mask', 'labels')
    }
    collated['sentence'] = [sample['sentence'] for sample in valid]
    return collated

def eval_epoch(model, data_loader, device, id2label):
    """Evaluate ``model`` on ``data_loader`` and return a seqeval report.

    Args:
        model: a token-classification model whose output exposes ``.logits``.
        data_loader: yields batches from ``custom_collate_fn`` (entries may
            be ``None`` and are skipped).
        device: torch device to run inference on.
        id2label: mapping from label ids to BIO label strings.

    Returns:
        The ``seqeval`` classification report as a dict, or ``{}`` when no
        valid sequence was collected or the report generation failed.
    """
    # BUG FIX: imported locally — the original module only bound `tqdm` and
    # `classification_report` inside the __main__ block, so importing this
    # module and calling eval_epoch from elsewhere raised NameError.
    from tqdm import tqdm
    from seqeval.metrics import classification_report

    model.eval()
    true_labels, pred_labels = [], []
    with torch.no_grad():
        for batch in tqdm(data_loader, desc="评估"):
            if batch is None:
                continue
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)
            sentences = batch['sentence']

            outputs = model(input_ids, attention_mask=attention_mask)
            logits = outputs.logits
            predictions = torch.argmax(logits, dim=-1)

            for idx, (pred, true, mask, sentence) in enumerate(zip(predictions, labels, attention_mask, sentences)):
                # Keep only non-padding positions, then drop -100 positions
                # (special tokens / word-piece continuations) so seqeval sees
                # word-level label sequences only.
                active_indices = mask == 1
                pred = pred[active_indices].cpu().numpy()
                true = true[active_indices].cpu().numpy()
                pred_seq = []
                true_seq = []
                for p, t in zip(pred, true):
                    if t != -100:
                        if p in id2label and t in id2label:
                            pred_seq.append(id2label[p])
                            true_seq.append(id2label[t])
                        else:
                            # Out-of-vocabulary id — degrade to 'O' instead of crashing.
                            logger.warning(f"无效标签: pred={p}, true={t}, 句子='{sentence[:50]}...'")
                            pred_seq.append('O')
                            true_seq.append('O')
                if len(pred_seq) != len(true_seq):
                    logger.warning(f"标签序列长度不一致: 句子='{sentence[:50]}...', pred_len={len(pred_seq)}, true_len={len(true_seq)}")
                    min_len = min(len(pred_seq), len(true_seq))
                    pred_seq = pred_seq[:min_len]
                    true_seq = true_seq[:min_len]
                if pred_seq and true_seq:
                    pred_labels.append(pred_seq)
                    true_labels.append(true_seq)
                else:
                    logger.warning(f"空标签序列: 句子='{sentence[:50]}...'")
                    logger.debug(f"pred={pred[:10]}, true={true[:10]}")

    if not true_labels or not pred_labels:
        logger.error("未收集到有效的标签序列，无法生成评估报告")
        return {}
    logger.info(f"收集到的标签序列数量: true={len(true_labels)}, pred={len(pred_labels)}")
    for i in range(min(3, len(true_labels))):
        logger.info(f"样本 {i+1}: true_labels={true_labels[i][:10]}..., pred_labels={pred_labels[i][:10]}...")
    try:
        report = classification_report(true_labels, pred_labels, output_dict=True, zero_division=0)
        return report
    except Exception as e:
        logger.error(f"生成评估报告失败: {str(e)}")
        return {}

# Standalone smoke test: parse the corpus, run the model, print a seqeval report.
if __name__ == "__main__":
    # NOTE: these imports stay at module level (inside the guard) because
    # eval_epoch resolves tqdm / classification_report as globals.
    from transformers import BertForTokenClassification
    from torch.utils.data import DataLoader
    from tqdm import tqdm
    from seqeval.metrics import classification_report
    import pandas as pd

    def main():
        """Build the dataset, load the model, evaluate and log the report."""
        data_file = "/root/npl/data/NCBI_corpus_testing.txt"
        output_file = "/root/npl/data/parsed_bio_dataset_test.txt"
        model_path = "/root/npl/bert-base-uncased"
        batch_size = 16
        max_len = 512
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Tokenizer and parsed dataset (also writes the BIO dump to output_file).
        tokenizer = BertTokenizerFast.from_pretrained(model_path)
        dataset = XMLtoBIODataset(data_file, tokenizer, max_len=max_len, output_file=output_file)

        # Token-classification head sized to the labels found in the data.
        num_labels = len(dataset.label2id)
        model = BertForTokenClassification.from_pretrained(model_path, num_labels=num_labels)
        model.to(device)

        test_loader = DataLoader(dataset, batch_size=batch_size, shuffle=False, collate_fn=custom_collate_fn)

        logger.info("开始评估测试集...")
        test_report = eval_epoch(model, test_loader, device, dataset.id2label)
        if not test_report:
            logger.error("评估报告为空，请检查数据和模型")
            return

        logger.info("测试报告:")
        report_df = pd.DataFrame(test_report).T
        logger.info(f"\n{report_df}")
        logger.info("\n主要指标:")
        if 'accuracy' in test_report:
            logger.info(f"准确率 (Accuracy): {test_report['accuracy']:.4f}")
        logger.info(f"宏平均 F1 分数 (Macro F1): {test_report['macro avg']['f1-score']:.4f}")
        logger.info(f"加权平均 F1 分数 (Weighted F1): {test_report['weighted avg']['f1-score']:.4f}")
        logger.info("\n各实体类别指标:")
        for label, metrics in test_report.items():
            if label in ('accuracy', 'macro avg', 'weighted avg'):
                continue
            logger.info(f"{label}:")
            logger.info(f"  精确率 (Precision): {metrics['precision']:.4f}")
            logger.info(f"  召回率 (Recall): {metrics['recall']:.4f}")
            logger.info(f"  F1 分数: {metrics['f1-score']:.4f}")
            logger.info(f"  支持数 (Support): {metrics['support']}")

    main()
