import json
import re
import os
import numpy as np
import torch
from pathlib import Path
import traceback
from collections import defaultdict
from sklearn.metrics import precision_recall_fscore_support, classification_report

from transformers import AutoTokenizer, AutoModelForTokenClassification, TrainingArguments, Trainer
from datasets import Dataset

# Ensure the local model directory exists.
model_dir = Path("/root/npl/data/biobert-v1.1")
model_dir.mkdir(parents=True, exist_ok=True)

# NOTE(review): this message claims the local model files were verified as
# complete, but only the directory's existence is ensured above — no file
# content check is performed. Confirm whether a real check is intended.
print(f"检测到本地模型文件完整，目录: {model_dir.resolve()}")

def clean_dataset(file_content: str, dataset_name: str):
    """Parse a tab-separated NER corpus with inline entity annotations.

    Each non-empty line has the form ``<doc_id>\\t<annotated text>`` where
    entities are marked inline as ``<category="LABEL">entity text</category>``.

    Fix over the previous version: the annotation tags are now stripped from
    the stored text, and each entity's ``start``/``end`` offsets are
    recomputed against the *clean* text so that
    ``text[start:end] == entity["text"]`` holds. Previously the offsets came
    from ``match.start()``/``match.end()`` on the raw line, so they spanned
    the markup itself and the tags leaked into the text handed to the
    tokenizer, corrupting token-label alignment.

    Args:
        file_content: Full corpus file content (one document per line).
        dataset_name: Human-readable name, used only in the summary print.

    Returns:
        Tuple ``(documents, entity_labels)``: ``documents`` is a list of
        dicts with keys ``id``, ``text`` (markup-free) and ``entities``
        (dicts with ``label``/``text``/``start``/``end``); ``entity_labels``
        is the set of distinct label strings encountered.
    """
    tag_pattern = re.compile(r'<category="([^"]+)">([^<]+)</category>')
    documents = []
    entity_labels = set()

    for line in file_content.strip().split('\n'):
        line = line.strip()
        # Skip blank lines and lines without an ID/text separator.
        if not line or '\t' not in line:
            continue

        doc_id, _, raw_text = line.partition('\t')
        doc_id = doc_id.strip()
        raw_text = raw_text.strip()
        if not doc_id or not raw_text:
            continue

        # Rebuild the text without markup while tracking entity offsets
        # relative to the cleaned text.
        clean_parts = []
        clean_len = 0
        entities = []
        last_end = 0
        for match in tag_pattern.finditer(raw_text):
            # Plain text between the previous tag (or line start) and this tag.
            clean_parts.append(raw_text[last_end:match.start()])
            clean_len += match.start() - last_end

            label = match.group(1)
            entity_text = match.group(2)
            entities.append({
                "label": label,
                "text": entity_text,
                "start": clean_len,
                "end": clean_len + len(entity_text),
            })
            entity_labels.add(label)

            # The entity text itself stays in the cleaned output.
            clean_parts.append(entity_text)
            clean_len += len(entity_text)
            last_end = match.end()
        clean_parts.append(raw_text[last_end:])

        documents.append({
            "id": doc_id,
            "text": "".join(clean_parts),
            "entities": entities,
        })

    print(f"{dataset_name}清洗完成: 有效样本={len(documents)}, 标签类型={entity_labels}")
    return documents, entity_labels

def align_labels_with_tokens(text, entities, tokenized, label_map):
    """Produce one BIO label id per token from character-offset entities.

    Fix over the previous version: special tokens and padding — offset
    pairs with ``start == end``, e.g. ``(0, 0)`` for [CLS]/[SEP]/[PAD]
    emitted by HF fast tokenizers — now receive label ``-100`` so both the
    loss and ``compute_metrics`` ignore them, instead of being scored as
    ``"O"`` (which previously inflated metrics over padding positions).

    Args:
        text: Source text (kept for interface compatibility; alignment is
            driven purely by offsets, so it is not read here).
        entities: List of dicts with ``label``, ``start``, ``end`` —
            character offsets into ``text``.
        tokenized: Mapping with an ``"offset_mapping"`` list of
            ``(start, end)`` character spans, one per token.
        label_map: Label-name -> id mapping containing ``"O"`` and
            ``"B-<label>"`` / ``"I-<label>"`` entries.

    Returns:
        List of label ids, same length as ``offset_mapping``; ``-100`` for
        zero-width (special/padding) tokens.
    """
    offset_mapping = tokenized["offset_mapping"]

    # One pass: default every real token to "O" (-100 for special tokens)
    # and build the char-position -> token-index lookup.
    labels = []
    char_to_token = {}
    for token_idx, (start, end) in enumerate(offset_mapping):
        if start is None or end is None or start == end:
            labels.append(-100)  # special token or padding: ignore in loss
            continue
        labels.append(label_map["O"])
        for char_idx in range(int(start), int(end)):
            char_to_token[char_idx] = token_idx

    # Overwrite with B-/I- tags for every token an entity touches.
    for entity in entities:
        entity_label = entity["label"]
        entity_tokens = sorted({
            char_to_token[char_idx]
            for char_idx in range(entity["start"], entity["end"])
            if char_idx in char_to_token
        })
        if not entity_tokens:
            # Entity fell entirely outside the (possibly truncated) tokens.
            continue
        labels[entity_tokens[0]] = label_map[f"B-{entity_label}"]
        for token_idx in entity_tokens[1:]:
            labels[token_idx] = label_map[f"I-{entity_label}"]

    return labels

def tokenize_and_align_labels(examples, tokenizer, label_map):
    """Tokenize a batch of texts and attach aligned BIO label sequences.

    Args:
        examples: Batch mapping with parallel lists under ``"text"`` and
            ``"entities"``.
        tokenizer: HF tokenizer; called with ``return_offsets_mapping=True``
            so alignment can be done per character span.
        label_map: Label-name -> id mapping handed to the aligner.

    Returns:
        The tokenizer output with an added ``"labels"`` key: one label-id
        sequence per input text.
    """
    encoded = tokenizer(
        examples["text"],
        padding="max_length",
        truncation=True,
        max_length=128,
        return_offsets_mapping=True,
    )

    # Align labels example by example using each text's own offsets.
    all_labels = []
    for idx, (text, entities) in enumerate(zip(examples["text"], examples["entities"])):
        per_example = {
            "input_ids": encoded["input_ids"][idx],
            "attention_mask": encoded["attention_mask"][idx],
            "offset_mapping": encoded["offset_mapping"][idx],
        }
        all_labels.append(align_labels_with_tokens(text, entities, per_example, label_map))

    encoded["labels"] = all_labels
    return encoded

def compute_metrics(p):
    """Compute precision/recall/F1 for the token-classification Trainer.

    Reads the module-level global ``label_list`` (id -> label name) set by
    ``main()``.

    Fix over the previous version: the per-class loop checked
    ``label.isdigit()`` and then looked up ``id2label[int(label)]`` — but
    ``classification_report`` is fed label *name* strings, so its keys are
    names like ``"B-Disease"``, never digits, and per-class metrics were
    silently never emitted. We now key on the label names directly and skip
    the summary rows (note ``"accuracy"`` is a bare float, not a dict).

    Args:
        p: ``(predictions, labels)`` from the HF Trainer — predictions of
           shape (batch, seq, num_labels); labels of shape (batch, seq)
           with ``-100`` marking positions to ignore.

    Returns:
        Dict with macro-averaged ``overall_precision`` / ``overall_recall``
        / ``overall_f1`` plus ``<label>_precision`` / ``_recall`` / ``_f1``
        for every individual label present.
    """
    predictions, labels = p
    predictions = np.argmax(predictions, axis=2)

    # Drop positions labelled -100 (special tokens / padding).
    true_predictions = [
        [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]
    true_labels = [
        [label_list[l] for l in label if l != -100]
        for label in labels
    ]

    # Flatten to compute corpus-level metrics.
    flat_true_labels = [label for sublist in true_labels for label in sublist]
    flat_predictions = [label for sublist in true_predictions for label in sublist]

    overall_precision, overall_recall, overall_f1, _ = precision_recall_fscore_support(
        flat_true_labels, flat_predictions, average='macro', zero_division=0
    )

    class_report = classification_report(
        flat_true_labels,
        flat_predictions,
        output_dict=True,
        zero_division=0
    )

    results = {
        "overall_precision": overall_precision,
        "overall_recall": overall_recall,
        "overall_f1": overall_f1
    }

    # Per-class rows are keyed by label name; skip the aggregate rows.
    summary_rows = {"accuracy", "macro avg", "weighted avg", "micro avg"}
    for label_name, metrics in class_report.items():
        if label_name in summary_rows:
            continue
        results[f"{label_name}_precision"] = metrics["precision"]
        results[f"{label_name}_recall"] = metrics["recall"]
        results[f"{label_name}_f1"] = metrics["f1-score"]

    return results

def main():
    """Run the full NER fine-tuning pipeline on the NCBI disease corpus.

    Steps: load the local BioBERT tokenizer/model, read and clean the
    training/testing corpora, build the BIO label mappings, tokenize with
    offset-based label alignment, fine-tune with the HF Trainer, evaluate
    on the test set, and save model + metrics under ./results.

    Side effects: sets the module-level globals ``label_list`` and
    ``id2label`` consumed by ``compute_metrics``; reads from /root/npl/data
    and writes to ./results and ./logs.
    """
    # compute_metrics reads these globals during evaluation.
    global label_list, id2label
    
    try:
        # Use the GPU when CUDA is available.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"使用设备: {device}")
        
        # Locally stored BioBERT checkpoint.
        local_model_path = "/root/npl/data/biobert-v1.1"
        
        # Corpus file paths.
        train_path = "/root/npl/data/NCBI_corpus_training.txt"
        test_path = "/root/npl/data/NCBI_corpus_testing.txt"
        
        # Load the tokenizer from the local checkpoint.
        tokenizer = AutoTokenizer.from_pretrained(local_model_path)
        
        # Read the raw training corpus (decode errors silently dropped).
        print(f"\n读取训练数据集: {train_path}")
        with open(train_path, "r", encoding="utf-8", errors="ignore") as f:
            train_content = f.read()
        
        # Read the raw testing corpus.
        print(f"\n读取测试数据集: {test_path}")
        with open(test_path, "r", encoding="utf-8", errors="ignore") as f:
            test_content = f.read()
        
        # Parse documents and collect entity label types.
        print("清洗训练数据集...")
        train_docs, train_labels = clean_dataset(train_content, "训练集")
        
        print("清洗测试数据集...")
        test_docs, test_labels = clean_dataset(test_content, "测试集")
        
        # Abort early if cleaning produced no usable training documents.
        if not train_docs:
            print("错误: 训练集清洗后无有效文档")
            return
        
        # Keep only a small subset to limit time and resource usage.
        train_docs = train_docs[:50]
        test_docs = test_docs[:20]
        print(f"使用前 {len(train_docs)} 个训练文档")
        print(f"使用前 {len(test_docs)} 个测试文档")
        
        # Show the first training sample as a sanity check.
        sample = train_docs[0]
        print(f"\n训练集首样本ID: {sample['id']}")
        print(f"文本片段: {sample['text'][:150]}...")
        print(f"实体数量: {len(sample['entities'])}")
        if sample['entities']:
            entity = sample['entities'][0]
            print(f"示例实体: 文本='{entity['text']}', 类型='{entity['label']}', 位置={entity['start']}-{entity['end']}")
        else:
            print("警告: 首样本没有实体")
        
        # Build the BIO label space from the union of train/test labels.
        # NOTE(review): all_labels is a set, so label_list ordering can vary
        # between runs — fine within one run, but saved checkpoints rely on
        # the id2label persisted with the model.
        all_labels = train_labels | test_labels
        print(f"\n所有标签类型: {all_labels}")
        label_list = ["O"] + [f"{prefix}-{label}" for label in all_labels for prefix in ["B", "I"]]
        label_map = {label: i for i, label in enumerate(label_list)}
        id2label = {i: label for i, label in enumerate(label_list)}
        print(f"创建标签映射: 共{len(label_map)}个标签")
        
        # Build the tokenized training dataset with aligned labels.
        print("\n准备训练数据集...")
        train_texts = [doc["text"] for doc in train_docs]
        train_entities = [doc["entities"] for doc in train_docs]
        
        train_dict = {
            "text": train_texts,
            "entities": train_entities
        }
        
        train_dataset = Dataset.from_dict(train_dict)
        tokenized_train = train_dataset.map(
            lambda examples: tokenize_and_align_labels(examples, tokenizer, label_map),
            batched=True
        )
        
        # Build the tokenized test dataset the same way.
        print("\n准备测试数据集...")
        test_texts = [doc["text"] for doc in test_docs]
        test_entities = [doc["entities"] for doc in test_docs]
        
        test_dict = {
            "text": test_texts,
            "entities": test_entities
        }
        
        test_dataset = Dataset.from_dict(test_dict)
        tokenized_test = test_dataset.map(
            lambda examples: tokenize_and_align_labels(examples, tokenizer, label_map),
            batched=True
        )
        
        # Report dataset sizes.
        print(f"训练集大小: {len(tokenized_train)}")
        print(f"测试集大小: {len(tokenized_test)}")
        
        # Initialize a token-classification head on the local checkpoint;
        # ignore_mismatched_sizes permits a freshly sized classifier layer.
        print("\n初始化模型...")
        model = AutoModelForTokenClassification.from_pretrained(
            local_model_path,
            num_labels=len(label_map),
            ignore_mismatched_sizes=True,
            id2label=id2label,
            label2id=label_map
        ).to(device)
        
        # Minimal training configuration, compatible with older
        # transformers versions.
        training_args = TrainingArguments(
            output_dir="./results",
            num_train_epochs=3,
            per_device_train_batch_size=4,
            per_device_eval_batch_size=4,
            weight_decay=0.01,
            logging_dir='./logs',
            logging_steps=5,
            save_steps=500,  # checkpoint every 500 steps
            save_total_limit=1,
            report_to="none",
            learning_rate=5e-5,
            no_cuda=not torch.cuda.is_available(),  # explicit CUDA toggle
        )
        
        # Build the Trainer. NOTE(review): no eval_dataset is passed, so
        # evaluation only happens via the explicit evaluate() call below.
        print("\n初始化训练器...")
        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=tokenized_train,
            compute_metrics=compute_metrics
        )
        
        # Fine-tune.
        print("\n开始模型训练...")
        trainer.train()
        
        # Persist the final model and tokenizer together.
        model.save_pretrained("./results/final_model")
        tokenizer.save_pretrained("./results/final_model")
        print("\n模型训练成功完成并保存!")
        
        # Evaluate on the held-out test set.
        print("\n在测试集上评估模型...")
        test_metrics = trainer.evaluate(tokenized_test)
        
        # Save the test metrics as JSON.
        with open("./results/test_metrics.json", "w") as f:
            json.dump(test_metrics, f, indent=4)
        
        # Print overall metrics (Trainer prefixes keys with "eval_").
        print("\n评估结果:")
        if "eval_overall_precision" in test_metrics:
            print(f"总体精确率: {test_metrics['eval_overall_precision']:.4f}")
            print(f"总体召回率: {test_metrics['eval_overall_recall']:.4f}")
            print(f"总体F1分数: {test_metrics['eval_overall_f1']:.4f}")
        
        # Print every individual precision/recall/f1 entry.
        for key in test_metrics:
            if "precision" in key or "recall" in key or "f1" in key:
                print(f"{key}: {test_metrics[key]:.4f}")
        
        print("\nNLP处理流程完成!")
    
    except Exception as e:
        # Top-level boundary: report the error and dump the traceback
        # instead of crashing silently.
        print(f"\n发生未预期的错误：{str(e)}")
        traceback.print_exc()

# Script entry point.
if __name__ == "__main__":
    main()