import torch
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertModel, BertConfig
from torch import nn
import os
import json
import time
from typing import Dict, Any

# Allowlist the NumPy scalar constructor so torch.load can deserialize
# checkpoints that embed NumPy scalar objects (required by PyTorch's
# weights_only-era safe-unpickling checks).
from torch.serialization import add_safe_globals

add_safe_globals([np.core.multiarray.scalar])


# 加载之前定义的模型类
class BertForMultiLabelClassification(nn.Module):
    """BERT encoder topped with a sigmoid-activated linear head.

    Emits one independent probability per label, as required for
    multi-label classification.
    """

    def __init__(self, config):
        super().__init__()
        # Attribute names must match the fine-tuned checkpoint's state_dict keys.
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.sigmoid = nn.Sigmoid()
        self.num_labels = config.num_labels

    def forward(self, input_ids=None, attention_mask=None):
        """Return per-label probabilities of shape (batch, num_labels)."""
        encoder_outputs = self.bert(input_ids, attention_mask=attention_mask)
        # Index 1 is the pooled [CLS] representation.
        pooled = self.dropout(encoder_outputs[1])
        return self.sigmoid(self.classifier(pooled))


# 修改后的数据预处理类，加载文本和标签
class MultiLabelDataset(Dataset):
    """Dataset over parallel text/label sequences, tokenized for BERT.

    Args:
        texts: sequence of raw input strings.
        labels: sequence of label annotations, parallel to ``texts``.
        tokenizer: HuggingFace tokenizer exposing ``encode_plus``.
        max_len: fixed sequence length (pad/truncate target).
        limit: optional cap on the number of samples retained.
    """

    def __init__(self, texts, labels, tokenizer, max_len, limit=None):
        if limit is not None:
            texts = texts[:limit]
            labels = labels[:limit]
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, item):
        raw_text = str(self.texts[item])
        encoded = self.tokenizer.encode_plus(
            raw_text,
            add_special_tokens=True,
            max_length=self.max_len,
            return_token_type_ids=False,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt',
        )
        # Flatten drops the batch dim added by return_tensors='pt'.
        return {
            'text': raw_text,
            'label': self.labels[item],
            'input_ids': encoded['input_ids'].flatten(),
            'attention_mask': encoded['attention_mask'].flatten()
        }


def predict(model, data_loader, device, threshold=0.1):
    """Run inference over every batch and binarize the outputs.

    Args:
        model: module returning per-label probabilities for
            (input_ids, attention_mask).
        data_loader: iterable of batch dicts with keys 'text', 'label',
            'input_ids' and 'attention_mask'.
        device: torch device to run inference on.
        threshold: probability cutoff above which a label is predicted.

    Returns:
        Tuple (texts, true_labels, predictions); predictions is a boolean
        ndarray of shape (num_samples, num_labels).
    """
    model.eval()
    predictions = []
    texts = []
    true_labels = []

    start_time = time.time()  # wall-clock timing for the whole run
    total_samples = 0

    with torch.no_grad():
        for batch in data_loader:
            batch_size = len(batch['text'])
            texts.extend(batch['text'])
            true_labels.extend(batch['label'])

            # Use the running sample counter so the reported range stays
            # accurate even when the final batch is smaller than the
            # loader's batch_size (the old (batch_idx+1)*batch_size upper
            # bound overshot on the last partial batch).
            print(f"正在预测第 {total_samples + 1}-"
                  f"{total_samples + batch_size} 个数据...")

            outputs = model(
                input_ids=batch['input_ids'].to(device),
                attention_mask=batch['attention_mask'].to(device)
            )
            predictions.extend(outputs.cpu().numpy() > threshold)

            total_samples += batch_size

    total_time = time.time() - start_time
    avg_time = total_time / total_samples if total_samples > 0 else 0

    print(f"\n预测完成! 总用时: {total_time:.2f}秒")
    print(f"平均每个数据用时: {avg_time:.4f}秒")
    print(f"共处理 {total_samples} 个数据\n")

    return texts, true_labels, np.array(predictions)


def format_labels(label_indices, num_classes):
    """Normalize a raw label annotation into class-index strings.

    Args:
        label_indices: one of
            - a str like "1,3,5" (comma-separated indices),
            - a single int/float index (NaN means "no label"),
            - an iterable of integer indices.
        num_classes: total class count; indices outside [0, num_classes)
            are dropped.

    Returns:
        list[str]: the annotated class indices as strings, in ascending
        order, deduplicated, restricted to the valid range.
    """
    if isinstance(label_indices, str):
        # Parse comma-separated labels such as "1,3,5".
        label_indices = [int(x) for x in label_indices.split(',') if x.strip()]
    elif isinstance(label_indices, float) and label_indices != label_indices:
        # NaN (e.g. an empty Excel cell read by pandas) means "no label";
        # int(nan) would raise ValueError.
        label_indices = []
    elif isinstance(label_indices, (int, float)):
        # Single numeric label.
        label_indices = [int(label_indices)]

    # Set membership avoids the O(num_classes * len(labels)) list scan.
    wanted = set(label_indices)
    return [str(i) for i in range(num_classes) if i in wanted]


class TextClassifier:
    """Inference wrapper: loads a fine-tuned multi-label BERT model and
    classifies single texts, attaching human-readable explanations."""

    def __init__(self, config: Dict[str, Any]):
        """Build tokenizer, model and category mapping from *config*.

        Expected keys: 'model_path', 'saved_model', 'num_labels',
        'max_len'; 'category_mapping_path' is optional.
        """
        self.config = config
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # Category index (as string) -> human-readable explanation.
        mapping_file = config.get('category_mapping_path', 'category_mapping.json')
        with open(mapping_file, 'r', encoding='utf-8') as f:
            self.category_mapping = json.load(f)

        # Tokenizer and model share the same pretrained base path.
        self.tokenizer = BertTokenizer.from_pretrained(config['model_path'])
        bert_config = BertConfig.from_pretrained(config['model_path'], num_labels=config['num_labels'])
        self.model = BertForMultiLabelClassification(bert_config).to(self.device)

        # Restore the fine-tuned weights.
        # NOTE(review): weights_only=False executes arbitrary pickle code —
        # only load checkpoints from trusted sources.
        checkpoint = torch.load(config['saved_model'], map_location=self.device, weights_only=False)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.model.eval()

    def predict_text(self, text: str, threshold: float = 0.5) -> Dict[str, Any]:
        """Classify one text.

        Returns a dict {'labels': [...], 'explanations': [...]} where
        labels are 0-based class indices; [-1] / [""] is returned when no
        class clears *threshold* or none has a mapping entry.
        """
        # Tokenize exactly the way MultiLabelDataset does so single-text
        # inference matches batched evaluation.
        encoding = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=self.config['max_len'],
            return_token_type_ids=False,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt',
        )

        with torch.no_grad():
            probs = self.model(
                input_ids=encoding['input_ids'].to(self.device),
                attention_mask=encoding['attention_mask'].to(self.device)
            )
            # squeeze() collapses the batch dimension for the single input.
            hits = probs.cpu().numpy().squeeze() > threshold

        predicted_indices = [idx for idx, hit in enumerate(hits) if hit]

        # Keep only indices that have an explanation in the mapping.
        valid_labels = []
        explanations = []
        for idx in predicted_indices:
            explanation = self.category_mapping.get(str(idx), "")
            if explanation:
                valid_labels.append(idx)
                explanations.append(explanation)

        if not valid_labels:
            return {"labels": [-1], "explanations": [""]}
        return {"labels": valid_labels, "explanations": explanations}


def main():
    """End-to-end validation: load test data, run batched prediction,
    report accuracy, and export correct/incorrect samples to Excel."""
    # Configuration parameters

    # Global configuration for the model and data processing
    config = {
        # Data file path (Excel format)
        'data_path': '../dataProcess/2025_10_9_traffic_data/test_data.xlsx',

        # Pretrained model path (qwen BERT Chinese base model)
        'model_path': '../models/nlp_bert_document-segmentation_chinese-base',

        # Path of the best fine-tuned checkpoint (PyTorch format)
        'saved_model': '../models/pretrained_models/best_model.pt',

        # Category mapping file (JSON: class index -> class name)
        'category_mapping_path': '../dataProcess/datasets/cls_mapping.json',

        # Maximum text length (truncation/padding length for BERT input)
        'max_len': 256,

        # Batch size (affects memory usage and throughput)
        'batch_size': 16,

        # Total number of classes for the classification task
        'num_labels': 19,

        # Upper bound on validation samples (limits set size, speeds up debugging)
        'limit': 10000
    }

    # 1. Load texts and labels (labels assumed to be in the second column)
    df = pd.read_excel(config['data_path'], header=None, skiprows=1)
    texts = df.iloc[:, 0].astype(str).tolist()
    labels = df.iloc[:, 1].tolist()  # labels assumed in second column

    # 2. Initialize tokenizer and data loader
    tokenizer = BertTokenizer.from_pretrained(config['model_path'])
    dataset = MultiLabelDataset(texts, labels, tokenizer, config['max_len'], limit=config['limit'])
    data_loader = DataLoader(dataset, batch_size=config['batch_size'])

    # 3. Load the model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    bert_config = BertConfig.from_pretrained(config['model_path'], num_labels=config['num_labels'])
    model = BertForMultiLabelClassification(bert_config).to(device)

    # Load the fine-tuned weights
    checkpoint = torch.load(config['saved_model'], map_location=device, weights_only=False)
    model.load_state_dict(checkpoint['model_state_dict'])

    # 4. Run prediction
    input_texts, true_labels, predictions = predict(model, data_loader, device, threshold=0.5)

    # 5. Print results (including ground-truth annotations and predictions)
    print(f"\n{'=' * 50}")
    print(f"正在验证前{config['limit']}个数据...")
    print(f"{'=' * 50}\n")

    # Load the category mapping file
    with open(config['category_mapping_path'], 'r', encoding='utf-8') as f:
        category_mapping = json.load(f)

    correct_count = 0
    # Correctly predicted single-label results
    correct_single_predictions = []
    # Incorrect prediction records
    incorrect_predictions = []
    # Tracks whether each class has at least one sample
    category_coverage = {str(i): False for i in range(config['num_labels'])}

    for i, (text, true_label, pred) in enumerate(zip(input_texts, true_labels, predictions)):
        # Format the ground-truth labels
        true_classes = format_labels(true_label, config['num_labels'])
        # Format the predicted labels
        pred_classes = [str(i) for i, p in enumerate(pred) if p]

        # Check whether the prediction is correct
        # Rule 1: ground truth empty but prediction non-empty counts as wrong
        # Rule 2: ground truth being a subset of the prediction counts as
        #         correct (keeps the original logic)
        is_correct = True
        if not true_classes:  # ground truth has no category
            if pred_classes:  # but prediction is non-empty
                is_correct = False
        else:  # ground truth has categories
            # Check whether the ground truth is a subset of the prediction
            is_correct = set(true_classes).issubset(set(pred_classes))

        if is_correct:
            correct_count += 1
            # Only keep single-label predictions for the export file
            if len(pred_classes) == 1:
                pred_class = pred_classes[0]
                # Look up the mapping value for the predicted class
                mapping_value = category_mapping.get(pred_class, "未知类别")
                correct_single_predictions.append({
                    'text': text,
                    'prediction': pred_class,
                    'mapping_value': mapping_value
                })
                # Mark this class as covered
                category_coverage[pred_class] = True
        else:
            # Collect the incorrect prediction record
            incorrect_predictions.append({
                '样本序号': i + 1,
                '输入文本': text,
                '原始标注': ', '.join(true_classes) if true_classes else '无类别',
                '预测结果': ', '.join(pred_classes) if pred_classes else '无类别'
            })

        print(f"样本 {i + 1}/{len(input_texts)}")
        print(f"输入内容: {text[:100]}{'...' if len(text) > 100 else ''}")
        print(f"原始标注: {', '.join(true_classes) if true_classes else '无类别'}")
        print(f"预测结果: {', '.join(pred_classes) if pred_classes else '无类别'}")
        print(f"状态: {'✓ 正确' if is_correct else '✗ 错误'}")
        print("-" * 80)

    # Print the overall accuracy
    accuracy = correct_count / len(input_texts)
    print(f"\n{'=' * 50}")
    print(f"验证完成! 准确率: {accuracy:.2%} ({correct_count}/{len(input_texts)})")
    print(f"错误预测数: {len(incorrect_predictions)}/{len(input_texts)}")
    print(f"{'=' * 50}")

    # Report classes with no covered sample
    missing_categories = [cat for cat, covered in category_coverage.items() if not covered]
    if missing_categories:
        print("\n以下类别没有正确预测的单个结果样本:")
        for cat in missing_categories:
            print(f"类别 {cat}: {category_mapping.get(cat, '未知类别')}")
    else:
        print("\n所有类别都有至少一个正确预测的单个结果样本")

    # Save the correctly predicted single-label results to an xlsx file
    if correct_single_predictions:
        output_df = pd.DataFrame(correct_single_predictions)
        output_path = 'correct_single_predictions.xlsx'
        output_df.to_excel(output_path, index=False)
        print(f"\n已保存 {len(correct_single_predictions)} 个正确预测的单个结果到 {output_path}")
        print("文件包含三列: text(输入文本), prediction(预测类别), mapping_value(类别编码)")
    else:
        print("\n没有找到正确预测的单个结果")

    # Save the incorrect prediction records to an xlsx file
    if incorrect_predictions:
        error_df = pd.DataFrame(incorrect_predictions)
        error_path = 'incorrect_predictions.xlsx'
        error_df.to_excel(error_path, index=False)
        print(f"\n已保存 {len(incorrect_predictions)} 个错误预测的记录到 {error_path}")
        print("文件包含四列: 样本序号, 输入文本, 原始标注, 预测结果")
    else:
        print("\n没有发现错误预测的记录")


if __name__ == '__main__':
    main()