from datetime import timedelta
import time
import pandas as pd
import numpy as np
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertForSequenceClassification, get_linear_schedule_with_warmup, BertModel, \
    BertConfig
from torch.optim import AdamW
from sklearn.metrics import multilabel_confusion_matrix, classification_report
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split
import matplotlib

matplotlib.use('Agg')  # 设置非交互式后端
import matplotlib.pyplot as plt
import os


# ====================== 数据加载与预处理 ======================
class MultiLabelDataset(Dataset):
    """Torch dataset pairing raw texts with multi-hot label vectors.

    Each item is tokenized lazily with the supplied HuggingFace tokenizer
    and padded/truncated to a fixed ``max_len``.
    """

    def __init__(self, texts, labels, tokenizer, max_len):
        self.texts = texts          # sequence of raw input strings
        self.labels = labels        # parallel sequence of multi-hot label rows
        self.tokenizer = tokenizer  # HuggingFace tokenizer (e.g. BertTokenizer)
        self.max_len = max_len      # sequence length after padding/truncation

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        raw_text = str(self.texts[idx])

        encoded = self.tokenizer.encode_plus(
            raw_text,
            add_special_tokens=True,
            max_length=self.max_len,
            return_token_type_ids=False,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt',
        )

        sample = {
            'text': raw_text,
            # encode_plus returns (1, max_len) tensors; flatten to (max_len,)
            'input_ids': encoded['input_ids'].flatten(),
            'attention_mask': encoded['attention_mask'].flatten(),
            'labels': torch.FloatTensor(self.labels[idx]),
        }
        return sample


def load_and_preprocess_data(file_path,num_labels):
    """Load texts and labels from an Excel file and multi-hot-encode the labels.

    Args:
        file_path: path to an .xlsx file; column 0 holds the text and label
            ids start at column 1. The first row is skipped (treated as header).
        num_labels: total class count for the multi-hot encoding (classes 0..num_labels-1).

    Returns:
        ((train_texts, val_texts, train_labels, val_labels), mlb) where the
        split is 90/10 with a fixed random seed and mlb is the fitted
        MultiLabelBinarizer (useful for decoding predictions later).
    """
    df = pd.read_excel(file_path, header=None, skiprows=1)
    texts = df.iloc[:, 0].astype(str).tolist()
    # NOTE(review): iloc[:, 1:2] reads ONLY column 1, so every sample ends up
    # with a single label id even though the model is multi-label — confirm
    # whether this should be iloc[:, 1:] to pick up all label columns.
    label_cols = df.iloc[:,1:2].fillna(0).astype(int).values.tolist()

    mlb = MultiLabelBinarizer(classes=range(0, num_labels ))
    labels = mlb.fit_transform(label_cols)

    # train_test_split returns (texts_train, texts_val, labels_train, labels_val)
    return train_test_split(texts, labels, test_size=0.1, random_state=42), mlb


def create_data_loaders(train_texts, val_texts, train_labels, val_labels, tokenizer, max_len=256, batch_size=16):
    """Build a shuffled training DataLoader and a sequential validation one."""
    loaders = []
    for split_texts, split_labels, shuffle in (
        (train_texts, train_labels, True),
        (val_texts, val_labels, False),
    ):
        dataset = MultiLabelDataset(split_texts, split_labels, tokenizer, max_len)
        loaders.append(DataLoader(dataset, batch_size=batch_size, shuffle=shuffle))
    return tuple(loaders)


# ====================== 模型定义 ======================
class BertForMultiLabelClassification(BertForSequenceClassification):
    """BERT encoder + linear head for multi-label classification.

    The dict returned by ``forward`` keeps the original contract: the
    ``'logits'`` entry holds sigmoid *probabilities* (downstream code
    thresholds them at 0.5). The loss, however, is now computed on the raw
    scores with ``BCEWithLogitsLoss``, which is mathematically identical to
    ``BCELoss(sigmoid(x))`` but numerically stable (uses the log-sum-exp
    trick, avoiding log(0) at saturated scores).
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.sigmoid = nn.Sigmoid()
        self.init_weights()

    def forward(self, input_ids=None, attention_mask=None, labels=None):
        """Return {'loss': BCE loss or None, 'logits': per-class probabilities}."""
        outputs = self.bert(input_ids, attention_mask=attention_mask)
        pooled_output = outputs[1]  # pooled [CLS] representation
        pooled_output = self.dropout(pooled_output)
        raw_scores = self.classifier(pooled_output)
        probs = self.sigmoid(raw_scores)

        loss = None
        if labels is not None:
            # Fix: BCEWithLogitsLoss on raw scores instead of BCELoss on
            # probabilities — same loss value, numerically stable gradients.
            loss_fct = nn.BCEWithLogitsLoss()
            loss = loss_fct(raw_scores, labels)

        return {'loss': loss, 'logits': probs}


def initialize_model(model_path, num_labels=19):
    """Build a fresh multi-label classifier and warm-start its BERT encoder.

    Args:
        model_path: directory containing a HuggingFace BERT config and a
            ``pytorch_model.bin`` weights file.
        num_labels: output dimension of the classification head.

    Returns:
        A BertForMultiLabelClassification with the BERT body initialized from
        the pretrained weights (classifier head stays randomly initialized).
    """
    config = BertConfig.from_pretrained(model_path, num_labels=num_labels)
    model = BertForMultiLabelClassification(config)

    # Load the BERT body weights only.
    # Fix: map_location="cpu" so this also works on CPU-only hosts when the
    # weights were saved from a GPU; weights_only=True avoids executing
    # arbitrary pickled code from the checkpoint file.
    state_dict = torch.load(f"{model_path}/pytorch_model.bin", map_location="cpu", weights_only=True)
    model.bert.load_state_dict(
        # strip the "bert." prefix so keys match the bare BertModel
        {k.replace("bert.", ""): v for k, v in state_dict.items() if "bert." in k},
        strict=False
    )
    return model


# ====================== 加载预训练模型函数 ======================
def load_pretrained_model(model_path, config_path, num_labels, device):
    """Load a model either from a .pt checkpoint (resume) or a pretrained dir.

    Returns:
        (model, start_epoch, best_val_loss). A non-.pt path is treated as a
        pretrained-model directory and yields a fresh training state.
    """
    if not model_path.endswith('.pt'):
        # Plain pretrained directory: start from scratch.
        fresh_model = initialize_model(model_path, num_labels).to(device)
        return fresh_model, 0, float('inf')

    # .pt checkpoint: restore weights plus saved training progress.
    # NOTE(review): weights_only=False unpickles arbitrary objects — only
    # load checkpoints from trusted sources.
    checkpoint = torch.load(model_path, map_location=device, weights_only=False)
    config = BertConfig.from_pretrained(config_path, num_labels=num_labels)
    model = BertForMultiLabelClassification(config).to(device)
    model.load_state_dict(checkpoint['model_state_dict'])

    # Recover training-state bookkeeping, with safe fallbacks.
    start_epoch = checkpoint.get('epoch', 0)
    best_val_loss = checkpoint.get('loss', float('inf'))

    print(f"✅ 成功加载预训练模型: {model_path}")
    print(f"📊 从第 {start_epoch} 轮继续训练")
    print(f"🏆 最佳验证损失: {best_val_loss:.4f}")

    return model, start_epoch, best_val_loss


# ====================== 训练与评估 ======================
def train_epoch(model, data_loader, optimizer, device, scheduler):
    """Run one training pass over ``data_loader``; return the mean batch loss."""
    model.train()
    batch_losses = []
    for batch in data_loader:
        optimizer.zero_grad()
        result = model(
            input_ids=batch['input_ids'].to(device),
            attention_mask=batch['attention_mask'].to(device),
            labels=batch['labels'].to(device),
        )
        batch_loss = result['loss']
        batch_loss.backward()
        # Clip gradient norm to stabilize BERT fine-tuning.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        scheduler.step()
        batch_losses.append(batch_loss.item())
    return np.mean(batch_losses)


def train_model(model, train_loader, val_loader, epochs, device, output_dir, start_epoch=0, best_val_loss=float('inf')):
    """Full training loop with periodic checkpointing (supports resuming).

    Args:
        model: classifier whose forward returns {'loss': ..., 'logits': ...}.
        train_loader: DataLoader of training batches.
        val_loader: DataLoader of validation batches.
        epochs: total epoch count (including any epochs already completed).
        device: device the batch tensors are moved to.
        output_dir: directory where .pt checkpoints are written.
        start_epoch: first epoch index to run (non-zero when resuming).
        best_val_loss: best validation loss so far (carried over on resume).

    Returns:
        (train_losses, val_losses): per-epoch mean losses for the epochs run here.
    """
    optimizer = AdamW(model.parameters(), lr=2e-5)
    # Linear decay with no warmup; schedule length covers only the epochs left.
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=0,
        num_training_steps=len(train_loader) * (epochs - start_epoch)
    )

    train_losses, val_losses = [], []
    # Fix: if start_epoch >= epochs the loop body never executes, and the
    # final torch.save below would otherwise raise NameError on these names.
    train_loss, val_loss = float('nan'), float('nan')
    total_start_time = time.time()  # wall-clock start of the whole run

    print(f"\n{'=' * 50}")
    print(f"Starting training for {epochs} epochs")
    print(f"Continuing from epoch {start_epoch}")
    print(f"Total batches per epoch: {len(train_loader)}")
    print(f"{'=' * 50}\n")

    for epoch in range(start_epoch, epochs):
        epoch_start_time = time.time()  # per-epoch timer

        # Training phase
        train_loss = train_epoch(model, train_loader, optimizer, device, scheduler)

        # Validation phase
        val_loss, _, _ = evaluate_model(model, val_loader, device)

        # Timing / ETA bookkeeping
        epoch_time = time.time() - epoch_start_time
        elapsed_time = time.time() - total_start_time
        current_epoch = epoch + 1
        avg_time_per_epoch = elapsed_time / (current_epoch - start_epoch) if current_epoch > start_epoch else epoch_time
        remaining_time = avg_time_per_epoch * (epochs - current_epoch)

        # Record per-epoch losses
        train_losses.append(train_loss)
        val_losses.append(val_loss)

        # Per-epoch progress report
        print(f"\nEpoch {current_epoch}/{epochs} | Time: {timedelta(seconds=int(epoch_time))}")
        print(f"Train loss: {train_loss:.4f} | Val loss: {val_loss:.4f}")
        print(f"Elapsed: {timedelta(seconds=int(elapsed_time))} | "
              f"Remaining: ~{timedelta(seconds=int(remaining_time))}")

        # Save the best model so far (by validation loss)
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save({
                'epoch': current_epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': val_loss,
            }, f"{output_dir}/best_model.pt")
            print(f"🌟 New best model saved (val_loss: {val_loss:.4f})")

        # Periodic checkpoint every 30 epochs
        if (current_epoch) % 30 == 0:
            checkpoint_path = f"{output_dir}/model_epoch_{current_epoch}.pt"
            torch.save({
                'epoch': current_epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'train_loss': train_loss,
                'val_loss': val_loss,
            }, checkpoint_path)
            print(f"💾 Checkpoint saved at epoch {current_epoch}")

    # Save the final model regardless of validation performance
    total_time = time.time() - total_start_time
    torch.save({
        'epoch': epochs,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'train_loss': train_loss,
        'val_loss': val_loss,
        'total_training_time': total_time,  # total wall-clock training time
    }, f"{output_dir}/final_model.pt")

    # Final summary
    print(f"\n{'=' * 50}")
    print(f"Training completed in {timedelta(seconds=int(total_time))}")
    print(f"Best validation loss: {best_val_loss:.4f}")
    print(f"{'=' * 50}")

    return train_losses, val_losses


def evaluate_model(model, data_loader, device):
    """Evaluate the model on ``data_loader``.

    Returns:
        (mean_loss, predictions, true_labels) where ``predictions`` is a
        boolean array from thresholding the model's probabilities at 0.5.
    """
    model.eval()
    batch_losses = []
    all_probs = []
    all_targets = []
    with torch.no_grad():
        for batch in data_loader:
            result = model(
                input_ids=batch['input_ids'].to(device),
                attention_mask=batch['attention_mask'].to(device),
                labels=batch['labels'].to(device),
            )
            batch_losses.append(result['loss'].item())
            all_probs.extend(result['logits'].cpu().numpy())
            all_targets.extend(batch['labels'].cpu().numpy())

    # Binarize probabilities with a fixed 0.5 threshold.
    preds = np.array(all_probs) > 0.5
    return np.mean(batch_losses), preds, np.array(all_targets)


# ====================== 可视化与保存 ======================
def plot_loss_curves(train_losses, val_losses, save_path='loss_curve.png'):
    """Save a PNG plotting training vs. validation loss per epoch."""
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.plot(train_losses, label='Train Loss')
    ax.plot(val_losses, label='Validation Loss')
    ax.set_title('Training and Validation Loss')
    ax.set_xlabel('Epoch')
    ax.set_ylabel('Loss')
    ax.legend()
    fig.savefig(save_path)
    plt.close(fig)  # release the figure (Agg backend, no display)


def plot_confusion_matrices(true_labels, pred_labels, num_classes=16, save_dir='confusion_matrices'):
    """Render one 2x2 confusion matrix per class into a single PNG grid.

    Args:
        true_labels: (n_samples, n_classes) binary ground-truth matrix.
        pred_labels: (n_samples, n_classes) binary prediction matrix.
        num_classes: kept for backward compatibility; the grid size is now
            derived from the actual number of confusion matrices.
        save_dir: output directory for the image (created if missing).
    """
    os.makedirs(save_dir, exist_ok=True)
    conf_matrices = multilabel_confusion_matrix(true_labels, pred_labels)

    # Fix: the subplot grid was hard-coded to 5x5, which ignored num_classes
    # and would crash for more than 25 classes; size it from the data.
    n = len(conf_matrices)
    cols = int(np.ceil(np.sqrt(n)))
    rows = int(np.ceil(n / cols))

    plt.figure(figsize=(20, 15))
    for i, matrix in enumerate(conf_matrices):
        plt.subplot(rows, cols, i + 1)
        plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.Blues)
        plt.title(f'Class {i}')
        plt.colorbar()
        plt.xlabel('Predicted')
        plt.ylabel('True')
    plt.tight_layout()
    plt.savefig(f'{save_dir}/all_classes.png')
    plt.close()


# ====================== 主函数 ======================
def main():
    """End-to-end pipeline: load data, build or resume a model, train, evaluate."""
    # Core configuration for training and data processing
    config = {
        # Training data path (Excel file containing texts and labels)
        'data_path': '../dataProcess/datasets/all_data.xlsx',

        # Pretrained BERT model path (Chinese document-segmentation base model)
        'model_path': '../models/nlp_bert_document-segmentation_chinese-base',

        # Pretrained model config path (used when loading from a .pt file)
        'config_path': '../models/nlp_bert_document-segmentation_chinese-base',

        # Pretrained checkpoint path (for resuming; None trains from scratch)
        'pretrained_checkpoint': None,  # e.g.: '../models/pretrained_models/best_model.pt'

        # Maximum input text length (longer is truncated, shorter is padded)
        # BERT typically supports up to 512; adjust per task
        'max_len': 256,

        # Samples per training batch
        # Larger batches train faster but need more GPU memory
        'batch_size': 16,

        # Number of epochs (full passes over the dataset)
        'epochs': 20,

        # Total number of classes (determines the output layer dimension)
        'num_labels': 19,

        # Output directory (models, logs, evaluation artifacts)
        'output_dir': 'output_train'
    }


    # Make sure the output directory exists
    os.makedirs(config['output_dir'], exist_ok=True)

    # 1. Data preparation
    (train_texts, val_texts, train_labels, val_labels), mlb = load_and_preprocess_data(config['data_path'],config['num_labels'])
    tokenizer = BertTokenizer.from_pretrained(config['model_path'])
    train_loader, val_loader = create_data_loaders(
        train_texts, val_texts, train_labels, val_labels,
        tokenizer, config['max_len'], config['batch_size']
    )

    # 2. Model initialization
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Resume from a pretrained checkpoint when one is configured and present
    if config['pretrained_checkpoint'] and os.path.exists(config['pretrained_checkpoint']):
        print("🚀 检测到预训练检查点，开始继续训练...")
        model, start_epoch, best_val_loss = load_pretrained_model(
            config['pretrained_checkpoint'], 
            config['config_path'], 
            config['num_labels'], 
            device
        )
    else:
        print("🆕 未检测到预训练检查点，开始从头训练...")
        model = initialize_model(config['model_path'], config['num_labels']).to(device)
        start_epoch = 0
        best_val_loss = float('inf')

    # 3. Train the model
    train_losses, val_losses = train_model(
        model, train_loader, val_loader,
        config['epochs'], device, config['output_dir'],
        start_epoch=start_epoch, best_val_loss=best_val_loss
    )

    # 4. Evaluation and visualization
    plot_loss_curves(train_losses, val_losses, f"{config['output_dir']}/loss_curve.png")
    _, val_preds, val_labels = evaluate_model(model, val_loader, device)
    plot_confusion_matrices(val_labels, val_preds, config['num_labels'], config['output_dir'])
    print(classification_report(val_labels, val_preds))

    # 5. Final model info
    print(f"\n✅ 训练完成！")
    print(f"📁 模型保存在: {config['output_dir']}")
    print(f"📊 最终验证集分类报告已生成")


# def continue_training_example():
#     """继续训练示例函数"""
#     print("\n📋 继续训练示例配置：")
#
#     # 示例配置
#     example_config = {
#         'data_path': '../dataProcess/datasets/all_data.xlsx',
#         'model_path': '../models/nlp_bert_document-segmentation_chinese-base',
#         'config_path': '../models/nlp_bert_document-segmentation_chinese-base',
#         'pretrained_checkpoint': 'models/pretrained_models/best_model.pt',  # 修改为权重实际路径
#         'max_len': 256,
#         'batch_size': 16,
#         'epochs': 40,  # 继续训练更多轮次
#         'num_labels': 19,
#         'output_dir': 'output_train'
#     }
#
#     print("要使用继续训练功能，请修改 main() 函数中的配置：")
#     print("1. 设置 'pretrained_checkpoint' 为预训练模型路径")
#     print("2. 调整 'epochs' 为总训练轮次")
#     print("3. 修改 'output_dir' 为新的输出目录")


if __name__ == '__main__':
    # Run the main training pipeline
    main()

    # Show the resume-training example
    # continue_training_example()