import torch
from torch.utils.data import DataLoader
import os
import numpy as np
from torch.optim import AdamW
from transformers import get_linear_schedule_with_warmup
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
from model_design import OptimizedTCMBert
from dataset_class import TCMBertDataset
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
import matplotlib.pyplot as plt


# Configure a CJK-capable font before any plotting (SimHei here; bundled with
# Windows, may need manual installation on Linux/Mac) so Chinese axis labels
# and titles render instead of showing placeholder boxes.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly (avoids boxes)

def save_checkpoint(model, optimizer, scheduler, epoch, history, best_val_acc, file_path='checkpoint.pth'):
    """Persist the full training state (weights, optimizer, scheduler, progress) to *file_path*.

    The saved dict is the counterpart of `load_checkpoint`: it carries the model /
    optimizer / scheduler state dicts plus the epoch counter, metric history and
    the best validation accuracy seen so far.
    """
    state = {
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        # scheduler is optional — store None when training runs without one
        'scheduler_state_dict': scheduler.state_dict() if scheduler else None,
        'epoch': epoch,
        'history': history,
        'best_val_acc': best_val_acc,
    }
    torch.save(state, file_path)
    print(f"检查点已保存至 {file_path}")


def load_checkpoint(model, optimizer, scheduler, file_path='checkpoint.pth'):
    """Restore training state previously written by `save_checkpoint`.

    Args:
        model / optimizer / scheduler: objects whose state dicts are restored
            in place (scheduler may be None or absent from the checkpoint).
        file_path: checkpoint path.

    Returns:
        (start_epoch, history, best_val_acc) — defaults (0, empty history, 0.0)
        when no checkpoint file exists.
    """
    empty_history = {'train_loss': [], 'val_loss': [], 'train_acc': [], 'val_acc': []}
    if not os.path.exists(file_path):
        print("未找到检查点，从头开始训练")
        return 0, empty_history, 0.0

    # map_location='cpu' lets a checkpoint saved on a GPU machine load on a
    # CPU-only host; load_state_dict moves tensors back to each param's device.
    checkpoint = torch.load(file_path, map_location='cpu')
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    # .get(): tolerate older checkpoints that predate the scheduler entry
    scheduler_state = checkpoint.get('scheduler_state_dict')
    if scheduler and scheduler_state:
        scheduler.load_state_dict(scheduler_state)

    print(f"从 epoch {checkpoint['epoch']} 继续训练")
    return (checkpoint['epoch'],
            checkpoint.get('history', empty_history),
            checkpoint.get('best_val_acc', 0.0))


def train_model(model, train_loader, val_loader, num_epochs=5, learning_rate=2e-5, weight_decay=0.01,
                warmup_steps=0, device='cuda', use_amp=True):
    """Fine-tune *model* with AdamW, a linear warmup LR schedule, optional AMP,
    and checkpoint-based resume.

    Args:
        model: classifier called as model(input_ids, attention_mask[, structured_features]).
        train_loader / val_loader: DataLoaders yielding dicts with 'input_ids',
            'attention_mask', 'label' and optionally 'structured_features'.
        num_epochs: total number of epochs (resume-aware via load_checkpoint).
        learning_rate / weight_decay: AdamW hyper-parameters.
        warmup_steps: warmup steps for get_linear_schedule_with_warmup.
        device: device the model and batches are moved to.
        use_amp: enable mixed-precision (autocast + GradScaler).

    Returns:
        (model, history, val_preds, val_true) — val_preds/val_true come from the
        last epoch in which validation ran (empty lists if it never did).

    Side effects: writes 'best_tcm_model.pth' and 'checkpoint.pth' whenever
    validation accuracy improves, and draws tqdm progress bars.
    """
    model.to(device)
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = AdamW(model.parameters(), lr=learning_rate, weight_decay=weight_decay)

    total_steps = len(train_loader) * num_epochs
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=warmup_steps,
        num_training_steps=total_steps
    )

    # Resume from checkpoint.pth when present; setdefault guards against an
    # older checkpoint whose history dict is missing one of the metric keys.
    start_epoch, history, best_val_acc = load_checkpoint(model, optimizer, scheduler)
    history.setdefault('train_loss', [])
    history.setdefault('val_loss', [])
    history.setdefault('train_acc', [])
    history.setdefault('val_acc', [])

    # torch.amp.GradScaler avoids the torch.cuda.amp.GradScaler deprecation warning
    from torch import amp
    scaler = amp.GradScaler(enabled=use_amp)

    # Custom tqdm bar format shared by all three progress bars
    bar_format = '{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}] {postfix}'

    # Overall (per-epoch) progress bar; total counts only the remaining epochs
    overall_pbar = tqdm(
        total=num_epochs - start_epoch,
        desc="总进度",
        position=0,
        bar_format=bar_format
    )

    # Validation outputs returned to the caller (stay empty if validation never runs)
    val_preds = []
    val_true = []

    for epoch in range(start_epoch, num_epochs):
        # Advance the overall bar at epoch start
        overall_pbar.set_description(f"总进度 (Epoch {epoch + 1}/{num_epochs})")
        overall_pbar.update(1)

        # ---- training phase ----
        model.train()
        train_loss = 0.0
        train_correct = 0
        train_total = 0

        # Per-batch training progress bar (nested under the overall bar)
        train_pbar = tqdm(
            train_loader,
            desc=f"训练",
            unit="batch",
            position=1,
            leave=False,
            bar_format=bar_format,
            mininterval=1.0
        )

        for batch in train_pbar:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['label'].to(device)

            # structured_features is optional — models may take text-only input
            structured_features = batch['structured_features'].to(device) if 'structured_features' in batch else None

            optimizer.zero_grad()

            with autocast(enabled=use_amp):
                outputs = model(input_ids, attention_mask, structured_features)
                loss = criterion(outputs, labels)

            # Weight by batch size so the epoch average is per-sample
            train_loss += loss.item() * input_ids.size(0)

            if use_amp:
                scaler.scale(loss).backward()
                scaler.step(optimizer)
                scaler.update()
            else:
                loss.backward()
                optimizer.step()
            # NOTE(review): scheduler.step() also runs when GradScaler skipped the
            # optimizer step (inf/NaN grads) — LR drifts slightly ahead; confirm intended.
            scheduler.step()

            _, preds = torch.max(outputs, 1)
            train_total += labels.size(0)
            train_correct += (preds == labels).sum().item()

            # Live postfix: running loss/accuracy and current learning rate
            train_acc_current = train_correct / train_total if train_total != 0 else 0
            train_pbar.set_postfix({
                'loss': f"{loss.item():.4f}",
                'acc': f"{train_acc_current:.4f}",
                'lr': f"{scheduler.get_last_lr()[0]:.2e}"
            })

        train_pbar.close()  # close explicitly so nested bars don't linger

        avg_train_loss = train_loss / len(train_loader.dataset)
        train_acc = train_correct / train_total
        history['train_loss'].append(avg_train_loss)
        history['train_acc'].append(train_acc)

        # ---- validation phase (skipped when the loader is missing/empty) ----
        if val_loader is not None and len(val_loader) > 0:
            model.eval()
            val_loss = 0.0
            val_correct = 0
            val_total = 0
            val_preds = []  # reset so only the latest epoch's predictions are returned
            val_true = []  # reset alongside val_preds

            # Per-batch validation progress bar
            val_pbar = tqdm(
                val_loader,
                desc=f"验证",
                unit="batch",
                position=1,
                leave=False,
                bar_format=bar_format,
                mininterval=1.0
            )

            with torch.no_grad():
                for batch in val_pbar:
                    input_ids = batch['input_ids'].to(device)
                    attention_mask = batch['attention_mask'].to(device)
                    labels = batch['label'].to(device)
                    structured_features = batch['structured_features'].to(
                        device) if 'structured_features' in batch else None

                    with autocast(enabled=use_amp):
                        outputs = model(input_ids, attention_mask, structured_features)
                        loss = criterion(outputs, labels)

                    val_loss += loss.item() * input_ids.size(0)

                    _, preds = torch.max(outputs, 1)
                    val_total += labels.size(0)
                    val_correct += (preds == labels).sum().item()
                    val_preds.extend(preds.cpu().numpy())
                    val_true.extend(labels.cpu().numpy())

                    # Live postfix: running validation loss/accuracy
                    val_acc_current = val_correct / val_total if val_total != 0 else 0
                    val_pbar.set_postfix({
                        'loss': f"{loss.item():.4f}",
                        'acc': f"{val_acc_current:.4f}"
                    })

            val_pbar.close()  # close explicitly so nested bars don't linger

            avg_val_loss = val_loss / len(val_loader.dataset)
            val_acc = val_correct / val_total
            history['val_loss'].append(avg_val_loss)
            history['val_acc'].append(val_acc)

            # tqdm-safe print of the epoch's validation metrics
            overall_pbar.write(f"Epoch {epoch + 1} 验证结果: loss={avg_val_loss:.4f}, acc={val_acc:.4f}")
        else:
            # Defaults when there is no validation set (inf loss, zero accuracy
            # also means the best-model branch below never fires)
            avg_val_loss = float('inf')
            val_acc = 0.0
            overall_pbar.write("警告: 验证集为空，使用默认值")

        # Surface this epoch's metrics on the overall bar
        overall_pbar.set_postfix({
            'train_loss': f"{avg_train_loss:.4f}",
            'train_acc': f"{train_acc:.4f}",
            'val_loss': f"{avg_val_loss:.4f}",
            'val_acc': f"{val_acc:.4f}",
            'best_val': f"{best_val_acc:.4f}"
        })

        # Save best weights + a resumable checkpoint on validation improvement.
        # NOTE(review): the checkpoint is only written on improvement, so a crash
        # in a non-improving epoch resumes from the last *best* epoch — confirm intended.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), 'best_tcm_model.pth')
            save_checkpoint(model, optimizer, scheduler, epoch + 1, history, best_val_acc)
            overall_pbar.write(f"  🔔 保存最佳模型（验证准确率 {best_val_acc:.4f}）")

    overall_pbar.close()
    return model, history, val_preds, val_true



def evaluate_model(model, val_loader, syndrome_list, device='cuda'):
    """Run inference over *val_loader*, print accuracy and a classification
    report, and save a confusion-matrix heatmap to 'confusion_matrix.png'.

    Args:
        model: classifier called as model(input_ids, attention_mask[, structured_features]).
        val_loader: DataLoader yielding dicts with 'input_ids', 'attention_mask',
            'label' and optionally 'structured_features'.
        syndrome_list: sequence mapping class index -> syndrome display name.
        device: inference device.

    Returns:
        (accuracy, report_str, confusion_matrix_ndarray)

    Raises:
        ValueError: if the confusion matrix is not a square 2-D ndarray.
    """
    model.eval()
    val_preds = []
    val_true = []

    with torch.no_grad():
        for batch in val_loader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['label'].to(device)

            if 'structured_features' in batch:
                structured_features = batch['structured_features'].to(device)
                outputs = model(input_ids, attention_mask, structured_features)
            else:
                outputs = model(input_ids, attention_mask)

            _, preds = torch.max(outputs, 1)
            val_preds.extend(preds.cpu().numpy())
            val_true.extend(labels.cpu().numpy())

    # Overall accuracy
    accuracy = accuracy_score(val_true, val_preds)
    print(f"模型准确率: {accuracy:.4f}")

    # Report only on classes actually present in the ground truth
    unique_classes = sorted(set(val_true))
    valid_syndrome_list = [syndrome_list[i] for i in unique_classes]

    # Confusion matrix restricted to those classes (predictions of other classes are dropped)
    cm = confusion_matrix(val_true, val_preds, labels=unique_classes)

    # Sanity check: must be a square 2-D matrix
    if not isinstance(cm, np.ndarray) or cm.ndim != 2 or cm.shape[0] != cm.shape[1]:
        # Bug fix: the original message was a plain string, so "{cm.shape}" printed literally
        raise ValueError(f"混淆矩阵格式异常，应为二维方阵，当前形状为 {cm.shape}")

    # Heatmap of the confusion matrix
    plt.figure(figsize=(20, 18))
    sns.heatmap(
        cm,
        annot=True,
        fmt='d',
        cmap='Blues',
        xticklabels=valid_syndrome_list,
        yticklabels=valid_syndrome_list,
        annot_kws={"size": 6},
    )
    plt.xticks(fontsize=8)
    plt.yticks(fontsize=8)
    plt.xlabel('预测证型', fontsize=12)
    plt.ylabel('真实证型', fontsize=12)
    plt.title('混淆矩阵', fontsize=14)
    plt.tight_layout()
    plt.savefig('confusion_matrix.png')
    plt.close()  # release the large figure instead of leaking it

    # Bug fix: pass labels= so target_names stays aligned even when predictions
    # contain classes absent from val_true (otherwise sklearn raises a length
    # mismatch). zero_division=0 silences warnings for never-predicted classes.
    report = classification_report(val_true, val_preds, labels=unique_classes,
                                   target_names=valid_syndrome_list, zero_division=0)
    print("分类报告:\n", report)

    return accuracy, report, cm


def plot_training_history(history):
    """Plot loss and accuracy curves from a training *history* dict and save
    the figure to 'training_history.png'.

    Expects the keys 'train_loss', 'val_loss', 'train_acc', 'val_acc', each a
    per-epoch list of floats.
    """
    plt.figure(figsize=(12, 4))

    # (subplot index, train key, train label, val key, val label, y-axis label, title)
    panels = [
        (1, 'train_loss', '训练损失', 'val_loss', '验证损失', 'Loss', '损失曲线'),
        (2, 'train_acc', '训练准确率', 'val_acc', '验证准确率', 'Accuracy', '准确率曲线'),
    ]
    for idx, train_key, train_label, val_key, val_label, y_label, title in panels:
        plt.subplot(1, 2, idx)
        plt.plot(history[train_key], label=train_label)
        plt.plot(history[val_key], label=val_label)
        plt.xlabel('Epoch')
        plt.ylabel(y_label)
        plt.legend()
        plt.title(title)

    plt.tight_layout()
    plt.savefig('training_history.png')


# Example usage
if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Pseudo-code assuming datasets and model have already been prepared:
    # train_dataset = TCMBertDataset(...)
    # val_dataset = TCMBertDataset(...)
    # train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
    # val_loader = DataLoader(val_dataset, batch_size=8, shuffle=False)
    # model = OptimizedTCMBert(num_classes=..., model_path=..., use_structured=...)

    # Uncomment the following for an actual training run:
    # model, history, val_preds, val_true = train_model(
    #     model, train_loader, val_loader,
    #     num_epochs=5, device=device
    # )
    # evaluate_model(model, val_loader, syndrome_list=["证型1", "证型2"])
    # plot_training_history(history)