import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.optim import AdamW
import numpy as np
from tqdm import tqdm
import copy
import os

# 导入配置和模型
from configs.prototype_config import PrototypeConfig
from utils.data_utils import load_dataset
from models.prototype import PrototypeEnhancedCNNTransformer
from utils.metrics import accuracy, plot_metrics
from utils.visualization import visualize_prototypes
from utils.sampler import get_weighted_sampler


class EarlyStopper:
    def __init__(self, patience=5, delta=0.0):
        self.patience = patience
        self.delta = delta
        self.counter = 0
        self.best_score = None
        self.early_stop = False

    def __call__(self, val_loss):
        if self.best_score is None:
            self.best_score = val_loss
        elif val_loss > self.best_score - self.delta:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = val_loss
            self.counter = 0


def main():
    """Train the prototype-enhanced CNN-Transformer end to end.

    Pipeline: load the dataset, build a class-balanced train loader, train
    with a weighted combination of classification and prototype losses,
    evaluate each epoch (including the accuracy of the "fall" class),
    checkpoint the best model by overall validation accuracy, apply early
    stopping on validation loss, and finally save the last-best weights and
    diagnostic plots under ``results/``.
    """
    # Initialize configuration and output directory.
    config = PrototypeConfig()
    os.makedirs('results', exist_ok=True)

    # 1. Load and preprocess data.
    print("\n==== 加载数据 ====")
    train_dataset, test_dataset = load_dataset(config)
    print(f"训练集样本: {len(train_dataset)}, 测试集样本: {len(test_dataset)}")
    print(f"输入特征形状: {train_dataset[0][0].shape}")
    print(f"标签分布 - 训练集: {np.bincount(train_dataset.labels)}, 测试集: {np.bincount(test_dataset.labels)}")

    # 2. Build data loaders; weighted sampling counteracts class imbalance.
    weighted_sampler = get_weighted_sampler(train_dataset)

    train_loader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        sampler=weighted_sampler,
        pin_memory=config.device == "cuda"
    )
    test_loader = DataLoader(
        test_dataset,
        batch_size=config.batch_size,
        pin_memory=config.device == "cuda"
    )

    # 3. Initialize the model.
    print("\n==== 初始化模型 ====")
    model = PrototypeEnhancedCNNTransformer(config).to(config.device)
    print(f"模型参数量: {sum(p.numel() for p in model.parameters()) / 1e6:.2f}M")

    # 4. Optimizer and class-weighted loss.
    optimizer = AdamW(
        model.parameters(),
        lr=config.lr,
        weight_decay=config.weight_decay
    )

    # Class weights from the config re-balance the cross-entropy term.
    class_weights = torch.tensor(config.class_weights, dtype=torch.float32).to(config.device)
    criterion = nn.CrossEntropyLoss(weight=class_weights)

    early_stopper = EarlyStopper(
        patience=config.early_stopping_patience,
        delta=config.early_stopping_delta
    )

    # 5. Training loop.
    print("\n==== 开始训练 ====")
    best_acc = 0.0
    best_fall_acc = 0.0
    best_model_wts = copy.deepcopy(model.state_dict())
    history = {
        'train_loss': [],
        'train_acc': [],
        'val_loss': [],
        'val_acc': [],
        'fall_acc': []  # accuracy restricted to the fall class
    }

    for epoch in range(config.epochs):
        model.train()
        total_loss = 0.0
        total_prototype_loss = 0.0
        total_classification_loss = 0.0

        # Progress bar over training batches. An explicit batch counter is
        # used for the running averages instead of tqdm's internal `n`,
        # which is an implementation detail of the bar itself.
        pbar = tqdm(train_loader, desc=f"Epoch {epoch + 1}/{config.epochs}")
        for batch_idx, (X, y) in enumerate(pbar, start=1):
            X, y = X.to(config.device), y.to(config.device)

            # Forward pass; labels guide the prototype loss during training.
            optimizer.zero_grad()
            logits, prototype_loss, _ = model(X, y)
            classification_loss = criterion(logits, y)

            # Convex combination of the two loss terms.
            loss = (1 - config.prototype_loss_weight) * classification_loss + \
                   config.prototype_loss_weight * prototype_loss

            # Backward pass with gradient clipping.
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), config.clip_grad)
            optimizer.step()

            # Accumulate running statistics.
            total_loss += loss.item()
            total_classification_loss += classification_loss.item()
            total_prototype_loss += prototype_loss.item()

            pbar.set_postfix({
                'total_loss': f"{total_loss / batch_idx:.4f}",
                'cls_loss': f"{total_classification_loss / batch_idx:.4f}",
                'proto_loss': f"{total_prototype_loss / batch_idx:.4f}",
                'lr': f"{optimizer.param_groups[0]['lr']:.2e}"
            })

        # Evaluate after each epoch.
        train_acc, train_preds, train_labels = accuracy(model, train_loader, config.device)
        val_acc, val_preds, val_labels = accuracy(model, test_loader, config.device)

        # Accuracy restricted to the fall class.
        # NOTE(review): class index 10 is assumed to be the "fall" class —
        # confirm against the dataset's label mapping.
        fall_mask = np.array(val_labels) == 10
        fall_acc = np.mean(np.array(val_preds)[fall_mask] == np.array(val_labels)[fall_mask]) if any(fall_mask) else 0.0

        # Compute validation loss.
        # NOTE(review): the validation forward omits labels (unlike training)
        # — confirm the model computes prototype_loss sensibly without y.
        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for X, y in test_loader:
                X, y = X.to(config.device), y.to(config.device)
                logits, prototype_loss, _ = model(X)
                classification_loss = criterion(logits, y)
                loss = (1 - config.prototype_loss_weight) * classification_loss + \
                       config.prototype_loss_weight * prototype_loss
                val_loss += loss.item()

        val_loss /= len(test_loader)
        train_loss = total_loss / len(train_loader)

        # Record history.
        history['train_loss'].append(train_loss)
        history['val_loss'].append(val_loss)
        history['train_acc'].append(train_acc)
        history['val_acc'].append(val_acc)
        history['fall_acc'].append(fall_acc)

        print(f"Epoch {epoch + 1:02d}: "
              f"Train Loss = {train_loss:.4f}, Val Loss = {val_loss:.4f}, "
              f"Train Acc = {train_acc:.4f}, Val Acc = {val_acc:.4f}, "
              f"Fall Acc = {fall_acc:.4f}")

        # Checkpoint the best model (by overall accuracy) BEFORE the early
        # stopping check, so an epoch that both improves accuracy and
        # triggers the stop is still saved.
        if val_acc > best_acc:
            best_acc = val_acc
            best_fall_acc = fall_acc
            best_model_wts = copy.deepcopy(model.state_dict())
            torch.save({
                'epoch': epoch + 1,
                'model_state_dict': model.state_dict(),
                'val_acc': val_acc,
                'fall_acc': fall_acc,
                'config': vars(config)
            }, "results/best_model.pth")
            print(f"✨ 新的最佳模型保存，整体准确率: {val_acc:.4f}, Fall准确率: {fall_acc:.4f}")

        # Early-stopping check on validation loss.
        early_stopper(val_loss)
        if early_stopper.early_stop:
            print(f"Early stopping triggered at epoch {epoch + 1}")
            break

    # Training done: restore the best weights.
    model.load_state_dict(best_model_wts)
    print(f"训练完成，最佳整体准确率: {best_acc:.4f}, 对应Fall准确率: {best_fall_acc:.4f}")

    # Save the final (best-weight) model.
    torch.save({
        'model_state_dict': model.state_dict(),
        'config': vars(config)
    }, "results/final_model.pth")

    # Plot training curves.
    plot_metrics(history)

    # Visualize the learned prototypes.
    visualize_prototypes(model, test_loader, config.device)


# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()