import os
import torch
import argparse
import numpy as np
from torchvision import transforms
from my_zsl.train_utils.train_eval_utils import train_one_epoch, valid_one_epoch
from my_zsl.models.DecouplingNetwork import TripleDecouplingNetwork, TripleDecouplingLoss
from my_zsl.train_utils.dataset import create_dataloaders
from my_zsl.tools.tool import calculate_metric, initialize_results_file, append_to_results_file, make_save_dirs
from my_zsl.tools.distributed_utils import is_main_process


def main(args):
    """Train the Triple Decoupling Network and checkpoint the best-F1 model.

    Args:
        args: parsed ``argparse.Namespace`` (see ``get_args``) carrying the
            data, model, optimizer, scheduler and saving configuration.
    """
    # Device setup: fall back to CPU when CUDA is unavailable.
    device = torch.device(args.device if torch.cuda.is_available() else "cpu")

    # Create output directories (image dir is created but unused here).
    save_root = os.path.join(args.save_dir, f"exp_{args.exp_name}")
    img_dir, model_dir = make_save_dirs(save_root)

    # Preprocessing: resize to 32x32 and normalize with ImageNet statistics.
    data_transform = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    train_loader, val_loader = create_dataloaders(
        data_path=args.data_path,
        batch_size=args.batch_size,
        transform=data_transform,
        num_workers=args.num_workers,
        train_shuffle=True
    )

    # Model, loss function, optimizer and LR scheduler.
    model = TripleDecouplingNetwork(feature_dim=args.feature_dim).to(device)
    criterion = TripleDecouplingLoss(
        lambda_recon=args.lambda_recon,
        lambda_corr=args.lambda_corr,
        lambda_supervised=args.lambda_supervised
    )
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=args.lr,
        weight_decay=args.weight_decay
    )
    lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer,
        step_size=args.lr_step_size,
        gamma=args.lr_gamma
    )

    # Initialize the results file with a fixed column order.
    result_cols = ['epoch', 'train_loss', 'val_loss', 'accuracy', 'precision', 'recall', 'f1_score', 'lr']
    results_file = os.path.join(save_root, "training_results.txt")
    initialize_results_file(results_file, result_cols)

    # Optionally resume from a full checkpoint (model + optimizer + scheduler).
    best_f1 = 0.0
    start_epoch = 0
    if args.resume:
        checkpoint = torch.load(args.resume, map_location=device)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        start_epoch = checkpoint['epoch'] + 1
        best_f1 = checkpoint['best_f1']
        print(f"Resume training from epoch {start_epoch}, best f1: {best_f1:.4f}")

    # Class names come from the training dataset.
    classes = train_loader.dataset.classes

    # Main training loop.
    for epoch in range(start_epoch, args.epochs):
        print(f"\nEpoch {epoch}/{args.epochs - 1}")
        print("-" * 50)

        # Train one epoch; warmup only applies to the very first epoch.
        train_loss = train_one_epoch(
            model=model,
            device=device,
            criterion=criterion,
            train_loader=train_loader,
            optimizer=optimizer,
            epoch=epoch,
            warmup=(epoch == 0 and args.warmup)
        )

        # Validation pass.
        val_result = valid_one_epoch(
            model=model,
            device=device,
            criterion=criterion,
            val_loader=val_loader,
            epoch=epoch
        )

        # Aggregate (non per-class) evaluation metrics.
        metrics = calculate_metric(
            all_labels=val_result['y_true'],
            all_predictions=val_result['y_pred'],
            classes=classes,
            class_metric=False
        )

        # Report this epoch's results.
        print(f"Train Loss: {train_loss:.4f}")
        print(f"Val Loss: {val_result['val_loss']:.4f}")
        print(f"Accuracy: {metrics['accuracy']:.4f}")
        print(f"Precision: {metrics['precision']:.4f}")
        print(f"Recall: {metrics['recall']:.4f}")
        print(f"F1 Score: {metrics['f1-score']:.4f}")

        # Step the LR schedule once per epoch.
        lr_scheduler.step()

        # Append results to the results file (main process only).
        if is_main_process():
            result_dict = {
                'epoch': epoch,
                'train_loss': train_loss,
                'val_loss': val_result['val_loss'],
                'accuracy': metrics['accuracy'],
                'precision': metrics['precision'],
                'recall': metrics['recall'],
                'f1_score': metrics['f1-score'],
                'lr': optimizer.param_groups[0]['lr']
            }
            append_to_results_file(
                file_path=results_file,
                data_dict=result_dict,
                column_order=result_cols
            )

        # Track and save the best model. best_f1 is updated on every rank so
        # the comparison stays consistent, but only the main process writes
        # to disk (fix: the save was previously unguarded, unlike the other
        # file writes in this loop). The checkpoint now also records
        # epoch/best_f1 so it matches the periodic checkpoint layout.
        if metrics['f1-score'] > best_f1:
            best_f1 = metrics['f1-score']
            if is_main_process():
                checkpoint = {
                    'model_state_dict': model.state_dict(),
                    'epoch': epoch,
                    'best_f1': best_f1,
                }
                torch.save(checkpoint, os.path.join(model_dir, "best_model.pth"))
                print(f"Saved best model at epoch {epoch} with F1: {best_f1:.4f}")

        # Periodic full checkpoint (resumable; main process only).
        if (epoch + 1) % args.save_interval == 0 and is_main_process():
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'best_f1': best_f1
            }, os.path.join(model_dir, f"model_epoch_{epoch}.pth"))

    print(f"Training complete! Best F1 score: {best_f1:.4f}")


def get_args(args=None):
    """Build and parse the command-line arguments for training.

    Args:
        args: optional explicit argument list (useful for tests). When
            ``None``, argparse reads from ``sys.argv`` as usual.

    Returns:
        ``argparse.Namespace`` with the parsed options.
    """
    parser = argparse.ArgumentParser(description="Triple Decoupling Network Training")

    # Data configuration
    parser.add_argument("--data-path", type=str, default=r'D:\Code\2-ZSL\Zero-Shot-Learning\data\0HP\dataset',
                        help="Path to dataset")
    parser.add_argument("--batch-size", type=int, default=128, help="Batch size for training")
    parser.add_argument("--num-workers", type=int, default=0, help="Number of workers for data loading")

    # Model configuration
    parser.add_argument("--feature-dim", type=int, default=128, help="Dimension of feature vectors")
    parser.add_argument("--lambda-recon", type=float, default=1.0, help="Weight for reconstruction loss")
    parser.add_argument("--lambda-corr", type=float, default=1.0, help="Weight for correlation loss")
    parser.add_argument("--lambda-supervised", type=float, default=1.0, help="Weight for supervised loss")

    # Training configuration
    parser.add_argument("--epochs", type=int, default=100, help="Number of training epochs")
    parser.add_argument("--lr", type=float, default=0.1, help="Initial learning rate")
    parser.add_argument("--weight-decay", type=float, default=1e-4, help="Weight decay")
    parser.add_argument("--lr-step-size", type=int, default=30, help="Learning rate step size")
    parser.add_argument("--lr-gamma", type=float, default=0.1, help="Learning rate gamma")
    # Fix: without an explicit type, "--warmup False" yielded the truthy
    # string "False". Parse the value into a real bool (default stays True).
    parser.add_argument("--warmup",
                        type=lambda s: str(s).strip().lower() in ("1", "true", "yes"),
                        default=True, help="Use warmup in first epoch")

    # Saving configuration
    parser.add_argument("--save-dir", type=str, default="../../results", help="Directory to save results")
    parser.add_argument("--exp-name", type=str, default="exp1", help="Experiment name")
    parser.add_argument("--save-interval", type=int, default=10, help="Interval for saving models")
    parser.add_argument("--resume", type=str, default="", help="Path to resume checkpoint")

    # Device configuration
    parser.add_argument("--device", type=str, default="cuda:0", help="Device to use for training")
    # Fix: the original `parse_args(args if args else [])` turned None/empty
    # into [], so real command-line arguments were silently ignored and the
    # defaults were always used. Passing `args` through lets argparse fall
    # back to sys.argv when args is None.
    return parser.parse_args(args)


if __name__ == "__main__":
    # Parse CLI options, echo them for the run log, then start training.
    cli_args = get_args()
    print(cli_args)
    main(cli_args)