import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from torchvision.datasets import ImageFolder
from tqdm import tqdm
import matplotlib.pyplot as plt
import argparse
from torch.cuda.amp import autocast, GradScaler
from model import AdvancedResNet50
import random
from datetime import datetime
from colorama import init, Fore, Style
from trainer import PrettyTrainer

# Initialize colorama; autoreset restores default terminal colors after each print
init(autoreset=True)

# Seed every RNG the training pipeline touches so runs are reproducible
def set_seed(seed=42):
    """Seed Python, NumPy, and PyTorch (CPU + all CUDA devices) RNGs.

    Also forces deterministic cuDNN kernels and disables the cuDNN
    auto-tuner, trading some speed for reproducibility.
    """
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    # Deterministic convolution algorithms; benchmark=False avoids
    # nondeterministic kernel auto-selection.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# MixUp data augmentation (Zhang et al., 2018)
def mixup_data(x, y, alpha=0.2, device='cuda'):
    """Blend each sample in the batch with a randomly chosen partner.

    Args:
        x: input batch tensor (first dimension is the batch).
        y: target tensor aligned with x.
        alpha: Beta-distribution parameter; alpha <= 0 disables mixing (lam=1).
        device: device on which the permutation index tensor is placed.

    Returns:
        (mixed_x, y_a, y_b, lam) where mixed_x = lam*x + (1-lam)*x[perm],
        y_a is the original targets, y_b the permuted targets.
    """
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    shuffle = torch.randperm(x.size(0)).to(device)
    mixed_x = lam * x + (1 - lam) * x[shuffle, :]
    return mixed_x, y, y[shuffle], lam

def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Combine the losses against both label sets of a MixUp batch.

    Returns lam * loss(pred, y_a) + (1 - lam) * loss(pred, y_b).
    """
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b

# Heavy augmentation stack for training
def get_advanced_transforms(image_size=224):
    """Build the (train, test) transform pipelines with aggressive augmentation.

    Training applies flips, rotation, color jitter, and affine jitter;
    testing only resizes. Both normalize with ImageNet statistics.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_transform = transforms.Compose([
        transforms.Resize((image_size, image_size)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.RandomRotation(15),
        transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
        transforms.RandomAffine(degrees=0, translate=(0.1, 0.1), scale=(0.9, 1.1)),
        transforms.ToTensor(),
        normalize,
    ])

    test_transform = transforms.Compose([
        transforms.Resize((image_size, image_size)),
        transforms.ToTensor(),
        normalize,
    ])

    return train_transform, test_transform

# Lightweight augmentation stack for training
def get_simple_transforms(image_size=224):
    """Build the (train, test) transform pipelines with minimal augmentation.

    Training only adds a random horizontal flip on top of the resize;
    both pipelines normalize with ImageNet statistics.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_transform = transforms.Compose([
        transforms.Resize((image_size, image_size)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])

    test_transform = transforms.Compose([
        transforms.Resize((image_size, image_size)),
        transforms.ToTensor(),
        normalize,
    ])

    return train_transform, test_transform

def train_advanced(model, train_loader, test_loader, device, epochs=50, lr=1e-4, use_mixup=True, mixup_alpha=0.2, use_amp=True, patience=10):
    """
    Advanced training loop with optional MixUp augmentation, mixed-precision
    training, cosine-annealing LR schedule, and early stopping.

    Args:
        model: model instance (already moved to `device` by the caller)
        train_loader: DataLoader yielding (inputs, targets) training batches
        test_loader: DataLoader yielding (inputs, targets) validation batches
        device: device to run on
        epochs: maximum number of training epochs
        lr: learning rate for AdamW
        use_mixup: whether to apply MixUp (on roughly half the batches)
        mixup_alpha: Beta-distribution alpha parameter for MixUp
        use_amp: whether to use automatic mixed-precision training
        patience: early-stopping patience — stop after this many consecutive
            epochs without a validation-accuracy improvement

    Returns:
        best_acc: best validation accuracy reached during training
    """
    # Pretty trainer with a timestamped run directory for logs/plots
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    save_dir = os.path.join("run", f"AdvancedResNet50_{timestamp}")
    trainer = PrettyTrainer("AdvancedResNet50", epochs, save_dir=save_dir)

    # Directory where the best checkpoint is written
    model_dir = "model"
    os.makedirs(model_dir, exist_ok=True)

    # Log a short model summary
    trainer.model_summary({
        'model_type': 'AdvancedResNet50',
        'total_params': sum(p.numel() for p in model.parameters()),
        'trainable_params': sum(p.numel() for p in model.parameters() if p.requires_grad),
        'optimizer': 'AdamW',
        'learning_rate': lr,
        'loss_function': 'CrossEntropyLoss'
    })

    # Optimizer, loss, and cosine-annealing LR schedule over `epochs`
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)

    # Gradient scaler for mixed-precision training (None when AMP disabled)
    scaler = GradScaler() if use_amp else None

    # Best-model tracking and early-stopping state
    best_acc = 0.0
    best_epoch = 0
    no_improve_counter = 0

    # Per-epoch history, mirrored into the trainer's plots
    history = {
        'train_loss': [],
        'train_acc': [],
        'val_loss': [],
        'val_acc': []
    }

    start_time = time.time()
    trainer._log_system_info()

    for epoch in range(epochs):
        # ---------------- training phase ----------------
        model.train()
        train_loss = 0.0
        train_correct = 0
        train_total = 0

        # Single persistent tqdm bar to avoid flicker
        train_pbar = tqdm(train_loader,
                          desc=f"{Fore.GREEN}训练 Epoch {epoch+1}/{epochs}{Style.RESET_ALL}",
                          leave=True,
                          ncols=100,
                          position=0)

        for inputs, targets in train_pbar:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()

            # Defaults used when the batch is NOT mixed
            targets_a, targets_b, lam = targets, targets, 1.0

            # BUG FIX: sample the MixUp coin flip exactly ONCE per batch.
            # The original re-evaluated `np.random.random() > 0.5` separately
            # at the augmentation, loss, and accuracy steps, so a batch could
            # be mixed while its loss was computed against the unmixed labels
            # (and vice versa), corrupting the gradient and the reported acc.
            apply_mixup = use_mixup and np.random.random() > 0.5
            if apply_mixup:
                inputs, targets_a, targets_b, lam = mixup_data(
                    inputs, targets, alpha=mixup_alpha, device=device)

            if use_amp:
                # Forward pass under autocast; backward via the grad scaler
                with autocast():
                    outputs = model(inputs)
                    if apply_mixup:
                        loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)
                    else:
                        loss = criterion(outputs, targets)
                scaler.scale(loss).backward()
                scaler.step(optimizer)
                scaler.update()
            else:
                outputs = model(inputs)
                if apply_mixup:
                    loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)
                else:
                    loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()

            # Accumulate running metrics
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            train_total += targets.size(0)

            if apply_mixup:
                # Interpolated accuracy for mixed samples; .item() keeps the
                # accumulator a plain Python float rather than a tensor.
                train_correct += (lam * predicted.eq(targets_a).sum().float()
                                  + (1 - lam) * predicted.eq(targets_b).sum().float()).item()
            else:
                train_correct += predicted.eq(targets).sum().item()

            train_pbar.set_postfix({
                'loss': f"{loss.item():.4f}",
                'acc': f"{train_correct/train_total*100:.2f}%"
            })

        train_pbar.close()
        train_loss = train_loss / len(train_loader)
        train_acc = train_correct / train_total

        # ---------------- validation phase ----------------
        model.eval()
        test_loss = 0.0
        test_correct = 0
        test_total = 0

        test_pbar = tqdm(test_loader,
                         desc=f"{Fore.BLUE}测试{Style.RESET_ALL}",
                         leave=True,
                         ncols=100,
                         position=0)

        with torch.no_grad():
            for inputs, targets in test_pbar:
                inputs, targets = inputs.to(device), targets.to(device)

                if use_amp:
                    with autocast():
                        outputs = model(inputs)
                        loss = criterion(outputs, targets)
                else:
                    outputs = model(inputs)
                    loss = criterion(outputs, targets)

                test_loss += loss.item()
                _, predicted = outputs.max(1)
                test_total += targets.size(0)
                test_correct += predicted.eq(targets).sum().item()

                test_pbar.set_postfix({
                    'loss': f"{loss.item():.4f}",
                    'acc': f"{test_correct/test_total*100:.2f}%"
                })

        test_pbar.close()
        test_loss = test_loss / len(test_loader)
        test_acc = test_correct / test_total

        # Advance the cosine LR schedule once per epoch
        scheduler.step()

        # Record history
        history['train_loss'].append(train_loss)
        history['train_acc'].append(train_acc)
        history['val_loss'].append(test_loss)
        history['val_acc'].append(test_acc)

        metrics = {
            'train_loss': train_loss,
            'train_acc': train_acc,
            'val_loss': test_loss,
            'val_acc': test_acc
        }
        trainer.update_metrics(epoch+1, metrics, save_plot=(epoch % 5 == 0 or epoch == epochs - 1))

        # Checkpoint on improvement; otherwise advance the early-stop counter
        if test_acc > best_acc:
            best_acc = test_acc
            best_epoch = epoch
            torch.save(model.state_dict(), os.path.join(model_dir, "best_model_advanced.pth"))
            print(f"{Fore.CYAN}发现更好的模型！保存到 {os.path.join(model_dir, 'best_model_advanced.pth')}{Style.RESET_ALL}")
            no_improve_counter = 0
        else:
            no_improve_counter += 1
            print(f"{Fore.YELLOW}验证集准确率没有提升，已经连续 {no_improve_counter} 轮没有提升{Style.RESET_ALL}")

        # Early stopping
        if no_improve_counter >= patience:
            print(f"{Fore.RED}早停：验证集准确率已经连续 {patience} 轮没有提升，停止训练{Style.RESET_ALL}")
            break

        # Periodically refresh the training-curve plot
        if (epoch + 1) % 5 == 0 or epoch == epochs - 1:
            trainer._plot_training_history()

    # Final summary
    total_time = time.time() - start_time
    print(f"\n{Fore.GREEN}训练完成！{Style.RESET_ALL}")
    print(f"总用时: {total_time:.2f}秒")
    print(f"最佳验证准确率: {best_acc:.4f}，在第 {best_epoch+1} 轮")
    print(f"最佳模型已保存到: {os.path.join(model_dir, 'best_model_advanced.pth')}")

    # Save the final training curves
    trainer._plot_training_history()

    return best_acc

def main():
    """Run the full training pipeline with built-in default settings."""
    # Default hyper-parameters; no command-line parsing is performed.
    args = argparse.Namespace(
        data_dir='animal_data',        # dataset root directory
        batch_size=8,
        image_size=224,
        lr=1e-4,
        epochs=50,
        use_mixup=True,                # enable MixUp augmentation
        use_amp=True,                  # enable mixed-precision training
        advanced_augmentation=True,    # use the heavier augmentation stack
        use_split_dataset=True,        # expect pre-split train/test folders
        patience=10,                   # early-stopping patience (epochs)
    )

    # Fix all RNG seeds for reproducibility
    set_seed(42)

    # Prefer GPU when available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"{Fore.CYAN}使用设备: {device}{Style.RESET_ALL}")

    # Select the augmentation pipeline
    if args.advanced_augmentation:
        train_transform, test_transform = get_advanced_transforms(args.image_size)
        print(f"{Fore.GREEN}使用高级数据增强{Style.RESET_ALL}")
    else:
        train_transform, test_transform = get_simple_transforms(args.image_size)
        print(f"{Fore.YELLOW}使用简单数据增强{Style.RESET_ALL}")

    # Build the datasets
    if args.use_split_dataset:
        # Pre-split layout: <data_dir>/train and <data_dir>/test
        print(f"{Fore.GREEN}使用已分割的数据集{Style.RESET_ALL}")
        train_dataset = ImageFolder(os.path.join(args.data_dir, 'train'), transform=train_transform)
        test_dataset = ImageFolder(os.path.join(args.data_dir, 'test'), transform=test_transform)
        classes = train_dataset.classes
    else:
        # Split a single folder into train/test subsets (same scheme as 1.ipynb):
        # two ImageFolder views of the same data so each split gets its own transform.
        dataset = ImageFolder(args.data_dir, transform=train_transform)
        test_dataset_full = ImageFolder(args.data_dir, transform=test_transform)

        total_size = len(dataset)
        train_size = int(total_size * 0.83)  # roughly 5000/(5000+1044)

        shuffled = list(range(total_size))
        np.random.shuffle(shuffled)

        from torch.utils.data import Subset
        train_dataset = Subset(dataset, shuffled[:train_size])
        test_dataset = Subset(test_dataset_full, shuffled[train_size:])
        classes = dataset.classes

    # Data loaders
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                              num_workers=4, pin_memory=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False,
                             num_workers=4, pin_memory=True)

    print(f"{Fore.YELLOW}类别: {classes}{Style.RESET_ALL}")
    print(f"{Fore.YELLOW}类别数量: {len(classes)}{Style.RESET_ALL}")
    print(f"{Fore.YELLOW}训练样本: {len(train_dataset)}{Style.RESET_ALL}")
    print(f"{Fore.YELLOW}测试样本: {len(test_dataset)}{Style.RESET_ALL}")

    # Model
    model = AdvancedResNet50(num_classes=len(classes)).to(device)

    # Train
    best_acc = train_advanced(
        model=model,
        train_loader=train_loader,
        test_loader=test_loader,
        device=device,
        epochs=args.epochs,
        lr=args.lr,
        use_mixup=args.use_mixup,
        use_amp=args.use_amp,
        patience=args.patience
    )

    print(f"{Fore.GREEN}{Style.BRIGHT}训练完成！ 最佳准确率: {best_acc*100:.2f}%{Style.RESET_ALL}")

# Script entry point: run the training pipeline with the defaults in main()
if __name__ == '__main__':
    main()
