import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, Subset
from torchvision import transforms, models
from sklearn.model_selection import KFold
from PIL import Image
import logging
from datetime import datetime
from tqdm import tqdm
import argparse  # command-line argument parsing
from dataset import CatDogDataset
from net import CatDogClassifier

# -------------------- 0. Logging setup --------------------
def setup_logging(log_dir='./logs'):
    """Configure root logging to a timestamped file and to the console.

    Creates ``log_dir`` if it does not exist, then installs a
    ``FileHandler`` writing to ``training_<YYYYmmdd_HHMMSS>.log`` plus a
    ``StreamHandler`` at INFO level.

    Args:
        log_dir: directory in which the log file is created.

    Returns:
        Full path of the log file that was configured.
    """
    os.makedirs(log_dir, exist_ok=True)

    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    log_file = os.path.join(log_dir, f'training_{stamp}.log')

    handlers = [logging.FileHandler(log_file), logging.StreamHandler()]
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=handlers,
    )
    return log_file

# -------------------- 3. Training and validation functions --------------------
def train_model(train_loader, val_loader, model, criterion, optimizer, args, fold=0):
    """Train ``model`` with linear LR warmup, validating after every epoch.

    Args:
        train_loader: DataLoader yielding ``(images, labels)`` training batches.
        val_loader: DataLoader used for per-epoch validation.
        model: classifier to train (already moved to its target device).
        criterion: loss function (e.g. ``nn.CrossEntropyLoss``).
        optimizer: optimizer whose learning rate is scaled during warmup.
        args: parsed CLI namespace; uses ``epochs``, ``learning_rate``,
            ``warmup_ratio`` and ``model_dir``.
        fold: cross-validation fold index, used for logging/checkpoint names.

    Returns:
        Best validation accuracy observed across all epochs.
    """
    # Derive the device from the model instead of relying on a module-level
    # `device` global, so this function also works when imported elsewhere.
    device = next(model.parameters()).device

    best_val_acc = 0.0
    batches_per_epoch = len(train_loader)
    total_steps = args.epochs * batches_per_epoch
    warmup_steps = total_steps * args.warmup_ratio  # total number of warmup steps

    for epoch in range(args.epochs):
        model.train()
        epoch_train_loss = 0.0
        epoch_train_correct = 0
        epoch_train_total = 0

        # Training phase (with progress bar)
        train_bar = tqdm(train_loader, desc=f'Fold {fold} Epoch {epoch+1}/{args.epochs} [Train]')
        for batch_idx, (images, labels) in enumerate(train_bar):
            # Global step counter across epochs; drives the warmup schedule
            global_step = epoch * batches_per_epoch + batch_idx

            # Linear learning-rate warmup: scale the LR up to args.learning_rate
            if global_step < warmup_steps:
                lr_scale = min(1.0, float(global_step + 1) / warmup_steps)
                for param_group in optimizer.param_groups:
                    param_group['lr'] = args.learning_rate * lr_scale

            images, labels = images.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Per-batch metrics
            batch_loss = loss.item()
            _, predicted = torch.max(outputs.data, 1)
            batch_total = labels.size(0)
            batch_correct = (predicted == labels).sum().item()
            batch_acc = batch_correct / batch_total

            # Accumulate epoch-level metrics
            epoch_train_loss += batch_loss
            epoch_train_correct += batch_correct
            epoch_train_total += batch_total

            # Live progress-bar readout of batch metrics and current LR
            current_lr = optimizer.param_groups[0]['lr']
            train_bar.set_postfix({
                'batch': f'{batch_idx+1}/{batches_per_epoch}',
                'loss': f'{batch_loss:.4f}',
                'acc': f'{batch_acc:.4f}',
                'lr': f'{current_lr:.6f}'
            })

            # Log every 10th batch to keep the log file compact
            if batch_idx % 10 == 0:
                logging.info(
                    f"Fold {fold} | Epoch {epoch+1} | Batch {batch_idx+1}/{batches_per_epoch} | "
                    f"Train Loss: {batch_loss:.4f} | Train Acc: {batch_acc:.4f} | LR: {current_lr:.6f}"
                )

        # Epoch-level training metrics
        epoch_train_acc = epoch_train_correct / epoch_train_total
        avg_epoch_train_loss = epoch_train_loss / batches_per_epoch

        # Validation phase
        val_loss, val_acc = evaluate_model(val_loader, model, criterion, fold, epoch)

        # Epoch summary (console + log file)
        print(f"\nFold {fold} | Epoch {epoch+1}/{args.epochs} Summary:")
        print(f"Train Loss: {avg_epoch_train_loss:.4f} | Train Acc: {epoch_train_acc:.4f}")
        print(f"Val Loss: {val_loss:.4f} | Val Acc: {val_acc:.4f}")

        logging.info(f"\nFold {fold} | Epoch {epoch+1}/{args.epochs} Summary:")
        logging.info(f"Train Loss: {avg_epoch_train_loss:.4f} | Train Acc: {epoch_train_acc:.4f}")
        logging.info(f"Val Loss: {val_loss:.4f} | Val Acc: {val_acc:.4f}")

        # Keep the checkpoint with the best validation accuracy so far
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            save_model(model, fold, epoch, val_acc, args.model_dir)

    return best_val_acc

def evaluate_model(loader, model, criterion, fold=None, epoch=None):
    """Evaluate ``model`` on ``loader`` without gradient tracking.

    Args:
        loader: DataLoader yielding ``(images, labels)`` batches.
        model: classifier to evaluate (switched to eval mode here).
        criterion: loss function applied to each batch.
        fold: optional fold index, used only in progress-bar text and logs.
        epoch: optional 0-based epoch index; when given, enables per-batch
            logging and an epoch-aware progress-bar description.

    Returns:
        Tuple ``(mean batch loss, accuracy)`` over the whole loader.
    """
    # Derive the device from the model instead of relying on a module-level
    # `device` global, so evaluation also works when imported elsewhere.
    device = next(model.parameters()).device

    model.eval()
    val_loss = 0.0
    correct = 0
    total = 0

    # Validation progress bar; epoch-aware description when epoch is known
    val_bar = tqdm(loader, desc=f'Fold {fold} Epoch {epoch+1} [Val]') if epoch is not None else tqdm(loader, desc='Evaluating')

    with torch.no_grad():
        for batch_idx, (images, labels) in enumerate(val_bar):
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)

            # Per-batch metrics
            batch_loss = loss.item()
            _, predicted = torch.max(outputs.data, 1)
            batch_total = labels.size(0)
            batch_correct = (predicted == labels).sum().item()
            batch_acc = batch_correct / batch_total

            # Accumulate running metrics
            val_loss += batch_loss
            correct += batch_correct
            total += batch_total

            # Live progress-bar readout of batch metrics
            val_bar.set_postfix({
                'batch': f'{batch_idx+1}/{len(loader)}',
                'loss': f'{batch_loss:.4f}',
                'acc': f'{batch_acc:.4f}'
            })

            # Log every 5th batch, but only during training-time validation
            if batch_idx % 5 == 0 and epoch is not None:
                logging.info(
                    f"Fold {fold} | Epoch {epoch+1} | Val Batch {batch_idx+1}/{len(loader)} | "
                    f"Val Loss: {batch_loss:.4f} | Val Acc: {batch_acc:.4f}"
                )

    return val_loss/len(loader), correct/total

# -------------------- 4. Model checkpointing --------------------
def save_model(model, fold, epoch, val_acc, model_dir='./saved_models'):
    """Persist ``model``'s state_dict under ``model_dir``.

    The file is named ``fold{fold}_epoch{epoch}_acc{val_acc:.4f}.pth``;
    the directory is created if missing, and the destination is both
    logged and printed.
    """
    os.makedirs(model_dir, exist_ok=True)

    destination = os.path.join(model_dir, f"fold{fold}_epoch{epoch}_acc{val_acc:.4f}.pth")
    torch.save(model.state_dict(), destination)
    logging.info(f"Saved model to: {destination}")
    print(f"Saved model to: {destination}")

# -------------------- 5. Argument parsing --------------------
def get_args(argv=None):
    """Parse command-line arguments for training.

    Args:
        argv: optional list of argument strings; ``None`` (the default)
            makes argparse read ``sys.argv[1:]``, preserving the original
            CLI behavior.

    Returns:
        ``argparse.Namespace`` with the training configuration.
    """
    parser = argparse.ArgumentParser(description='Train the CatDog Classifier')
    parser.add_argument('--epochs', '-e', type=int, default=2, help='Number of epochs')
    parser.add_argument('--batch-size', '-b', type=int, default=64, help='Batch size')
    parser.add_argument('--learning-rate', '-l', type=float, default=0.001, help='Learning rate')
    parser.add_argument('--data-dir', '-d', type=str, default=r'data/train', help='Directory of training data')
    parser.add_argument('--model-dir', '-m', type=str, default='./saved_models', help='Directory to save models')
    parser.add_argument('--log-dir', '-o', type=str, default='./logs', help='Directory to save logs')
    parser.add_argument('--folds', '-f', type=int, default=5, help='Number of folds for cross-validation')
    parser.add_argument('--seed', '-s', type=int, default=42, help='Random seed')
    # The previous default was a developer-machine checkpoint path
    # (saved_models\fold0_epoch2_acc0.9344.pth), which crashes on any other
    # machine. An empty default means "train from scratch" unless a
    # checkpoint is explicitly supplied (main guards with `if args.load`).
    parser.add_argument('--load', type=str, default='', help='Path to load a pretrained model')
    parser.add_argument('--amp', action='store_true', help='Use mixed precision training')
    parser.add_argument('--warmup-ratio', type=float, default=0.1, help='Ratio of total steps for learning rate warmup (default: 0.1)')
    return parser.parse_args(argv)

# -------------------- 6. Main pipeline --------------------
if __name__ == '__main__':
    args = get_args()
    
    # Seed torch and numpy RNGs for reproducibility
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    
    # Initialize logging and record the full run configuration
    log_file = setup_logging(args.log_dir)
    logging.info("Initialized logging at: " + log_file)
    logging.info("\nTraining arguments:")
    for arg, value in vars(args).items():
        logging.info(f"{arg}: {value}")
    
    # Select CUDA when available, otherwise fall back to CPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logging.info(f"Using device: {device}")
    print(f"\nUsing device: {device}")
    
    # Data augmentation and normalization (ImageNet mean/std)
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    
    # Load the full dataset once; folds are index-based views over it
    dataset = CatDogDataset(args.data_dir, transform=transform)
    
    # K-fold cross-validation over dataset indices
    kf = KFold(n_splits=args.folds, shuffle=True, random_state=args.seed)
    fold_results = []
    
    for fold, (train_idx, val_idx) in enumerate(kf.split(dataset)):
        logging.info(f"\n=== Starting Fold {fold + 1} ===")
        print(f"\n=== Starting Fold {fold + 1} ===")
        
        # Split data for this fold
        # NOTE(review): DataLoader is left at its default num_workers=0
        # (single-process loading) — consider raising it if I/O-bound.
        train_subset = Subset(dataset, train_idx)
        val_subset = Subset(dataset, val_idx)
        train_loader = DataLoader(train_subset, batch_size=args.batch_size, shuffle=True)
        val_loader = DataLoader(val_subset, batch_size=args.batch_size)
        
        # Fresh model per fold so folds do not leak weights into each other
        model = CatDogClassifier().to(device)
        
        # Optionally load a pretrained checkpoint (skipped when --load is empty)
        # NOTE(review): torch.load has no map_location — a GPU-saved
        # checkpoint would fail to load on a CPU-only machine; confirm.
        if args.load:
            model.load_state_dict(torch.load(args.load))
            logging.info(f"Loaded pretrained model from: {args.load}")
        
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
        
        # Train and validate this fold
        train_model(train_loader, val_loader, model, criterion, optimizer, args, fold=fold)
        
        # Final validation pass with the last-epoch weights (the best
        # checkpoint saved by train_model is not reloaded here)
        val_loss, val_acc = evaluate_model(val_loader, model, criterion)
        fold_results.append(val_acc)
        
        logging.info(f"Fold {fold+1} Final Val Accuracy: {val_acc:.4f}")
        print(f"Fold {fold+1} Final Val Accuracy: {val_acc:.4f}")
    
    # -------------------- 7. Cross-validation summary --------------------
    logging.info("\n=== Final Cross-Validation Results ===")
    logging.info(f"All Fold Accuracies: {fold_results}")
    logging.info(f"Mean Accuracy: {np.mean(fold_results):.4f} ± {np.std(fold_results):.4f}")

    print("\n=== Final Cross-Validation Results ===")
    print(f"All Fold Accuracies: {fold_results}")
    print(f"Mean Accuracy: {np.mean(fold_results):.4f} ± {np.std(fold_results):.4f}")