import os
import torch
import torch.nn as nn
import torch.optim as optim
from lightning.fabric import Fabric
from lightning.fabric.strategies import DDPStrategy
import torch.distributed as dist
from torch.utils.data import DataLoader
from models.yolo import YOLOv8
from models.loss import YOLOLoss
from data.dataset import create_dataloader
import argparse
import logging
from utils.logger import setup_logger

# Set up the logger (use DEBUG level during development, INFO in production)
logger = setup_logger(level=logging.DEBUG)  # or logging.INFO


def train_step(batch, model, criterion):
    """Run one forward pass on a single batch and return its loss.

    Args:
        batch: An ``(images, targets)`` pair as yielded by the dataloader.
        model: Callable mapping images to predictions.
        criterion: Callable computing the loss from predictions and targets.

    Returns:
        The loss value produced by ``criterion``.
    """
    images, targets = batch
    return criterion(model(images), targets)


def main(args):
    """Run distributed YOLOv8 training with Lightning Fabric.

    Builds the model, optimizer, criterion and dataloaders, wraps them with
    Fabric for multi-GPU DDP training, then runs the epoch loop with periodic
    validation and per-epoch checkpointing.

    Args:
        args: Parsed CLI namespace (see the argparse definition in
            ``__main__``): data directories, output dir, batch size, GPU
            count, epochs, class count, image size, learning rate, worker
            count, and log/validation intervals.
    """
    # Initialize Fabric for multi-GPU training
    logger.info("Starting training with arguments: %s", vars(args))
    fabric = Fabric(accelerator="cuda", devices=args.num_gpus, strategy="ddp")
    fabric.launch()
    logger.info("Initialized Fabric with %d GPUs", args.num_gpus)

    # Create model, optimizer, and loss function
    model = YOLOv8(num_classes=args.num_classes)
    optimizer = optim.AdamW(model.parameters(), lr=args.learning_rate)
    criterion = YOLOLoss(num_classes=args.num_classes)
    logger.info("Created model, optimizer and criterion")

    # Wrap model/optimizer so Fabric handles device placement and DDP.
    model, optimizer = fabric.setup(model, optimizer)
    logger.debug("Model and optimizer setup with Fabric")

    # Create dataloaders (augmentation/shuffling only for training)
    train_loader = create_dataloader(
        img_dir=args.train_img_dir,
        label_dir=args.train_label_dir,
        batch_size=args.batch_size,
        image_size=args.image_size,
        num_workers=args.num_workers,
        augment=True
    )
    logger.debug("Created training dataloader with %d batches", len(train_loader))

    val_loader = create_dataloader(
        img_dir=args.val_img_dir,
        label_dir=args.val_label_dir,
        batch_size=args.batch_size,
        image_size=args.image_size,
        num_workers=args.num_workers,
        augment=False,
        shuffle=False
    )
    logger.debug("Created validation dataloader with %d batches", len(val_loader))

    # Let Fabric attach distributed samplers and move batches to the device.
    train_loader = fabric.setup_dataloaders(train_loader)
    val_loader = fabric.setup_dataloaders(val_loader)
    logger.debug("Dataloaders setup with Fabric")

    # Training loop
    model.train()
    for epoch in range(args.epochs):
        if fabric.is_global_zero:
            logger.info("Starting epoch %d/%d", epoch + 1, args.epochs)

        epoch_loss = 0.0
        for batch_idx, batch in enumerate(train_loader):
            # Standard step: zero grads, forward, backward (via Fabric), update.
            optimizer.zero_grad()
            loss = train_step(batch, model, criterion)
            fabric.backward(loss)
            optimizer.step()

            epoch_loss += loss.item()

            if fabric.is_global_zero and batch_idx % args.log_interval == 0:
                logger.debug(
                    "Epoch %d, Batch %d, Loss: %.4f",
                    epoch + 1, batch_idx, loss.item(),
                )

        if fabric.is_global_zero:
            # NOTE: this is the rank-zero local average, not a global reduction.
            avg_epoch_loss = epoch_loss / len(train_loader)
            logger.info("Epoch %d completed, Average Loss: %.4f", epoch + 1, avg_epoch_loss)

        # Validation phase
        if epoch % args.val_interval == 0:
            model.eval()
            val_loss = 0
            with torch.no_grad():
                for batch_idx, batch in enumerate(val_loader):
                    loss = train_step(batch, model, criterion)
                    val_loss += loss.item()

            val_loss /= len(val_loader)
            if fabric.is_global_zero:
                logger.info("Validation Loss at epoch %d: %.4f", epoch + 1, val_loss)
            model.train()

        # Save checkpoint. fabric.save must be called on ALL ranks (it writes
        # only on rank zero) and it extracts state dicts from the wrapped
        # model/optimizer itself — a plain torch.save(model.state_dict())
        # on the Fabric-wrapped model would store 'module.'-prefixed keys
        # that a bare YOLOv8 could not load.
        checkpoint_path = os.path.join(args.output_dir, f'checkpoint_epoch_{epoch}.pt')
        state = {
            'epoch': epoch,
            'model_state_dict': model,
            'optimizer_state_dict': optimizer,
        }
        fabric.save(checkpoint_path, state)
        if fabric.is_global_zero:
            logger.info("Saved checkpoint to %s", checkpoint_path)


def parse_args(argv=None):
    """Build the training CLI parser and parse *argv*.

    Args:
        argv: Optional list of argument strings; defaults to ``sys.argv[1:]``
            when ``None`` (standard argparse behavior).

    Returns:
        argparse.Namespace holding all training options.
    """
    parser = argparse.ArgumentParser(description='YOLOv8 Training')
    parser.add_argument('--train-img-dir', type=str, required=True, help='training images directory')
    parser.add_argument('--train-label-dir', type=str, required=True, help='training labels directory')
    parser.add_argument('--val-img-dir', type=str, required=True, help='validation images directory')
    parser.add_argument('--val-label-dir', type=str, required=True, help='validation labels directory')
    parser.add_argument('--output-dir', type=str, default='outputs', help='output directory for checkpoints')
    parser.add_argument('--batch-size', type=int, default=32, help='batch size for training')
    parser.add_argument('--num-gpus', type=int, default=2, help='number of GPUs to use')
    parser.add_argument('--epochs', type=int, default=100, help='number of epochs to train')
    parser.add_argument('--num-classes', type=int, default=80, help='number of classes')
    parser.add_argument('--image-size', type=int, default=640, help='input image size')
    parser.add_argument('--learning-rate', type=float, default=0.001, help='learning rate')
    parser.add_argument('--num-workers', type=int, default=4, help='number of worker threads')
    parser.add_argument('--log-interval', type=int, default=10, help='logging interval')
    parser.add_argument('--val-interval', type=int, default=1, help='validation interval')
    return parser.parse_args(argv)


if __name__ == "__main__":
    args = parse_args()

    # Create output directory if it doesn't exist
    os.makedirs(args.output_dir, exist_ok=True)

    main(args)
