# Modified YOLOv5 classify/train_cspnet.py for CSPNet on CIFAR-100 (manual .pkl dataset)
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import os
import pickle
import sys
from pathlib import Path
from datetime import datetime
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from PIL import Image
import argparse
import torch.nn as nn
from models.experimental import attempt_load
from models.yolo import ClassificationModel
from utils.general import increment_path, yaml_save
from utils.torch_utils import select_device, reshape_classifier_output
from tqdm import tqdm

# Ensure correct path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# === Custom CIFAR-100 Dataset ===
class CIFAR100Custom(Dataset):
    """CIFAR-100 loaded directly from the python-version pickle files.

    Expects ``root`` to contain the raw ``train`` and ``test`` pickle files
    from the cifar-100-python archive. Images are exposed as HWC uint8
    arrays converted to PIL on access; labels are the 100-way fine labels.
    """

    def __init__(self, root, train=True, transform=None):
        self.root = root
        self.train = train
        self.transform = transform
        split_path = os.path.join(root, 'train' if train else 'test')
        if not os.path.exists(split_path):
            raise FileNotFoundError(f"路径不存在：{split_path}")
        # 'latin1' encoding is required to unpickle the py2-era CIFAR batches.
        with open(split_path, 'rb') as fh:
            batch = pickle.load(fh, encoding='latin1')
        # Raw rows are flat 3072-vectors (CHW order); convert to N x 32 x 32 x 3.
        self.data = batch['data'].reshape((-1, 3, 32, 32)).transpose((0, 2, 3, 1))
        self.labels = batch['fine_labels']

    def __len__(self):
        """Number of samples in this split."""
        return self.data.shape[0]

    def __getitem__(self, index):
        """Return (image, fine_label); image is PIL or transformed output."""
        image = Image.fromarray(self.data[index])
        if self.transform:
            image = self.transform(image)
        return image, self.labels[index]

# === Training Function ===
def train(opt):
    """Fine-tune a classification model on CIFAR-100.

    Loads pretrained weights from ``opt.model``, reshapes the classifier
    head to 100 classes, trains with Adam + cross-entropy for
    ``opt.epochs`` epochs, validates after each epoch, and saves a
    state_dict checkpoint per epoch under runs/train-cspnet/<name>.

    Args:
        opt: argparse.Namespace from parse_opt() (data, model, epochs,
            batch_size, imgsz, lr, device, workers, name).
    """
    device = select_device(opt.device)
    # exist_ok=True: reuse the same run directory instead of incrementing.
    save_dir = increment_path(Path("runs/train-cspnet") / opt.name, exist_ok=True)
    save_dir.mkdir(parents=True, exist_ok=True)
    yaml_save(save_dir / "opt.yaml", vars(opt))  # record the run configuration

    transform = transforms.Compose([
        transforms.Resize((opt.imgsz, opt.imgsz)),
        transforms.ToTensor()
    ])

    trainset = CIFAR100Custom(opt.data, train=True, transform=transform)
    valset = CIFAR100Custom(opt.data, train=False, transform=transform)

    trainloader = DataLoader(trainset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers)
    valloader = DataLoader(valset, batch_size=opt.batch_size, shuffle=False, num_workers=opt.workers)

    nc = 100  # CIFAR-100 fine-label classes

    # Load pretrained weights on CPU, swap the head to nc classes, then move.
    model = attempt_load(opt.model, device='cpu', fuse=False)
    reshape_classifier_output(model, nc)  # replace the output layer with a 100-class head
    model = model.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    for epoch in range(opt.epochs):
        model.train()
        running_loss = 0.0
        correct = 0
        total = 0
        pbar = tqdm(trainloader, desc=f"Epoch {epoch+1}/{opt.epochs}")
        # Count batches explicitly: the old `total / opt.batch_size` average
        # was wrong whenever the final batch was smaller than batch_size.
        for batch_idx, (inputs, labels) in enumerate(pbar, start=1):
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()
            pbar.set_postfix({"loss": running_loss / batch_idx, "acc": 100. * correct / total})

        # Validation pass (no gradients, eval-mode batch norm / dropout).
        model.eval()
        val_loss = 0.0
        val_correct = 0
        val_total = 0
        with torch.no_grad():
            for inputs, labels in valloader:
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                val_loss += loss.item()
                _, predicted = outputs.max(1)
                val_total += labels.size(0)
                val_correct += predicted.eq(labels).sum().item()
        # Guard against an empty validation loader (avoids ZeroDivisionError).
        if val_total:
            print(f"\nValidation: Loss={val_loss / len(valloader):.4f}, Acc={100. * val_correct / val_total:.2f}%")
        else:
            print("\nValidation: no samples")

        # Save a per-epoch checkpoint (weights only).
        ckpt_path = save_dir / f"epoch_{epoch+1}.pt"
        torch.save(model.state_dict(), ckpt_path)

# === Argparse ===
def parse_opt():
    """Build and parse command-line options for CIFAR-100 training."""
    ap = argparse.ArgumentParser()
    # Dataset / weights
    ap.add_argument('--data', type=str, default='datasets/cifar-100-python', help='Path to cifar-100-python')
    ap.add_argument('--model', type=str, default='yolov5s-cls.pt', help='Initial weights path')
    # Optimization hyperparameters
    ap.add_argument('--epochs', type=int, default=20)
    ap.add_argument('--batch-size', type=int, default=64)
    ap.add_argument('--imgsz', type=int, default=224)
    ap.add_argument('--lr', type=float, default=1e-3)
    # Runtime
    ap.add_argument('--device', default='cpu', help='cuda or cpu')
    ap.add_argument('--workers', type=int, default=4)
    ap.add_argument('--name', default='cifar100-cspnet')
    return ap.parse_args()

if __name__ == "__main__":
    # Script entry point: parse CLI options and launch training.
    train(parse_opt())