import os
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from CoAtNet import CoAtNet
import json
from tqdm import tqdm

# CIFAR-100 dataset read from the local python-pickle distribution
class CIFAR100Local(Dataset):
    """CIFAR-100 loaded from the locally extracted python-pickle release.

    Reads the ``train`` or ``test`` pickle found under *root* and exposes
    each sample as a PIL image (optionally transformed) plus its
    fine-grained (100-class) integer label.
    """

    def __init__(self, root, train=True, transform=None):
        self.transform = transform
        self.train = train

        split = 'train' if train else 'test'
        with open(os.path.join(root, split), 'rb') as fh:
            # latin1 keeps the py2-era pickle readable under py3
            batch = pickle.load(fh, encoding='latin1')

        self.targets = batch['fine_labels']
        # (N, 3072) -> (N, 3, 32, 32) -> (N, 32, 32, 3): channel-last for PIL
        self.data = np.reshape(batch['data'], (-1, 3, 32, 32)).transpose((0, 2, 3, 1))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        target = self.targets[index]
        img = transforms.ToPILImage()(self.data[index])
        if self.transform is not None:
            img = self.transform(img)
        return img, target

# Training configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size = 32
epochs = 5
learning_rate = 0.0001
save_path = 'training_log.json'  # where the per-epoch metrics JSON is written
# Local dataset path (absolute path)
data_root = '/kaggle/input/cifar100/cifar-100-python'


# Data preprocessing: per-channel CIFAR-100 statistics shared by both pipelines.
_CIFAR100_MEAN = (0.507, 0.487, 0.441)
_CIFAR100_STD = (0.267, 0.256, 0.276)

# Training pipeline: augment (pad-crop + horizontal flip), upscale to the
# model's 224x224 input size, then convert to a normalized tensor.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(_CIFAR100_MEAN, _CIFAR100_STD),
])

# Evaluation pipeline: no augmentation, just resize + normalize.
transform_test = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(_CIFAR100_MEAN, _CIFAR100_STD),
])

# Data loading: wrap the local CIFAR-100 splits in DataLoaders.
train_dataset = CIFAR100Local(data_root, train=True, transform=transform_train)
test_dataset = CIFAR100Local(data_root, train=False, transform=transform_test)

# shuffle only the training split; 2 worker processes for decode/augmentation
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)

# Model definition: CoAtNet-0 with a 224x224 RGB input and a 100-way head.
model = CoAtNet(224, 224, 3, config='coatnet-0', num_classes=100).to(device)
criterion = nn.CrossEntropyLoss()
# SGD with momentum; lr comes from the config section above.
# (Removed a commented-out Adam optimizer left over from experimentation.)
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)

# Per-epoch metrics collected during training, dumped to JSON at the end.
history = {"train_loss": [], "test_acc": []}



# Main training loop: one optimization pass over the training set per epoch,
# followed by a full evaluation pass on the test set.
for epoch in range(epochs):
    model.train()
    running_loss = 0.0
    progress_bar = tqdm(train_loader, desc=f"Epoch [{epoch+1}/{epochs}]", leave=False)

    for images, labels in progress_bar:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

        # Show the current batch loss on the progress bar.
        progress_bar.set_postfix(loss=loss.item())

    avg_loss = running_loss / len(train_loader)
    history["train_loss"].append(avg_loss)

    # Evaluate top-1 accuracy on the held-out test set.
    model.eval()
    correct = total = 0
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    acc = 100 * correct / total
    history["test_acc"].append(acc)

    # Single end-of-epoch summary (the original printed the loss twice:
    # once alone right after training and again here with the accuracy).
    print(f"Epoch [{epoch+1}/{epochs}], Loss: {avg_loss:.4f}, Accuracy: {acc:.2f}%")

# Persist the training log for later plotting/analysis.
with open(save_path, 'w') as f:
    json.dump(history, f)
