import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import csv
import time
from vit import ViT
from randomaug import RandAugment

# parsers
#
# Fix: --bs / --size previously had *string* defaults and no type=, forcing
# int(...) casts downstream; --n_epochs / --patch / --dimhead mixed string
# defaults with type=int. All numeric args now carry proper int defaults.
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=1e-4, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
# store_false => args.noaug is True unless --noaug is passed on the CLI.
parser.add_argument('--noaug', action='store_false', help='disable use randomaug')
parser.add_argument('--mixup', action='store_true', help='add mixup augmentations')
parser.add_argument('--bs', default=512, type=int, help='batch size')
parser.add_argument('--size', default=32, type=int, help='input image size after resize')
parser.add_argument('--n_epochs', type=int, default=200)
parser.add_argument('--patch', default=4, type=int, help="patch for ViT")
parser.add_argument('--dimhead', default=512, type=int)

args = parser.parse_args()

# take in args
bs = args.bs        # batch size (int via argparse)
imsize = args.size  # target image side length
aug = args.noaug    # True by default; --noaug turns augmentation off

device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0  # best test accuracy
start_epoch = 0  # start from epoch 0 or last checkpoint epoch


print('==> Preparing data..')

size = imsize

# Per-channel CIFAR-10 statistics used by both pipelines for normalization.
_cifar_mean = (0.4914, 0.4822, 0.4465)
_cifar_std = (0.2023, 0.1994, 0.2010)

# RandAugment strength: N ops per image at magnitude M.
N, M = 2, 14

# Training pipeline: optional RandAugment runs first, then crop/resize/flip.
_train_ops = []
if aug:
    _train_ops.append(RandAugment(N, M))
_train_ops.extend([
    transforms.RandomCrop(32, padding=4),
    transforms.Resize(size),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(_cifar_mean, _cifar_std),
])
transform_train = transforms.Compose(_train_ops)

# Evaluation pipeline: deterministic resize + normalize only.
transform_test = transforms.Compose([
    transforms.Resize(size),
    transforms.ToTensor(),
    transforms.Normalize(_cifar_mean, _cifar_std),
])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=bs, shuffle=True, num_workers=8)

testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=8)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

print('==> Building model..')
# ViT hyperparameters: image/patch sizes come from the CLI, the rest are fixed.
_vit_cfg = dict(
    image_size=size,
    patch_size=args.patch,
    num_classes=10,
    dim=int(args.dimhead),
    depth=6,
    heads=8,
    mlp_dim=512,
    dropout=0.1,
    emb_dropout=0.1,
)
net = ViT(**_vit_cfg).to(device)


if args.resume:
    # Restore model weights and bookkeeping from the last saved checkpoint.
    print('==> Resuming from checkpoint..')
    if not os.path.isdir('checkpoint'):
        # Explicit error instead of `assert`, which is stripped under `python -O`.
        raise RuntimeError('Error: no checkpoint directory found!')
    # map_location lets a checkpoint saved on GPU load on a CPU-only machine.
    checkpoint = torch.load('./checkpoint/vit-ckpt.pth', map_location=device)
    net.load_state_dict(checkpoint['net'])
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']

log_dir = 'log'
os.makedirs(log_dir, exist_ok=True)
os.makedirs('checkpoint', exist_ok=True)

# Pick the next free run index so reruns never overwrite an earlier log.
base_name = f'log_patch{args.patch}'
indices = []
for fname in os.listdir(log_dir):
    # Fix: match the full `log_patch<patch>_` prefix. The old bare
    # startswith(base_name) made patch=4 also match log_patch44_*.csv,
    # mixing run indices across different patch sizes.
    if fname.startswith(f'{base_name}_') and fname.endswith('.csv'):
        try:
            indices.append(int(fname[len(base_name) + 1:-len('.csv')]))
        except ValueError:
            # Not a numbered log file of ours; ignore it.
            continue
next_index = max(indices) + 1 if indices else 1

csv_filename = os.path.join(log_dir, f'{base_name}_{next_index}.csv')

# Standard cross-entropy loss for 10-class classification.
criterion = nn.CrossEntropyLoss()
# AdamW with a cosine learning-rate decay over the full run (no warmup).
optimizer = optim.AdamW(net.parameters(), lr=args.lr)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.n_epochs)


def train(epoch):
    """Run one training epoch over `trainloader`; return the mean batch loss.

    Uses the module-level `net`, `optimizer`, `criterion`, and `device`.
    Prints running loss/accuracy after every batch.
    """
    epoch_start = time.time()
    net.train()
    running_loss = 0.0
    n_correct = 0
    n_seen = 0
    n_batches = len(trainloader)
    for step, (images, labels) in enumerate(trainloader):
        images, labels = images.to(device), labels.to(device)

        logits = net(images)
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        running_loss += loss.item()
        _, preds = logits.max(1)
        n_seen += labels.size(0)
        n_correct += preds.eq(labels).sum().item()

        print(f"{step}/{n_batches} | "
                f"Loss: {running_loss/(step+1):.3f} | "
                f"Acc: {100.*n_correct/n_seen:.3f}% ({n_correct}/{n_seen})")

    print(f"Training epoch {epoch} took {time.time() - epoch_start:.2f}s")
    # Mean loss per batch (identical to dividing by the final step index + 1).
    return running_loss / n_batches


def test(epoch):
    """Evaluate on the test set; checkpoint the model when accuracy improves.

    Returns (test_loss, acc) where test_loss is the *sum* of batch losses
    (matching how the caller logs it) and acc is accuracy in percent.
    """
    epoch_start = time.time()
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            print(f"{batch_idx}/{len(testloader)} | "
                    f"Loss: {test_loss/(batch_idx+1):.3f} | "
                    f"Acc: {100.*correct/total:.3f}% ({correct}/{total})")

    acc = 100.*correct/total
    # Fix: `best_acc` was declared global but never updated, and no checkpoint
    # was ever written -- even though --resume expects ./checkpoint/vit-ckpt.pth
    # with keys 'net'/'acc'/'epoch'. Save the best model here.
    if acc > best_acc:
        print('Saving best checkpoint..')
        torch.save({'net': net.state_dict(), 'acc': acc, 'epoch': epoch},
                   './checkpoint/vit-ckpt.pth')
        best_acc = acc

    val_time = time.time() - epoch_start
    print(f"Validation epoch {epoch} took {val_time:.2f}s")
    return test_loss, acc


# One row per completed epoch: [epoch, train_loss, val_loss, acc, time].
# The CSV is rewritten in full each epoch so an interrupted run still leaves
# a complete, parseable log on disk.
history = []

for epoch in range(start_epoch, args.n_epochs):
    epoch_start_time = time.time()
    train_loss = train(epoch)
    val_loss, acc = test(epoch)
    epoch_time = time.time() - epoch_start_time

    scheduler.step()

    # Fix: the header declares 5 columns but rows previously held only 4
    # values (train loss was silently dropped), and every historical row
    # repeated the *latest* epoch's time. Record one complete row per epoch,
    # numbered by the true epoch (correct when resuming from a checkpoint).
    history.append([epoch + 1, train_loss, val_loss, acc, epoch_time])

    # newline='' is the documented way to open files for the csv module.
    with open(csv_filename, 'w', newline='') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow(['Epoch', 'Train Loss', 'Val Loss', 'Acc', 'Time'])
        writer.writerows(history)

    print(f"Epoch {epoch} summary | "
            f"Train Loss: {train_loss:.3f} | "
            f"Val Loss: {val_loss:.3f} | Acc: {acc:.3f}% | "
            f"Total Time: {epoch_time:.2f}s")