from model import Resnet50, Resnet18
from datasets import Caltech
import argparse
import torchvision.transforms as T
from torch.utils.data import DataLoader
from utils import train_one_epoch, seed_every, lr_lambda
import torch.optim as optim
import torch
from tqdm import tqdm
import os
def main(args):
    """Train a classifier on Caltech-256 and report top-1 / top-3 validation
    accuracy after every epoch, checkpointing the model each time.

    Args:
        args: parsed CLI namespace (see the argparse block at the bottom of
            this file) carrying data/save paths, hyper-parameters, device
            string, seed and an optional checkpoint to resume from.
    """
    train_path = os.path.join(args.data_root, 'train.txt')
    val_path = os.path.join(args.data_root, 'val.txt')
    # Heavy augmentation for training; ImageNet mean/std normalization to
    # match the ResNet backbone's expected input statistics.
    transforms_train = T.Compose([
                            T.RandomHorizontalFlip(),
                            T.RandomVerticalFlip(),
                            T.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
                            T.RandomRotation(30),
                            T.RandomGrayscale(p=0.1),
                            T.Resize((224, 224)),
                            T.ToTensor(),
                            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    # Validation: deterministic resize + normalize only.
    transforms_val = T.Compose([
                            T.Resize((224, 224)),
                            T.ToTensor(),
                            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    save_dir = args.save_root
    os.makedirs(save_dir, exist_ok=True)
    seed_every(args.seed)
    train_dataset = Caltech(train_path, transforms_train)
    val_dataset = Caltech(val_path, transforms_val)
    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size,
                                  shuffle=True, num_workers=8)
    val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=8)
    model = Resnet18().to(args.device)

    if args.resume:
        # map_location: a checkpoint saved on GPU still loads on a CPU-only
        # box (plain torch.load would try to restore onto the saving device).
        ckpt = torch.load(args.resume, map_location=args.device)
        model.load_state_dict(ckpt)
    optimizer = optim.SGD(model.parameters(), lr=args.lr)
    # Decay the learning rate by 10x at epochs 50 and 75.
    steplr = optim.lr_scheduler.MultiStepLR(optimizer=optimizer, milestones=[50, 75], gamma=0.1)

    for epoch in range(args.epochs):
        model.train()
        train_one_epoch(train_dataloader, model, args.device,
                        args.num_classes, args.batch_size, optimizer, epoch)
        steplr.step()
        # os.path.join works whether or not save_root ends with a slash; the
        # old f-string concatenation silently broke without a trailing '/'.
        torch.save(model.state_dict(), os.path.join(save_dir, f'epoch{epoch}.pth'))

        model.eval()
        with torch.no_grad():
            correct_num = 0
            top3_correct_num = 0
            for samples, targets in tqdm(val_dataloader,
                                        desc=f'\033[1;34mvalidation epoch[{epoch}]\033[0m',
                                        ncols=70,
                                        bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt}'):
                samples = samples.to(args.device)
                targets = targets.to(args.device)
                prob = model(samples)  # (batch, num_classes) scores
                index = prob.argmax(dim=1)  # top-1 predicted class per sample

                # Vectorized top-3 hit count: does any of the 3 best class
                # indices per row equal the target? (replaces the per-row
                # Python loop, which forced a host sync per sample)
                _, topk_indices = torch.topk(prob, k=3, dim=1)
                top3_correct_num += (topk_indices == targets.unsqueeze(1)).any(dim=1).sum().item()
                # .item() keeps the accumulator a plain Python int so the
                # accuracies below print as floats, not 0-d tensors (the old
                # Python sum(...) over a tensor also iterated element-wise).
                correct_num += (index == targets).sum().item()
        accuracy = correct_num / len(val_dataset)
        top3_accuracy = top3_correct_num / len(val_dataset)
        print(f"\033[1;35mepoch[{epoch}]      accuracy\033[0m : {accuracy}")
        print(f"\033[1;35mepoch[{epoch}] top3_accuracy\033[0m : {top3_accuracy}\n")
    print("\033[1;31mtraining finished!\033[0m")


if __name__ == '__main__':
    # Command-line configuration for a Caltech-256 classification run.
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--device", type=str, default='cuda')
    parser.add_argument("--epochs", type=int, default=100)
    parser.add_argument("--data_root", type=str, default='Caltech256/labels/')
    parser.add_argument("--num_classes", type=int, default=257)
    parser.add_argument("--lr", type=float, default=0.1)
    parser.add_argument("--save_root", type=str, default='out/classify_resnet50/')
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--resume", type=str, default=None)
    main(parser.parse_args())