import torch
from torchvision import transforms
from torchvision.datasets import CIFAR100, CIFAR10
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
from flagai.trainer import Trainer
from flagai.auto_model.auto_loader import AutoLoader
import os 
# from timm.models.vision_transformer import vit_base_patch32_224_in21k as create_model  
from timm.models.vision_transformer import vit_large_patch16_224_in21k as create_model  
from timm.models.vision_transformer import _load_weights
# os.environ["CUDA_VISIBLE_DEVICES"] = "1,3"

# Fine-tuning hyper-parameters.
lr = 5e-5  # Adam learning rate used by the optimizer below
n_epochs = 100  # number of full passes over the training set
# Pin training to GPU 1 when CUDA is available; otherwise fall back to CPU.
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")

# env_type = "pytorch"
# trainer = Trainer(
#     env_type=env_type,
#     experiment_name="vit-cifar100-deepspeed",
#     batch_size=128,
#     num_gpus=1,
#     gradient_accumulation_steps=1,
#     lr=lr,
#     warm_up=0.0,
#     weight_decay=1e-5,
#     epochs=n_epochs,
#     log_interval=10,
#     eval_interval=1000,
#     load_dir=None,
#     pytorch_device=device,
#     save_dir="checkpoints_vit_cifar100_deepspeed",
#     save_interval=1000,
#     num_checkpoints=1,
#     hostfile="./hostfile",
#     deepspeed_config='./deepspeed.json',
#     training_script="train_deepspeed_cifar100.py"
# )

def build_cifar(root="/home/xingzhaohu/dataset/cifar100", download=False):
    """Build the CIFAR-100 train/test datasets with ViT-ready transforms.

    Training images get random-crop + AutoAugment augmentation; both splits
    are resized to 224x224 (the ViT input resolution) and normalized with
    CIFAR statistics.

    Args:
        root: Directory holding (or to receive) the CIFAR-100 data.
            Defaults to the original hard-coded path for backward
            compatibility.
        download: Whether to download the dataset if it is missing at
            ``root``. Defaults to False, matching the original behavior.

    Returns:
        Tuple ``(train_dataset, test_dataset)`` of ``CIFAR100`` datasets.
    """
    normalize = transforms.Normalize(
        (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
    )
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.Resize(224),  # upscale to the ViT patch-grid resolution
        transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10),
        transforms.ToTensor(),
        normalize,
    ])
    transform_test = transforms.Compose([
        transforms.Resize(224),
        transforms.ToTensor(),
        normalize,
    ])

    train_dataset = CIFAR100(root=root, train=True, download=download,
                             transform=transform_train)
    test_dataset = CIFAR100(root=root, train=False, download=download,
                            transform=transform_test)

    return train_dataset, test_dataset

def collate_fn(batch):
    """Collate a list of (image, label) pairs into a batched dict.

    Args:
        batch: Sequence of ``(image_tensor, int_label)`` tuples as produced
            by the CIFAR dataset.

    Returns:
        Dict with ``"images"`` (stacked image tensor) and ``"labels"``
        (1-D LongTensor of class indices).
    """
    imgs, targets = zip(*batch)
    return {
        "images": torch.stack(list(imgs)),
        "labels": torch.tensor(targets, dtype=torch.long),
    }

def validate(logits, labels, meta=None):
    """Return top-1 accuracy of ``logits`` against integer ``labels``.

    Args:
        logits: Tensor of shape ``(batch, num_classes)``.
        labels: Tensor of shape ``(batch,)`` with ground-truth class indices.
        meta: Unused; kept for metric-callback interface compatibility.

    Returns:
        Fraction of samples whose arg-max prediction matches the label.
    """
    predictions = torch.argmax(logits, dim=1)
    n_correct = (predictions == labels).sum().item()
    return n_correct / labels.size(0)

if __name__ == '__main__':
    from torch.utils.data import DataLoader
    from tqdm import tqdm

    # Build a ViT-Large/16 backbone with a fresh 100-way classification head,
    # then load pretrained backbone weights from the local .npz checkpoint.
    model = create_model(pretrained=False, num_classes=100)
    _load_weights(model, "./checkpoints/vit-large-p16-224/pytorch_model.npz")
    model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    train_dataset, val_dataset = build_cifar()

    train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True,
                              collate_fn=collate_fn, num_workers=8)
    val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False,
                            collate_fn=collate_fn, num_workers=8)

    ce = torch.nn.CrossEntropyLoss()
    report_loss = 0.0  # running loss, reset every 100 steps
    step = 0

    for e in range(n_epochs):
        model.train()
        for batch in tqdm(train_loader, total=len(train_loader)):
            step += 1
            images = batch["images"].to(device)
            labels = batch["labels"].to(device)

            optimizer.zero_grad()
            pred = model(images)
            loss = ce(pred, labels)
            loss.backward()
            optimizer.step()
            report_loss += loss.item()

            if step % 100 == 0:
                print(f"loss is {report_loss / 100}")
                report_loss = 0.0

            if step % 1000 == 0:
                # Periodic validation. Distinct variable names keep the
                # training-loop tensors from being clobbered (the original
                # eval loop shadowed `data`/`images`/`labels`), and `right`
                # is only initialized when an evaluation actually runs.
                model.eval()
                right = 0
                with torch.no_grad():
                    for val_batch in val_loader:
                        val_images = val_batch["images"].to(device)
                        val_labels = val_batch["labels"].to(device)
                        val_pred = model(val_images).argmax(dim=1)
                        # .item() so the accumulator (and printed accuracy)
                        # is a plain Python number, not a 0-d tensor.
                        right += (val_labels == val_pred).sum().item()

                acc = right / len(val_dataset)
                print(f"epoch is {e}, step is {step}, acc is {acc}")
                model.train()


    # trainer.train(model,
    #               optimizer=optimizer,
    #             #   lr_scheduler=scheduler,
    #               train_dataset=train_dataset,
    #               valid_dataset=val_dataset,
    #               metric_methods=[["accuracy", validate]],
    #               collate_fn=collate_fn)





