import torch
from torchvision import transforms
from torchvision.datasets import CIFAR100
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
from flagai.trainer import Trainer
from flagai.auto_model.auto_loader import AutoLoader
from timm.utils import accuracy, AverageMeter
from imagenet_need.dataset import build_dataset
from imagenet_need.config import get_config
import os 
from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR
from torch.utils.data import DataLoader 
from tqdm import tqdm 

# Optionally pin visible GPUs before torch initializes CUDA.
# os.environ["CUDA_VISIBLE_DEVICES"] = "2,3"

# Fine-tuning hyperparameters.
lr = 5e-5        # base learning rate for AdamW
n_epochs = 10    # total fine-tuning epochs
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Multi-GPU data-parallel training via PyTorch DistributedDataParallel;
# the Trainer relaunches this script (training_script) on each rank.
env_type = "pytorchDDP"
trainer = Trainer(
    env_type=env_type,
    experiment_name="vit-imagenet-2gpu-finetuning",
    batch_size=128,                   # per-step batch size
    num_gpus=2,
    warm_up=0.05,                     # fraction of steps used for LR warm-up
    gradient_accumulation_steps=1,
    lr=lr,
    fp16=False,                       # full-precision training
    weight_decay=1e-3,
    epochs=n_epochs,
    log_interval=10,                  # steps between loss logs
    eval_interval=100,                # steps between validation runs
    load_dir=None,                    # no checkpoint to resume from
    pytorch_device=device,
    # save_dir="swinv2_base_p4_w8_256_imagenet",
    save_dir="vit_imagenet_finetuning",
    save_interval=1000,               # steps between checkpoint saves
    hostfile="./hostfile",            # node list for distributed launch
    training_script="train_DDP.py",
    clip_grad=5.0                     # gradient-norm clipping threshold
)

# Build ImageNet train/val datasets from the project config
# (build_dataset returns (dataset, num_classes); class count is unused here).
config = get_config()
train_ds, _ = build_dataset(True, config)
val_ds, _ = build_dataset(False, config)

def collate_fn(batch):
    """Collate a list of (image, label) pairs into a batch dict.

    Stacks the image tensors along a new batch dimension and converts
    the labels into a 1-D int64 tensor, returning the keyword dict the
    trainer feeds to the model.
    """
    stacked_images = torch.stack([sample[0] for sample in batch])
    label_tensor = torch.tensor([sample[1] for sample in batch]).long()
    return {"images": stacked_images, "labels": label_tensor}


def validate(logits, labels, meta=None):
    """Metric callback passed to the trainer as "accuracy_1".

    Computes top-1 and top-5 accuracy with timm's `accuracy`, prints both,
    and returns the top-1 value (the quantity the trainer tracks).
    `meta` is accepted for interface compatibility and ignored.
    """
    top1, top5 = accuracy(logits, labels, topk=(1, 5))
    print(f"acc1 is {top1}, acc5 is {top5}")
    return top1

if __name__ == '__main__':
    # Load the pretrained ViT-B/16 (224px) classification model with a
    # 1000-class head; weights are cached under the local "checkpoints" dir.
    loader = AutoLoader(task_name="classification",
                        model_name="vit-base-p16-224",
                        model_dir="checkpoints",
                        num_classes=1000)

    model = loader.get_model()

    # Plain AdamW; LR warm-up/decay is handled inside the Trainer
    # (warm_up=0.05 above), so no explicit lr_scheduler is passed.
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)

    # Launch distributed fine-tuning. `validate` is registered as the
    # "accuracy_1" metric and `collate_fn` shapes batches into the
    # {"images", "labels"} dict the model expects.
    trainer.train(model,
                  optimizer=optimizer,
                  train_dataset=train_ds,
                  valid_dataset=val_ds,
                  metric_methods=[["accuracy_1", validate]],
                  collate_fn=collate_fn)





