import numpy as np
import torch 
import torch.nn as nn 
from monai.networks.nets.basic_unet import BasicUNet
from monai.networks.nets.unetr import UNETR
from monai.networks.nets.swin_unetr import SwinUNETR
from monai.inferers import SlidingWindowInferer
from light_training.evaluation.metric import dice
from light_training.trainer import Trainer
from monai.utils import set_determinism
from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR
from light_training.utils.files_helper import save_new_model_and_delete_last
from torchvision import transforms
from torchvision.datasets import CIFAR100
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import random
import h5py
from torch.utils.data import Dataset 
set_determinism(42)
from timm.models.vision_transformer import vit_base_patch32_224_in21k as create_model  
from timm.models.vision_transformer import _load_weights

import glob 
import os

# Restrict this process to physical GPUs 1 and 3; inside the process they are
# re-indexed as cuda:0 and cuda:1.
os.environ["CUDA_VISIBLE_DEVICES"] = "1,3"
# Alternative experiment log dirs from earlier runs (disabled):
# logdir = "./logs_brats/swinunetr_multi_gpu/"
# logdir = "./logs_brats/unet2d/"
# logdir = "./logs_brats/unet3d/"
# logdir = "./logs_brats/unetr/"
# logdir = "./logs_brats/tranbts/"
# logdir = "./logs_huanhu/segresnet/"

# logdir = "./logs_brats/transvw"

# logdir = "./logs_brats/swinunet2d"
# Active experiment: ViT-Base patch-32 fine-tuned on CIFAR-100.
logdir = "./logs_cifar100/vit_base_p32"
# logdir = "./logs_brats/modelsgenesis"
# logdir = "./logs_brats/transvw"
# logdir = "./logs_brats/attentionUNet"

env = "pytorch"  # light_training env type: plain single-process PyTorch
model_save_path = os.path.join(logdir, "model")  # checkpoints go under the log dir
max_epoch = 50
batch_size = 128
val_every = 1  # run validation every epoch
num_gpus = 1
# NOTE(review): with CUDA_VISIBLE_DEVICES="1,3" above, "cuda:1" maps to
# physical GPU 3 — confirm that is the intended card.
device = "cuda:1"

def build_cifar(root="/home/xingzhaohu/dataset/cifar100", download=False):
    """Build CIFAR-100 train/test datasets upscaled to 224x224 for ViT input.

    Args:
        root: directory containing (or, with ``download=True``, to receive)
            the CIFAR-100 data. Defaults to the original hard-coded path.
        download: if True, fetch the dataset when it is missing locally.

    Returns:
        ``(train_dataset, test_dataset)`` tuple of torchvision CIFAR100 datasets.
    """
    # NOTE(review): these are the canonical CIFAR-10 statistics, and the
    # AutoAugment policy below is also the CIFAR10 one, although the data is
    # CIFAR-100 — kept as-is to preserve the behavior the checkpoints were
    # trained with; confirm whether CIFAR-100 stats were intended.
    normalize = transforms.Normalize((0.4914, 0.4822, 0.4465),
                                     (0.2023, 0.1994, 0.2010))

    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.Resize(224),  # 32x32 -> 224x224 expected by ViT-Base/32
        transforms.AutoAugment(policy=transforms.AutoAugmentPolicy.CIFAR10),
        transforms.ToTensor(),
        normalize,
    ])
    transform_test = transforms.Compose([
        transforms.Resize(224),
        transforms.ToTensor(),
        normalize,
    ])

    train_dataset = CIFAR100(root=root, train=True, download=download, transform=transform_train)
    test_dataset = CIFAR100(root=root, train=False, download=download, transform=transform_test)
    return train_dataset, test_dataset

class BraTSTrainer(Trainer):
    """Fine-tunes a pretrained ViT-Base/32 on CIFAR-100 classification.

    Despite the name (inherited from a BraTS segmentation script this file was
    adapted from), this trainer does plain image classification: cross-entropy
    loss for training, top-1 accuracy for validation and model selection.
    """

    def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"):
        super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script)

        # Framework flag: don't average validation outputs — we aggregate the
        # raw per-batch correct counts ourselves in validation_end.
        self.val_mean = False
        self.model = create_model(pretrained=False, num_classes=100)
        # Load ImageNet-21k pretrained ViT weights from a local .npz checkpoint.
        _load_weights(self.model, "./checkpoints/vit-base-p32-224/pytorch_model.npz")
        print("参数加载成功")  # "parameters loaded successfully"
        self.val_batch_size = 128
        self.best_acc = 0.0
        self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=5e-5, weight_decay=1e-5)
        self.loss_func = nn.CrossEntropyLoss()

    def training_step(self, batch):
        """One optimization step: forward pass + cross-entropy loss.

        NOTE(review): the batch is not moved to ``self.device`` here —
        presumably the Trainer base class handles device placement; confirm.
        """
        image, label = self.get_input(batch)
        pred = self.model(image)
        loss = self.loss_func(pred, label)
        self.log("train_loss", loss, step=self.global_step)
        return loss

    def get_input(self, batch):
        """Unpack an ``(image, label)`` batch; cast labels to int64 as required
        by ``nn.CrossEntropyLoss``."""
        image, label = batch
        return image, label.long()

    def validation_step(self, batch):
        """Return the number of correct top-1 predictions in this batch."""
        image, label = self.get_input(batch)
        pred = self.model(image).argmax(dim=1).cpu().numpy()
        target = label.cpu().numpy()
        return (pred == target).sum()

    def validation_end(self, mean_val_outputs):
        """Aggregate per-batch correct counts into top-1 accuracy and checkpoint.

        Saves a ``final_model`` checkpoint every validation and refreshes the
        ``best_model`` checkpoint whenever accuracy improves.
        """
        correct_counts = mean_val_outputs
        acc = sum(correct_counts) / len(self.val_dataset)

        self.log("acc", acc, step=self.epoch)

        if acc > self.best_acc:
            self.best_acc = acc
            save_new_model_and_delete_last(self.model,
                                           os.path.join(model_save_path,
                                                        f"best_model_{acc:.4f}.pt"),
                                           delete_symbol="best_model")

        save_new_model_and_delete_last(self.model,
                                       os.path.join(model_save_path,
                                                    f"final_model_{acc:.4f}.pt"),
                                       delete_symbol="final_model")

        print(f"acc is {acc}")

if __name__ == "__main__":
    # Assemble the trainer from the module-level config, then hand it the
    # CIFAR-100 splits. Trainer construction happens first: it loads the
    # pretrained ViT weights and consumes RNG state.
    cifar_trainer = BraTSTrainer(
        env_type=env,
        max_epochs=max_epoch,
        batch_size=batch_size,
        device=device,
        logdir=logdir,
        val_every=val_every,
        num_gpus=num_gpus,
        master_port=17751,
        training_script=__file__,
    )

    train_dataset, test_dataset = build_cifar()
    cifar_trainer.train(train_dataset=train_dataset, val_dataset=test_dataset)
