import torchvision.transforms as transforms
import os
import numpy as np
import torchvision
from torch.utils.data import Dataset
import torch
from torch.nn import Linear
from torch.utils.data import DataLoader
from vit_pytorch import ViT
from vit_pytorch.deepvit import DeepViT
import json

# Force synchronous CUDA kernel launches so CUDA errors surface at the
# offending call instead of asynchronously later (debugging aid).
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
# Use the first GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def write_json(file_name, obj_dict, store_dir):
    """Serialize *obj_dict* as JSON (UTF-8, non-ASCII preserved) into
    *store_dir*/*file_name* and report the written path."""
    target_path = os.path.join(store_dir, file_name)
    with open(target_path, 'w', encoding='utf-8') as fh:
        fh.write(json.dumps(obj_dict, ensure_ascii=False))
    print("file : {} write success!".format(target_path))


def get_transform():
    """Build the image preprocessing pipeline.

    Steps: tensor -> PIL image, resize to 224x224, AutoAugment policy,
    back to a float tensor, then ImageNet mean/std normalization.
    """
    steps = [
        transforms.ToPILImage(),
        transforms.Resize((224, 224)),
        transforms.AutoAugment(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    return transforms.Compose(steps)


def generate_images_valid(dataset_dir, image_valid_path):
    """Scan *dataset_dir* (one sub-folder per class), keep every image that can
    be decoded and transformed, and save the {image_path: class_idx} map to
    *image_valid_path* as a pickled .npy file.

    Side effects: writes class_map.json ({folder_name: class_idx}) to the CWD
    via write_json().
    """
    transform = get_transform()
    data_dic = {}
    class_idx = 0
    class_map = {}
    for folder in os.listdir(dataset_dir):
        folder_path = os.path.join(dataset_dir, folder)
        # Skip hidden entries (.DS_Store etc.) and stray files: the original
        # crashed with NotADirectoryError when a plain file sat in dataset_dir.
        if folder.startswith(".") or not os.path.isdir(folder_path):
            continue
        class_map[folder] = class_idx
        for image in os.listdir(folder_path):
            # Case-insensitive extension match so ".JPG"/".jpeg" datasets are
            # picked up too (the old `image[-3:] == "jpg"` silently dropped them).
            if image.lower().endswith((".jpg", ".jpeg")):
                data_dic[os.path.join(folder_path, image)] = class_idx
        class_idx += 1
    write_json("class_map.json", class_map, os.getcwd())
    images_valid = {}
    for path, label in data_dic.items():
        try:
            # Best-effort validation: decode + transform once; unreadable or
            # untransformable images are reported and excluded.
            transform(torchvision.io.read_image(path))
            images_valid[path] = label
        except Exception as e:
            print(e)
            print("INVALID : {} ".format(path))
    np.save(image_valid_path, images_valid)


class ImageDataset(Dataset):
    """Dataset over a {image_path: label} mapping; images are read lazily.

    NOTE(review): __getitem__ swallows read/transform failures and yields
    False instead of a (image, label) pair — downstream collate must cope.
    """

    def __init__(self, images_valid, transform=None, target_transform=None):
        self.images_valid = images_valid
        self.images_valid_path = list(images_valid.keys())
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        # Path list is built once from the mapping's keys, so its length
        # equals the number of valid images.
        return len(self.images_valid_path)

    def __getitem__(self, idx):
        try:
            path = self.images_valid_path[idx]
            img = torchvision.io.read_image(path)
            lbl = self.images_valid[path]
            if self.transform is not None:
                img = self.transform(img)
            if self.target_transform is not None:
                lbl = self.target_transform(lbl)
            return img, lbl
        except Exception as exc:
            print(exc)
            return False


def train_loop(dataloader, model, loss_fn, optimizer):
    """Run one training epoch over *dataloader*, printing the loss every
    1000 batches. Uses the module-level `device`."""
    size = len(dataloader.dataset)
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # Forward pass and loss
        pred = model(X).to(device)
        loss = loss_fn(pred, y)

        # Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 1000 == 0:
            current = batch * len(X)
            print(f"loss: {loss.item():>7f}  [{current:>5d}/{size:>5d}]")


def test_loop(dataloader, model, loss_fn):
    """Evaluate *model* over *dataloader*; print and return top-1 accuracy.

    Returns:
        float: fraction of correctly classified samples, in [0, 1].
    """
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, correct = 0, 0
    # Switch to eval mode so dropout (the ViT configs here use dropout=0.1)
    # doesn't make the reported accuracy stochastic; restore the caller's
    # mode afterwards so training continues unaffected.
    was_training = model.training
    model.eval()
    try:
        with torch.no_grad():
            for X, y in dataloader:
                X = X.to(device)
                y = y.to(device)
                pred = model(X)  # already on `device`; extra .to() was a no-op
                test_loss += loss_fn(pred, y).item()
                correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    finally:
        model.train(was_training)
    test_loss /= num_batches
    correct /= size
    print(f"________________Test Error: \n Accuracy: {(100 * correct):>0.1f}%, Avg loss: {test_loss:>8f} \n___________")
    return correct


def train(loader_train, loader_test, model, loss_fn, optimizer):
    """Train for the module-level `epochs` epochs, saving the whole model to
    'model_test_best.pth' whenever test accuracy improves."""
    acc_best = 0
    for epoch in range(epochs):
        print(f"Epoch {epoch + 1}\n-------------------------------")
        train_loop(loader_train, model, loss_fn, optimizer)
        acc = test_loop(loader_test, model, loss_fn)
        if acc > acc_best:
            acc_best = acc
            print("updating model while training .. ")
            torch.save(model, 'model_test_best.pth')
    print("Done!")


def get_loader(dataset_dir):
    """Build (train, test) DataLoaders for the images under *dataset_dir*.

    A '<dirname>.npy' cache of validated images is loaded if present and
    regenerated via generate_images_valid() otherwise. Relies on the
    module-level `train_ratio` and `batch_size`.

    Returns:
        (DataLoader, DataLoader): training and test loaders.
    """
    transform = get_transform()
    # Cache file named after the dataset directory; computed once instead of
    # duplicating the expression in both the try and except branches.
    image_valid_path = dataset_dir.split("/")[-1] + '.npy'
    try:
        images_valid = np.load(image_valid_path, allow_pickle=True).tolist()
    except Exception as e:
        print(e)
        generate_images_valid(dataset_dir, image_valid_path)
        images_valid = np.load(image_valid_path, allow_pickle=True).tolist()
    print("training images : {} ".format(len(images_valid)))
    data = ImageDataset(images_valid=images_valid, transform=transform)
    data_len = len(data)
    train_images = int(data_len * train_ratio)
    test_images = data_len - train_images
    data_train, data_test = torch.utils.data.random_split(data, (train_images, test_images))
    # Shuffle the training split so each epoch sees the data in a fresh order;
    # unshuffled training batches hurt SGD convergence.
    loader_train = DataLoader(data_train, batch_size=batch_size, shuffle=True)
    loader_test = DataLoader(data_test, batch_size=batch_size)
    return loader_train, loader_test


def get_model(model_name="vit"):
    """Construct a ViT variant on the module-level `device`.

    Args:
        model_name: "vit" (377-class ViT) or "deep_vit" (1000-class DeepViT).

    Returns:
        The model, moved to `device`.

    Raises:
        ValueError: if *model_name* is not a recognized option (previously the
            function fell through and returned None, crashing later at the
            call site).
    """
    if model_name == "vit":
        return ViT(
            image_size=224,
            patch_size=32,
            num_classes=377,
            dim=1024,
            depth=6,
            heads=16,
            mlp_dim=2048,
            dropout=0.1,
            emb_dropout=0.1).to(device)

    if model_name == "deep_vit":
        return DeepViT(
            image_size=224,
            patch_size=32,
            num_classes=1000,
            dim=1024,
            depth=6,
            heads=16,
            mlp_dim=2048,
            dropout=0.1,
            emb_dropout=0.1
        ).to(device)

    raise ValueError(
        "unknown model_name: {!r} (expected 'vit' or 'deep_vit')".format(model_name))


if __name__ == '__main__':
    # dataset_dir = "/home/leslie/project/ViT/dataset_shucai"
    #dataset_dir = "/home/leslie/project/ViT/shucaitrain-change-377_JPG"  # 1.set dataset absolute directory path
    dataset_dir = "/home/leslie/Project/tupianhunhexunlianji"
    learning_rate = 0.001
    train_ratio = 0.9
    batch_size = 32
    epochs = 100
    class_num = 543  # 2.set dataset's class numbers
    if_checkpoint = 1  # load model_test_best.pth or not (1/0)

    print(device)

    def _fresh_model():
        # ImageNet-pretrained ViT-B/16 with an extra head mapping its
        # 1000 logits down to `class_num` classes.
        m = torchvision.models.vit_b_16(pretrained=True)
        m.heads.add_module(name="out", module=Linear(1000, class_num))
        return m.to(device)

    # 3.please delete '*.pth' or '*.npy' file before training.
    model = None
    if if_checkpoint:
        try:
            # map_location lets a GPU-saved checkpoint load on a CPU-only host.
            model = torch.load('model_test_best.pth', map_location=device)
            print("loaded pretrained model : 'model_test_best.pth' ")
        except Exception as e:
            print(e)
    if model is None:
        # Fall back to a fresh pretrained backbone. Previously a failed
        # checkpoint load left `model` undefined (the fallback was commented
        # out), crashing with NameError at optimizer creation below.
        model = _fresh_model()
        print("loaded pretrained model..")

    loader_train, loader_test = get_loader(dataset_dir)
    loss_fn = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    train(loader_train, loader_test, model, loss_fn, optimizer)

    print(1)
