# Training script for Auto-Encoder.
import os
import numpy as np
import torch
import efficient_resnet18_capsule_model as torch_model
import torch_capsule_family_engine as torch_engine
import torchvision.transforms as T
import albumentations as A
from albumentations.pytorch import ToTensorV2
import config_efficient_resnet18_capsule_oil as config
from tqdm import tqdm
import torch.nn as nn

import utils
import torchvision

from loss.custom_loss import TotalCodingRate, SimCLR, Z_loss, MaximalCodingRateReduction, MarginLoss
import pandas as pd

# Process environment configuration: tolerate duplicate OpenMP runtimes
# (common with MKL + PyTorch on Windows) and pin CUDA device enumeration
# to the PCI bus order so "0,1" always means the same physical GPUs.
os.environ.update({
    "KMP_DUPLICATE_LIB_OK": "TRUE",
    "CUDA_DEVICE_ORDER": "PCI_BUS_ID",
    "CUDA_VISIBLE_DEVICES": "0,1",
})


class Contractive_Loss(nn.Module):
    """Contractive term: mean of a user-supplied criterion between each
    view embedding and a (precomputed) average embedding.

    Args:
        user_criterion: any pairwise loss module called as
            ``criterion(z_i, z_avg)`` — e.g. ``nn.MSELoss()`` or
            ``TotalCodingRate``.
    """

    def __init__(self, user_criterion: nn.Module):
        super().__init__()
        self.criterion = user_criterion

    def forward(self, z_list, z_avg):
        """Return ``mean_i criterion(z_list[i], z_avg)``.

        Args:
            z_list: sequence of embedding tensors, one per augmented view.
            z_avg: the average embedding the views are pulled towards.

        Note: an empty ``z_list`` raises ``ZeroDivisionError``, matching
        the original accumulate-then-divide behavior.
        """
        # The criterion is applied per view, so iterate directly instead of
        # building an intermediate stacked (V, ...) tensor as before.
        total = sum(self.criterion(z, z_avg) for z in z_list)
        return total / len(z_list)


if __name__ == "__main__":
    # Prefer the GPU whenever CUDA reports one; fall back to CPU otherwise.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("Setting Seed for the run, seed = {}".format(config.SEED))

    # Seed all RNGs up front so dataset splits and weight init are reproducible.
    utils.seed_everything(config.SEED)

    # Input pipeline: square grayscale tensors of side IMG_HEIGHT —
    # center-crop first, then resize, then collapse to a single channel.
    crop_side = config.IMG_HEIGHT
    transforms = T.Compose([
        T.CenterCrop(crop_side),
        T.Resize(crop_side),
        T.Grayscale(num_output_channels=1),
        T.ToTensor(),
    ])

    print("----------- Creating Dataset -----------")

    # ImageFolder expects one sub-directory per class under each root path.
    # `full_dataset` spans the entire collection and is only used later to
    # export embeddings, never for training or validation.
    train_dataset = torchvision.datasets.ImageFolder(root=config.TRAIN_DATA_PATH, transform=transforms)
    val_dataset = torchvision.datasets.ImageFolder(root=config.TEST_DATA_PATH, transform=transforms)
    full_dataset = torchvision.datasets.ImageFolder(root=config.IMG_DATA_PATH, transform=transforms)

    print("------------ Dataset Created ------------")
    print("------------ Creating DataLoader ------------")
    # drop_last=True keeps every training batch the same size, which matters
    # for batch-statistics-dependent losses; only the training split shuffles.
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=config.TRAIN_BATCH_SIZE, shuffle=True, drop_last=True
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=config.TEST_BATCH_SIZE
    )
    full_loader = torch.utils.data.DataLoader(
        full_dataset, batch_size=config.FULL_BATCH_SIZE
    )

    # Fixed typo in the status message ("Dataloader Cretead").
    print("------------ DataLoader Created ------------")

    # Loss components:
    #  - enc_loss_fn: coding-rate contractive term over capsule embeddings
    #  - dec_loss_fn: pixel-wise reconstruction error
    #  - cls_loss_fn: capsule-style margin loss for classification
    criterion = TotalCodingRate(eps=0.2)
    enc_loss_fn = Contractive_Loss(criterion)
    dec_loss_fn = nn.MSELoss()
    cls_loss_fn = MarginLoss(user_device=device)

    encoder = torch_model.EfficientResNet18CapsNet()
    # Decoder reconstructs images from the 128-dim capsule embedding,
    # conditioned on NUM_CLASSES class capsules.
    decoder = torch_model.ResNet18CapsNetReconstructionNet(128, config.NUM_CLASSES)

    if torch.cuda.is_available():
        # Fixed typo in the status message ("Availaible").
        print("GPU Available moving models to GPU")
    else:
        print("Moving models to CPU")

    encoder.to(device)
    decoder.to(device)

    # Single optimizer over both halves of the auto-encoder.
    autoencoder_params = list(encoder.parameters()) + list(decoder.parameters())
    from lion_pytorch import Lion
    optimizer = Lion(autoencoder_params, lr=config.LEARNING_RATE, weight_decay=1e-4)

    # Best-validation-loss tracker; starts as a sentinel larger than any
    # expected loss so the first epoch always checkpoints.
    max_loss = 9999

    print("------------ Training started ------------")
    # Per-epoch metric histories, exported to CSV after training.
    top1_train_accuracy_list = []
    train_loss_list = []
    val_loss_list = []
    top1_accuracy_list = []
    top5_accuracy_list = []
    for epoch in tqdm(range(config.EPOCHS)):
        train_loss, top1_train_accuracy = torch_engine.train_step(
            encoder, decoder, train_loader, enc_loss_fn, dec_loss_fn, cls_loss_fn, optimizer, device=device
        )
        top1_train_accuracy_list.append(top1_train_accuracy)
        train_loss_list.append(train_loss)
        print(f"Epochs = {epoch}, Training Loss : {train_loss},\tTop1 Train accuracy {top1_train_accuracy.item()}")

        val_loss, top1_accuracy, top5_accuracy, acc_vect = torch_engine.val_step(
            encoder, decoder, val_loader, enc_loss_fn, dec_loss_fn, cls_loss_fn, device=device
        )
        top1_accuracy_list.append(top1_accuracy)
        top5_accuracy_list.append(top5_accuracy)
        # BUG FIX: previously appended train_loss here, so the exported
        # "val_loss" column silently duplicated the training loss.
        val_loss_list.append(val_loss)

        # Simple best-model checkpointing.
        # BUG FIX: max_loss was never updated after a save, so every epoch
        # passed the `val_loss < 9999` check and overwrote the checkpoint —
        # the saved model was the LAST epoch, not the best one.
        if val_loss < max_loss:
            max_loss = val_loss
            print("Validation Loss decreased, saving new best model")
            torch.save(encoder.state_dict(), config.ENCODER_MODEL_PATH)
            torch.save(decoder.state_dict(), config.DECODER_MODEL_PATH)

        print(f"Epochs = {epoch}, Validation Loss : {val_loss},\tTop1 Test accuracy: {top1_accuracy.item()},\tTop5 test acc: {top5_accuracy.item()}")
    print("Training Done")

    print("---- Creating Embeddings for the full dataset ---- ")

    # Encode every image in the full dataset with the trained encoder.
    embedding = torch_engine.create_embedding(
        encoder, full_loader, config.EMBEDDING_SHAPE, device
    )

    # Move embeddings to host memory, flatten to an (N, D) matrix, and persist.
    numpy_embedding = embedding.cpu().detach().numpy()
    flattened_embedding = numpy_embedding.reshape((numpy_embedding.shape[0], -1))
    np.save(config.EMBEDDING_PATH, flattened_embedding)

    # Assemble the per-epoch metric histories into one table, one metric per
    # column, ordered to match `header`.
    header = ["train_top-1", "test_top-1", "test_top-5", "train_loss", "val_loss"]
    metric_histories = [
        top1_train_accuracy_list,
        top1_accuracy_list,
        top5_accuracy_list,
        train_loss_list,
        val_loss_list,
    ]
    eval_vector = pd.concat(
        [pd.Series(np.asarray(history).flatten()) for history in metric_histories],
        axis=1,
    )

    print(eval_vector)
    eval_vector.to_csv(os.path.join(config.MODEL_DIR_PATH, "model_eval.csv"), index=False, header=header)