# Training script for Auto-Encoder.
import os
import torch
import torch_engine
import torchvision.transforms as T
import torch_oil_data as torch_data
import config_glom_oil as config
from tqdm import tqdm
import torch.nn as nn
from einops.layers.torch import Rearrange
import torch.nn.functional as F
import torch.optim as optim
import utils

# Allow multiple OpenMP runtimes to coexist (common MKL/PyTorch clash,
# especially on Windows); without this the process can abort at import time.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# Make CUDA device numbering match nvidia-smi's PCI bus order, then expose
# only GPU 0 to this process.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Let cuDNN benchmark/auto-tune conv algorithms; helps when input sizes are
# fixed across iterations (they are here: images are resized to a fixed size).
torch.backends.cudnn.benchmark =True


import numpy as np
from mytools.image_transform_helper import Rescale
from lion_pytorch import Lion
from model import glom_pytorch as torch_model

if __name__ == "__main__":
    # Train a GLOM encoder + linear patch decoder as an auto-encoder, keep the
    # best checkpoint by validation loss, then dump embeddings for the full
    # dataset to disk as a flat numpy array.

    # ---- Device selection --------------------------------------------------
    if torch.cuda.is_available():
        device = "cuda"
        print("GPU Availaible moving models to GPU")
    else:
        print("Moving models to CPU")
        device = "cpu"

    print("Setting Seed for the run, seed = {}".format(config.SEED))
    utils.seed_everything(config.SEED)

    # ---- Transforms --------------------------------------------------------
    # NOTE(review): this augmenting pipeline (RandomResizedCrop) is shared by
    # the train, validation AND full-embedding loaders, so eval data is also
    # randomly cropped -- confirm this is intended.
    image_resize_size = config.IMG_HEIGHT
    transforms = T.Compose([T.RandomResizedCrop(image_resize_size), T.ToTensor()])

    print("----------- Creating Dataset -----------")

    full_dataset = torch_data.FolderDataset(config.IMG_PATH, transforms)

    # Random train/validation split (assignment is seeded via seed_everything).
    train_size = int(config.TRAIN_RATIO * len(full_dataset))
    val_size = len(full_dataset) - train_size
    train_dataset, val_dataset = torch.utils.data.random_split(
        full_dataset, [train_size, val_size]
    )

    print("------------ Dataset Created ------------")
    print("------------ Creating DataLoader ------------")
    # FIX: shuffle the training set each epoch (was shuffle=False). Without
    # shuffling the model sees batches in an identical order every epoch,
    # which is known to hurt SGD-style training. Val/full loaders keep a
    # fixed order so metrics and embeddings stay aligned with the dataset.
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.TRAIN_BATCH_SIZE,
        shuffle=True,
        pin_memory=True,
        drop_last=False,
        num_workers=4,
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=config.TEST_BATCH_SIZE,
        pin_memory=True,
        drop_last=False,
        num_workers=4,
    )
    full_loader = torch.utils.data.DataLoader(
        full_dataset,
        batch_size=config.FULL_BATCH_SIZE,
        pin_memory=True,
        drop_last=False,
        num_workers=4,
    )

    print("------------ Dataloader Cretead ------------")

    loss_fn = nn.MSELoss()

    # Shared geometry for encoder and decoder. Keeping these in one place
    # guarantees the decoder's patch reconstruction matches the encoder's
    # patching (previously 128/250/10/1 were hard-coded in both).
    latent_dim = 128      # per-patch embedding dimension
    image_size = 250      # input image side length (pixels)
    patch_size = 10       # patch side length (pixels)
    image_channels = 1    # grayscale

    encoder = torch_model.Glom(
        dim=latent_dim,
        levels=4,                      # number of GLOM levels
        image_size=image_size,
        patch_size=patch_size,
        consensus_self=False,
        local_consensus_radius=0,
        image_chanel=image_channels,   # (sic) keyword name defined by the model
        return_state=2,
        device=device,
    ).to(device)

    # Decoder: project each patch embedding back to pixel space, then stitch
    # the (h x w) patch grid into full images.
    decoder = nn.Sequential(
        nn.Linear(latent_dim, patch_size * patch_size * image_channels),
        Rearrange(
            'b (h w) (p1 p2 c) -> b c (h p1) (w p2)',
            p1=patch_size,
            p2=patch_size,
            h=image_size // patch_size,
        ),
    ).to(device)

    # Optimize encoder and decoder jointly.
    autoencoder_params = list(encoder.parameters()) + list(decoder.parameters())
    optimizer = optim.AdamW(autoencoder_params, lr=config.LEARNING_RATE)

    # FIX: track the best validation loss seen so far. Previously the
    # threshold (max_loss = 9999) was never updated after a save, so the
    # "best" checkpoint was overwritten on every epoch whose loss was merely
    # below 9999, not only on actual improvement.
    best_val_loss = float("inf")

    print("------------ Training started ------------")

    for epoch in tqdm(range(config.EPOCHS)):
        train_loss = torch_engine.train_step(
            encoder, decoder, train_loader, loss_fn, optimizer, device=device
        )
        print(f"Epochs = {epoch}, Training Loss : {train_loss}")

        val_loss = torch_engine.val_step(
            encoder, decoder, val_loader, loss_fn, device=device
        )

        # Simple best-model checkpointing.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            print("Validation Loss decreased, saving new best model")
            torch.save(encoder.state_dict(), config.ENCODER_MODEL_PATH)
            torch.save(decoder.state_dict(), config.DECODER_MODEL_PATH)

        print(f"Epochs = {epoch}, Validation Loss : {val_loss}")

    print("Training Done")

    print("---- Creating Embeddings for the full dataset ---- ")

    embedding = torch_engine.create_embedding(
        encoder, full_loader, config.EMBEDDING_SHAPE, device
    )

    # Persist embeddings for the complete dataset (not just train) as a
    # flat (num_images, features) array.
    numpy_embedding = embedding.cpu().detach().numpy()
    num_images = numpy_embedding.shape[0]
    flattened_embedding = numpy_embedding.reshape((num_images, -1))
    np.save(config.EMBEDDING_PATH, flattened_embedding)