# Training script for Auto-Encoder.
import os
import numpy as np
import torch
import torch_classical_features_model_oil as torch_model
import torch_classical_features_oil_engine as torch_engine
import torchvision.transforms as T
from torch.utils import data
from torch.utils.data import DataLoader

import config_classical_features_oil as config
from create_classical_features_index import create_index_basedon_features, update_index_basedon_features
from create_classical_features_index import create_labels_index_basedon_features, update_labels_index_basedon_features
from tqdm import tqdm
import torch.nn as nn

import utils
import torchvision
from custom_func import cluster_classical_featuers_acc
import pandas as pd
import time

import seaborn as sn
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"


class Contractive_Loss(nn.Module):
    """Average of a user-supplied criterion between each latent code and a reference code.

    Given a sequence of latent vectors ``z_list`` and a reference ``z_avg``,
    returns ``mean_i criterion(z_list[i], z_avg)``.
    """

    def __init__(self, user_criterion: nn.Module):
        # user_criterion: an elementwise criterion module, e.g. nn.MSELoss()
        super().__init__()
        self.criterion = user_criterion

    def forward(self, z_list, z_avg):
        # Stack so the input may be a list/tuple of tensors or a tensor;
        # raises (as before) if z_list is empty.
        stacked = torch.stack(list(z_list), dim=0)
        # z_avg = z_list.mean(dim=0)
        total = sum(self.criterion(z, z_avg) for z in stacked)
        return total / len(stacked)


class tensor_dataset(data.Dataset):
    """Dataset over pre-computed feature tensors with paired labels.

    Wraps an (N, ...) feature tensor ``x`` and its labels ``y``; an optional
    ``transform`` is applied to each sample on access.
    """

    def __init__(self, x, y, transform=None):
        self.x = x
        self.y = y
        self.length = x.shape[0]
        self.transform = transform

    def __getitem__(self, indx):
        sample = self.x[indx]
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, self.y[indx]

    def __len__(self):
        return self.length


def min_max_scale_features(enc_features):
    """Min-max scale each feature column in place and return the scale parameters.

    Returns ``(enc_features, min_features, max_features)`` where the two lists
    hold the per-column minimum and maximum observed on this (training) matrix.
    """
    n_samples = enc_features.shape[0]
    n_dims = enc_features.shape[1]
    min_features = []
    max_features = []
    spans = []
    for col in tqdm(range(n_dims), desc="Processing Scale Features"):
        lo = enc_features[:, col].min()
        hi = enc_features[:, col].max()
        min_features.append(lo)
        max_features.append(hi)
        spans.append(hi - lo)
    lo_arr = np.array(min_features)
    # max_features_array = np.array(max_features)
    span_arr = np.array(spans)
    # NOTE(review): a constant column has span 0, so the division below yields
    # nan/inf for that column — confirm inputs never contain constant features.
    for row in tqdm(range(n_samples), desc="Processing Scale Samples"):
        enc_features[row] = (enc_features[row] - lo_arr) / span_arr
    return enc_features, min_features, max_features


def min_max_scale_features_with_trainfeature(enc_features, min_features, max_features):
    """Apply training-set min/max scaling to another feature matrix, in place.

    Intended for the validation set: ``min_features``/``max_features`` are the
    per-column parameters returned by ``min_max_scale_features``.
    """
    n_samples = enc_features.shape[0]
    lo_arr = np.array(min_features)
    span_arr = np.subtract(np.array(max_features), lo_arr)
    for row in tqdm(range(n_samples), desc="Processing Scale Samples"):
        enc_features[row] = (enc_features[row] - lo_arr) / span_arr
    return enc_features


def mean_std_scale_features(enc_features):
    """Z-score each feature column in place and return the scale parameters.

    Returns ``(enc_features, mean_features, std_features)`` with the per-column
    mean and standard deviation observed on this (training) matrix.
    (Consider ``enc_features.clone()`` first if the caller needs the original.)
    """
    n_samples = enc_features.shape[0]
    n_dims = enc_features.shape[1]
    mean_features = []
    std_features = []
    for col in tqdm(range(n_dims), desc="Processing Scale Features"):
        mean_features.append(enc_features[:, col].mean())
        std_features.append(enc_features[:, col].std())

    mu_arr = np.array(mean_features)
    sigma_arr = np.array(std_features)
    for row in tqdm(range(n_samples), desc="Processing Scale Samples"):
        enc_features[row] = (enc_features[row] - mu_arr) / sigma_arr
    return enc_features, mean_features, std_features


def mean_std_scale_features_with_trainfeature(enc_features, mean_features, std_features):
    """Apply training-set z-score scaling to another feature matrix, in place.

    ``mean_features``/``std_features`` are the per-column parameters returned
    by ``mean_std_scale_features``. (Consider ``enc_features.clone()`` first
    if the caller needs the original.)
    """
    n_samples = enc_features.shape[0]
    mu_arr = np.array(mean_features)
    sigma_arr = np.array(std_features)
    for row in tqdm(range(n_samples), desc="Processing Scale Samples"):
        enc_features[row] = (enc_features[row] - mu_arr) / sigma_arr
    return enc_features


def plot_confusion_matrix(epoch, pred_list, target_label, save_dir):
    """Render the epoch's confusion matrix as a heatmap PNG under ``save_dir``.

    ``pred_list`` and ``target_label`` are sequences of scalar tensors; rows of
    the matrix are true labels, columns are predictions.
    """
    counts = np.zeros((config.NUM_CLASSES, config.NUM_CLASSES), dtype=int)
    for idx in range(len(pred_list)):
        truth = target_label[idx].item()
        guess = pred_list[idx].item()
        counts[truth, guess] += 1
    sn.heatmap(counts, annot=True, cmap="Blues", fmt="d")
    plt.xlabel("Predicted label")
    plt.ylabel("True label")
    plt.title(" Confusion Matrix")
    out_path = os.path.join(save_dir, "confusion_mat-epoch_{}.png".format(epoch))
    plt.savefig(out_path)
    plt.close()


if __name__ == "__main__":
    # Script entry point: extract (or load cached) classical-feature embeddings
    # for the train/val image folders, build FAISS indexes, normalise the
    # features, then train only the classifier decoder on top of them.


    print("Setting Seed for the run, seed = {}".format(config.SEED))

    utils.seed_everything(config.SEED)

    # transforms = T.Compose([T.ToTensor()])

    # add Resize 2023-03-24
    image_resize_size = config.IMG_HEIGHT
    # transforms = T.Compose([T.Resize(image_resize_size), T.ToTensor()])
    # Single-channel normalisation constants — presumably borrowed from
    # ImageNet statistics; confirm they match this dataset.
    normalize = T.Normalize([0.456], [0.224])
    aug_transform = T.Compose([
        T.RandomResizedCrop(250, scale=(0.5, 1.0), ratio=(0.8, 1)),
        T.Grayscale(num_output_channels=1),
        T.RandomHorizontalFlip(p=0.5),
        # transforms.RandomCrop((250,250)),
        # transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.2)], p=0.8),
        T.RandomGrayscale(p=0.2),
        # GBlur(p=0.1),
        # transforms.RandomApply([Solarization()], p=0.1),
        T.ToTensor(),
        normalize
    ])

    transforms = T.Compose([
        T.RandomResizedCrop(image_resize_size),
        T.Grayscale(num_output_channels=1),
        T.PILToTensor()
    ])

    print("----------- Creating Dataset -----------")

    # full_dataset = torch_data.FolderDataset(config.IMG_PATH, transforms)
    #
    # train_size = int(config.TRAIN_RATIO * len(full_dataset))
    # val_size = len(full_dataset) - train_size
    #
    # train_dataset, val_dataset = torch.utils.data.random_split(
    #     full_dataset, [train_size, val_size]
    # )
    # NOTE(review): the validation set also uses the random augmentation
    # pipeline (aug_transform) rather than a deterministic one — confirm.
    train_dataset = torchvision.datasets.ImageFolder(root=config.TRAIN_DATA_PATH, transform=aug_transform)
    val_dataset = torchvision.datasets.ImageFolder(root=config.TEST_DATA_PATH, transform=aug_transform)
    # full_dataset = torchvision.datasets.ImageFolder(root=config.IMG_DATA_PATH, transform=transforms)

    # train_dataset = torchvision.datasets.CIFAR10(root=config.CIFAR_DATA_PATH, transform=transforms)
    # val_dataset = torchvision.datasets.CIFAR10(root=config.CIFAR_DATA_PATH, transform=transforms)

    print("------------ Dataset Created ------------")
    print("------------ Creating DataLoader ------------")
    # NOTE(review): shuffle=False on the train loader — presumably so the
    # cached feature embeddings keep a stable sample order; confirm.
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=config.TRAIN_BATCH_SIZE, shuffle=False, drop_last=True
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=config.TEST_BATCH_SIZE
    )
    # full_loader = torch.utils.data.DataLoader(
    #     full_dataset, batch_size=config.FULL_BATCH_SIZE
    # )

    print("------------ Dataloader Cretead ------------")

    # print(train_loader)
    # criterion = nn.MSELoss()
    # criterion = TotalCodingRate(eps=0.2)
    # enc_loss_fn = Contractive_Loss(criterion)
    # dec_loss_fn = nn.MSELoss()

    # classification_loss: torch.nn.CrossEntropyLoss() | MarginLoss(user_device=device)
    # torch.nn.MultiLabelSoftMarginLoss(weight=None, reduction='mean')
    # torch.nn.MultiMarginLoss(p=1, margin=1.0, weight=None,  reduction='mean')
    # cls_loss_fn = MarginLoss(user_device=device)  #
    cls_loss_fn = torch.nn.CrossEntropyLoss()
    device = torch.device("cpu")
    feature_list = config.FEATURE_LIST
    encoder = torch_model.ClassicalFeaturesEncoder(feature_list)
    # RBF | Linear
    decoder = torch_model.ClassifierDecoder(config.DECODE_LOGIT_TYPE, device=device)  # RBF | Linear
    # encoder.load_state_dict(torch.load(config.ENCODER_MODEL_PATH, map_location=device))
    # Resume decoder weights when a previous checkpoint exists.
    if os.path.exists(config.DECODER_MODEL_PATH):
        decoder.load_state_dict(torch.load(config.DECODER_MODEL_PATH, map_location=device))
    # reconstruction_model = torch_model.ResNet18CapsNetReconstructionNet()
    # total_model = torch_model.EfficientResNet18CapsNetWithReconstruction(encoder, reconstruction_model)

    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    if torch.cuda.is_available():
        print("GPU Availaible moving models to GPU")
    else:
        print("Moving models to CPU")

    # # encoder has no device support
    # NOTE(review): this forces CPU and discards the CUDA detection above.
    device = "cpu"
    encoder.to(device)
    # device = "cpu"
    decoder.to(device)

    print(device)

    # autoencoder_params = list(encoder.parameters()) + list(decoder.parameters())
    # Only the decoder (classifier head) is optimised; the encoder stays frozen.
    autoencoder_params = list(decoder.parameters())

    # print(autoencoder_params)
    optimizer = torch.optim.AdamW(autoencoder_params, lr=config.LEARNING_RATE)
    # from lion_pytorch import Lion
    # optimizer = Lion(autoencoder_params, lr=config.LEARNING_RATE, weight_decay=1e-4)

    # early_stopper = utils.EarlyStopping(patience=5, verbose=True, path=)
    # Best (lowest) validation loss seen so far; initialised to a large sentinel.
    max_loss = 500000000
    origin_embedding = torch.randn(config.EMBEDDING_SHAPE)
    orgin_label_embedding = torch.randint(0, config.NUM_CLASSES, config.LABEL_EMBEDDING_SHAPE)
    # Load cached labels when present; otherwise extract them and cache to disk.
    # NOTE(review): the cached files contain the random "origin" rows prepended
    # (torch.cat below), but the freshly-extracted variables used downstream do
    # not — the two code paths therefore yield different tensors; confirm.
    if os.path.exists(config.EMBEDDING_LABEL_PATH):
        train_labels = torch.from_numpy(np.load(config.EMBEDDING_LABEL_PATH))
    else:
        train_orgin_datas, train_labels = torch_engine.data_label_extrat_step(train_loader)
        train_labels_embedding = torch.cat((orgin_label_embedding, train_labels), 0)
        train_labels_numpy_embedding = train_labels_embedding.cpu().detach().numpy()
        np.save(config.EMBEDDING_LABEL_PATH, train_labels_numpy_embedding)
    if os.path.exists(config.EMBEDDING_LABEL_PATH_2):
        val_labels = torch.from_numpy(np.load(config.EMBEDDING_LABEL_PATH_2))
    else:
        val_orgin_datas, val_labels = torch_engine.data_label_extrat_step(val_loader)
        val_labels_embedding = torch.cat((orgin_label_embedding, val_labels), 0)
        val_labels_numpy_embedding = val_labels_embedding.cpu().detach().numpy()
        np.save(config.EMBEDDING_LABEL_PATH_2, val_labels_numpy_embedding)
    # # ------------------ Load (or create and cache) feature embeddings -------------------------
    if os.path.exists(config.EMBEDDING_PATH):
        train_features = torch.from_numpy(np.load(config.EMBEDDING_PATH))
    else:
        print("---- Creating Feature Embeddings for the dataset ---- ")

        train_features, train_labels = torch_engine.feature_extrat_step(
            encoder, train_loader, device
        )
        # Convert embedding to numpy and save them
        train_features_embedding = torch.cat((origin_embedding, train_features), 0)
        train_labels_embedding = torch.cat((orgin_label_embedding, train_labels), 0)
        train_numpy_embedding = train_features_embedding.cpu().detach().numpy()
        train_labels_numpy_embedding = train_labels_embedding.cpu().detach().numpy()
        train_num_images = train_numpy_embedding.shape[0]
        np.save(config.EMBEDDING_PATH, train_numpy_embedding)
        np.save(config.EMBEDDING_LABEL_PATH, train_labels_numpy_embedding)
    if os.path.exists(config.EMBEDDING_PATH_2):
        val_features = torch.from_numpy(np.load(config.EMBEDDING_PATH_2))
    else:
        val_features, val_labels = torch_engine.feature_extrat_step(
            encoder, val_loader, device
        )
        val_features_embedding = torch.cat((origin_embedding, val_features), 0)
        val_labels_embedding = torch.cat((orgin_label_embedding, val_labels), 0)
        val_numpy_embedding = val_features_embedding.cpu().detach().numpy()
        val_labels_numpy_embedding = val_labels_embedding.cpu().detach().numpy()
        val_num_images = val_numpy_embedding.shape[0]
        # Dump the embeddings for complete dataset, not just train
        # flattened_embedding = train_numpy_embedding.reshape((num_images, -1))
        np.save(config.EMBEDDING_PATH_2, val_numpy_embedding)
        np.save(config.EMBEDDING_LABEL_PATH_2, val_labels_numpy_embedding)
    # ------------------------------ Build FAISS indexes from the features and dataset ---------------------------------------
    image_faiss_index = create_index_basedon_features(train_dataset, train_features, config.FAISS_INDEX_DIR_PATH)
    # update_index_basedon_features(image_faiss_index, val_features, val_dataset, config.FAISS_INDEX_DIR_PATH)
    train_labels_array = train_labels.detach().cpu().numpy()
    train_labels_array_use = train_labels_array
    image_labels_faiss_index = create_labels_index_basedon_features(train_features, train_labels_array,
                                                                    config.MODEL_DIR_PATH,
                                                                    config.FAISS_LABELS_INDEX_PATH,
                                                                    config.FAISS_LABELS_INDEX_MAP)
    # ------------------------------ Train a classifier on the features ---------------------------------------

    # print("start_time:", start_time)
    # Normalise the feature vectors per dimension (column-wise), saving the
    # scale parameters so inference can reproduce the transform.
    if config.SCALE_POLICY == "mean_std":
        scale_train_features, mean_features, std_features = mean_std_scale_features(train_features)
        scale_val_features = mean_std_scale_features_with_trainfeature(val_features, mean_features, std_features)
        np.save(config.EMB_SCALE_PARA1_PATH, mean_features)
        np.save(config.EMB_SCALE_PARA2_PATH, std_features)
    else:
        scale_train_features, min_features, max_features = min_max_scale_features(train_features)
        scale_val_features = min_max_scale_features_with_trainfeature(val_features, min_features, max_features)
        np.save(config.EMB_SCALE_PARA1_PATH, min_features)
        np.save(config.EMB_SCALE_PARA2_PATH, max_features)
    # Wrap the normalised feature vectors in datasets and loaders.
    train_data = tensor_dataset(scale_train_features, train_labels)
    val_data = tensor_dataset(scale_val_features, val_labels)
    train_features_loader = DataLoader(train_data, batch_size=config.TRAIN_BATCH_SIZE, shuffle=True, drop_last=True, num_workers=2)
    val_features_loader = DataLoader(val_data, batch_size=config.TEST_BATCH_SIZE, shuffle=True, drop_last=True, num_workers=2)

    # ------------------------------ Training loop ---------------------------------------
    print("------------ Training started ------------")
    start_time = time.time()
    top1_train_accuracy_list = []
    train_loss_list = []
    val_loss_list = []
    top1_accuracy_list = []
    top5_accuracy_list = []

    from torch_classical_features_model_oil import WeightedKNNClassifier
    knn_classifier = WeightedKNNClassifier()
    for epoch in tqdm(range(config.EPOCHS), desc="training:"):
        train_loss, top1_train_accuracy = torch_engine.train_step(
            knn_classifier, decoder, train_features_loader, cls_loss_fn, optimizer, device=device
        )
        top1_train_accuracy_list.append(top1_train_accuracy.detach().cpu().numpy())
        train_loss_list.append(train_loss)
        print(f"Epochs = {epoch}, Training Loss : {train_loss},\tTop1 Train accuracy {top1_train_accuracy.item()}.\n")

        val_loss, top1_accuracy, top5_accuracy, acc_vect, pred_list = torch_engine.val_step(
            knn_classifier, decoder, val_features_loader,cls_loss_fn, device=device
        )
        top1_accuracy_list.append(top1_accuracy.detach().cpu().numpy())
        top5_accuracy_list.append(top5_accuracy.detach().cpu().numpy())
        val_loss_list.append(val_loss)

        knn_classifier.compute()
        # Simple best-model saving: checkpoint whenever validation loss improves.
        if val_loss < max_loss:
            print("Validation Loss decreased, saving new best model")
            # torch.save(encoder.state_dict(), config.ENCODER_MODEL_PATH)
            torch.save(decoder.state_dict(), config.DECODER_MODEL_PATH)
            max_loss = val_loss

        print(f"Epochs = {epoch}, Validation Loss : {val_loss},\tTop1 Test accuracy: {top1_accuracy.item()},\tTop5 test acc: {top5_accuracy.item()}.\n")
        # print('best test acc {}, last acc {}'.format(acc_vect.max().item(), acc_vect[-1].item()))
        plot_confusion_matrix(epoch, pred_list, val_labels, config.MODEL_DIR_PATH)
        # Plot cluster-analysis figures
        # acc_single, acc_merge, NMI, ARI = cluster_classical_featuers_acc(val_features_loader, encoder, decoder,device, print_result=True, save_name_img='cluster_img', save_name_fig='pca_figure')
    print("Training Done")

    # print("---- Creating Embeddings for the full dataset ---- ")
    # when model is done finally, use full loader
    # embedding = torch_engine.create_embedding(
    #     encoder, full_loader, config.EMBEDDING_SHAPE, device
    # )

    # embedding = torch_engine.create_embedding(
    #     encoder, train_loader, config.EMBEDDING_SHAPE, device
    # )
    #
    # # Convert embedding to numpy and save them
    # numpy_embedding = embedding.cpu().detach().numpy()
    # num_images = numpy_embedding.shape[0]
    #
    # # Dump the embeddings for complete dataset, not just train
    # flattened_embedding = numpy_embedding.reshape((num_images, -1))
    # np.save(config.EMBEDDING_PATH, numpy_embedding)
    # np.save(config.EMBEDDING_PATH_2, flattened_embedding)
    # Collect per-epoch metrics into one table and export it as CSV.
    header = ["train_top-1", "test_top-1", "test_top-5", "train_loss", "val_loss"]
    top1_train_accuracy_array = pd.Series(np.asarray(top1_train_accuracy_list).flatten())
    train_loss_array = pd.Series(np.asarray(train_loss_list).flatten())
    val_loss_array = pd.Series(np.asarray(val_loss_list).flatten())
    top1_accuracy_array = pd.Series(np.asarray(top1_accuracy_list).flatten())
    top5_accuracy_array = pd.Series(np.asarray(top5_accuracy_list).flatten())
    eval_vector = pd.concat(
        (top1_train_accuracy_array, top1_accuracy_array, top5_accuracy_array, train_loss_array, val_loss_array),axis=1)

    print(eval_vector)
    # pd_eval_vector = pd.DataFrame(eval_vector, columns=["train_top-1", "test_top-1", "test_top-5", "train_loss", "val_loss"])
    # pd_eval_vector = pd.DataFrame(eval_vector, columns=header)
    # print(pd_eval_vector)
    # writer = pd.ExcelWriter(os.path.join(config.MODEL_DIR_PATH, "model_eval.csv"))
    eval_vector.to_csv(os.path.join(config.MODEL_DIR_PATH, "model_eval.csv"),index=False,header=header)

    # result_file = open(os.path.join(config.MODEL_DIR_PATH, "model_eval.xlsx"), 'w', encoding='UTF-8')
    # result_file.write(str(header) + '\n')
    # for i in range(len(eval_vector)):
    #     result_file.write(str(eval_vector[i]) + '\n')
    # result_file.close()

    end_time = time.time()
    print("time use: {:.5f} s".format(end_time - start_time))