# Joint Learning for DeepDPM
# Part1 : AE Training
# Part2 : DPM Training

import os
import torch
import numpy as np
import argparse
from argparse import ArgumentParser
import pytorch_lightning as pl
from tqdm import tqdm

from src.clustering_models.clusternet_modules.clusternetasmodel import ClusterNetModel
from src.clustering_models.clusternet_modules.utils.training_utils import training_utils
from AutoEncoder import DFConvAE
from src.datasets import CustomDataset, TensorDatasetWrapper, transform_embeddings
from src.utils import check_args, cluster_acc
import warnings
warnings.filterwarnings("ignore")


class JointLearning(pl.LightningModule):
    """Joint AE + DeepDPM training module.

    Loads a pretrained convolutional autoencoder (AE), initializes/pretrains
    the DeepDPM cluster net on the frozen AE's latent codes, then fine-tunes
    the AE with a reconstruction loss plus a clustering regularization term.
    """

    def __init__(self, args, input_dim: int = 5000, latent_dim: int = 20):
        """
        Args:
            args: parsed CLI namespace (see parse_minimal_args).
            input_dim: length of a single flattened input sample.
            latent_dim: dimensionality of the AE latent space / cluster codes.
        """
        super().__init__()
        self.args = args
        self.input_dim = input_dim
        self.latent_dim = latent_dim
        
        self.AE_model = DFConvAE(single_data_length=self.input_dim, latent_dim=latent_dim)
        self.Load_AutoEncoder()
        
        # NOTE (translated from original author's comment): check_args insists
        # on modifying the NIW prior parameters here — unpleasant but required.
        check_args(self.args, self.latent_dim)
        self.cluster_model = ClusterNetModel(args, input_dim=latent_dim, init_k=1)
        
        self.criterions = torch.nn.MSELoss()
    
        # Pretrain the cluster net on the (frozen) AE latent codes.
        self.initiate_clusters()
        # Move cluster_model to the GPU for inference (currently disabled).
        # self.cluster_model.to(self.device)
        
    def Load_AutoEncoder(self, ):
        """Load pretrained AE weights from args.pretrain_path.

        Strips the first 6 characters of every state-dict key — presumably a
        "model." prefix added by a Lightning wrapper during pretraining.
        TODO(review): confirm the prefix length matches the checkpoint format.
        """
        state = torch.load(self.args.pretrain_path)
        new_state_dict = {}
        for key in state.keys():
            new_state_dict[key[6:]] = state[key]
            
        self.AE_model.load_state_dict(new_state_dict)
    
    def forward(self, batch,):
        """Encode a (x, y) batch and cluster the latent codes.

        Returns:
            latent_x: AE latent codes for the batch.
            cluster_assign: cluster-net output (soft assignments) for latent_x.
        """
        x, y = batch
        
        # Flatten each sample to a single channel: (B, 1, input_dim).
        x = x.view(x.size(0), 1, -1)
        
        latent_x = self.AE_model.Encoder(x)
        
        # latent_x = latent_x.detach().numpy()
        # latent_x = transform_embeddings("standard", latent_x)
        
        cluster_assign = self.cluster_model(latent_x)
        
        return latent_x, cluster_assign
        
    def _loss(self, x, latent_x, cluster_assign):
        """Total loss = weighted reconstruction loss + clustering regularizer.

        Returns a 3-tuple: (total_loss, detached rec_loss, detached reg_loss).
        NOTE: latent_x is detached for the cluster loss, so the regularizer
        does not backprop into the AE encoder through this term.
        """
        batch_size = x.size(0)
        rec_x = self.AE_model(x)
        
        # reconstruction loss (0.005 = fixed weighting factor)
        rec_loss = 0.005 * self.criterions(rec_x, x)
        
        # regularization loss

        reg_loss = self.cluster_model.training_utils.cluster_loss_function(
            latent_x.detach(),
            cluster_assign,
            model_mus = self.cluster_model.mus,
            K = self.cluster_model.K,
            codes_dim = self.cluster_model.codes_dim,
            model_covs = self.cluster_model.covs,
            pi = self.cluster_model.pi,
        ) * batch_size
        '''
        dist_loss = torch.tensor(0.0).to(self.device)
        clusters = torch.FloatTensor(self.cluster_model.K).to(self.device)
        for i in range(batch_size):
            diff_vec = latent_x[i] - clusters[cluster_assign.argmax(-1)[i]]
            sample_dist_loss = torch.matmul(diff_vec.view(1, -1), diff_vec.view(-1, 1))
            dist_loss += 0.5 * self.args.beta * torch.squeeze(sample_dist_loss)
        reg_loss = dist_loss
        '''
        
        return (
            rec_loss + reg_loss,
            rec_loss.detach(),
            reg_loss.detach(),
        )
    
    def Get_all_latent_x(self,):
        """Encode the whole training set with the AE and return a DataLoader
        of standardized latent codes (used to pretrain the cluster net)."""
        train_dataloader = self.train_dataloader()
        latent_x = []
        labels = []
        
        for batch in tqdm(train_dataloader, desc="Getting Encoded data..."):
            x, y = batch
            x = x.view(x.size(0), 1, -1)
            
            with torch.no_grad():  # disable gradients to save memory
                latent_batch = self.AE_model.Encoder(x)
            
            latent_x.append(latent_batch)
            labels.append(y)

        latent_x = torch.Tensor(torch.cat(latent_x, dim=0))
        labels = torch.cat(labels, dim=0)
        
        # Standardize embeddings before clustering.
        latent_x = transform_embeddings("standard", latent_x)
        print("Encoded data shape: ", latent_x.shape)
        
        train_set = TensorDatasetWrapper(latent_x, labels)
        
        latent_train_loader = torch.utils.data.DataLoader(train_set, batch_size=self.args.batch_size, shuffle=False)
        
        return latent_train_loader
    
    def Get_test_latent_x(self,):
        """Debug helper: load precomputed latent codes from hard-coded paths
        instead of re-encoding the training set. Not used by default."""
        train_codes = torch.Tensor(torch.load("../SingleTabCluster/AutoEncoder/output/Encoded_20dim_3C1LS1_train.pt"))
        train_labels = torch.load("../SingleTabCluster/AutoEncoder/output/train_labels.pt")
        
        train_codes = transform_embeddings("standard", train_codes)
        train_set = TensorDatasetWrapper(train_codes, train_labels)
        
        latent_train_loader = torch.utils.data.DataLoader(train_set, batch_size=self.args.batch_size, shuffle=False)
        return latent_train_loader
    
    def initiate_clusters(self,):  
        """Pretrain the DeepDPM cluster net on the AE latent codes with a
        dedicated Trainer, then freeze it for the joint fine-tuning phase."""
        logger = pl.loggers.TensorBoardLogger("cluster_logs/", name=self.args.exp_name)
        trainer = pl.Trainer(logger=logger, 
                             gpus = 1,
                             max_epochs=150)
        
        # Run the training data through the AE to obtain latent codes.
        # Currently only the train split is used.
        latent_train_loader = self.Get_all_latent_x()
        # latent_train_loader = self.Get_test_latent_x()

        trainer.fit(self.cluster_model, latent_train_loader)
        
        self.cluster_model.freeze()
        
    def train_dataloader(self):
        """Build the training DataLoader from the project dataset wrapper."""
        dataset_obj = CustomDataset(self.args)
        train_loader, val_loader = dataset_obj.get_loaders()
        return train_loader

    def training_step(self, batch, batch_idx):
        """One joint fine-tuning step: encode, cluster, compute losses, log."""
        x, y = batch
        x = x.view(x.size(0), 1, -1)
        
        latent_x, cluster_assign = self(batch)
        
        loss, rec_loss, reg_loss = self._loss(x, latent_x, cluster_assign)
        
        self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        self.log("train_rec_loss", rec_loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        self.log("train_reg_loss", reg_loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
        
        return loss

    def configure_optimizers(self):
        """Adam over ALL module parameters (AE + frozen cluster net)."""
        optimizer = torch.optim.Adam(
            self.parameters(), lr=self.args.lr, weight_decay=self.args.wd
        )
        return optimizer


def parse_minimal_args(argv=None):
    """Build and parse the CLI arguments for joint AE + DeepDPM training.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:]
            (argparse's standard behavior), so existing callers are unaffected.

    Returns:
        argparse.Namespace with all training / clustering hyperparameters.
    """
    parser = ArgumentParser()
    # Training parameters
    parser.add_argument("--dir", default="../SingleTabCluster/AutoEncoder/datasets/", help="dataset directory")
    parser.add_argument("--dataset", default="custom")
    parser.add_argument("--train_data_file", type=str, default="filename.pt")
    parser.add_argument(
        "--lr", type=float, default=0.002, help="learning rate (default: 0.002)"
    )
    parser.add_argument(
        "--wd", type=float, default=5e-4, help="weight decay (default: 5e-4)"
    )
    parser.add_argument(
        "--batch-size", type=int, default=128, help="input batch size for training"
    )
    parser.add_argument(
        "--epoch", type=int, default=100, help="number of epochs to train"
    )
    parser.add_argument(
        "--pretrain_epochs", type=int, default=0, help="number of pre-train epochs"
    )

    parser.add_argument(
        "--pretrain_path", type=str, default="../SingleTabCluster/AutoEncoder/saved_models/Conv_20dim_3C1LS1.pth", help="use pretrained weights"
    )
    parser.add_argument(
        "--use_labels_for_eval",
        action="store_true",
        help="whether to use labels for evaluation"
    )

    # Utility parameters
    parser.add_argument(
        "--device",
        type=str,
        default="cuda",
        help="device for computation (default: cuda)",
    )

    parser.add_argument(
        "--seed",
        type=int,
        default=None,
        help="random seed",
    )

    parser.add_argument(
        "--max_epochs",
        type=int,
        default=100,
        help="number of AE epochs",
    )
    parser.add_argument(
        "--limit_train_batches", type=float, default=1., help="used for debugging"
    )
    parser.add_argument(
        "--limit_val_batches", type=float, default=1., help="used for debugging"
    )
    # NOTE: type=bool in argparse parses ANY non-empty string as True
    # (e.g. "--save_checkpoints False" is truthy); kept for CLI compatibility.
    parser.add_argument(
        "--save_checkpoints", type=bool, default=False
    )
    parser.add_argument(
        "--exp_name", type=str, default="default_exp"
    )
    parser.add_argument(
        "--gpus", type=int, default=-1, help="number of gpus to use"
    )

    # Cluster-net architecture and initialization
    parser.add_argument(
        "--init_k", default=1, type=int, help="number of initial clusters"
    )
    parser.add_argument(
        "--clusternet_hidden",
        type=int,
        default=50,
        help="The dimensions of the hidden dim of the clusternet. Defaults to 50.",
    )
    parser.add_argument(
        "--clusternet_hidden_layer_list",
        type=int,
        nargs="+",
        default=[50],
        help="The hidden layers in the clusternet. Defaults to [50].",
    )
    parser.add_argument(
        "--transform_input_data",
        type=str,
        default="normalize",
        choices=["normalize", "min_max", "standard", "standard_normalize", "None", None],
        help="Use normalization for embedded data",
    )
    parser.add_argument(
        "--cluster_loss_weight",
        type=float,
        default=1,
    )
    parser.add_argument(
        "--init_cluster_net_weights",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--when_to_compute_mu",
        type=str,
        choices=["once", "every_epoch", "every_5_epochs"],
        default="every_epoch",
    )
    parser.add_argument(
        "--how_to_compute_mu",
        type=str,
        choices=["kmeans", "soft_assign"],
        default="soft_assign",
    )
    parser.add_argument(
        "--how_to_init_mu",
        type=str,
        choices=["kmeans", "soft_assign", "kmeans_1d"],
        default="kmeans",
    )
    parser.add_argument(
        "--how_to_init_mu_sub",
        type=str,
        choices=["kmeans", "soft_assign", "kmeans_1d"],
        default="kmeans_1d",
    )
    parser.add_argument(
        "--log_emb_every",
        type=int,
        default=20,
    )
    parser.add_argument(
        "--log_emb",
        type=str,
        default="never",
        choices=["every_n_epochs", "only_sampled", "never"]
    )
    parser.add_argument(
        "--train_cluster_net",
        type=int,
        default=300,
        help="Number of epochs to pretrain the cluster net",
    )
    parser.add_argument(
        "--cluster_lr",
        type=float,
        default=0.0005,
    )
    parser.add_argument(
        "--subcluster_lr",
        type=float,
        default=0.005,
    )
    parser.add_argument(
        "--lr_scheduler", type=str, default="StepLR", choices=["StepLR", "None", "ReduceOnP"]
    )
    parser.add_argument(
        "--start_sub_clustering",
        type=int,
        default=45,
    )
    parser.add_argument(
        "--subcluster_loss_weight",
        type=float,
        default=1.0,
    )
    parser.add_argument(
        "--start_splitting",
        type=int,
        default=55,
    )
    parser.add_argument(
        "--alpha",
        type=float,
        default=10.0,
    )
    parser.add_argument(
        "--softmax_norm",
        type=float,
        default=1,
    )
    parser.add_argument(
        "--subcluster_softmax_norm",
        type=float,
        default=1,
    )
    parser.add_argument(
        "--split_prob",
        type=float,
        default=None,
        help="Split with this probability even if split rule is not met.  If set to None then the probability that will be used is min(1,H).",
    )
    parser.add_argument(
        "--merge_prob",
        type=float,
        default=None,
        help="merge with this probability even if merge rule is not met. If set to None then the probability that will be used is min(1,H).",
    )
    parser.add_argument(
        "--init_new_weights",
        type=str,
        default="same",
        choices=["same", "random", "subclusters"],
        help="How to create new weights after split. Same duplicates the old cluster's weights to the two new ones, random generate random weights and subclusters copies the weights from the subclustering net",
    )
    parser.add_argument(
        "--start_merging",
        type=int,
        default=55,
        help="The epoch in which to start consider merge proposals",
    )
    parser.add_argument(
        "--merge_init_weights_sub",
        type=str,
        default="highest_ll",
        help="How to initialize the weights of the subclusters of the merged clusters. Defaults to same",
    )
    parser.add_argument(
        "--split_init_weights_sub",
        type=str,
        default="random",
        choices=["same_w_noise", "same", "random"],
        help="How to initialize the weights of the subclusters of the merged clusters. Defaults to same",
    )
    parser.add_argument(
        "--split_every_n_epochs",
        type=int,
        default=10,
        help="Example: if set to 10, split proposals will be made every 10 epochs",
    )
    parser.add_argument(
        "--split_merge_every_n_epochs",
        type=int,
        default=30,
        help="Example: if set to 10, split proposals will be made every 10 epochs",
    )
    parser.add_argument(
        "--merge_every_n_epochs",
        type=int,
        default=10,
        help="Example: if set to 10, merge proposals will be made every 10 epochs",
    )
    parser.add_argument(
        "--raise_merge_proposals",
        type=str,
        default="brute_force_NN",
        help="how to raise merge proposals",
    )
    parser.add_argument(
        "--cov_const",
        type=float,
        default=0.005,
        help="gmms covs (in the Hastings ratio) will be torch.eye * cov_const",
    )
    parser.add_argument(
        "--freeze_mus_submus_after_splitmerge",
        type=int,
        default=5,
        help="Numbers of epochs to freeze the mus and sub mus following a split or a merge step",
    )
    parser.add_argument(
        "--freeze_mus_after_init",
        type=int,
        default=5,
        help="Numbers of epochs to freeze the mus and sub mus following a new initialization",
    )

    # Priors (NIW / NIG)
    parser.add_argument(
        "--use_priors",
        type=int,
        default=1,
        help="Whether to use priors when computing model's parameters",
    )
    parser.add_argument("--prior", type=str, default="NIW", choices=["NIW", "NIG"])
    parser.add_argument(
        "--pi_prior", type=str, default="uniform", choices=["uniform", None]
    )
    parser.add_argument(
        "--prior_dir_counts",
        type=float,
        default=0.1,
    )
    parser.add_argument(
        "--prior_kappa",
        type=float,
        default=0.0001,
    )
    parser.add_argument(
        "--NIW_prior_nu",
        type=float,
        default=None,
        help="Need to be at least codes_dim + 1",
    )
    parser.add_argument(
        "--prior_mu_0",
        type=str,
        default="data_mean",
    )
    # FIX: the default "isotropic" was missing from choices, so passing the
    # default explicitly on the command line was rejected by argparse.
    parser.add_argument(
        "--prior_sigma_choice",
        type=str,
        default="isotropic",
        choices=["isotropic", "iso_005", "iso_001", "iso_0001", "data_std"],
    )
    # FIX: default was the string ".005"; use the float directly.
    parser.add_argument(
        "--prior_sigma_scale",
        type=float,
        default=0.005,
    )
    parser.add_argument(
        "--prior_sigma_scale_step",
        type=float,
        default=1.,
        help="add to change sigma scale between alternations"
    )
    parser.add_argument(
        "--compute_params_every",
        type=int,
        help="How frequently to compute the clustering params (mus, sub, pis)",
        default=1,
    )
    parser.add_argument(
        "--start_computing_params",
        type=int,
        help="When to start to compute the clustering params (mus, sub, pis)",
        default=25,
    )
    parser.add_argument(
        "--cluster_loss",
        type=str,
        help="What kind of loss to use",
        default="KL_GMM_2",
        choices=["diag_NIG", "isotropic", "KL_GMM_2"],
    )
    parser.add_argument(
        "--subcluster_loss",
        type=str,
        help="What kind of loss to use",
        default="isotropic",
        choices=["diag_NIG", "isotropic", "KL_GMM_2"],
    )

    # NOTE: type=bool caveat as above — any non-empty value parses as True.
    parser.add_argument(
        "--ignore_subclusters",
        type=bool,
        default=False,
    )
    parser.add_argument(
        "--log_metrics_at_train",
        type=bool,
        default=True,
    )
    parser.add_argument(
        "--evaluate_every_n_epochs",
        type=int,
        default=5,
        help="How often to evaluate the net"
    )

    parser.add_argument(
        "--beta",
        type=float,
        default=1.0,
        help="coefficient of the regularization term on clustering",
    )

    parser.add_argument(
        "--lambda_",
        type=float,
        default=0.005,
        help="coefficient of the regularization term on clustering",
    )

    args = parser.parse_args(argv)
    return args


def Main():
    """Entry point: fine-tune the joint AE + DPM model, save weights, and
    report clustering accuracy / final K over the full training set."""
    args = parse_minimal_args()

    logger = pl.loggers.TensorBoardLogger("logs/", name=args.exp_name)

    trainer = pl.Trainer(
        max_epochs=10,
        gpus=1,
        logger=logger,
    )

    model = JointLearning(args)

    trainer.fit(model)

    # Save models (ensure the target directories exist first).
    os.makedirs("./saved_models/AE_models", exist_ok=True)
    os.makedirs("./saved_models/Cluster_models", exist_ok=True)
    torch.save(model.AE_model.state_dict(), "./saved_models/AE_models/AE_model.pth")
    torch.save(model.cluster_model.state_dict(), "./saved_models/Cluster_models/Cluster_model.pth")

    # Evaluation: collect hard cluster assignments for every training batch.
    net_pred = []
    labels = []
    with torch.no_grad():  # inference only — no gradients needed
        for batch in model.train_dataloader():
            x, y = batch
            _, soft_assign = model(batch)

            net_pred.append(soft_assign.argmax(axis=1).cpu().numpy())
            labels.append(y)

    net_pred = np.concatenate(net_pred)
    labels = torch.cat(labels, dim=0)

    np.save("net_pred.npy", net_pred)

    # BUG FIX: the original computed accuracy and K from `pred`, which held
    # only the LAST batch's predictions; use the full concatenation instead.
    acc = np.round(cluster_acc(labels.cpu().numpy(), net_pred), 5)
    print("Clustering accuracy: ", acc)
    print("Final K:", len(np.unique(net_pred)))
        
    
# Script entry point: run the full joint-training pipeline.
if __name__ == '__main__':
    Main()