import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import Parameter
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
from sklearn.cluster import KMeans
import math, os
from sklearn import metrics
#from single_cell_tools import cluster_acc

def buildNetwork(layers, type, activation="relu"):
    """Build an MLP as nn.Sequential from a list of layer widths.

    Each consecutive pair in `layers` becomes a Linear layer followed by the
    chosen activation ("relu" or "sigmoid"; anything else adds none).
    `type` is unused but kept for interface compatibility with callers that
    pass it by keyword.
    """
    modules = []
    for in_dim, out_dim in zip(layers[:-1], layers[1:]):
        modules.append(nn.Linear(in_dim, out_dim))
        if activation == "relu":
            modules.append(nn.ReLU())
        elif activation == "sigmoid":
            modules.append(nn.Sigmoid())
    return nn.Sequential(*modules)

def euclidean_dist(x, y):
    """Row-wise squared Euclidean distance between tensors `x` and `y`."""
    diff = x - y
    return (diff * diff).sum(dim=1)

class scDeepCluster(nn.Module):
    """DEC-style deep-clustering autoencoder (MSE reconstruction variant).

    A denoising autoencoder (Gaussian noise of std `sigma` is added to the
    encoder input) whose latent space is clustered with a Student's-t soft
    assignment against learnable cluster centers `self.mu` (created in
    :meth:`fit`). Training combines a KL clustering loss (weighted by
    `gamma`) with an MSE reconstruction loss.

    Args:
        input_dim: number of input features.
        z_dim: dimensionality of the latent embedding.
        encodeLayer: hidden-layer widths of the encoder MLP (must be non-empty).
        decodeLayer: hidden-layer widths of the decoder MLP (must be non-empty).
        activation: "relu" or "sigmoid" hidden activations.
        sigma: std of the denoising noise added to encoder inputs.
        alpha: degrees of freedom of the Student's-t kernel.
        gamma: weight of the clustering (KL) loss.
        device: torch device the model is moved to.
    """
    def __init__(self, input_dim, z_dim, encodeLayer=[], decodeLayer=[], 
            activation="relu", sigma=1., alpha=1., gamma=1., device="cuda"):
        super(scDeepCluster, self).__init__()
        # Global side effect kept on purpose for backward compatibility:
        # the whole pipeline (including tensors built in fit/pretrain) is
        # expected to run in float64.
        torch.set_default_dtype(torch.float64)
        self.z_dim = z_dim
        self.activation = activation
        self.sigma = sigma
        self.alpha = alpha
        self.gamma = gamma
        self.device = device
        self.encoder = buildNetwork([input_dim]+encodeLayer, type="encode", activation=activation)
        self.decoder = buildNetwork([z_dim]+decodeLayer, type="decode", activation=activation)
        self._enc_mu = nn.Linear(encodeLayer[-1], z_dim)
        self._dec_mu = nn.Linear(decodeLayer[-1], input_dim)

        self.to(device)

    def save_model(self, path):
        """Serialize the model weights to `path`."""
        torch.save(self.state_dict(), path)

    def load_model(self, path):
        """Load weights from `path`, keeping only keys this model knows."""
        pretrained_dict = torch.load(path, map_location=lambda storage, loc: storage)
        model_dict = self.state_dict()
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        model_dict.update(pretrained_dict)
        self.load_state_dict(model_dict)

    def soft_assign(self, z):
        """Student's-t soft assignment of embeddings `z` to centers `self.mu`.

        Returns a (n_samples, n_clusters) matrix whose rows sum to 1.
        """
        q = 1.0 / (1.0 + torch.sum((z.unsqueeze(1) - self.mu)**2, dim=2) / self.alpha)
        q = q**((self.alpha+1.0)/2.0)
        q = (q.t() / torch.sum(q, dim=1)).t()
        return q

    def target_distribution(self, q):
        """Sharpened auxiliary target distribution P from soft assignments Q."""
        p = q**2 / q.sum(0)
        return (p.t() / p.sum(1)).t()

    def forwardAE(self, x):
        """Autoencoder pass.

        Returns:
            (z0, x_recon): the noise-free embedding of `x` and the
            reconstruction computed from the noise-corrupted input.
        """
        # Denoising path: reconstruct from a noise-corrupted input.
        h = self.encoder(x + torch.randn_like(x) * self.sigma)
        z = self._enc_mu(h)
        h = self.decoder(z)
        x_recon = self._dec_mu(h)

        # Clean path: the embedding used downstream is noise-free.
        h0 = self.encoder(x)
        z0 = self._enc_mu(h0)
        return z0, x_recon

    def forward(self, x):
        """Full pass: returns (clean embedding z0, soft assignments q, reconstruction)."""
        z0, x_recon = self.forwardAE(x)
        q = self.soft_assign(z0)
        return z0, q, x_recon

    def encodeBatch(self, X, batch_size=256):
        """Embed `X` batch-wise; returns an (n, z_dim) tensor on self.device.

        Runs under no_grad (inference only) and restores the module's
        previous train/eval mode afterwards — the original left the model
        permanently in eval mode, so fit() silently trained in eval mode.
        """
        was_training = self.training
        self.eval()
        encoded = []
        num = X.shape[0]
        num_batch = int(math.ceil(1.0 * num / batch_size))
        with torch.no_grad():  # no autograd graph needed for inference
            for batch_idx in range(num_batch):
                xbatch = X[batch_idx*batch_size : min((batch_idx+1)*batch_size, num)]
                inputs = xbatch.to(self.device)
                z, _ = self.forwardAE(inputs)
                encoded.append(z.data)
        if was_training:
            self.train()

        encoded = torch.cat(encoded, dim=0)
        return encoded.to(self.device)

    def cluster_loss(self, p, q):
        """KL(P || Q) clustering loss, already weighted by self.gamma.

        NOTE: gamma is applied here — callers must not multiply the returned
        value by gamma again.
        """
        def kld(target, pred):
            # 1e-6 guards against log(0) when a predicted assignment vanishes.
            return torch.mean(torch.sum(target*torch.log(target/(pred+1e-6)), dim=-1))
        kldloss = kld(p, q)
        return self.gamma*kldloss

    def pretrain_autoencoder(self, X, batch_size=256, lr=0.001, epochs=400):
        """Pretrain the autoencoder alone with an MSE reconstruction loss."""
        self.train()
        dataset = TensorDataset(torch.Tensor(X))
        dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
        print("Pretraining stage")
        optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.parameters()), lr=lr, amsgrad=True)
        criterion = nn.MSELoss()
        for epoch in range(epochs):
            loss_val = 0
            for batch_idx, (x_batch,) in enumerate(dataloader):
                x_tensor = x_batch.to(self.device)
                _, x_recon_tensor = self.forwardAE(x_tensor)
                loss = criterion(x_tensor, x_recon_tensor)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                loss_val += loss.item() * len(x_batch)
            # The loss here is plain MSE; the original message mislabeled it "ZINB".
            print('Pretrain epoch %3d, MSE loss: %.8f' % (epoch+1, loss_val/X.shape[0]))

    def save_checkpoint(self, state, index, filename):
        """Save a training checkpoint `state` as FTcheckpoint_<index> under directory `filename`."""
        newfilename = os.path.join(filename, 'FTcheckpoint_%d.pth.tar' % index)
        torch.save(state, newfilename)

    def fit(self, X, init_centroid, lr=1., batch_size=256, 
            num_epochs=10, update_interval=1):
        """Cluster `X`, initializing centers from `init_centroid`.

        Args:
            X: array-like (n_samples, input_dim) data.
            init_centroid: array-like (n_clusters, input_dim) initial centers
                in INPUT space; they are embedded to initialize `self.mu`.
            lr: Adadelta learning rate.
            batch_size: minibatch size.
            num_epochs: number of training epochs.
            update_interval: refresh hard labels every this many epochs.

        Returns:
            numpy array of hard cluster labels, one per sample.
        """
        self.train()
        print("Clustering stage")
        X = torch.tensor(X, dtype=torch.float64)

        # Learnable cluster centers live in embedding space.
        centroid = self.encodeBatch(torch.tensor(init_centroid, dtype=torch.float64).to(self.device))
        self.mu = Parameter(centroid)

        # Optimizer is created AFTER registering self.mu so the centers are optimized too.
        optimizer = optim.Adadelta(filter(lambda p: p.requires_grad, self.parameters()), lr=lr, rho=.95)
        criterion = nn.MSELoss()

        num = X.shape[0]
        num_batch = int(math.ceil(1.0 * num / batch_size))

        # Guarantee y_pred exists even when the loop body never assigns it
        # (the original raised AttributeError when num_epochs <= 1).
        self.y_pred = np.zeros(num, dtype=np.int64)

        for epoch in range(num_epochs):
            train_loss = 0.0
            recon_loss_val = 0.0
            cluster_loss_val = 0.0
            q_chunks = []
            for batch_idx in range(num_batch):
                xbatch = X[batch_idx*batch_size : min((batch_idx+1)*batch_size, num)]
                optimizer.zero_grad()
                inputs = xbatch.to(self.device)

                _, qbatch, x_recon_batch = self.forward(inputs)
                # Per-batch target distribution, detached so it acts as a
                # fixed training target rather than part of the graph.
                target = self.target_distribution(qbatch).data

                cluster_loss = self.cluster_loss(target, qbatch)
                recon_loss = criterion(inputs, x_recon_batch)

                # cluster_loss() already applies gamma; the original multiplied
                # by gamma a second time here, double-weighting the KL term.
                loss = cluster_loss + recon_loss
                loss.backward()
                optimizer.step()

                cluster_loss_val += cluster_loss.item() * len(inputs)
                recon_loss_val += recon_loss.item() * len(inputs)
                train_loss += loss.item() * len(inputs)

                q_chunks.append(qbatch.data)

            if epoch % update_interval == 0:
                # Refresh hard labels from this epoch's soft assignments.
                # (The original skipped epoch 0, leaving y_pred undefined
                # when only one epoch was run.)
                q = torch.cat(q_chunks, dim=0)
                self.y_pred = torch.argmax(q, dim=1).data.cpu().numpy()

            print("Epoch %3d: Total: %.8f Clustering Loss: %.8f MSE Loss: %.8f" % (
                epoch + 1, train_loss / num, cluster_loss_val / num, recon_loss_val / num))

        return self.y_pred

