import tensorflow as tf
import os
os.environ["CUDA_VISIBLE_DEVICES"]="2,3"
import numpy as np
import input_data
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
from scipy.misc import imsave as ims
from utils import *
from ops import *
import numpy
from tensorflow.python import debug as tf_debug
from scipy.stats import norm
import pdb
import seaborn as sns
np.set_printoptions(threshold='nan')
class LatentAttention(): 
    def __init__(self,n_k=1,ratio=1):
        """Build the full TF1 graph for a kernel/particle variational model.

        n_k   -- number of kernels / particles in the latent mixture.
        ratio -- ratio of (KDE iteration + Particle) : KDE iteration,
                 used by train() to decide when to flip the KDE flag.

        Side effects: downloads/loads MNIST into "MNIST_data/" and creates
        placeholders, the recognition/sampling/generation networks, both
        losses, the Adam optimizer, and the alpha-update op.
        """
        self.mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        self.name=''
        self.n_samples = self.mnist.train.num_examples
        self.const=tf.Variable(0,dtype=tf.float32)
        self.n_z = 2 # numbers of latent diemensions
        self.n_k = n_k # numbers of kernels/ particles
        self.batchsize = 500
        self.ratio =ratio # Ratio of (KDE iteration+Particle):KDE iteration
        # Scalar step-size (learning-rate-like gamma) fed each iteration.
        self.stepsize=tf.placeholder(tf.float32, shape=[])
        self.image_size=2
        self.N=10000
        self.epsilon=1e-30
        self.epoch=300
        self.echo=False #print out intermediate variable
        # self.alpha=tf.placeholder(dtype=tf.float32,shape=[n_k])
        # Mixture weights over the n_k kernels, initialized uniform; updated
        # outside the optimizer via update_alpha(), hence trainable=False.
        self.alpha=tf.Variable(np.array([1.0/self.n_k for x in xrange(self.n_k)]),dtype=tf.float32,name='alpha',trainable=False)
# 
        self.is_to_update_alpha=True
        self.isKDE=tf.placeholder(tf.bool, shape=[])# True: gkernel, False: particle
        self.iszFromInput=tf.placeholder(tf.bool, shape=[])# True: gkernel, False: particle
        self.Inputz= tf.placeholder(tf.float32, [self.batchsize, self.n_z])
        # when this is_to_update_alpha is off, alpha is set as fix value.
        # self.p_x_theta=tf.Variable(dtype=tf.float32,trainable=False,name='')
        self.images = tf.placeholder(tf.float32, [self.batchsize, self.image_size*1])
        # self.images_repeat=
        image_matrix = tf.reshape(self.images,[self.batchsize, self.image_size, 1, 1])
        # self.w_mean_tensor=tf.Variable(np.array([[[5,5],[-5,-5]]for x in xrange(self.batchsize)]),dtype=tf.float32,name='mean_variable')
        # self.w_log_var_tensor=tf.Variable(np.array([[[0,-10],[0,-10]]for x in xrange(self.batchsize)]),dtype=tf.float32,name='log_var_variable')
        # Encoder output: per-kernel Gaussian mean and log-variance.
        self.w_mean_tensor, self.w_log_var_tensor =self.recognition(image_matrix)
        # self.batchsize, self.n_k, self.n_z

        # self.sampled_z=tf.cond(self.iszFromInput,lambda :self.Inputz, lambda:self.sampling()) 
        self.sampled_z=self.sampling()
        print 'Sample_z',self.sampled_z

        # self.batchsize, self.n_k, self.n_z

        # sampled_z=self.sampling()       
        # generated_images is the output, while generation is the encoder.

        # self.batchsize, self.n_k, self.image_size
        self.generated_images = self.generation(self.sampled_z)
        # self.loglikelihood=tf.abs(self.generated_images-self.images[:,tf.newaxis,:])
        # Element-wise squared reconstruction error per kernel.
        self.loglikelihood=tf.square(self.generated_images-self.images[:,tf.newaxis,:])
        print 'Likelihood',self.loglikelihood
        # self.batchsize,self.n_k,self.image_size
        self.generation_loss=self.generationLoss()
        # batchsize,1-x
        # Latent penalty is heavily down-weighted (factor 1e-5).
        self.latent_loss=self.latentLoss(self.w_mean_tensor,self.w_log_var_tensor)*0.00001
        print 'Latent_loss shape',self.latent_loss
        self.cost = tf.reduce_mean(self.generation_loss + self.latent_loss)
        print 'Cost',self.cost
        # self.batchsize->scaler
        self.optimizer = tf.train.AdamOptimizer(0.1).minimize(self.cost)
        # New alpha values are computed here and fed back in via feed_dict.
        self.alphaout=self.update_alpha(self.alpha,self.w_mean_tensor,self.w_log_var_tensor)

        # self.alpha=self.update_alpha(self.alpha,self.w_mean_tensor,self.w_log_var_tensor)
        
        
    def sampling(self):
        """Draw latent codes of shape (batchsize, n_k, n_z).

        KDE mode (isKDE True): reparameterized Gaussian samples
        mean + exp(log_var / 2) * eps. Particle mode: the raw kernel means.
        """
        with tf.variable_scope("sampling"):
            noise = tf.random_normal([self.batchsize,self.n_k,self.n_z],0,1,dtype=tf.float32)

            def reparameterized():
                # Standard reparameterization trick around the kernel means.
                return self.w_mean_tensor + (tf.exp(self.w_log_var_tensor/2) * noise)

            def means_only():
                return self.w_mean_tensor

            return tf.cond(self.isKDE, reparameterized, means_only)

    def update_alpha(self, x,w_mean_tensor,w_log_var_tensor):
        """Compute the next (normalized) mixture weights alpha.

        Combines three per-kernel factors -- a likelihood term (p_x_theta),
        a kernel-interaction term (q_t_z_i) and a prior term (p_z_i) -- in
        KDE mode, or a damped-alpha-times-likelihood update in particle
        mode, then renormalizes to sum to 1.

        NOTE(review): parameter `x` is never used in the body;
        w_log_var_tensor is also unused here -- looks intentional but
        worth confirming. All intermediates are kept in float64 with
        min-subtraction / clipping for numerical stability.
        """
        # Likelihood factor, shifted by its minimum before exp for stability.
        self.p_x_theta1 = - 0.5\
                           * tf.reduce_sum(self.loglikelihood, 2)\
                           * self.N / self.batchsize\
                           * self.stepsize
        self.p_x_theta1= tf.cast(self.p_x_theta1,tf.float64)
        self.p_x_theta = tf.reduce_mean(tf.exp(self.p_x_theta1-tf.reduce_min(self.p_x_theta1)), 0)
        # self.p_x_theta = tf.exp(tf.reduce_sum(self.p_x_theta1-tf.reduce_min(self.p_x_theta1), 0))
        # print 'self.p_x_theta',self.p_x_theta
        
        tsr=w_mean_tensor
        # tsr=tf.Print(tsr,[tsr],'Tensor:')
        # Pairwise differences between kernel means.
        theta_matrix=tsr[:,tf.newaxis,...]-tsr[:,:,tf.newaxis,...]

        # batchsize,n_k,n_k,n_z
        q_t_z_i=- 0.5 * tf.reduce_sum(tf.square(theta_matrix), axis=3)*self.alpha/ self.batchsize * (- self.stepsize)
        q_t_z_i=tf.cast(q_t_z_i,tf.float64)
        # h is a (currently fixed) bandwidth-like constant.
        h=1
        self.q_t_z_i = tf.reduce_mean(tf.reduce_sum(tf.exp(1/h*tf.clip_by_value(q_t_z_i-tf.reduce_min(q_t_z_i),0,20)) ,axis=2), axis=0) 
        # print 'self.q_t_z_i',self.q_t_z_i

        # Standard-normal-style prior factor on the kernel means.
        p_z_i=-0.5*tf.reduce_sum(tf.square(tsr),2)* (self.stepsize/ self.batchsize)
        p_z_i=tf.cast(p_z_i,tf.float64)
        self.p_z_i=  tf.reduce_mean(tf.exp(tf.clip_by_value(p_z_i-tf.reduce_min(p_z_i),0,20)),axis=0)
        # print 'self.p_z_i',self.p_z_i

        # self.alpha=tf.Print(self.alpha,[self.alpha],'Alpha2:')
        # ret=tf.cast(self.alpha,tf.float64)
        ret=tf.cond(self.isKDE, lambda: self.q_t_z_i*self.p_z_i*self.p_x_theta, \
            lambda: tf.cast(tf.pow(self.alpha,( 1 - self.stepsize)),tf.float64) *self.p_x_theta)
            # lambda:tf.cast(self.alpha,tf.float64))
        # Unnormalized total kept around for debugging/inspection.
        self.rets=tf.reduce_sum(ret)
        
        # Normalize so the weights form a probability vector.
        ret=ret/tf.reduce_sum(ret)
        return tf.cast(ret,tf.float32)
    # encoder
    def latentLoss(self,w_mean_tensor,w_log_var_tensor):
        """Alpha-weighted latent penalty: (batchsize, n_k, n_z) -> (batchsize,).

        KDE branch: a KL-like Gaussian term per kernel; particle branch:
        only the squared kernel means. Both are weighted by alpha and
        summed over kernels.
        """
        with tf.variable_scope("latent_loss_call"):
            def kde_branch():
                per_kernel = 0.5 * tf.reduce_sum(
                    tf.square(w_mean_tensor)
                    + tf.exp(w_log_var_tensor) - w_log_var_tensor/2 - 1, 2)
                return tf.reduce_sum(per_kernel * self.alpha, 1)

            def particle_branch():
                per_kernel = 0.5 * tf.reduce_sum(tf.square(w_mean_tensor), 2)
                return tf.reduce_sum(per_kernel * self.alpha, 1)

            return tf.cond(self.isKDE, kde_branch, particle_branch)

    def generationLoss(self):
        """Per-example reconstruction loss: (batchsize, n_k, image_size) -> (batchsize,).

        Both branches keep, for each example, the best (largest) value of
        -0.5 * sum-of-squared-errors over the n_k kernels; the KDE branch
        additionally rescales by a log-sum-exp style factor over kernels.

        Cleanup vs. the previous revision: removed dead locals that were
        computed and never used (`images`, `alpha_temp`) and a first
        `ParticleLoss` assignment (with its OneHotCategorical sample) that
        was immediately overwritten -- returned tensors are unchanged.
        """
        with tf.variable_scope("generation_loss_call"):
            # Total squared error per kernel: (batchsize, n_k).
            sum_loss=tf.reduce_sum(self.loglikelihood,-1)

            # Stabilized sum over kernels relative to each example's
            # per-row minimum (log-sum-exp style correction factor).
            coef=tf.map_fn(lambda x: tf.reduce_sum(tf.exp(tf.reduce_min(x)-x),-1),sum_loss)

            GaussianLoss=-tf.reduce_max(-0.5*sum_loss ,-1)*coef
            ParticleLoss=-tf.reduce_max(-0.5*sum_loss, -1)
            return tf.cond(self.isKDE, lambda: GaussianLoss, lambda: ParticleLoss)

    def recognition(self, input_images):
        """Encoder: map a batch of inputs to per-kernel Gaussian parameters.

        Returns a (mean, log-variance) pair, each of shape
        (batchsize, n_k, n_z) -- one n_z-dimensional Gaussian per kernel.
        """
        with tf.variable_scope("recognition"):
            # Flatten to (batchsize, 2) and pass through a small dense stack.
            flat = tf.reshape(input_images,[self.batchsize, 2])
            hidden =dense(flat, 2, (self.n_k*self.n_z))
            mean_flat = dense(hidden, self.n_k*self.n_z, (self.n_k*self.n_z), "w_mean_dense")
            log_var_flat = dense(hidden, self.n_k*self.n_z, (self.n_k*self.n_z), "w_stddev_dense")
            # Split the flat outputs into one row per kernel.
            kernel_shape = [self.batchsize,self.n_k,self.n_z]
            mean_tensor = tf.reshape(mean_flat, kernel_shape)
            log_var_tensor = tf.reshape(log_var_flat, kernel_shape)
            return mean_tensor, log_var_tensor

    # decoder
    # decoder
    def generation(self, z):
        """Decoder: apply a fixed linear map to every latent sample.

        z: (batchsize, n_k, n_z) -> (batchsize, n_k, image_size).
        """
        # Fixed, non-trainable generator weights.
        theta=tf.Variable(np.array([[2, 1], [-1, -2]]),trainable=False, dtype=tf.float32)
        # Multiply each example's (n_k, n_z) slice by theta.
        return tf.map_fn(lambda sample: tf.matmul(sample, theta), z)

    def train(self):
        """Run the training loop and dump diagnostic plots/files.

        Per epoch: anneal gamma, run optimizer + alpha update per batch
        (alpha is fed back through feed_dict), periodically save seaborn
        KDE plots of generated samples and latents plus a text dump.
        After training, re-runs generation on batches built from the first
        10/20/50/100 rows and saves the corresponding plots under
        ./result_k=<n_k>_ratio=<ratio>/.
        """
        n=int(np.sqrt(self.batchsize))
        # pickedpoint is all-zeros here; used to build the final eval batches.
        pickedpoint=[0 for x in xrange(self.n_k)]
        z_sample=np.zeros([self.batchsize,self.n_z])
        # visualization = self.mnist.train.next_batch(self.batchsize)[0]
        # reshaped_vis = visualization.reshape(self.batchsize,self.image_size,self.image_size)
        # alpha=np.array([1.0/self.n_k for x in xrange(self.n_k)])
        


        name='./result_k='+str(self.n_k)+'_ratio='+str(self.ratio)+'/'
        # self.name=name
        res={}
        # Best-effort mkdir: ignore failure if the directory already exists.
        try:
            os.mkdir(name)
        except:
            None
        # ims(name+"base.jpg",merge(reshaped_vis[:64],[8,8]))
        # train
        saver = tf.train.Saver(max_to_keep=2)
        idx_cnt=0
        with tf.Session() as sess:
            # Let your BUILD target depend on "//tensorflow/python/debug:debug_py"
            # (You don't need to worry about the BUILD dependency if you are using a pip
            #  install of open-source TensorFlow.)
            from tensorflow.python import debug as tf_debug


            gamma_init =1e-5

            sess.run(tf.initialize_all_variables())
            # Uniform initial mixture weights, updated each iteration below.
            alphain=np.array([1.0/self.n_k for x in xrange(self.n_k)])
            for epoch in range(self.epoch):
                # Step size decays with epoch (roughly 1/sqrt schedule).
                gamma = gamma_init/ (100 + np.sqrt(self.N / self.batchsize * epoch))
                print gamma
                for idx in range(int(self.n_samples / self.batchsize)):
                    idx_cnt+=1
                    
                    # input_flag selects KDE (True) vs particle (False) mode;
                    # currently forced to False in both branches.
                    if self.is_to_update_alpha==False:  
                        input_flag = False
                    else:
                        input_flag = (idx_cnt)%self.ratio==0
                        input_flag = False
                        # print idx_cnt
                        # input_flag=0
                        # input_flag=0
                    
                    batch = self.mnist.train.next_batch(self.batchsize)
                    # pdb.set_trace()
                    if self.is_to_update_alpha: 
                        # gen_loss,lat_loss,_,_,diff,rets,p_z_i, q_t_z_i, p_x_theta= sess.run((self.generation_loss,self.latent_loss,self.optimizer,\
                        #     self.increment_global_alpha_op,self.diff,self.rets,self.p_z_i,self.q_t_z_i,self.p_x_theta),\
                        #  feed_dict={self.images: batch, self.isKDE:input_flag,self.stepsize:gamma,\
                        #  self.iszFromInput:False,self.Inputz:z_sample})
                        # print rets,p_z_i, q_t_z_i, p_x_theta
                        # pdb.set_trace()
                        
                        # alphain=[1.0]
                        # NOTE(review): tfo is created every iteration and never
                        # used -- presumably leftover debugging.
                        tfo=tf.Variable(np.array([1.0/self.n_k for x in xrange(self.n_k)]))
                        # Optimizer step + fetch the new alpha; alpha is fed
                        # back in via feed_dict on the next iteration.
                        gen_loss,lat_loss,_,rets,alpha1= sess.run((self.generation_loss,self.latent_loss,self.optimizer,self.rets,self.alphaout),feed_dict={self.images: batch, self.isKDE:input_flag,self.stepsize:gamma,self.iszFromInput:False,self.Inputz:z_sample\
                            ,self.alpha:alphain\
                            })
                        # print alpha1
                        alphain=alpha1
                        # gen_loss,lat_loss,_,diff,rets,alpha1= sess.run((self.generation_loss,self.latent_loss,self.optimizer,self.diff,self.rets,self.alphaout),\
                        #  feed_dict={self.images: batch, self.isKDE:input_flag,self.stepsize:gamma,\
                        #  self.iszFromInput:False,self.Inputz:z_sample})
                         # print rets
                        # if numpy.isnan(gen_loss):
                            # pdb.set_trace()
                    # pdb.set_trace()
                    # Periodic diagnostics (only every 10th epoch actually plots).
                    if idx % (self.n_samples - 3) == 0: 
                        import pandas as pd

                        print "epoch %d: genloss %f latloss %f" % (epoch, np.mean(gen_loss), np.mean(lat_loss))
                        # saver.save(sess, os.getcwd()+"/training/train",global_step=epoch)
                        # generated_test,mean,var = sess.run((self.generated_images,self.w_mean_tensor, self.w_log_var_tensor), feed_dict={self.images: visualization, 
                        #     self.isKDE:input_flag,self.stepsize:gamma,self.iszFromInput:False,self.Inputz:z_sample})
                        # batch=np.array([pickedpoint for x in xrange(self.batchsize)])
                        if epoch%10!=0:
                       	    continue
                        batch = self.mnist.train.next_batch(self.batchsize)
                        generated_test,mean,var = sess.run((self.generated_images,self.sampled_z, self.w_log_var_tensor), feed_dict={self.images: batch, 
                            self.isKDE:True,self.stepsize:gamma,self.iszFromInput:False,self.Inputz:z_sample
                            ,self.alpha:alphain\
                            })


                        print alpha1
                        # 

                        # KDE plot of generated outputs, all kernels stacked.
                        ssframe=pd.DataFrame(np.concatenate([generated_test[:,x] for x in xrange(generated_test.shape[1])]),columns=['X','Y'])
                        # pdb.set_trace()
                        sns.jointplot('X','Y', ssframe, kind = 'kde')
                        plt.savefig(name+'x'+str(epoch)+'.jpg')

                        # ssframe=pd.DataFrame(np.concatenate([mean[:,x] for x in xrange(mean.shape[1])]),columns=['X','Y'])
                        # plt.scatter( mean[:,0,0],mean[:,0,1],color='r')
                        # plt.scatter( mean[:,1,0],mean[:,1,1],color='b')

                        # KDE plot of the sampled latents.
                        ssframe=pd.DataFrame(np.concatenate([mean[:,x] for x in xrange(mean.shape[1])]),columns=['X','Y'])
                        sns.jointplot('X','Y', ssframe, kind = 'kde')
                        plt.savefig(name+'z'+str(epoch)+'.jpg')

                        # Text dump: input, latent, and the linear-map images
                        # of both kernels' latents.
                        f = open(name+'test'+str(epoch)+'.txt', "w") 
                        for i in xrange(mean.shape[0]):
                            print >> f,batch[i],mean[i],[mean[i][0][0]*2-mean[i][0][1],mean[i][0][0]-2*mean[i][0][1]],[mean[i][1][0]*2-mean[i][1][1],mean[i][1][0]-2*mean[i][1][1]]
                        f.close()

            import pandas as pd
            res=pd.DataFrame(res)
            res.to_csv(name+'out.csv') 
            # Final evaluation batches: first 10/20/50/100 rows tiled to a
            # full batch (batchsize is assumed divisible by each).
            batch=np.array([pickedpoint for x in xrange(self.batchsize)])
            # batch=self.mnist.train.next_batch(self.batchsize)
            batch10=np.concatenate([batch[:10,:]for x in xrange(self.batchsize/10)])
            batch20=np.concatenate([batch[:20,:]for x in xrange(self.batchsize/20)])
            batch50=np.concatenate([batch[:50,:]for x in xrange(self.batchsize/50)])
            batch100=np.concatenate([batch[:100,:]for x in xrange(self.batchsize/100)])
            # pdb.set_trace()
            input_flag=True
            generated_test,mean,var = sess.run((self.generated_images,self.sampled_z, self.w_log_var_tensor), feed_dict={self.images: batch10,\
                self.isKDE:input_flag,self.stepsize:gamma,self.iszFromInput:False,self.Inputz:z_sample\
                ,self.alpha:alphain\
                })
            pdb.set_trace()
            ssframe=pd.DataFrame(np.concatenate([generated_test[:,x] for x in xrange(generated_test.shape[1])]),columns=['X','Y'])
            sns.jointplot('X','Y', ssframe, kind = 'kde')
            plt.savefig(name+'x_last_10.jpg')

            ssframe=pd.DataFrame(np.concatenate([mean[:,x] for x in xrange(mean.shape[1])]),columns=['X','Y'])
            sns.jointplot('X','Y', ssframe, kind = 'kde')
            plt.savefig(name+'z_last_10.jpg')


            generated_test,mean,var = sess.run((self.generated_images,self.sampled_z, self.w_log_var_tensor), feed_dict={self.images: batch20,\
                self.isKDE:input_flag,self.stepsize:gamma,self.iszFromInput:False,self.Inputz:z_sample\
                ,self.alpha:alphain\
                })
            ssframe=pd.DataFrame(np.concatenate([generated_test[:,x] for x in xrange(generated_test.shape[1])]),columns=['X','Y'])
            sns.jointplot('X','Y', ssframe, kind = 'kde')
            plt.savefig(name+'x_last_20.jpg')

            ssframe=pd.DataFrame(np.concatenate([mean[:,x] for x in xrange(mean.shape[1])]),columns=['X','Y'])
            sns.jointplot('X','Y', ssframe, kind = 'kde')
            plt.savefig(name+'z_last_20.jpg')

            generated_test,mean,var = sess.run((self.generated_images,self.sampled_z, self.w_log_var_tensor), feed_dict={self.images: batch50,\
                self.isKDE:input_flag,self.stepsize:gamma,self.iszFromInput:False,self.Inputz:z_sample\
                ,self.alpha:alphain\
                })
            ssframe=pd.DataFrame(np.concatenate([generated_test[:,x] for x in xrange(generated_test.shape[1])]),columns=['X','Y'])
            sns.jointplot('X','Y', ssframe, kind = 'kde')
            plt.savefig(name+'x_last_50.jpg')

            ssframe=pd.DataFrame(np.concatenate([mean[:,x] for x in xrange(mean.shape[1])]),columns=['X','Y'])
            sns.jointplot('X','Y', ssframe, kind = 'kde')
            plt.savefig(name+'z_last_50.jpg')


            generated_test,mean,var = sess.run((self.generated_images,self.sampled_z, self.w_log_var_tensor), feed_dict={self.images: batch100,\
                self.isKDE:input_flag,self.stepsize:gamma,self.iszFromInput:False,self.Inputz:z_sample\
                ,self.alpha:alphain\
                })
            ssframe=pd.DataFrame(np.concatenate([generated_test[:,x] for x in xrange(generated_test.shape[1])]),columns=['X','Y'])
            sns.jointplot('X','Y', ssframe, kind = 'kde')
            plt.savefig(name+'x_last_100.jpg')

            ssframe=pd.DataFrame(np.concatenate([mean[:,x] for x in xrange(mean.shape[1])]),columns=['X','Y'])
            sns.jointplot('X','Y', ssframe, kind = 'kde')
            plt.savefig(name+'z_last_100.jpg')

            sess.close()

import argparse



parser = argparse.ArgumentParser()
parser.add_argument("-k")
parser.add_argument("-r")
args = parser.parse_args()
print args.k,args.r
model = LatentAttention(n_k=int(args.k),ratio=int(args.r))
model.train()

    
