"""
Restricted Boltzmann Machines (RBM)
author:Cuson
2019/12/16
"""
# import os
# import timeit
import random

import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers

class RBM(object):
    """A Restricted Boltzmann Machine with binary units, trained by CD-k.

    Written for TensorFlow 1.x graph mode: every method only adds ops to the
    default graph; nothing runs until a Session evaluates the returned
    tensors/ops.
    """

    def __init__(self, inpt=None, n_visiable=784, n_hidden=500, W=None,
                 hbias=None, vbias=None):
        """
        :param inpt: Tensor, the input tensor [None, n_visiable]
        :param n_visiable: int, number of visiable units
        :param n_hidden: int, number of hidden units
        :param W, hbias, vbias: tf.Variable, optional pre-created parameters
            (pass them in when the RBM shares weights with a DBN layer)
        """
        self.n_visiable = n_visiable
        self.n_hidden = n_hidden
        # Optionally initialize input
        if inpt is None:
            inpt = tf.placeholder(dtype=tf.float32, shape=[None, self.n_visiable])
        self.input = inpt
        # Initialize the parameters if not given
        if W is None:
            # Glorot/Xavier-style uniform init range. The bound must be
            # positive: the previous "-4.0 * ..." inverted the
            # (minval, maxval) pair passed to tf.random_uniform.
            bounds = 4.0 * np.sqrt(6.0 / (self.n_visiable + self.n_hidden))
            W = tf.Variable(tf.random_uniform([self.n_visiable, self.n_hidden],
                                              minval=-bounds, maxval=bounds),
                            dtype=tf.float32)
        if hbias is None:
            hbias = tf.Variable(tf.zeros([self.n_hidden, ]), dtype=tf.float32)
        if vbias is None:
            vbias = tf.Variable(tf.zeros([self.n_visiable, ]), dtype=tf.float32)
        self.W = W
        self.hbias = hbias
        self.vbias = vbias
        # keep track of parameters for training (DBN)
        self.params = [self.W, self.hbias, self.vbias]

    def propup(self, v):
        """P(h=1|v): sigmoid activation of the hidden units given visible units."""
        return tf.nn.sigmoid(tf.matmul(v, self.W) + self.hbias)

    def propdown(self, h):
        """P(v=1|h): sigmoid activation of the visible units given hidden units."""
        return tf.nn.sigmoid(tf.matmul(h, tf.transpose(self.W)) + self.vbias)

    def sample_prob(self, prob):
        """Draw Bernoulli samples: 1 with probability `prob`, else 0."""
        return tf.nn.relu(tf.sign(prob - tf.random_uniform(tf.shape(prob))))

    def sample_h_given_v(self, v0_sample):
        """Sample the hidden units given a visiable sample."""
        h1_mean = self.propup(v0_sample)
        h1_sample = self.sample_prob(h1_mean)
        return (h1_mean, h1_sample)

    def sample_v_given_h(self, h0_sample):
        """Sample the visiable units given a hidden sample."""
        v1_mean = self.propdown(h0_sample)
        v1_sample = self.sample_prob(v1_mean)
        return (v1_mean, v1_sample)

    def gibbs_vhv(self, v0_sample):
        """One step of Gibbs sampling starting from the visiable state."""
        h1_mean, h1_sample = self.sample_h_given_v(v0_sample)
        v1_mean, v1_sample = self.sample_v_given_h(h1_sample)
        return (h1_mean, h1_sample, v1_mean, v1_sample)

    def gibbs_hvh(self, h0_sample):
        """One step of Gibbs sampling starting from the hidden state."""
        v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
        h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
        return (v1_mean, v1_sample, h1_mean, h1_sample)

    def free_energy(self, v_sample):
        """Free energy F(v) = -v.vbias - sum_j softplus(v.W + hbias)_j.

        Returns one scalar per row of `v_sample` (shape [batch]).
        """
        wx_b = tf.matmul(v_sample, self.W) + self.hbias
        # Reduce the visible-bias term to shape [batch]. The previous
        # tf.matmul(v, expand_dims(vbias, 1)) produced a [batch, 1] column
        # that broadcast against the [batch] hidden term into a
        # [batch, batch] matrix, corrupting the CD cost and its gradients.
        vbias_term = tf.reduce_sum(v_sample * self.vbias, axis=1)
        # softplus(x) == log(1 + exp(x)) but does not overflow for large x.
        hidden_term = tf.reduce_sum(tf.nn.softplus(wx_b), axis=1)
        return -hidden_term - vbias_term

    def get_train_ops(self, learning_rate=0.1, k=1, persistent=None):
        """
        Build the CD-k (or PCD-k) parameter-update ops.
        :params learning_rate: float, SGD step size
        :params k: int, the number of Gibbs steps (Note k=1 has been shown to work surprisingly well)
        :params persistent: tf.Variable or None; when given, the chain state
            is carried across updates (PCD-k)
        :return: list of tf.assign ops; run them all to perform one update
        """
        # Positive phase: hidden statistics driven by the data.
        ph_mean, ph_sample = self.sample_h_given_v(self.input)
        # The old state of the chain: data sample (CD) or persistent state (PCD).
        if persistent is None:
            chain_start = ph_sample
        else:
            chain_start = persistent

        # Use tf.while_loop to run the k Gibbs steps inside the graph.
        cond = lambda i, nv_mean, nv_sample, nh_mean, nh_sample: i < k
        body = lambda i, nv_mean, nv_sample, nh_mean, nh_sample: (i + 1,) + self.gibbs_hvh(nh_sample)
        i, nv_mean, nv_sample, nh_mean, nh_sample = tf.while_loop(
            cond, body,
            loop_vars=[tf.constant(0),
                       tf.zeros(tf.shape(self.input)),
                       tf.zeros(tf.shape(self.input)),
                       tf.zeros(tf.shape(chain_start)),
                       chain_start])

        # Negative phase: treat the chain end as a constant for the gradients.
        chain_end = tf.stop_gradient(nv_sample)
        cost = tf.reduce_mean(self.free_energy(self.input)) - \
            tf.reduce_mean(self.free_energy(chain_end))
        # Compute the gradients and clip them to keep each update bounded.
        gparams = tf.gradients(ys=[cost], xs=self.params)
        gparams = [tf.clip_by_value(g, clip_value_min=-1, clip_value_max=1)
                   for g in gparams]
        new_params = [tf.assign(param, param - gparam * learning_rate)
                      for gparam, param in zip(gparams, self.params)]

        if persistent is not None:
            # Advance the persistent chain for the next PCD update.
            new_persistent = [tf.assign(persistent, nh_sample)]
        else:
            new_persistent = []
        return new_params + new_persistent  # use for training

    def get_reconstruction_cost(self):
        """Mean cross-entropy of the original input and its reconstruction.

        A monitoring proxy only — not the quantity CD-k actually minimizes.
        """
        activation_h = self.propup(self.input)
        activation_v = self.propdown(activation_h)
        # Clip both p and (1 - p) away from zero so tf.log never yields NaN/-inf.
        activation_v_clip = tf.clip_by_value(activation_v, clip_value_min=1e-10, clip_value_max=1)
        reduce_activation_v_clip = tf.clip_by_value(1.0 - activation_v, clip_value_min=1e-10, clip_value_max=1)
        cross_entropy = -tf.reduce_mean(tf.reduce_sum(
            self.input * (tf.log(activation_v_clip)) +
            (1.0 - self.input) * (tf.log(reduce_activation_v_clip)), axis=1))
        return cross_entropy

    def reconstruct(self, v):
        """Reconstruct the original input: one deterministic up-down mean pass."""
        h = self.propup(v)
        return self.propdown(h)


def loadDataSet(fileName, ratio):
    """Load a whitespace-delimited text file and randomly split it.

    :param fileName: str, path of the data file; one sample per line,
        columns separated by whitespace
    :param ratio: float in [0, 1], probability that a line goes into the
        training set (remaining lines go to the test set)
    :return: (trainingData, testData), two lists of lists of strings
    """
    trainingData = []
    testData = []
    with open(fileName) as txtData:
        for line in txtData:
            lineData = line.strip().split()  # split on whitespace
            if not lineData:
                # skip blank lines: an empty row would later crash
                # splitDataSet when it indexes data[a][-1]
                continue
            if random.random() < ratio:  # random train/test split
                trainingData.append(lineData)  # training set
            else:
                testData.append(lineData)  # test set
    return trainingData, testData

def splitDataSet(data):
    """Split rows into a feature matrix and a label vector.

    The last column of each row is the label; the remaining columns are
    the features.

    :param data: sequence of rows; each row is a sequence of numeric
        strings/numbers, all the same length
    :return: (features, labels) as float numpy arrays of shapes (n, m-1)
        and (n,); empty arrays when `data` is empty (the previous version
        raised IndexError on an empty split)
    """
    if len(data) == 0:
        return np.empty((0, 0)), np.empty((0,))
    # np.asarray(..., dtype=float) converts every entry in one pass,
    # replacing the manual per-element float() loops.
    arr = np.asarray(data, dtype=float)
    return arr[:, :-1], arr[:, -1]

import  random



if __name__ == "__main__":
    filename = 'Boston House Price Dataset.txt'

    trainSet, testSet = loadDataSet(filename, 0.9)
    trainX, trainY = splitDataSet(trainSet)
    testX, testY = splitDataSet(testSet)
    # Normalize the 13 Boston features with batch normalization.
    xs = tf.placeholder(dtype=tf.float32, shape=[None, 13])
    # NOTE: tf.nn.moments returns (mean, variance) — the second value is the
    # variance (the old name "std" was misleading).
    mean, var = tf.nn.moments(xs, axes=[0])
    scale = 0.1
    shift = 0
    epsilon = 0.001
    data = tf.nn.batch_normalization(xs, mean, var, shift, scale, epsilon)
    # Separate placeholder fed with the already-normalized values.
    x = tf.placeholder(dtype=tf.float32, shape=[None, 13])

    rbm = RBM(inpt=x, n_visiable=13, n_hidden=2)
    train_ops = rbm.get_train_ops(learning_rate=0.00001, k=1, persistent=None)
    cost = rbm.get_reconstruction_cost()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Evaluate the normalized training data once and reuse it every epoch.
        # (The previous sess.run(mean)/sess.run(std) calls discarded their
        # results and were pure wasted computation.)
        num = sess.run(data, feed_dict={xs: trainX})
        for epoch in range(1000):
            sess.run(train_ops, feed_dict={x: num})
            # Reconstruction cross-entropy for this epoch (monitoring only).
            epoch_cost = sess.run(cost, feed_dict={x: num})
            print(epoch_cost)