#!/usr/bin/env python
# -*- coding:utf-8 -*-

from __future__ import print_function

import os
import sys
import timeit

import numpy

import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

from load_dataset import *
import logging
from test_logging import logging_conf
from kmeans_alg import *
from sklearn import metrics

# Configure the project-wide logging handlers before any logger is created.
logging_conf()
# Module-level logger; the name intentionally mirrors the script file name.
logger = logging.getLogger('AE.py')

class AE(object):
    """Autoencoder with tied weights (W' = W.T) built on Theano.

    Encodes an input minibatch ``x`` as ``sigmoid(x.W + b)`` and decodes it
    back as ``sigmoid(y.W.T + b')``.  Training minimizes the Euclidean
    reconstruction error via plain SGD (see :meth:`get_cost_updates`).
    """

    def __init__(
            self,
            numpy_rng,
            theano_rng=None,
            input=None,
            n_visible=195,
            n_hidden=128,
            W=None,
            bhid=None,
            bvis=None
    ):
        """Build the model parameters and the symbolic input.

        :param numpy_rng: numpy.random.RandomState used to initialize W
        :param theano_rng: Theano RandomStreams; derived from numpy_rng if None
        :param input: symbolic minibatch variable (one example per row);
                      a fresh dmatrix is created when None
        :param n_visible: dimensionality of the visible (input) layer
        :param n_hidden: number of hidden units
        :param W: optional pre-built shared weight matrix (n_visible, n_hidden)
        :param bhid: optional shared hidden-layer bias vector
        :param bvis: optional shared visible-layer bias vector
        """
        self.n_visible = n_visible
        self.n_hidden = n_hidden

        # Theano random generator that gives symbolic random values.
        if theano_rng is None:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

        # BUG FIX: the original `if not W:` would try to evaluate the truth
        # value of a Theano shared variable whenever a caller supplied one;
        # identity comparison against None is the intended check.  Same for
        # bvis and bhid below.
        if W is None:
            # W is uniformly sampled from +/- 4*sqrt(6/(n_hidden+n_visible))
            # and cast to theano.config.floatX so the code is runnable on GPU.
            initial_W = numpy.asarray(
                numpy_rng.uniform(
                    low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    size=(n_visible, n_hidden)
                ),
                dtype=theano.config.floatX
            )
            W = theano.shared(value=initial_W, name='W', borrow=True)

        if bvis is None:
            # Visible-layer bias, zero-initialized (named for debuggability,
            # matching the convention used for bhid).
            bvis = theano.shared(
                value=numpy.zeros(
                    n_visible,
                    dtype=theano.config.floatX
                ),
                name='b_prime',
                borrow=True
            )

        if bhid is None:
            # Hidden-layer bias, zero-initialized.
            bhid = theano.shared(
                value=numpy.zeros(
                    n_hidden,
                    dtype=theano.config.floatX
                ),
                name='b',
                borrow=True
            )

        self.W = W
        # b corresponds to the bias of the hidden layer.
        self.b = bhid
        # b_prime corresponds to the bias of the visible layer.
        self.b_prime = bvis
        # Tied weights: the decoder weight matrix is W transposed.
        self.W_prime = self.W.T
        self.theano_rng = theano_rng

        # If no input is given, generate a variable representing the input.
        if input is None:
            # We use a matrix because we expect a minibatch of several
            # examples, each example being a row.
            self.x = T.dmatrix(name='input')
        else:
            self.x = input

        self.params = [self.W, self.b, self.b_prime]

    def get_hidden_values(self, input):
        """Return the hidden-layer activations sigmoid(input.W + b)."""
        return T.nnet.sigmoid(T.dot(input, self.W) + self.b)

    def get_reconstructed_input(self, hidden):
        """Return the reconstruction sigmoid(hidden.W' + b') of the input."""
        return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)

    def get_cost_updates(self, learning_rate):
        """Build the symbolic cost and SGD updates for one training step.

        :param learning_rate: SGD step size
        :returns: tuple ``(cost, updates)`` where ``cost`` is the Euclidean
                  reconstruction error and ``updates`` is a list of
                  ``(param, new_value)`` pairs for ``theano.function``
        """
        self.y = self.get_hidden_values(self.x)
        z = self.get_reconstructed_input(self.y)

        # Objective: Euclidean distance between input and reconstruction.
        # (A cross-entropy objective would also be possible but is unused.)
        cost = T.sqrt(T.sum((self.x - z) ** 2))

        # Gradients of the cost w.r.t. each parameter of the model.
        gparams = T.grad(cost, self.params)
        # Pair every parameter with its SGD update expression.
        updates = [
            (param, param - learning_rate * gparam)
            for param, gparam in zip(self.params, gparams)
            ]

        return (cost, updates)


def test_ae(learning_rate=0.1, iter_num=10, error_threshold=0.000001, borrow=True):
    """Train the autoencoder on the Cornell dataset, cluster the learned
    hidden representation with k-means, and log the NMI score against the
    ground-truth labels.

    :param learning_rate: SGD step size passed to AE.get_cost_updates
    :param iter_num: number of training epochs
    :param error_threshold: kept for interface compatibility; the early
                            stopping rule that used it is disabled
    :param borrow: passed to theano.shared when wrapping the dataset
    """
    logger.info("start into test_ae to train model:")
    logger.info("there are param's value:")
    logger.info("learning_rate: " + str(learning_rate))
    logger.info("iter_num: " + str(iter_num))
    logger.info("error_threshold: " + str(error_threshold))

    datasets = load_cornell("../data/cornell/cornell_adj.txt",
                        "../data/cornell/cornell_content.txt")

    index = T.lscalar()   # minibatch index
    x = T.matrix('x')     # symbolic input minibatch

    rng = numpy.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2 ** 30))  # random integer [0, 2 ** 30)

    da = AE(
        numpy_rng=rng,
        theano_rng=theano_rng,
        input=x,
        n_visible=390,
        n_hidden=128
    )

    cost, updates = da.get_cost_updates(
        learning_rate=learning_rate
    )

    data = theano.shared(numpy.asarray(datasets, dtype=theano.config.floatX), borrow=borrow)
    # One-example "minibatches": the givens clause feeds row `index`.
    train_da = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: data[index: (index + 1)]
        }
    )

    start_time = timeit.default_timer()

    sample_num = len(datasets)

    # Train for a fixed number of epochs.  (Early stopping based on
    # error_threshold existed previously but is deliberately disabled.)
    for epoch in range(1, iter_num + 1):
        # Go through the training set one example at a time.
        c = [train_da(batch_index) for batch_index in range(sample_num)]
        cost_result = numpy.mean(c)
        print('Training epoch %d, cost ' % epoch, cost_result)

    end_time = timeit.default_timer()

    training_time = (end_time - start_time)
    logger.info(('The train code runtime is %.2fm' % ((training_time) / 60.)))

    logger.info("end train.")
    logger.info("W's value:")
    logger.info(da.W.get_value())
    logger.info("b's value:")
    logger.info(da.b.get_value())
    logger.info("b_prime's value:")
    logger.info(da.b_prime.get_value())

    print ("hidden layer value: ")
    y = da.get_hidden_values(datasets)

    # Cluster the learned hidden representation and compare to ground truth.
    k = 5
    centroids, clusterAssment = kmeans(y.eval(), k)
    # First column of the assignment matrix holds the cluster index.
    # Use numpy.asarray explicitly instead of the bare `array` picked up
    # from a star import.
    cluster_result_label = list(numpy.asarray(clusterAssment)[:, 0])
    print (cluster_result_label)
    cornell_ground_truth = load_cornell_groundtruth(
        "../data/cornell/cornell_ground_truth.txt")
    # BUG FIX: use the configured module logger rather than the root
    # `logging` module, consistent with the rest of this function.
    logger.info(metrics.normalized_mutual_info_score(cornell_ground_truth, cluster_result_label))

if __name__ == '__main__':
    # Script entry point: train the autoencoder and evaluate clustering.
    test_ae()