#!/usr/bin/env python
# -*- coding:utf-8 -*-

from __future__ import print_function

import os
import sys
import timeit

import numpy

import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

from load_dataset import *
import logging
from test_logging import logging_conf
from kmeans_alg import *
from sklearn import metrics

# Configure logging (handlers/format come from test_logging.logging_conf)
# and create this module's named logger.
logging_conf()
logger = logging.getLogger('SAE_minicost.py')


class SAE(object):
    """One auto-encoder layer with tied weights.

    The encoder maps ``x`` to ``sigmoid(x.W + b)`` and the decoder
    reconstructs the input with the transposed weight matrix
    (``W_prime = W.T``), so only ``W``, ``b`` and ``b_prime`` are learned.
    """

    def __init__(
            self,
            numpy_rng,
            theano_rng=None,
            input=None,
            n_visible=390,
            n_hidden=256,
            W=None,
            bhid=None,
            bvis=None
    ):
        """
        :param numpy_rng: numpy.random.RandomState used to initialize W
        :param theano_rng: optional Theano RandomStreams; derived from
            ``numpy_rng`` when not supplied
        :param input: optional symbolic input (minibatch matrix, one
            example per row); a fresh ``dmatrix`` is created when None
        :param n_visible: dimensionality of the input
        :param n_hidden: dimensionality of the hidden layer
        :param W: optional pre-built shared weight matrix
        :param bhid: optional pre-built shared hidden bias
        :param bvis: optional pre-built shared visible bias
        """
        self.n_visible = n_visible
        self.n_hidden = n_hidden

        # create a Theano random generator that gives symbolic random values
        if theano_rng is None:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

        # NOTE: use `is None`, not truthiness — callers may pass Theano
        # shared variables, which must not be tested with `not`.
        if W is None:
            # W is initialized with `initial_W`, uniformly sampled from
            # +/- 4*sqrt(6./(n_visible+n_hidden)); the output of uniform is
            # converted using asarray to dtype theano.config.floatX so the
            # code is runnable on GPU.
            initial_W = numpy.asarray(
                numpy_rng.uniform(
                    low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    size=(n_visible, n_hidden)
                ),
                dtype=theano.config.floatX
            )
            W = theano.shared(value=initial_W, name='W', borrow=True)

        if bvis is None:
            bvis = theano.shared(
                value=numpy.zeros(
                    n_visible,
                    dtype=theano.config.floatX
                ),
                borrow=True
            )

        if bhid is None:
            bhid = theano.shared(
                value=numpy.zeros(
                    n_hidden,
                    dtype=theano.config.floatX
                ),
                name='b',
                borrow=True
            )

        self.W = W
        # b corresponds to the bias of the hidden
        self.b = bhid
        # b_prime corresponds to the bias of the visible
        self.b_prime = bvis
        # tied weights, therefore W_prime is W transpose
        self.W_prime = self.W.T
        self.theano_rng = theano_rng

        # if no input is given, generate a variable representing the input
        if input is None:
            # we use a matrix because we expect a minibatch of several
            # examples, each example being a row
            self.x = T.dmatrix(name='input')
        else:
            self.x = input

        self.params = [self.W, self.b, self.b_prime]

    def get_hidden_values(self, input):
        """Compute the values of the hidden layer."""
        return T.nnet.sigmoid(T.dot(input, self.W) + self.b)

    def get_reconstructed_input(self, hidden):
        """Compute the reconstructed input given the values of the
        hidden layer.
        """
        return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)

    def get_cost_updates(self, learning_rate):
        """Compute the cost and the SGD updates for one training step.

        :param learning_rate: plain SGD step size
        :return: ``(cost, updates)`` where ``cost`` is the symbolic
            Euclidean reconstruction error and ``updates`` is a list of
            ``(param, new_value)`` pairs for ``theano.function``.
        """
        self.y = self.get_hidden_values(self.x)
        z = self.get_reconstructed_input(self.y)

        # objective: Euclidean distance between input and reconstruction
        # (cross-entropy variant kept for reference)
        # L = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
        # cost = T.mean(L)
        cost = T.sqrt(T.sum((self.x - z) ** 2))

        # compute the gradients of the cost with respect to the parameters
        gparams = T.grad(cost, self.params)
        # generate the list of SGD updates, one per parameter
        updates = [
            (param, param - learning_rate * gparam)
            for param, gparam in zip(self.params, gparams)
        ]

        return (cost, updates)


def test_ae(learning_rate=0.1, iter_num=20000, error_threshold=0.000001, get_mini_cost_iter_num=10, borrow=True):
    """Greedily train a two-layer stacked auto-encoder on the Cornell data,
    keeping for each layer the restart (out of ``get_mini_cost_iter_num``
    random restarts) with the smallest final cost, then cluster the last
    layer's output with k-means and log the NMI against ground truth.

    :param learning_rate: SGD step size used for every layer
    :param iter_num: number of epochs per restart
    :param error_threshold: early-stopping threshold (currently unused)
    :param get_mini_cost_iter_num: random restarts per layer and k-means runs
    :param borrow: passed to theano.shared when wrapping the data set
    """
    # log some parameters information
    logger.info("start into test_ae to train model:")
    logger.info("there are param's value:")
    logger.info("learning_rate: " + str(learning_rate))
    logger.info("iter_num: " + str(iter_num))
    logger.info("error_threshold: " + str(error_threshold))
    logger.info("get_mini_cost_iter_num: " + str(get_mini_cost_iter_num))

    # load data set
    datasets = load_cornell("../data/cornell/cornell_adj.txt",
                            "../data/cornell/cornell_content.txt")
    data = theano.shared(numpy.asarray(datasets, dtype=theano.config.floatX), borrow=borrow)

    # declare symbolic variables
    index = T.lscalar()
    x = T.matrix('x')

    in_layer_size = [390, 256]
    out_layer_size = [256, 128]
    layer_num = len(out_layer_size)
    every_layer_output_list = []

    for layer_num_index in range(layer_num):
        # BUGFIX: reset BOTH per-restart lists for each layer. Previously
        # only the cost list was cleared, so at layer 2 the min-cost index
        # (over 10 costs) indexed into 20 accumulated outputs and could
        # select a previous layer's (wrong-shaped) output.
        every_layer_cost_result_temp_list = []
        every_layer_output_result_temp_list = []
        # time the whole layer (all restarts), not just the last one
        start_time = timeit.default_timer()

        for t in range(get_mini_cost_iter_num):

            # fresh random init for every restart, to get mini cost
            rng = numpy.random.RandomState()
            theano_rng = RandomStreams(rng.randint(2 ** 30))  # random integer [0, 2 ** 30)

            da = SAE(
                numpy_rng=rng,
                theano_rng=theano_rng,
                input=x,
                n_visible=in_layer_size[layer_num_index],
                n_hidden=out_layer_size[layer_num_index]
            )

            cost, updates = da.get_cost_updates(
                learning_rate=learning_rate
            )

            # one-row "minibatch": row `index` of the shared data matrix
            train_da = theano.function(
                [index],
                cost,
                updates=updates,
                givens={
                    x: data[index: (index + 1)]
                }
            )

            sample_num = len(datasets)

            # train model for iter_num epochs
            stopping = False
            epoch = 1
            while not stopping:
                # go through training set
                c = []
                for batch_index in range(sample_num):
                    c.append(train_da(batch_index))

                cost_result = numpy.mean(c)
                print('Training layer %i, iter num %d, epoch %d, cost %f' % ((layer_num_index + 1), (t + 1), epoch, cost_result))
                epoch += 1
                if epoch > iter_num:
                    stopping = True
                    every_layer_cost_result_temp_list.append(cost_result)
                    logger.info("params result:")
                    logger.info(da.W.get_value())
                    # record this restart's hidden representation
                    y = da.get_hidden_values(data)
                    every_layer_output_result_temp_list.append(y.eval())

        # get the output matching the minimal final cost
        # (lazy %-args so formatting happens only when the record is emitted;
        # the original left the %i placeholder unfilled)
        logger.info("Training layer %i, all cost result list is:", layer_num_index + 1)
        logger.info(every_layer_cost_result_temp_list)
        logger.info("Training layer %i, all output result is:", layer_num_index + 1)
        logger.info(every_layer_output_result_temp_list)
        # numpy.argmin replaces the manual min-search loop
        temp_mini_cost_index = int(numpy.argmin(every_layer_cost_result_temp_list))

        next_layer_input = every_layer_output_result_temp_list[temp_mini_cost_index]
        logger.info("mini cost index:")
        logger.info(temp_mini_cost_index)
        logger.info("next layer input:")
        logger.info(next_layer_input)
        end_time = timeit.default_timer()

        training_time = (end_time - start_time)
        logger.info(('The train code runtime is %.2fm' % ((training_time) / 60.)))
        every_layer_output_list.append(next_layer_input)
        # the best restart's output becomes the next layer's input
        data.set_value(next_layer_input)

    # cluster the last layer's output by k-means, again with restarts
    kmeans_cost_result = []
    cluster_result_label_result = []
    for t in range(get_mini_cost_iter_num):
        k = 5
        centroids, cluster_assment = kmeans(every_layer_output_list[-1], k)
        cluster_result_label = list(array(cluster_assment)[:, 0])
        cluster_result_error = list(array(cluster_assment)[:, 1])
        kmeans_cost_result.append(numpy.sum(cluster_result_error))
        cluster_result_label_result.append(cluster_result_label)

    # get the best clustering with the minimal cost value
    logger.info("k-means cost result:")
    logger.info(kmeans_cost_result)
    logger.info("k-means label result:")
    logger.info(cluster_result_label_result)
    temp_kmeans_mini_cost_index = int(numpy.argmin(kmeans_cost_result))

    cornell_ground_truth = load_cornell_groundtruth(
        "../data/cornell/cornell_ground_truth.txt")

    logger.info("best clustering index:")
    logger.info(temp_kmeans_mini_cost_index)
    logger.info("best clustering label:")
    # logger.info(metrics.adjusted_mutual_info_score(cornell_ground_truth, cluster_result_label_result[temp_kmeans_mini_cost_index]))
    logger.info(metrics.normalized_mutual_info_score(cornell_ground_truth,
                                                     cluster_result_label_result[temp_kmeans_mini_cost_index]))


if __name__ == '__main__':
    # Entry point: train the stacked auto-encoder with default parameters
    # and log the resulting clustering quality.
    test_ae()
    # Scratch code kept for reference: direct k-means on the raw data.
    # print ("*********************************************************")
    # dd = load_cornell()
    # k = 5
    # centroids, clusterAssment = kmeans(dd, k)
    # cluster_result_label = list(array(clusterAssment)[:, 0])
    # print(cluster_result_label)
    # cornell_ground_truth = load_cornell_groundtruth()
    # print(metrics.adjusted_mutual_info_score(cornell_ground_truth, cluster_result_label))
    # print (metrics.normalized_mutual_info_score(cornell_ground_truth, cluster_result_label))