#!/usr/bin/env python
# -*- coding:utf-8 -*-

from __future__ import print_function

import logging
import os
import sys
import time
import timeit

import numpy
import h5py

import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

from load_dataset import *
from test_logging import logging_conf
from kmeans_alg import *
from sklearn import metrics
import scipy.io as sio

# Configure logging once at import time; the module logger is used below.
logging_conf()
logger = logging.getLogger('M_SAE.py')

class SAE(object):
    """One autoencoder layer of a stacked autoencoder (SAE).

    The layer encodes the input through a sigmoid layer and reconstructs it
    with a tied-weight decoder (W_prime = W.T).  ``get_cost_updates`` adds a
    graph-regularisation term on the hidden representation driven by a
    (semi-supervised) adjacency matrix.
    """

    def __init__(
            self,
            numpy_rng,
            theano_rng=None,
            input=None,
            n_visible=390,
            n_hidden=256,
            W=None,
            bhid=None,
            bvis=None
    ):
        """
        :param numpy_rng: numpy.random.RandomState used to initialise W.
        :param theano_rng: Theano RandomStreams; derived from numpy_rng if None.
        :param input: symbolic input matrix; a fresh dmatrix is created if None.
        :param n_visible: number of visible (input) units.
        :param n_hidden: number of hidden units.
        :param W: optional pre-built shared weight matrix (for weight sharing).
        :param bhid: optional shared hidden-layer bias vector.
        :param bvis: optional shared visible-layer bias vector.
        """
        self.n_visible = n_visible
        self.n_hidden = n_hidden

        # Create a Theano random generator that gives symbolic random values.
        # Use `is None` rather than `not ...`: truth-testing a shared
        # variable / ndarray-like object is ambiguous and can raise.
        if theano_rng is None:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

        # note : W' was written as `W_prime` and b' as `b_prime`
        if W is None:
            # Uniform init scaled for sigmoid units (4 * Glorot range).
            initial_W = numpy.asarray(
                numpy_rng.uniform(
                    low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    size=(n_visible, n_hidden)
                ),
                dtype=theano.config.floatX
            )
            W = theano.shared(value=initial_W, name='W', borrow=True)

        if bvis is None:
            bvis = theano.shared(
                value=numpy.zeros(
                    n_visible,
                    dtype=theano.config.floatX
                ),
                borrow=True
            )

        if bhid is None:
            bhid = theano.shared(
                value=numpy.zeros(
                    n_hidden,
                    dtype=theano.config.floatX
                ),
                name='b',
                borrow=True
            )

        self.W = W
        self.b = bhid            # hidden-layer bias
        self.b_prime = bvis      # visible-layer (reconstruction) bias
        self.W_prime = self.W.T  # tied weights: decoder is the encoder's transpose
        self.theano_rng = theano_rng

        # If no input is given, generate a variable representing the input.
        if input is None:
            self.x = T.dmatrix(name='input')
        else:
            self.x = input

        # W_prime is tied to W, so it is not an independent parameter.
        self.params = [self.W, self.b, self.b_prime]

    def get_hidden_values(self, input):
        """Compute the hidden-layer activations (sigmoid encoder)."""
        return T.nnet.sigmoid(T.dot(input, self.W) + self.b)

    def get_reconstructed_input(self, hidden):
        """Reconstruct the input from the hidden activations (tied decoder)."""
        return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)

    def get_cost_updates(self, learning_rate, semi_adj_matrix):
        """Build the symbolic training cost and one SGD update step.

        Cost = Euclidean reconstruction error
               + 0.02 * trace(y.T @ semi_adj_matrix @ y)
        where y is the hidden representation.  The trace term regularises the
        embedding with the (semi-supervised) adjacency structure.

        :param learning_rate: SGD step size.
        :param semi_adj_matrix: square matrix (n_samples x n_samples) coupling
            the hidden representations of related samples.
        :returns: tuple (cost, updates, reconstruction_term, regulariser_term).
        """
        self.y = self.get_hidden_values(self.x)
        z = self.get_reconstructed_input(self.y)

        # Reconstruction term: Euclidean distance between input and output.
        a = T.sqrt(T.sum((self.x - z) ** 2))
        # Graph-regularisation term on the hidden representation.
        b = T.dot(T.dot(self.y.T, semi_adj_matrix), self.y).trace()
        cost = a + 0.02 * b

        # Plain SGD: gradients of the cost w.r.t. every parameter.
        gparams = T.grad(cost, self.params)
        updates = [
            (param, param - learning_rate * gparam)
            for param, gparam in zip(self.params, gparams)
        ]

        return (cost, updates, a, b)


def test_ae(learning_rate=0.1, iter_num=20000, error_threshold=0.000001, run_num=10,
            dataset_name='washington', community_num=5, borrow=True):
    """Greedy layer-wise training of the stacked autoencoder, followed by
    k-means clustering of each layer's embedding and NMI evaluation.

    :param learning_rate: SGD step size for every layer.
    :param iter_num: number of gradient steps per layer.
    :param error_threshold: accepted but currently unused; training always
        runs iter_num epochs (kept for interface compatibility).
    :param run_num: number of independent restarts.
    :param dataset_name: tag used in log messages and output file names.
    :param community_num: k for the k-means clustering of the embeddings.
    :param borrow: passed to theano.shared when wrapping the data.
    """
    # Log the run parameters (lazy %-style arg, not a bare extra argument —
    # the original call never actually logged dataset_name).
    logger.info("Start Data Set: %s", dataset_name)
    logger.info("there are param's value, learning_rate:%s, iter_num:%s, error_threshold:%s, run_num:%s"
                % (learning_rate, iter_num, error_threshold, run_num))

    # Load both arrays into memory up front so the HDF5 handle can be closed
    # immediately instead of leaking for the whole run.
    with h5py.File("washington_stack.h5", "r") as h5_file:
        datasets = h5_file["bb"][:]
        semi_adj_matrix = h5_file["aa"][:]
    row_num = len(datasets)

    for run_index in range(run_num):
        index = T.lscalar()
        x = T.matrix('x')
        data = theano.shared(numpy.asarray(datasets, dtype=theano.config.floatX), borrow=borrow)

        rng = numpy.random.RandomState(123)
        theano_rng = RandomStreams(rng.randint(2 ** 30))  # random integer [0, 2 ** 30)

        # Stack geometry: layer i maps in_layer_size[i] -> out_layer_size[i];
        # each layer's output feeds the next layer's input.
        in_layer_size = [434, 256, 128, 64, 32]
        out_layer_size = [256, 128, 64, 32, 16]
        n_layer = len(out_layer_size)
        params_list = []
        y_list = []
        for i in range(n_layer):
            print("==========================")
            print(i)
            da = SAE(
                numpy_rng=rng,
                theano_rng=theano_rng,
                input=x,
                n_visible=in_layer_size[i],
                n_hidden=out_layer_size[i]
            )

            cost, updates, a, b = da.get_cost_updates(
                learning_rate=learning_rate,
                semi_adj_matrix=semi_adj_matrix
            )

            # Full-batch training: the single "batch" covers all row_num rows.
            train_da = theano.function(
                [index],
                [cost, a, b],
                updates=updates,
                givens={
                    x: data[index: (index + row_num)]
                }
            )

            start_time = timeit.default_timer()

            # NOTE(review): error_threshold is never consulted here; the loop
            # always performs exactly iter_num full-batch updates.
            epoch = 1
            while epoch <= iter_num:
                cost_val, a_val, b_val = train_da(0)
                print('Run num %i, training layer %i, epoch %d, cost %f, a %f, b %f'
                      % ((run_index + 1), (i + 1), epoch, numpy.mean(cost_val),
                         numpy.mean(a_val), numpy.mean(b_val)))
                epoch += 1

            end_time = timeit.default_timer()
            training_time = end_time - start_time
            logger.info('The train code runtime is %.2fm' % (training_time / 60.))

            params_list.append(da.params)
            # Evaluate the hidden representation once and reuse it both as the
            # stored embedding and as the next layer's input (the original
            # evaluated the graph twice).
            y_val = da.get_hidden_values(data).eval()
            y_list.append(y_val)
            data.set_value(y_val)

        # Cluster every layer's embedding and log the NMI against ground truth.
        k = community_num
        result_label_list = []
        for i in range(len(y_list)):
            sio.savemat('../emb/emb_' + dataset_name + '_' + str(i) + '_' + str(run_index) + '.mat',
                        {'result_emb': y_list[i]})
            centroids, clusterAssment = kmeans(y_list[i], k)
            cluster_result_label = list(numpy.array(clusterAssment)[:, 0])
            result_label_list.append(cluster_result_label)
            print(cluster_result_label)
            ground_truth = load_groundtruth("../data/washington/washington_ground_truth.txt")
            # Use the module logger (the original used the root logger here).
            logger.info(metrics.normalized_mutual_info_score(ground_truth, cluster_result_label))
        time_str = time.strftime('%Y_%m_%d_%H_%M', time.localtime(time.time()))
        sio.savemat('../result/result_' + dataset_name + '_' + time_str + '_' + str(run_index) + '.mat',
                    {'result_label_list': result_label_list})


if __name__ == '__main__':
    # Run the full train/cluster/evaluate pipeline with default parameters.
    test_ae()
    # print ("*********************************************************")
    # dd = load_cornell()
    # k = 5
    # centroids, clusterAssment = kmeans(dd, k)
    # cluster_result_label = list(array(clusterAssment)[:, 0])
    # print(cluster_result_label)
    # cornell_ground_truth = load_cornell_groundtruth()
    # print(metrics.adjusted_mutual_info_score(cornell_ground_truth, cluster_result_label))
    # print (metrics.normalized_mutual_info_score(cornell_ground_truth, cluster_result_label))
