#!/usr/bin/env python
# -*- coding:utf-8 -*-

from __future__ import print_function

import timeit

import numpy
from sklearn import metrics
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

from load_dataset import *
import logging
from test_logging import logging_conf
from kmeans_alg import *
import scipy.io as sio

# logging info
# Configure handlers/format for the whole process (defined in test_logging),
# then grab a module-specific logger.
logging_conf()
logger = logging.getLogger('SAE_minicost_q_ncut.py')


class HiddenLayer(object):
    """A fully-connected layer: output = activation(input . W + b).

    Weights are initialized with the Glorot/Bengio uniform scheme,
    +/- sqrt(6 / (n_in + n_out)); for sigmoid activations the interval
    is widened by a factor of 4, as recommended for logistic units.
    """

    def __init__(self, rng, input, n_in, n_out, W=None, b=None, activation=T.tanh):
        """
        :param rng: numpy.random.RandomState used for weight initialization
        :param input: symbolic tensor of shape (n_examples, n_in)
        :param n_in: dimensionality of the input
        :param n_out: number of units in this layer
        :param W: optional shared weight matrix; created if None
        :param b: optional shared bias vector; created if None
        :param activation: elementwise non-linearity; None means linear output
        """
        self.input = input

        if W is None:
            W_values = numpy.asarray(
                rng.uniform(
                    low=-numpy.sqrt(6. / (n_in + n_out)),
                    high=numpy.sqrt(6. / (n_in + n_out)),
                    size=(n_in, n_out)
                ),
                dtype=theano.config.floatX
            )
            # Compare by identity: `activation` is a function object, and the
            # original `==` comparison only worked because functions fall back
            # to identity equality anyway.
            if activation is T.nnet.sigmoid:
                # Sigmoid units train better from a 4x wider init interval
                # (Glorot & Bengio, 2010).
                W_values *= 4

            W = theano.shared(value=W_values, name='W', borrow=True)

        if b is None:
            b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name='b', borrow=True)

        self.W = W
        self.b = b

        # Symbolic forward pass; a None activation yields a purely linear layer.
        lin_output = T.dot(input, self.W) + self.b
        self.output = (
            lin_output if activation is None
            else activation(lin_output)
        )

        # parameters of the model
        self.params = [self.W, self.b]


class AE(object):
    """Single auto-encoder layer with tied weights (W_prime = W.T).

    Encodes with sigmoid(x.W + b), decodes with sigmoid(h.W.T + b_prime),
    and trains by SGD on the Euclidean reconstruction error.
    """

    def __init__(
            self,
            numpy_rng,
            theano_rng=None,
            input=None,
            n_visible=390,
            n_hidden=256,
            W=None,
            bhid=None,
            bvis=None
    ):
        """
        :param numpy_rng: numpy.random.RandomState for weight init
        :param theano_rng: optional Theano RandomStreams; seeded from
            numpy_rng if None
        :param input: symbolic input minibatch; a fresh dmatrix if None
        :param n_visible: dimensionality of the input
        :param n_hidden: number of hidden units
        :param W: optional shared weight matrix (e.g. shared with a
            HiddenLayer); created if None
        :param bhid: optional shared hidden bias; created if None
        :param bvis: optional shared visible (reconstruction) bias;
            created if None
        """
        self.n_visible = n_visible
        self.n_hidden = n_hidden

        if theano_rng is None:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

        # Init params W, bhid and bvis. Use explicit `is None` checks:
        # callers (see SAE) pass Theano shared variables here, and relying
        # on their truthiness is fragile.
        if W is None:
            initial_W = numpy.asarray(
                numpy_rng.uniform(
                    low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
                    size=(n_visible, n_hidden)
                ),
                dtype=theano.config.floatX
            )
            W = theano.shared(value=initial_W, name='W', borrow=True)

        if bvis is None:
            bvis = theano.shared(
                value=numpy.zeros(
                    n_visible,
                    dtype=theano.config.floatX
                ),
                borrow=True
            )

        if bhid is None:
            bhid = theano.shared(
                value=numpy.zeros(
                    n_hidden,
                    dtype=theano.config.floatX
                ),
                name='b',
                borrow=True
            )

        self.W = W
        self.b = bhid
        self.b_prime = bvis
        # Tied weights: the decoder reuses the encoder matrix, transposed.
        self.W_prime = self.W.T
        self.theano_rng = theano_rng

        if input is None:
            self.x = T.dmatrix(name='input')
        else:
            self.x = input

        # W_prime is not listed: it is a view of W and carries no
        # independent parameters.
        self.params = [self.W, self.b, self.b_prime]

    def get_hidden_values(self, input):
        """ Computes the values of the hidden layer """
        return T.nnet.sigmoid(T.dot(input, self.W) + self.b)

    def get_reconstructed_input(self, hidden):
        """Computes the reconstructed input given the values of the
        hidden layer

        """
        return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)

    def get_cost_updates(self, learning_rate):
        """Return (cost, updates, hidden) for one SGD training step.

        :param learning_rate: symbolic or numeric SGD step size
        :returns: tuple of (symbolic cost, list of (param, new_value)
            update pairs, symbolic hidden representation)
        """
        self.y = self.get_hidden_values(self.x)
        z = self.get_reconstructed_input(self.y)

        # Objective: Euclidean (Frobenius) norm of the reconstruction error.
        cost = T.sqrt(T.sum((self.x - z) ** 2))

        # Compute the gradients of the cost with respect to the parameters.
        gparams = T.grad(cost, self.params)

        # Plain SGD update rule for every parameter.
        updates = [
            (param, param - learning_rate * gparam)
            for param, gparam in zip(self.params, gparams)
            ]

        return (cost, updates, self.y)


# define SAE class
class SAE(object):
    """Stacked auto-encoder built from sigmoid HiddenLayers.

    Each hidden layer shares its W and b with a companion AE so the AEs
    can be pre-trained greedily, layer by layer, while the feed-forward
    stack sees the trained parameters directly.
    """

    def __init__(
            self,
            numpy_rng,
            theano_rng=None,
            n_ins=390,
            hidden_layers_sizes=None
    ):
        """
        :param numpy_rng: numpy.random.RandomState for weight init
        :param theano_rng: optional RandomStreams; seeded from numpy_rng
            if None
        :param n_ins: dimensionality of the network input
        :param hidden_layers_sizes: list of hidden-layer widths; defaults
            to [256, 128]. A None sentinel avoids the shared-mutable-default
            pitfall of the original signature.
        """
        if hidden_layers_sizes is None:
            hidden_layers_sizes = [256, 128]

        self.n_layers = len(hidden_layers_sizes)
        self.sigmoid_layers = []
        self.ae_layers = []
        self.params = []

        if theano_rng is None:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

        self.x = T.matrix('x')

        # Build every layer: the first consumes self.x, each subsequent
        # layer consumes the previous sigmoid layer's output.
        for layer_num_index in range(self.n_layers):
            if layer_num_index == 0:
                input_size = n_ins
                layer_input = self.x
            else:
                input_size = hidden_layers_sizes[layer_num_index - 1]
                layer_input = self.sigmoid_layers[-1].output

            sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                        input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[layer_num_index],
                                        activation=T.nnet.sigmoid)
            self.sigmoid_layers.append(sigmoid_layer)
            self.params.extend(sigmoid_layer.params)

            # The AE shares W/bhid with the sigmoid layer, so pre-training
            # the AE trains the feed-forward stack in place.
            ae_layer = AE(numpy_rng=numpy_rng,
                          theano_rng=theano_rng,
                          input=layer_input,
                          n_visible=input_size,
                          n_hidden=hidden_layers_sizes[layer_num_index],
                          W=sigmoid_layer.W,
                          bhid=sigmoid_layer.b)
            self.ae_layers.append(ae_layer)

    def get_train_functions(self, train_set_x, batch_size):
        """Compile one pre-training function per AE layer.

        :param train_set_x: shared variable holding the full training matrix
        :param batch_size: rows per minibatch
        :returns: list of theano functions, one per layer; each takes
            (index, lr=0.1) and returns [cost, hidden_output]
        """
        index = T.lscalar('index')
        learning_rate = T.scalar('lr')
        batch_begin = index * batch_size
        batch_end = batch_begin + batch_size
        train_fns = []
        for s in self.ae_layers:
            # Symbolic cost/updates for one SGD step of this layer.
            cost, updates, y = s.get_cost_updates(
                learning_rate=learning_rate
            )
            # Compile the theano function; `lr` defaults to 0.1 when the
            # caller omits it.
            fn = theano.function(
                inputs=[
                    index,
                    theano.In(learning_rate, value=0.1)
                ],
                outputs=[cost, y],
                updates=updates,
                givens={
                    self.x: train_set_x[batch_begin: batch_end]
                }
            )
            train_fns.append(fn)
        return train_fns


def run_sae(learning_rate=0.1, iter_num=10, error_threshold=0.000001, get_mini_cost_iter_num=10, borrow=True):
    """Greedy layer-wise pre-training of the SAE on the Cornell dataset.

    :param learning_rate: SGD step size passed to each layer's train fn
    :param iter_num: number of epochs per layer
    :param error_threshold: currently unused; kept for interface compatibility
    :param get_mini_cost_iter_num: currently unused; kept for interface compatibility
    :param borrow: forwarded to theano.shared when wrapping the dataset
    """
    # Load the data set once and wrap it in a shared variable so Theano
    # can keep it on-device across minibatches.
    datasets = load_cornell("../data/cornell/cornell_adj.txt", "../data/cornell/cornell_content.txt")
    data = theano.shared(numpy.asarray(datasets, dtype=theano.config.floatX), borrow=borrow)
    row_num = len(datasets)

    numpy_rng = numpy.random.RandomState()
    sae = SAE(
        numpy_rng=numpy_rng,
        n_ins=390,
        hidden_layers_sizes=[256, 128, 64]
    )

    # batch_size=1: one row (node) per SGD step.
    train_fns = sae.get_train_functions(train_set_x=data, batch_size=1)

    # Fully train layer i before moving on to layer i+1.
    for i in range(sae.n_layers):
        for epoch in range(iter_num):
            costs = []
            for batch_index in range(row_num):
                cost, y = train_fns[i](index=batch_index, lr=learning_rate)
                costs.append(cost)
                # Was `shape(y)`, a name this module never defines; qualify
                # it so the call does not depend on a star-import re-export.
                print(numpy.shape(y))
            print('Training layer %i, epoch %d, cost %f' % (i, epoch, numpy.mean(costs)))


if __name__ == '__main__':
    # Script entry point: run pre-training with default hyper-parameters.
    run_sae()