# Copyright 2013    Yajie Miao    Carnegie Mellon University

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.

# import cPickle
# import gzip
import os
import sys
import time

import numpy

import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

from layers.logistic_sgd import LogisticRegression
from layers.mlp import HiddenLayer, DropoutHiddenLayer, _dropout_from_layer

class DNN(object):
    """Feed-forward deep neural network for classification, built as a
    Theano symbolic graph (Python 2 / old Theano API).

    The network stacks ``len(hidden_layers_sizes)`` hidden layers — plain
    activation, maxout, or pnorm pooling depending on constructor flags —
    followed by a LogisticRegression softmax output layer.  The lists
    ``self.params`` and ``self.delta_params`` collect, in matching order,
    the shared variables that finetuning updates and their momentum
    accumulators; that pairwise ordering is relied on throughout
    ``build_finetune_functions`` and must not be disturbed.
    """

    def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
                 hidden_layers_sizes=[500, 500], n_outs=10,
                 activation = T.nnet.sigmoid,
                 do_maxout = False, pool_size = 1, 
                 do_pnorm = False, pnorm_order = 1,
                 max_col_norm = None, l1_reg = None, l2_reg = None, adapt=False):
        """Build the symbolic network graph and the finetuning cost.

        :param numpy_rng: numpy.random.RandomState used to initialize weights.
        :param theano_rng: optional Theano RandomStreams; seeded from
            numpy_rng when not supplied.
        :param n_ins: input feature dimension.
        :param hidden_layers_sizes: one output width per hidden layer.
            (NOTE(review): mutable default argument — harmless here because
            the list is only read, never mutated.)
        :param n_outs: number of output classes (softmax width).
        :param activation: elementwise nonlinearity for plain hidden layers.
        :param do_maxout / pool_size: maxout pooling; each layer is built
            pool_size times wider with a linear activation, then pooled.
        :param do_pnorm / pnorm_order: pnorm pooling, same widening scheme.
        :param max_col_norm: if set, weight columns are renormalized to this
            max L2 norm after each update.
        :param l1_reg / l2_reg: optional L1/L2 weight penalties on the cost.
        :param adapt: when True, only the first hidden layer's adaptation
            parameters are collected for training (speaker-adaptation mode);
            all other layers and the output layer stay frozen.
        """

        self.sigmoid_layers = []
        self.params = []
        self.delta_params   = []
        self.n_layers = len(hidden_layers_sizes)

        self.max_col_norm = max_col_norm
        self.l1_reg = l1_reg
        self.l2_reg = l2_reg
        self.adapt = adapt

        assert self.n_layers > 0

        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        # allocate symbolic variables for the data
        self.x = T.matrix('x') 
        self.y = T.ivector('y')

        for i in range(self.n_layers):
            # construct the sigmoidal layer
            if i == 0:
                input_size = n_ins
                layer_input = self.x
            else:
                input_size = hidden_layers_sizes[i - 1]
                layer_input = self.sigmoid_layers[-1].output

            # Maxout/pnorm layers are built pool_size times wider with an
            # identity activation (lambda x: 1.0*x); the pooling itself is
            # done inside HiddenLayer (defined in layers.mlp, not visible here).
            if do_maxout == True:
                sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                        input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[i] * pool_size,
                                        activation = (lambda x: 1.0*x),
                                        do_maxout = True, pool_size = pool_size)
            elif do_pnorm == True:
                sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                        input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[i] * pool_size,
                                        activation = (lambda x: 1.0*x),
                                        do_pnorm = True, pool_size = pool_size, pnorm_order = pnorm_order)
            else:
                sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                        input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[i],
                                        activation=activation)
            # add the layer to our list of layers
            self.sigmoid_layers.append(sigmoid_layer)
            # In adaptation mode, only layer 0's adaptation parameters are
            # trainable; deeper layers contribute nothing to self.params.
            # (adapt_params / adapt_delta_params come from layers.mlp —
            # their exact contents are not visible in this file.)
            if adapt:
                if i==0:
                    self.params.extend(sigmoid_layer.adapt_params)
                    self.delta_params.extend(sigmoid_layer.adapt_delta_params)
            else:
                self.params.extend(sigmoid_layer.params)
                self.delta_params.extend(sigmoid_layer.delta_params)

        # We now need to add a logistic layer on top of the MLP
        self.logLayer = LogisticRegression(
                         input=self.sigmoid_layers[-1].output,
                         n_in=hidden_layers_sizes[-1], n_out=n_outs)

        self.sigmoid_layers.append(self.logLayer)

        # Output-layer parameters are frozen in adaptation mode.
        if not(adapt):
            self.params.extend(self.logLayer.params)
            self.delta_params.extend(self.logLayer.delta_params)

        # Python 2 print statement; dumps the trainable parameter list for
        # debugging. (This file is Python 2 only because of this line.)
        print >> sys.stderr , self.params
       
        # construct a function that implements one step of finetunining

        # compute the cost for second phase of training,
        # defined as the negative log likelihood
        self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
        self.errors = self.logLayer.errors(self.y)

        # NOTE(review): both regularization loops index self.params[i*2],
        # which assumes params alternate [W, b] per hidden layer.  When
        # adapt=True the params list holds only layer-0 adaptation params,
        # so this indexing would be wrong or out of range — confirm that
        # l1_reg/l2_reg are never combined with adapt=True by callers.
        if self.l1_reg is not None:
            for i in range(self.n_layers):
                W = self.params[i * 2]
                self.finetune_cost += self.l1_reg * (abs(W).sum())

        if self.l2_reg is not None:
            for i in range(self.n_layers):
                W = self.params[i * 2]
                self.finetune_cost += self.l2_reg * T.sqr(W).sum()

    def build_finetune_functions(self, train_shared_xy, valid_shared_xy, batch_size):
        """Compile and return (train_fn, valid_fn) Theano functions.

        train_fn(index, learning_rate=0.0001, momentum=0.5) performs one
        momentum-SGD step on minibatch ``index`` and returns the batch error;
        valid_fn(index) returns the error on a validation minibatch.

        :param train_shared_xy: (shared_x, shared_y) training data on device.
        :param valid_shared_xy: (shared_x, shared_y) validation data.
        :param batch_size: minibatch size used to slice the shared tensors.
        """

        (train_set_x, train_set_y) = train_shared_xy
        (valid_set_x, valid_set_y) = valid_shared_xy

        index = T.lscalar('index')  # index to a [mini]batch
        learning_rate = T.fscalar('learning_rate')
        momentum = T.fscalar('momentum')

        # compute the gradients with respect to the model parameters
        gparams = T.grad(self.finetune_cost, self.params)

        # compute list of fine-tuning updates
        # Gradient mask applied only in adaptation mode: entries set to 1.0
        # receive updates, everything else is frozen at 0.
        # NOTE(review): the (465, 1024) shape is hard-coded (see TODO) and
        # must match the layer-0 adaptation weight matrix — it will silently
        # misbehave or error for any other architecture.
        # muptiplier = numpy.zeros(size=(465, 1024), dtype=theano.config.floatX)  #todo remove the hard coding
        muptiplier = numpy.zeros((465, 1024), dtype=theano.config.floatX)  #todo remove the hard coding
        # Column indices of "sensitive" hidden units chosen offline for the
        # dnn-fbank-ivec model (the commented lists below are alternatives
        # for other models / ablations).
        # NOTE(review): indices are floats; float fancy-indexing is rejected
        # by modern numpy — this relies on an old numpy version. Confirm.
        sensitiveList=[5.0, 17.0, 18.0, 42.0, 47.0, 70.0, 74.0, 99.0, 107.0, 108.0, 145.0, 154.0, 162.0, 182.0, 203.0, 205.0, 206.0,
                       209.0, 224.0, 229.0, 237.0, 238.0, 239.0, 240.0, 249.0, 255.0, 256.0, 275.0, 290.0, 295.0, 297.0, 298.0, 306.0, 313.0, 315.0,
                       324.0, 332.0, 357.0, 365.0, 369.0, 380.0, 388.0, 400.0, 416.0, 418.0, 424.0, 429.0, 431.0, 453.0, 459.0, 462.0, 469.0, 470.0, 497.0,
                       518.0, 525.0, 526.0, 531.0, 541.0, 547.0, 556.0, 560.0, 566.0, 578.0, 614.0, 640.0, 648.0, 659.0, 664.0, 671.0, 679.0, 680.0, 682.0, 704.0,
                       710.0, 722.0, 725.0, 745.0, 760.0, 782.0, 815.0, 820.0, 828.0, 839.0, 850.0, 852.0, 878.0, 879.0, 885.0, 888.0, 893.0, 895.0, 901.0, 905.0,
                       930.0, 955.0, 961.0, 974.0, 998.0, 1002.0]  #this is the list for dnn-fbank-ivec model


        # sensitiveList = [11.0, 16.0, 20.0, 28.0, 34.0, 46.0, 58.0, 61.0, 63.0, 82.0, 83.0, 94.0, 97.0, 102.0, 106.0, 109.0, 113.0, 129.0, 131.0, 152.0, 170.0,
        #                  171.0, 174.0, 175.0, 214.0, 215.0, 219.0, 234.0, 242.0, 267.0, 276.0, 321.0, 322.0, 333.0, 335.0, 351.0, 368.0, 389.0, 399.0, 403.0,
        #                  405.0, 410.0, 414.0, 417.0, 432.0, 435.0, 437.0, 455.0, 465.0, 468.0, 480.0, 501.0, 503.0, 519.0, 527.0, 529.0, 537.0, 552.0, 555.0,
        #                  576.0, 584.0, 590.0, 615.0, 631.0, 646.0, 658.0, 665.0, 676.0, 700.0, 711.0, 719.0, 720.0, 735.0, 766.0, 784.0, 802.0, 827.0, 868.0,
        #                  873.0, 881.0, 891.0, 898.0, 903.0, 910.0, 913.0, 933.0, 942.0, 948.0, 954.0, 960.0, 962.0, 966.0, 969.0, 975.0, 989.0, 1003.0, 1007.0,
        #                  1014.0, 1019.0, 1020.0]  #less sensitive hidden units of dnn-fbank-ivec model, just to check the wer
        # sensitiveList = [0.0, 7.0, 10.0, 11.0, 14.0, 20.0, 24.0, 27.0, 45.0, 76.0, 84.0, 96.0, 134.0, 156.0, 157.0, 159.0, 165.0, 168.0,
        #                  170.0, 180.0, 183.0, 227.0, 241.0, 245.0, 284.0, 291.0, 330.0, 340.0, 348.0, 367.0, 370.0, 379.0, 380.0, 383.0,
        #                  398.0, 406.0, 431.0, 440.0, 454.0, 462.0, 470.0, 471.0, 477.0, 483.0, 484.0, 493.0, 512.0, 514.0, 516.0, 524.0,
        #                  544.0, 557.0, 566.0, 571.0, 577.0, 586.0, 591.0, 594.0, 603.0, 620.0, 630.0, 645.0, 655.0, 658.0, 659.0, 676.0,
        #                  688.0, 693.0, 719.0, 723.0, 729.0, 741.0, 742.0, 744.0, 759.0, 763.0, 774.0, 788.0, 790.0, 797.0, 819.0, 830.0,
        #                  860.0, 865.0, 878.0, 885.0, 901.0, 903.0, 907.0, 916.0, 935.0, 938.0, 958.0, 959.0, 960.0, 970.0, 986.0, 995.0,
        #                  1007.0, 1022.0] #this is the list for dnn-fmllr-ivec model

        # Unfreeze only rows 25..49 (presumably the i-vector portion of the
        # input — TODO confirm against the feature layout) at the sensitive
        # columns; all other weight entries keep a zero gradient mask.
        muptiplier[25:50,sensitiveList]= 1.0
        # muptiplier[440:,924:]= 1.0


        # Classical momentum SGD: delta <- momentum*delta - lr*grad,
        # then param <- param + delta.  Relies on self.delta_params and
        # self.params being in the same order as gparams.
        # NOTE(review): plain dict used for Theano updates; old Theano
        # accepted this, newer versions expect an OrderedDict/list — confirm
        # the Theano version in use.
        updates = {}
        for dparam, gparam in zip(self.delta_params, gparams):
            if self.adapt:
                # Mask the gradient so only selected weights adapt.
                updates[dparam] = momentum * dparam - (muptiplier *gparam)*learning_rate
            else:
                updates[dparam] = momentum * dparam - gparam*learning_rate
            #updates[dparam] = momentum * dparam - gparam*learning_rate
        for dparam, param in zip(self.delta_params, self.params):
            updates[param] = param + updates[dparam]

        # Optional max-norm regularization: rescale each weight column whose
        # L2 norm exceeds max_col_norm back onto the norm ball.
        # NOTE(review): params[i*2] again assumes the [W, b] alternation —
        # see the matching note in __init__ about the adapt=True layout.
        if self.max_col_norm is not None:
            for i in range(self.n_layers):
                W = self.params[i * 2]
                if W in updates:
                    updated_W = updates[W]
                    col_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=0))
                    desired_norms = T.clip(col_norms, 0, self.max_col_norm)
                    # 1e-7 guards against division by zero for all-zero columns.
                    updates[W] = updated_W * (desired_norms / (1e-7 + col_norms))

        # theano.Param is the pre-0.8 spelling of theano.In (default-valued
        # symbolic inputs for learning_rate and momentum).
        train_fn = theano.function(inputs=[index, theano.Param(learning_rate, default = 0.0001),
              theano.Param(momentum, default = 0.5)],
              outputs=self.errors,
              updates=updates,
              givens={
                self.x: train_set_x[index * batch_size:
                                    (index + 1) * batch_size],
                self.y: train_set_y[index * batch_size:
                                    (index + 1) * batch_size]})

        valid_fn = theano.function(inputs=[index],
              outputs=self.errors,
              givens={
                self.x: valid_set_x[index * batch_size:
                                    (index + 1) * batch_size],
                self.y: valid_set_y[index * batch_size:
                                    (index + 1) * batch_size]})

        return train_fn, valid_fn

