# -*- coding: utf-8 -*-
__doc__ = """Tree GRU aka Recursive Neural Networks."""

import numpy as np
import theano
from theano import tensor as T
from collections import OrderedDict
import TD_RvNN
import BU_RvNN

#from theano.compat.python2x import OrderedDict
from theano.tensor.signal.pool import pool_2d
import cPickle

theano.config.floatX = 'float32'
################################ tree rnn class ######################################
class RvNN(object):
    """Data is represented in a tree structure.

    Every leaf and internal node has a data (provided by the input)
    and a memory or hidden state.  The hidden state is computed based
    on its own data and the hidden states of its children.  The
    hidden state of leaves is given by a custom init function.

    The entire tree's embedding is represented by the final
    state computed at the root.

    """
    def __init__(self, word_dim, hidden_dim=5, Nclass=4,
                degree=2, momentum=0.9,
                 trainable_embeddings=True,
                 labels_on_nonroot_nodes=False,
                 irregular_tree=True):
        """Build the combined top-down (TD) + bottom-up (BU) tree-GRU graph.

        Parameters
        ----------
        word_dim : vocabulary size of the per-node word-frequency vectors.
        hidden_dim : size of each GRU hidden state.
        Nclass : number of output classes.
        degree : maximum number of children per node (used by the BU model).
        momentum : momentum coefficient for gradient descent.
        trainable_embeddings, labels_on_nonroot_nodes : accepted but never
            referenced in this class — presumably kept for API compatibility
            with TD_RvNN / BU_RvNN; confirm before removing.
        irregular_tree : if True, the BU state buffer is tripled (see
            ``bu_compute_tree``).

        Side effect: compiles three theano functions — ``_train`` (one
        gradient step returning [loss, prediction]), ``_evaluate`` (merged
        tree embedding) and ``_predict`` (class distribution).
        """
        assert word_dim > 1 and hidden_dim > 1

        # Hyper-parameters shared by both the TD and BU models.
        self.word_dim = word_dim
        self.hidden_dim = hidden_dim
        self.Nclass = Nclass
        self.degree = degree
        self.momentum = momentum
        self.irregular_tree = irregular_tree
        self.params = []

        # Symbolic inputs fed to the TD model.
        self.td_x_word = T.matrix(name='td_x_word')  # word frequencies, one row per node
        self.td_x_index = T.imatrix(name='td_x_index')  # word indices, one row per node
        self.td_tree = T.imatrix(name='td_tree')  # shape [None, self.degree]
        self.td_num_parent = T.iscalar(name='td_num_parent')
        self.td_num_nodes = self.td_x_word.shape[0]  # total number of nodes (leaves + internal) in tree
        self.td_num_child = self.td_num_nodes - self.td_num_parent - 1

        # Symbolic inputs fed to the BU model.
        self.bu_x_word = T.matrix(name='bu_x_word')  # word frequencies, one row per node
        self.bu_x_index = T.imatrix(name='bu_x_index')  # word indices, one row per node
        self.bu_tree = T.imatrix(name='bu_tree')  # shape [None, self.degree]
        self.bu_num_parent = T.iscalar(name='bu_num_parent')
        self.bu_num_nodes = self.bu_x_word.shape[0]  # total number of nodes (leaves + internal) in tree
        self.bu_num_child = self.bu_num_nodes - self.bu_num_parent - 1

        self.y = T.ivector(name='y')  # gold label vector; compared to pred_y via squared error (length presumably Nclass)

        # TD hidden states: max-pool over the per-node states.
        self.td_tree_states = self.td_compute_tree(self.td_x_word, self.td_x_index, self.td_num_parent, self.td_tree)
        self.td_final_state = self.td_tree_states.max(axis=0)

        # BU hidden states: the last state computed is the root's.
        self.bu_tree_states = self.bu_compute_tree(self.bu_x_word, self.bu_x_index, self.bu_tree)
        self.bu_final_state = self.bu_tree_states[-1]

        # Concatenate both tree embeddings and classify the pair.
        self.final_state = T.concatenate([self.td_final_state, self.bu_final_state], axis=0)
        self.output_fn = self.create_output_fn()
        self.pred_y = self.output_fn(self.final_state)
        self.loss = self.loss_fn(self.y, self.pred_y)

        # Parameter updates and compiled entry points.
        self.learning_rate = T.scalar('learning_rate')
        train_inputs = [self.td_x_word, self.td_x_index, self.td_num_parent, self.td_tree,
                        self.bu_x_word, self.bu_x_index, self.bu_tree,
                        self.y, self.learning_rate]
        updates = self.gradient_descent(self.loss)


        self._train = theano.function(train_inputs,
                                      [self.loss, self.pred_y],
                                      updates=updates, allow_input_downcast=True)
        self._evaluate = theano.function([self.td_x_word, self.td_x_index, self.td_num_parent, self.td_tree,self.bu_x_word, self.bu_x_index, self.bu_tree], self.final_state, allow_input_downcast=True)
        self._predict = theano.function([self.td_x_word, self.td_x_index, self.td_num_parent, self.td_tree,self.bu_x_word, self.bu_x_index, self.bu_tree], self.pred_y, allow_input_downcast=True)

    def gradient_descent(self, loss):
        """Momentum gradient descent with global-norm gradient clipping.

        Gradients are rescaled so their global L2 norm is at most 5; if
        the norm is NaN/Inf, each gradient is replaced by ``0.1 * param``
        as a crude recovery step.

        Returns an OrderedDict of (shared variable -> new value) updates
        suitable for ``theano.function(updates=...)``.

        Bug fix: velocities are now theano shared variables whose update
        is part of the returned ``updates`` dict.  The original stored
        symbolic expressions in a Python list at graph-build time, so the
        momentum term was always applied to a zero velocity and momentum
        never accumulated across training steps.
        """
        grads = T.grad(loss, self.params)
        # One persistent velocity buffer per parameter (same shape/dtype).
        self.momentum_velocity_ = [
            theano.shared(np.zeros_like(param.get_value()))
            for param in self.params]
        grad_norm = T.sqrt(sum(T.sqr(g).sum() for g in grads))
        updates = OrderedDict()
        not_finite = T.or_(T.isnan(grad_norm), T.isinf(grad_norm))
        scaling_den = T.maximum(5.0, grad_norm)  # clip global norm to 5
        for param, grad, velocity in zip(self.params, grads,
                                         self.momentum_velocity_):
            grad = T.switch(not_finite, 0.1 * param,
                            grad * (5.0 / scaling_den))
            update_step = self.momentum * velocity - self.learning_rate * grad
            updates[velocity] = update_step
            updates[param] = param + update_step
        return updates

    def create_BU_recursive_unit(self):
        """Create the bottom-up GRU cell and register its parameters.

        The cell combines a parent node's word input with the SUM of its
        children's hidden states using standard GRU gates: update gate z,
        reset gate r, candidate state c.  All parameters are appended to
        ``self.params`` so ``gradient_descent`` trains them.
        """
        self.E_bu = theano.shared(self.init_matrix([self.hidden_dim, self.word_dim]))  # word embedding
        self.W_z_bu = theano.shared(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.U_z_bu = theano.shared(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.b_z_bu = theano.shared(self.init_vector([self.hidden_dim]))
        self.W_r_bu = theano.shared(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.U_r_bu = theano.shared(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.b_r_bu = theano.shared(self.init_vector([self.hidden_dim]))
        self.W_h_bu = theano.shared(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.U_h_bu = theano.shared(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.b_h_bu = theano.shared(self.init_vector([self.hidden_dim]))
        self.params.extend([self.E_bu, self.W_z_bu, self.U_z_bu, self.b_z_bu, self.W_r_bu, self.U_r_bu, self.b_r_bu, self.W_h_bu, self.U_h_bu, self.b_h_bu])
        def unit(parent_word, parent_index, child_h, child_exists):
            # NOTE(review): child_exists is accepted but never used here;
            # callers mask missing children before calling this unit.
            h_tilde = T.sum(child_h, axis=0)  # aggregate children into one vector
            # Embed the parent's words: pick embedding columns by index,
            # weighted by the word-frequency values.
            parent_xe = self.E_bu[:,parent_index].dot(parent_word)
            z_bu = T.nnet.hard_sigmoid(self.W_z_bu.dot(parent_xe)+self.U_z_bu.dot(h_tilde)+self.b_z_bu)  # update gate
            r_bu = T.nnet.hard_sigmoid(self.W_r_bu.dot(parent_xe)+self.U_r_bu.dot(h_tilde)+self.b_r_bu)  # reset gate
            c_bu = T.tanh(self.W_h_bu.dot(parent_xe)+self.U_h_bu.dot(h_tilde * r_bu)+self.b_h_bu)  # candidate state
            h_bu = z_bu*h_tilde + (1-z_bu)*c_bu  # interpolate old aggregate and candidate
            return h_bu
        return unit

    def create_TD_recursive_unit(self):
        """Create the top-down GRU cell and register its parameters.

        The cell combines a child node's word input with its PARENT's
        hidden state using standard GRU gates: update gate z, reset gate
        r, candidate state c.  All parameters are appended to
        ``self.params`` so ``gradient_descent`` trains them.
        """
        self.E_td = theano.shared(self.init_matrix([self.hidden_dim, self.word_dim]))  # word embedding
        self.W_z_td = theano.shared(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.U_z_td = theano.shared(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.b_z_td = theano.shared(self.init_vector([self.hidden_dim]))
        self.W_r_td = theano.shared(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.U_r_td = theano.shared(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.b_r_td = theano.shared(self.init_vector([self.hidden_dim]))
        self.W_h_td = theano.shared(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.U_h_td = theano.shared(self.init_matrix([self.hidden_dim, self.hidden_dim]))
        self.b_h_td = theano.shared(self.init_vector([self.hidden_dim]))
        self.params.extend([self.E_td, self.W_z_td, self.U_z_td, self.b_z_td, self.W_r_td, self.U_r_td, self.b_r_td, self.W_h_td, self.U_h_td, self.b_h_td])
        def unit(word, index, parent_h):
            # Embed this node's words (columns selected by index, weighted
            # by the word-frequency values).
            child_xe = self.E_td[:,index].dot(word)
            z_td = T.nnet.hard_sigmoid(self.W_z_td.dot(child_xe)+self.U_z_td.dot(parent_h)+self.b_z_td)  # update gate
            r_td = T.nnet.hard_sigmoid(self.W_r_td.dot(child_xe)+self.U_r_td.dot(parent_h)+self.b_r_td)  # reset gate
            c_td = T.tanh(self.W_h_td.dot(child_xe)+self.U_h_td.dot(parent_h * r_td)+self.b_h_td)  # candidate state
            h_td = z_td*parent_h + (1-z_td)*c_td  # interpolate parent state and candidate
            return h_td
        return unit


    def init_matrix(self, shape):
        """Sample a weight tensor from N(0, 0.1) in the configured float dtype."""
        sample = np.random.normal(loc=0.0, scale=0.1, size=shape)
        return sample.astype(theano.config.floatX)

    def init_vector(self, shape):
        """Return an all-zeros tensor of the given shape in the configured float dtype."""
        zeros = np.zeros(shape)
        return zeros.astype(theano.config.floatX)

    def create_output_fn(self):
        """Build the softmax output layer over the concatenated TD+BU state.

        Registers W_out ([Nclass, 2*hidden_dim]) and b_out ([Nclass]) as
        trainable parameters and returns a function mapping the merged
        final state to a class-probability distribution.
        """
        self.W_out = theano.shared(self.init_matrix([self.Nclass, 2 * self.hidden_dim]))
        self.b_out = theano.shared(self.init_vector([self.Nclass]))
        self.params.append(self.W_out)
        self.params.append(self.b_out)

        def fn(final_state):
            logits = self.W_out.dot(final_state) + self.b_out
            return T.nnet.softmax(logits)

        return fn

    def loss_fn(self, y, pred_y):
        """Sum of squared errors between the gold label vector and the prediction."""
        residual = y - pred_y
        return T.sqr(residual).sum()

    def td_compute_tree(self, x_word, x_index, num_parent, tree):
        """Run the top-down GRU over one tree.

        x_word / x_index : per-node word-frequency values and word indices.
        num_parent : number of internal (parent) nodes.
        tree : int matrix; at each scan step, node_info[0] indexes the
            parent's slot in the state buffer and node_info[1] the slot
            where the newly computed state is written.

        Returns the states computed from scan step ``num_parent - 1``
        onward (presumably the non-parent nodes under the expected node
        ordering — confirm against the TD_RvNN data preparation).
        """
        self.td_recursive_unit = self.create_TD_recursive_unit()

        # One zero state per input row.
        # NOTE(review): the original author wondered why this scans over
        # x_word — it is only used for its length; the row values are
        # ignored by ini_unit.
        def ini_unit(x):
            return theano.shared(self.init_vector([self.hidden_dim]))

        init_node_h, _ = theano.scan(fn=ini_unit, sequences=[x_word])

        def _recurrence(x_word, x_index, node_info, node_h, last_h):
            # Fetch the parent's hidden state, compute this node's state,
            # then write it into slot node_info[1] of the buffer.
            parent_h = node_h[node_info[0]]
            child_h = self.td_recursive_unit(x_word, x_index, parent_h)
            node_h = T.concatenate([node_h[:node_info[1]],
                                    child_h.reshape([1, self.hidden_dim]),
                                    node_h[node_info[1] + 1:]])
            return node_h, child_h

        # ``dummy`` seeds the per-step output slot of the scan.
        dummy = theano.shared(self.init_vector([self.hidden_dim]))
        (_, child_hs), _ = theano.scan(
            fn=_recurrence,
            outputs_info=[init_node_h, dummy],
            sequences=[x_word[:-1], x_index, tree])

        return child_hs[num_parent - 1:]

    def create_leaf_unit(self):
        """Wrap the BU cell for leaf nodes, which have no children.

        Feeds an all-zero [degree, hidden_dim] child-state block so the
        GRU's summed-children term vanishes.  NOTE(review):
        ``dummy.sum(axis=1)`` is passed as ``child_exists``; the BU unit
        never reads that argument, so the odd axis choice has no effect.
        """
        dummy = 0 * theano.shared(self.init_vector([self.degree, self.hidden_dim]))
        def unit(leaf_word, leaf_index):
            return self.bu_recursive_unit(leaf_word, leaf_index, dummy, dummy.sum(axis=1))
        return unit

    def bu_compute_tree(self, x_word, x_index, tree):
        """Run the bottom-up GRU: leaves first, then internal nodes.

        x_word / x_index : per-node inputs, leaves first then parents.
        tree : [num_parents, degree] child positions per internal node,
            with -1 marking a missing child slot.

        Returns the stacked hidden states [leaf states; parent states];
        the last row is the root's state (used as ``bu_final_state``).
        """
        self.bu_recursive_unit = self.create_BU_recursive_unit()
        self.leaf_unit = self.create_leaf_unit()
        num_parents = tree.shape[0]  # num internal nodes
        num_leaves = self.bu_num_nodes - num_parents

        # compute leaf hidden states
        leaf_h, _ = theano.map(
            fn=self.leaf_unit,
            sequences=[ x_word[:num_leaves], x_index[:num_leaves] ])
        if self.irregular_tree:
            # Triple the buffer so shifted child indices stay in range for
            # irregular node layouts — TODO(review): confirm against BU_RvNN.
            init_node_h = T.concatenate([leaf_h, leaf_h, leaf_h], axis=0)
        else:
            init_node_h = leaf_h

        # use recurrence to compute internal node hidden states
        def _recurrence(x_word, x_index, node_info, t, node_h, last_h):
            child_exists = node_info > -1
            # NOTE(review): the offset appears to compensate for the
            # node_h[1:] shift applied on each of the previous t steps,
            # plus the 2*num_leaves padding when irregular_tree — verify.
            offset = 2*num_leaves * int(self.irregular_tree) - child_exists * t ### offset???
            # Gather child states and zero the slots of missing children;
            # dimshuffle(0, 'x') broadcasts the per-child mask over the
            # hidden dimension.
            child_h = node_h[node_info + offset-1] * child_exists.dimshuffle(0, 'x')
            parent_h = self.bu_recursive_unit(x_word, x_index, child_h, child_exists)
            # Append the new parent state and drop the oldest row so the
            # buffer keeps a constant length across scan steps.
            node_h = T.concatenate([node_h,
                                    parent_h.reshape([1, self.hidden_dim])])
            return node_h[1:], parent_h

        # ``dummy`` seeds the per-step output slot of the scan.
        dummy = theano.shared(self.init_vector([self.hidden_dim]))
        (_, parent_h), _ = theano.scan(
            fn=_recurrence,
            outputs_info=[init_node_h, dummy],
            sequences=[x_word[num_leaves:], x_index[num_leaves:], tree, T.arange(num_parents)],
            n_steps=num_parents)
        return T.concatenate([leaf_h, parent_h], axis=0)

    def train_step_up(self, TD_word_train, TD_index_train, TD_parent_num_train, TD_tree_train, BU_word_train, BU_index_train, BU_tree_train,BU_y_train, lr):
        return self._train(TD_word_train, TD_index_train, TD_parent_num_train, TD_tree_train, BU_word_train, BU_index_train, BU_tree_train, BU_y_train, lr)

    def evaluate(self, TD_word_test, TD_index_test, TD_parent_num_test, TD_tree_test, BU_word_test, BU_index_test, BU_tree_test):
        return self._evaluate(self, TD_word_test, TD_index_test, TD_parent_num_test, TD_tree_test, BU_word_test, BU_index_test, BU_tree_test)

    def predict_up(self, TD_word_test, TD_index_test, TD_parent_num_test, TD_tree_test, BU_word_test, BU_index_test, BU_tree_test):
        return self._predict(TD_word_test, TD_index_test, TD_parent_num_test, TD_tree_test, BU_word_test, BU_index_test, BU_tree_test)

    def SaveModels(self, filename):
        with open(filename, 'wb') as fw:
            for param in self.params:
                cPickle.dump(param.get_value(borrow=True),fw, -1)
        return

    def LoadModels(self, filename):
        with open(filename, 'wb') as fr:
            for param in self.params:
                param.set_value(cPickle.load(fr), borrow=True)
        print "params are loaded from ", filename
        return
