#!/usr/bin/env python
# -*- coding: utf-8 -*-

import tensorflow as tf
from tensorflow.contrib import rnn
import math
import numpy as np


class BirnnModel():
    """Stacked-LSTM language model built as a TF1 static graph.

    The constructor wires the full graph in order: placeholders ->
    embedding lookup -> 2-layer LSTM (tf.nn.dynamic_rnn) -> softmax
    projection -> masked cross-entropy loss -> Adam train op with
    gradient clipping.  `sample` then runs greedy decoding against a
    live session, one character per step.

    NOTE(review): despite the class name, no backward/bidirectional
    direction is built yet (the original carried a "TODO reverse"
    marker for this).
    """

    def __init__(self, config, mode=tf.contrib.learn.ModeKeys.TRAIN):
        """Build the whole training graph from a config dict.

        Args:
            config: dict with keys 'rnn_size', 'rnn_type',
                'embedding_size', 'vocab_size', 'batch_size',
                'num_sampled', 'lr', 'clip'.
            mode: tf.contrib.learn.ModeKeys value; dropout is applied
                only in TRAIN mode (fix: the original dropped
                activations during inference/sampling as well).
        """
        self.config = config
        self.mode = mode
        self.rnn_size = config['rnn_size']
        self.rnn_type = config['rnn_type']  # NOTE(review): read but never used below
        self.embedding_size = config['embedding_size']
        self.vocab_size = config['vocab_size']
        self.batch_size = config['batch_size']
        self.num_sampled = config['num_sampled']
        self.lr = config['lr']
        self.clip = config['clip']
        self.layers = 2
        # NOTE(review): unused -- dynamic_rnn below runs batch-major
        # (time_major=False); kept for interface compatibility.
        self.time_major = True

        # TODO: add the backward (reverse) direction implied by the class name.
        # NOTE(review): the lengths tensor is named "lenghts" (typo); kept
        # byte-identical so existing by-name graph lookups / checkpoints
        # keep working.
        self.inputs = tf.placeholder(dtype=tf.int64, shape=[None, None], name="inputs")
        self.targets = tf.placeholder(dtype=tf.int64, shape=[None, None], name="targets")
        self.lengths = tf.placeholder(dtype=tf.int32, shape=[None], name="lenghts")

        self.global_step = tf.Variable(0, trainable=False)
        self.embedding = tf.get_variable("embedding", [self.vocab_size, self.embedding_size])

        self.cell_fn = rnn.LSTMCell

        # Fix: apply dropout only while training.
        keep_prob = 0.75 if self.mode == tf.contrib.learn.ModeKeys.TRAIN else 1.0
        cells = []
        for _ in range(self.layers):
            cell = self.cell_fn(self.rnn_size)
            cell = rnn.DropoutWrapper(cell, input_keep_prob=keep_prob,
                                      output_keep_prob=keep_prob)
            cells.append(cell)
        self.cell = rnn.MultiRNNCell(cells, state_is_tuple=True)

        # NOTE(review): zero_state is built with config batch_size; `sample`
        # feeds a batch-1 state over it -- works only if batch_size == 1 at
        # sampling time (the usual char-rnn pattern). Confirm against caller.
        self.initial_state = self.cell.zero_state(self.batch_size, tf.float32)

        embed_inputs = self.embedding_layer(self.inputs)
        outputs, last_state = self.rnn_layer(embed_inputs)
        self.final_state = last_state

        logits = self.project_layer(outputs)
        self.loss = self.loss_layer(logits)
        self.train()

    def project_layer(self, outputs):
        """Project RNN outputs to vocab logits.

        Side effect: also sets self.probs, the softmax of the logits
        reshaped back to [batch, time, vocab] (used by `sample`).

        Args:
            outputs: dynamic_rnn output, [batch, time, rnn_size].
        Returns:
            logits reshaped to [batch, time, vocab_size].
        """
        # Named "weight"/"bias" (nce_*) because an nce_loss variant was
        # once planned -- see the TODO at the bottom of the class.
        nce_weights = tf.get_variable(
                initializer=tf.truncated_normal(
                    [self.rnn_size, self.vocab_size],
                    stddev=1.0 / math.sqrt(self.rnn_size)), name="weight")
        nce_biases = tf.get_variable(initializer=tf.zeros([self.vocab_size]), name="bias")

        # Flatten [batch, time, rnn_size] -> [batch*time, rnn_size].
        outputs = tf.concat(outputs, 2)
        outputs = tf.reshape(outputs, shape=[-1, self.rnn_size])

        if self.mode == tf.contrib.learn.ModeKeys.TRAIN:
            # Fix: dropout only in training mode (was unconditional).
            outputs = tf.nn.dropout(outputs, keep_prob=0.8)
        logits = tf.nn.xw_plus_b(outputs, nce_weights, nce_biases)
        probs = tf.nn.softmax(logits)
        logits = tf.reshape(logits, shape=(self.batch_size, -1, self.vocab_size))
        self.probs = tf.reshape(probs, shape=(self.batch_size, -1, self.vocab_size))
        return logits

    def loss_layer(self, logits):
        """Masked mean cross-entropy over the real (unpadded) tokens.

        Args:
            logits: [batch, time, vocab_size].
        Returns:
            scalar loss tensor.
        """
        with tf.name_scope("loss"):
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=self.targets)
            # Assumes targets are padded exactly to max(lengths), so the
            # mask shape matches the per-step loss -- TODO confirm feeder.
            mask = tf.sequence_mask(self.lengths, dtype=tf.float32)
            loss = loss * mask
            # Fix: average over real tokens only; the original's
            # reduce_mean over the whole padded matrix diluted the loss
            # by the padding count.
            loss = tf.reduce_sum(loss) / tf.maximum(tf.reduce_sum(mask), 1.0)
            return loss

    def rnn_layer(self, inputs):
        """Run the stacked LSTM over embedded inputs (batch-major).

        Args:
            inputs: [batch, time, embedding_size] float tensor.
        Returns:
            (outputs, last_state) from tf.nn.dynamic_rnn.
        """
        outputs, last_state = tf.nn.dynamic_rnn(self.cell,
                                                inputs,
                                                time_major=False,
                                                swap_memory=True,
                                                initial_state=self.initial_state,
                                                dtype=tf.float32,
                                                sequence_length=self.lengths)
        return outputs, last_state

    def embedding_layer(self, inputs):
        """Look up embeddings: [batch, time] ids -> [batch, time, emb]."""
        return tf.nn.embedding_lookup(self.embedding, inputs)

    def train(self):
        """Create the Adam optimizer and the clipped train op.

        Side effects: sets self.opt and self.train_op; the train op
        increments self.global_step.
        """
        self.opt = tf.train.AdamOptimizer(self.lr)
        grads_vars = self.opt.compute_gradients(self.loss)
        # Fix: variables off the loss path yield g = None; clipping None
        # raised -- skip them instead.
        capped_grads_vars = [(tf.clip_by_value(g, -self.clip, self.clip), v)
                             for g, v in grads_vars if g is not None]
        self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step)

    def sample(self, sess, id_to_char, vocab, UNK_ID, num=200, prime=[]):
        """Greedy-decode `num` characters, seeded with `prime`.

        Args:
            sess: a tf.Session with variables restored.
            id_to_char: sequence/dict mapping id -> character.
            vocab: dict mapping character -> id.
            UNK_ID: id used for out-of-vocabulary characters.
            num: number of characters to generate.
            prime: non-empty seed character list. (The mutable default
                is kept for interface compatibility; it is no longer
                mutated -- see the copy below.)
        Returns:
            list: prime characters followed by `num` generated ones.
        """
        state = sess.run(self.cell.zero_state(1, tf.float32))

        # Warm the RNN state on all but the last prime character.
        for char in prime[:-1]:
            x = np.zeros((1, 1))
            x[0, 0] = vocab.get(char, UNK_ID)
            feed = {self.inputs: x, self.initial_state: state, self.lengths: [1]}
            [state] = sess.run([self.final_state], feed)

        def pick(weights):
            # Greedy decoding: take the argmax. (A weighted random pick
            # existed here but was commented out in the original.)
            return np.argmax(weights)

        # Fix: copy -- the original aliased `ret = prime` and appended,
        # mutating the caller's list and the shared default argument.
        ret = list(prime)
        char = prime[-1]

        for _ in range(num):
            x = np.zeros((1, 1))
            # Fix: UNK fallback, consistent with the priming loop above
            # (the original used vocab[char] and could raise KeyError).
            x[0, 0] = vocab.get(char, UNK_ID)
            feed = {self.inputs: x, self.initial_state: state, self.lengths: [1]}
            [probs, state] = sess.run([self.probs, self.final_state], feed)
            char = id_to_char[pick(probs[0])]
            ret.append(char)
        return ret

    # TODO: optionally train with sampled softmax via tf.nn.nce_loss
    # (weights=nce_weights, biases=nce_biases, num_sampled=self.num_sampled,
    # num_classes=self.vocab_size) -- the config already carries num_sampled.
