import tensorflow as tf
import tensorflow.contrib.seq2seq as seq2seq
from config import *


class net():
    """Bi-LSTM encoder / LSTM decoder seq2seq graph (TensorFlow 1.x, contrib).

    Depending on ``configer.is_inference`` this builds either:
      * a training graph (optionally bidirectional "reverse-data" training with
        two decoders sharing parameters) with sequence loss, Adam and gradient
        clipping, or
      * an inference graph decoding with beam search or greedy search.

    NOTE(review): ``build_seq2seq`` branches on ``configer.is_inference``, not
    on the ``is_inference`` constructor argument stored on ``self`` — confirm
    the two are always kept in sync by callers.
    """

    def __init__(self, is_inference, batch_size, vocab_size, embedding_size, hidden_size):
        """Create placeholders and the shared embedding table.

        Args:
            is_inference: bool flag stored on the instance (see class note).
            batch_size: fixed batch size baked into placeholder shapes.
            vocab_size: number of rows of the embedding table / output logits.
            embedding_size: embedding dimensionality.
            hidden_size: LSTM units per direction of the encoder.
        """
        self.batch_size = batch_size
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.time_major = False
        self.lr = configer.lr
        self.is_inference = is_inference
        with tf.name_scope("placeholder"):
            # [batch, time] int32 token ids; time dimension is variable.
            self.input = tf.placeholder(shape=[batch_size, None], dtype=tf.int32, name="input")
            self.target = tf.placeholder(shape=[batch_size, None], dtype=tf.int32, name="target")
            # Per-example true lengths, used to mask padding in the RNNs.
            self.input_length = tf.placeholder(shape=[batch_size], dtype=tf.int32, name="input_length")
            self.target_length = tf.placeholder(shape=[batch_size], dtype=tf.int32, name="target_length")
            self.global_step = tf.Variable(0, trainable=False)
        with tf.variable_scope("embedding", reuse=tf.AUTO_REUSE):
            # One embedding table shared by encoder input and decoder target.
            self.embed_params = tf.Variable(
                tf.random_uniform(shape=[self.vocab_size, self.embedding_size], minval=-1.0, maxval=1.0),
                name='embedding', dtype=tf.float32)
            self.input_embed = tf.nn.embedding_lookup(params=self.embed_params, ids=self.input)
            self.target_embed = tf.nn.embedding_lookup(params=self.embed_params, ids=self.target)

    def encoder(self, input_embed, input_length):
        """Run a bidirectional LSTM encoder over embedded inputs.

        Args:
            input_embed: [batch, time, embedding_size] float32 embeddings.
            input_length: [batch] int32 true sequence lengths.

        Returns:
            encoder_outputs: [batch, time, 2*hidden_size] — forward and
                backward outputs concatenated on the feature axis.
            encoder_final_states: LSTMStateTuple whose c/h are the fw/bw
                final states concatenated, each [batch, 2*hidden_size].
        """
        encoder_cell_f = tf.nn.rnn_cell.LSTMCell(num_units=self.hidden_size)
        encoder_cell_b = tf.nn.rnn_cell.LSTMCell(num_units=self.hidden_size)
        (encoder_outputs_f, encoder_outputs_b),\
        (encoder_final_state_f, encoder_final_state_b) =  \
            tf.nn.bidirectional_dynamic_rnn(
                cell_fw=encoder_cell_f,
                cell_bw=encoder_cell_b,
                inputs=input_embed,
                sequence_length=input_length,
                dtype=tf.float32,
                )
        # Recombine the two directions so downstream cells see one
        # 2*hidden_size representation.
        encoder_outputs = tf.concat([encoder_outputs_f, encoder_outputs_b], axis=2)
        encoder_state_c = tf.concat([encoder_final_state_f.c, encoder_final_state_b.c], axis=1)
        encoder_state_h = tf.concat([encoder_final_state_f.h, encoder_final_state_b.h], axis=1)
        encoder_final_states = tf.nn.rnn_cell.LSTMStateTuple(c=encoder_state_c, h=encoder_state_h)
        return encoder_outputs, encoder_final_states

    def _train_decoder(self, loss_name, encoder_outputs, target, target_embed, target_length, input_length):
        """Build the teacher-forced training decoder and its sequence loss.

        Args:
            loss_name: tag for the per-decoder loss summary.
            encoder_outputs: [batch, time, 2*hidden_size] attention memory.
            target: [batch, time] int32 gold token ids.
            target_embed: embedded ``target``, fed via TrainingHelper.
            target_length: [batch] decode lengths for the helper.
            input_length: [batch] encoder lengths masking the attention memory.

        Returns:
            Scalar sequence loss for this decoder.
        """
        b_size, l_size = tf.unstack(tf.shape(target))
        pad = tf.zeros([self.batch_size, 1], tf.int32)
        # Decoder cell is 2*hidden_size to match the concatenated
        # bidirectional encoder features.
        cell = tf.nn.rnn_cell.LSTMCell(self.hidden_size * 2)
        fc_layer = tf.layers.Dense(self.vocab_size, name='fc_layer')
        # Optional Bahdanau attention over the encoder outputs.
        if configer.is_attention:
            attention_mechanism = seq2seq.BahdanauAttention(num_units=configer.attention_units,
                                                            memory=encoder_outputs,
                                                            memory_sequence_length=input_length)
            decoder_cell = seq2seq.AttentionWrapper(cell=cell,
                                                    attention_mechanism=attention_mechanism,
                                                    output_attention=False)
        else:
            decoder_cell = cell

        # Teacher forcing: feed the gold target embeddings at each step.
        # NOTE(review): the decoder starts from a zero state; the encoder
        # final state is never passed in — attention is the only bridge.
        train_helper = seq2seq.TrainingHelper(target_embed, target_length)
        decoder = seq2seq.BasicDecoder(decoder_cell, train_helper,
                                       decoder_cell.zero_state(self.batch_size, tf.float32),
                                       fc_layer)
        final_outputs, _, _ = seq2seq.dynamic_decode(decoder, maximum_iterations=l_size * 2)

        train_logits = tf.identity(final_outputs.rnn_output, 'logits')
        # Shift the target one step left (drop the leading BOS, pad with 0)
        # so logits at step t are scored against the token at t+1.
        # NOTE(review): the loss mask is built from the UNSHIFTED target, so
        # it is offset by one step relative to the shifted labels — confirm
        # this is intended.
        mask = tf.to_float(tf.not_equal(target, 0))
        train_target_ = tf.slice(target, [0, 1], [b_size, l_size - 1])
        train_target = tf.concat((train_target_, pad), 1)

        loss = seq2seq.sequence_loss(train_logits, train_target, mask)

        # Record this decoder's loss under its own tag.
        tf.summary.scalar(loss_name, loss)
        return loss

    def _optimizer(self, loss1, loss2=0):
        """Combine losses and build the clipped-gradient Adam train op.

        Args:
            loss1: primary decoder loss.
            loss2: optional second (reverse-direction) decoder loss.

        Returns:
            (total loss, merged summaries, train op).
        """
        loss = loss1 + loss2
        optimizer = tf.train.AdamOptimizer(self.lr)
        gradients = optimizer.compute_gradients(loss)
        # Clip each gradient elementwise to [-5, 5]; skip None gradients
        # (variables not on this loss's path).
        capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if
                            grad is not None]
        train_op = optimizer.apply_gradients(capped_gradients, global_step=self.global_step)
        tf.summary.scalar('loss', loss)
        summary_merge = tf.summary.merge_all()
        return loss, summary_merge, train_op

    def _inference_decoder(self, encoder_outputs, encoder_final_states):
        """Build the inference decoder: beam search or greedy.

        Args:
            encoder_outputs: [batch, time, 2*hidden_size] attention memory.
            encoder_final_states: encoder final state.
                NOTE(review): currently unused — the decoder starts from a
                zero state, matching the training decoder.

        Returns:
            Predicted token ids: ``predicted_ids`` ([batch, time, beam_width])
            for beam search, else ``sample_id`` ([batch, time]).
        """
        # Upper bound on decode length.
        # NOTE(review): derived from the `target` placeholder, so `target`
        # must be fed even at inference time — confirm callers do this, or
        # derive the bound from `input` instead.
        l_size = tf.shape(self.target)[1]

        decoder_cell = tf.nn.rnn_cell.LSTMCell(self.hidden_size * 2)
        fc_layer = tf.layers.Dense(self.vocab_size, name='fc_layer')

        # Every decoded sequence starts with token id 1 (BOS) and ends at 0.
        bos = tf.tile(tf.constant([1], dtype=tf.int32), [self.batch_size])
        eos = tf.constant(0, tf.int32)

        if configer.is_BeamSearch:
            beam_width = configer.beam_width
            # Beam search runs beam_width hypotheses per example, so the
            # attention memory must be tiled to batch_size * beam_width.
            tiled_encoder_outputs = seq2seq.tile_batch(encoder_outputs, multiplier=beam_width)
            tiled_sequence_length = seq2seq.tile_batch(self.input_length, multiplier=beam_width)
            if configer.is_attention:
                attention_mechanism = seq2seq.BahdanauAttention(
                    num_units=configer.attention_units,
                    memory=tiled_encoder_outputs,
                    memory_sequence_length=tiled_sequence_length
                )
                decoder_cell = seq2seq.AttentionWrapper(decoder_cell,
                                                        attention_mechanism,
                                                        output_attention=False)
            decoder = seq2seq.BeamSearchDecoder(
                decoder_cell, self.embed_params, bos, eos,
                decoder_cell.zero_state(dtype=tf.float32, batch_size=self.batch_size * beam_width),
                beam_width,
                fc_layer,
            )
            final_outputs, _, _ = seq2seq.dynamic_decode(decoder, maximum_iterations=l_size * 2)
            prediction = final_outputs.predicted_ids
        else:
            # Greedy decoding attends over the UNTILED memory. (Bug fix: the
            # previous code built the attention mechanism on beam-tiled memory
            # of batch_size*beam_width while the greedy decoder ran with
            # batch_size, causing a shape mismatch when attention was enabled
            # without beam search.)
            if configer.is_attention:
                attention_mechanism = seq2seq.BahdanauAttention(
                    num_units=configer.attention_units,
                    memory=encoder_outputs,
                    memory_sequence_length=self.input_length
                )
                decoder_cell = seq2seq.AttentionWrapper(decoder_cell,
                                                        attention_mechanism,
                                                        output_attention=False)
            greedy_helper = seq2seq.GreedyEmbeddingHelper(self.embed_params, bos, eos)
            decoder = seq2seq.BasicDecoder(decoder_cell,
                                           greedy_helper,
                                           decoder_cell.zero_state(
                                               dtype=tf.float32,
                                               batch_size=self.batch_size),
                                           fc_layer)
            final_outputs, _, _ = seq2seq.dynamic_decode(decoder, maximum_iterations=l_size * 2)
            prediction = final_outputs.sample_id

        return prediction

    def build_seq2seq(self):
        """Assemble the full graph according to ``configer`` flags.

        Inference: sets ``self.prediction``. Training: sets ``self.loss``,
        ``self.summary_merge`` and ``self.train_op``; with
        ``configer.is_reverse_data`` a second decoder (target->input) reuses
        the same variables and its loss is added to the first.
        """
        if configer.is_inference:
            with tf.variable_scope('seq2seq'):
                encoder_outputs, encoder_final_states = self.encoder(self.input_embed, self.input_length)
                self.prediction = self._inference_decoder(encoder_outputs, encoder_final_states)
        elif configer.is_reverse_data:
            with tf.variable_scope('seq2seq') as scope:
                # Forward direction: input -> target.
                encoder_outputs, encoder_final_states = self.encoder(self.input_embed, self.input_length)
                loss_1 = self._train_decoder(
                    loss_name='loss_',
                    encoder_outputs=encoder_outputs,
                    target=self.target,
                    target_embed=self.target_embed,
                    target_length=self.target_length,
                    input_length=self.input_length
                    )
            with tf.variable_scope(scope, reuse=True):
                # Reverse direction: target -> input, sharing all variables.
                encoder_outputs, encoder_final_states = self.encoder(self.target_embed, self.target_length)
                loss_2 = self._train_decoder(
                    loss_name='loss_re',
                    encoder_outputs=encoder_outputs,
                    target=self.input,
                    target_embed=self.input_embed,
                    target_length=self.input_length,
                    input_length=self.target_length
                )
            self.loss, self.summary_merge, self.train_op = self._optimizer(loss_1, loss_2)
        else:
            with tf.variable_scope('seq2seq'):
                encoder_outputs, encoder_final_states = self.encoder(self.input_embed, self.input_length)
                loss_1 = self._train_decoder(
                    loss_name='loss_',
                    encoder_outputs=encoder_outputs,
                    target=self.target,
                    target_embed=self.target_embed,
                    target_length=self.target_length,
                    input_length=self.input_length
                )
                self.loss, self.summary_merge, self.train_op = self._optimizer(loss_1)