import numpy as np
import tensorflow as tf
from tensorflow import layers

from tensorflow.python.ops import array_ops
from tensorflow.contrib import seq2seq
from tensorflow.contrib.seq2seq import BahdanauAttention
from tensorflow.contrib.seq2seq import LuongAttention
from tensorflow.contrib.seq2seq import AttentionWrapper
from tensorflow.contrib.seq2seq import BeamSearchDecoder

from tensorflow.contrib.rnn import LSTMCell
from tensorflow.contrib.rnn import GRUCell
from tensorflow.contrib.rnn import MultiRNNCell
from tensorflow.contrib.rnn import DropoutWrapper
from tensorflow.contrib.rnn import ResidualWrapper

from word_sequence import WordSequence
from data_utils import _get_embed_device

"""
__init__:基本参数的保存、验证
build_model:模型构建
init_placeholders:初始化变量的占位符
build_encoder:初始化编码器
init_optimizer:初始化优化器
train：训练一个batch
predict：预测一个batch
"""


class SequenceToSequence(object):

    def __init__(self,
                 input_vocab_size,
                 target_vocab_size,
                 batch_size=32,
                 embedding_size=300,
                 mode='train',
                 hidden_units=256,
                 depth=1,
                 beam_width=0,
                 cell_type='lstm',
                 dropout=0.2,
                 use_dropout=False,
                 use_residual=False,
                 optimizer='adam',
                 learning_rate=0.001,
                 min_learning_rate=0.000001,
                 decay_steps=50000,
                 max_gradient_norm=5.0,
                 max_decode_step=None,
                 attention_type='Bahdanau',
                 bidirectional=False,
                 time_major=False,
                 seed=0,
                 parallel_iterations=None,
                 share_embedding=False,
                 pretrained_embedding=False):
        """Store and validate hyperparameters, then build the whole graph.

        Args:
            input_vocab_size: size of the source vocabulary.
            target_vocab_size: size of the target vocabulary.
            batch_size: fixed batch size baked into all placeholders.
            embedding_size: word-embedding dimension.
            mode: 'train' or 'decode'.
            hidden_units: RNN hidden size per layer.
            depth: number of stacked RNN layers.
            beam_width: 0 disables beam search; > 0 enables it (decode mode).
            cell_type: 'lstm' or 'gru' ('geu' is also accepted as a
                historical misspelling of 'gru').
            dropout: dropout rate in [0, 1]; keep_prob = 1 - dropout.
            use_dropout / use_residual: wrap RNN cells accordingly.
            optimizer: one of adadelta / adam / rmsprop / momentum / sgd.
            learning_rate / min_learning_rate / decay_steps: LR schedule.
            max_gradient_norm: global-norm gradient clipping threshold.
            max_decode_step: hard cap on decode length (None = heuristic).
            attention_type: 'Bahdanau' or 'Luong'.
            bidirectional: use a bidirectional encoder.
            time_major: tensors are (time, batch, ...) instead of
                (batch, time, ...).
            seed: dropout seed.
            parallel_iterations: dynamic_rnn/decode parallelism
                (defaults to batch_size when not an int).
            share_embedding: encoder and decoder share one embedding matrix.
            pretrained_embedding: embeddings are loaded via assign ops.
        """
        self.input_vocab_size = input_vocab_size
        self.target_vocab_size = target_vocab_size
        self.batch_size = batch_size
        self.embedding_size = embedding_size
        self.hidden_units = hidden_units
        self.depth = depth
        self.cell_type = cell_type.lower()
        # Bug fix: the original only accepted the misspelled tag 'geu' for a
        # GRU cell, so the natural value 'gru' failed the assert below.
        # Accept the correct spelling and normalize it to the internal tag
        # so build_single_cell keeps working unchanged.
        if self.cell_type == 'gru':
            self.cell_type = 'geu'
        self.use_dropout = use_dropout
        self.use_residual = use_residual
        self.attention_type = attention_type
        self.mode = mode
        self.optimizer = optimizer
        self.learning_rate = learning_rate
        self.min_learning_rate = min_learning_rate
        self.decay_steps = decay_steps
        self.max_gradient_norm = max_gradient_norm
        self.keep_prob = 1.0 - dropout
        self.bidirectional = bidirectional
        self.seed = seed
        self.pretrain_embedding = pretrained_embedding

        if isinstance(parallel_iterations, int):
            self.parallel_iterations = parallel_iterations
        else:
            self.parallel_iterations = batch_size
        self.time_major = time_major
        self.share_embedding = share_embedding

        # Small random init used for embeddings and projections.
        self.initializer = tf.random_normal_initializer(-0.05, 0.05, dtype=tf.float32)

        assert self.cell_type in ('geu', 'lstm'), '无效的cell type格式'

        if share_embedding:
            assert input_vocab_size == target_vocab_size, '如果share_embedding为true，输入输出必须相等'

        assert mode in ('train', 'decode'), '无效的mode格式'

        assert dropout >= 0.0 and dropout <= 1.0, 'dropout不再值域内'

        assert attention_type.lower() in ('bahdanau', 'luong'), '无效的attention'

        assert beam_width < target_vocab_size, 'beam_width必须小于target_vocab_size'

        self.keep_prob_placeholder = tf.placeholder(tf.float32, shape=[], name='keep_prob')

        self.global_step = tf.Variable(0, trainable=False, name='global_step')

        self.beam_width = beam_width
        # Beam search is only used when a positive beam width was requested.
        self.use_beamsearch_decode = self.beam_width > 0
        self.max_decode_step = max_decode_step

        assert self.optimizer.lower() in (
            'adadelta', 'adam', 'rmsprop', 'momentum', 'sgd'), '优化器只能是adadelta、adam、rmsprop、momentum或sgd'

        self.build_model()

    def build_model(self):
        """Assemble the full graph.

        Order: placeholders, encoder, decoder, optimizer (training mode
        only), and finally a Saver for checkpointing.
        """
        self.init_placeholders()
        enc_outputs, enc_state = self.build_encoder()
        self.build_decoder(enc_outputs, enc_state)

        if self.mode == 'train':
            self.init_optimizer()

        self.saver = tf.train.Saver()

    def init_placeholders(self):
        """Create input placeholders (and, in train mode, decoder targets,
        rewards, and the START-shifted decoder input sequence)."""
        # Extra scalar loss term fed from outside (see train(add_loss=...)).
        self.add_loss = tf.placeholder(dtype=tf.float32, name="add_loss")

        # Source token ids, shape (batch, time).
        self.encoder_inputs = tf.placeholder(dtype=tf.int32, shape=(self.batch_size, None), name='encoder_inputs')

        # Valid length of each source sequence.
        self.encoder_inputs_length = tf.placeholder(dtype=tf.int32, shape=(self.batch_size,),
                                                    name='encoder_input_length')

        if self.mode == 'train':
            # Target token ids, shape (batch, time).
            self.decoder_inputs = tf.placeholder(dtype=tf.int32, shape=(self.batch_size, None), name='decoder_inputs')

            # Per-example reward weights for the reward-weighted loss.
            self.rewards = tf.placeholder(dtype=tf.float32, shape=(self.batch_size, 1), name='rewards')

            # Valid length of each target sequence.
            self.decoder_inputs_length = tf.placeholder(dtype=tf.int32, shape=(self.batch_size,),
                                                        name='decoder_inputs_length')

            # Column of START tokens prepended to the targets.
            # (Attribute name 'docoder_...' is a historical typo kept for
            # compatibility with other methods that read it.)
            self.docoder_start_token = tf.ones(shape=(self.batch_size, 1), dtype=tf.int32) * WordSequence.START

            # Teacher-forcing input: START followed by the target sequence.
            # (Attribute name 'decoder_inouts_train' is a historical typo
            # read by build_decoder; keep as-is.)
            self.decoder_inouts_train = tf.concat([self.docoder_start_token, self.decoder_inputs], axis=1)

    """构建一个单独的rnn单元"""

    def build_single_cell(self, n_hideen, use_residual):
        """Build one RNN cell, optionally wrapped with dropout and residual.

        Args:
            n_hideen: number of hidden units (parameter name is a historical
                typo for 'n_hidden', kept for keyword-caller compatibility).
            use_residual: wrap the cell in a ResidualWrapper.

        Returns:
            The (possibly wrapped) RNN cell.
        """
        # 'geu' is the misspelled GRU tag used elsewhere in this class;
        # bug fix: also accept the correct spelling 'gru'.
        if self.cell_type in ('geu', 'gru'):
            cell_type = GRUCell
        else:
            cell_type = LSTMCell

        cell = cell_type(n_hideen)

        if self.use_dropout:
            # Output dropout, controlled at run time via keep_prob_placeholder.
            cell = DropoutWrapper(cell, dtype=tf.float32, output_keep_prob=self.keep_prob_placeholder, seed=self.seed)

        if use_residual:
            cell = ResidualWrapper(cell)
        return cell

    """构建单独的编码cell"""

    def build_encoder_cell(self):
        """Stack `depth` single cells into one multi-layer encoder cell."""
        cells = [
            self.build_single_cell(self.hidden_units,
                                   use_residual=self.use_residual)
            for _ in range(self.depth)
        ]
        return MultiRNNCell(cells)

    """构建编码器"""

    def build_encoder(self):
        """Build the encoder: embedding lookup + (bi)directional multi-layer RNN.

        Returns:
            (encoder_outputs, encoder_state). In the bidirectional case the
            outputs are the fw/bw concatenation (feature dim doubled) and the
            state is a tuple of 2 * depth layer states interleaved as
            (fw_0, bw_0, fw_1, bw_1, ...).
        """

        with tf.variable_scope('encoder'):
            encoder_cell = self.build_encoder_cell()
            # Place the embedding on CPU or GPU depending on vocabulary size.
            with tf.device(_get_embed_device(self.input_vocab_size)):

                if self.pretrain_embedding:
                    # Zero-initialized variable, to be filled with pretrained
                    # vectors via the assign op below.
                    self.encoder_embeddings = tf.Variable(
                        tf.constant(0.0, shape=(self.input_vocab_size, self.embedding_size)), trainable=True,
                        name='embedding')

                    self.encoder_embeddings_placeholder = tf.placeholder(tf.float32,
                                                                         (self.input_vocab_size, self.embedding_size))

                    # Run this op once with the placeholder fed to load the
                    # pretrained embedding matrix.
                    self.encoder_embeddings_init = self.encoder_embeddings.assign(self.encoder_embeddings_placeholder)

                else:
                    self.encoder_embeddings = tf.get_variable(name='embedding',
                                                              shape=(self.input_vocab_size, self.embedding_size),
                                                              initializer=self.initializer, dtype=tf.float32)

            self.encoder_inputs_embedded = tf.nn.embedding_lookup(params=self.encoder_embeddings,
                                                                  ids=self.encoder_inputs)

            if self.use_residual:
                # Project embeddings to hidden_units so additions inside
                # ResidualWrapper have matching dimensions.
                self.encoder_inputs_embedded = layers.dense(self.encoder_inputs_embedded, self.hidden_units,
                                                            use_bias=False, name='encoder_residual_projection')

            inputs = self.encoder_inputs_embedded
            if self.time_major:
                # (batch, time, dim) -> (time, batch, dim)
                inputs = tf.transpose(inputs, (1, 0, 2))
            if not self.bidirectional:
                (encoder_outputs, encoder_state) = tf.nn.dynamic_rnn(
                    cell=encoder_cell,
                    inputs=inputs,
                    sequence_length=self.encoder_inputs_length,
                    dtype=tf.float32,
                    time_major=self.time_major,
                    parallel_iterations=self.parallel_iterations,
                    swap_memory=True
                )
            else:
                # Separate cell stack for the backward direction.
                encoder_cell_bw = self.build_encoder_cell()
                (
                    (encoder_fw_outputs, encoder_bw_outputs),
                    (encoder_fw_state, encoder_bw_state)
                ) = tf.nn.bidirectional_dynamic_rnn(
                    cell_bw=encoder_cell_bw,
                    cell_fw=encoder_cell,
                    inputs=inputs,
                    sequence_length=self.encoder_inputs_length,
                    dtype=tf.float32,
                    time_major=self.time_major,
                    parallel_iterations=self.parallel_iterations,
                    swap_memory=True
                )

                # Concatenate fw/bw outputs along the feature axis; the
                # output dimension becomes 2 * hidden_units.
                encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)

                # Interleave per-layer fw/bw states into a flat tuple.
                encoder_state = []
                for i in range(self.depth):
                    encoder_state.append(encoder_fw_state[i])
                    encoder_state.append(encoder_bw_state[i])
                encoder_state = tuple(encoder_state)

            return encoder_outputs, encoder_state

    def build_decoder_cell(self, encoder_outputs, encoder_state):
        """Build the attention-wrapped decoder cell and its initial state.

        When beam search is enabled, the encoder outputs/state/lengths are
        tiled beam_width times so each beam shares the same memory.

        Returns:
            (cell, decoder_initial_state)
        """
        encoder_input_length = self.encoder_inputs_length
        batch_size = self.batch_size

        if self.bidirectional:
            # NOTE(review): keeps only the last `depth` entries of the
            # interleaved (fw_0, bw_0, fw_1, bw_1, ...) state tuple so its
            # length matches the decoder's `depth` layers — confirm this
            # particular selection of layers is intentional.
            encoder_state = encoder_state[-self.depth:]

        if self.time_major:
            # Attention memory must be batch-major: (batch, time, dim).
            encoder_outputs = tf.transpose(encoder_outputs, (1, 0, 2))

        if self.use_beamsearch_decode:
            encoder_outputs = seq2seq.tile_batch(
                encoder_outputs, multiplier=self.beam_width
            )
            encoder_state = seq2seq.tile_batch(
                encoder_state, multiplier=self.beam_width
            )
            encoder_input_length = seq2seq.tile_batch(
                self.encoder_inputs_length, multiplier=self.beam_width
            )
            # Each source sentence now appears beam_width times.
            batch_size *= self.beam_width

        if self.attention_type.lower() == 'luong':
            # Multiplicative (Luong) attention.
            self.attention_mechanism = LuongAttention(
                num_units=self.hidden_units,
                memory=encoder_outputs,
                memory_sequence_length=encoder_input_length
            )
        else:
            # Default: additive (Bahdanau) attention.
            self.attention_mechanism = BahdanauAttention(
                num_units=self.hidden_units,
                memory=encoder_outputs,
                memory_sequence_length=encoder_input_length
            )

        cell = MultiRNNCell([
            self.build_single_cell(self.hidden_units, use_residual=self.use_residual)
            for _ in range(self.depth)
        ])

        # Record alignment history only for single-path decoding; it is not
        # used in train mode and is incompatible with beam search.
        alignment_history = (
                self.mode != 'train' and not self.use_beamsearch_decode
        )

        """判断是否在attention计算前进行一次投影"""

        def cell_input_fn(inputs, attention):
            # With residual connections, project [inputs; attention] back to
            # hidden_units so the residual addition shapes match; otherwise
            # just concatenate.
            if not self.use_residual:
                return array_ops.concat([inputs, attention], -1)

            attn_projection = layers.Dense(self.hidden_units, dtype=tf.float32, use_bias=False,
                                           name='attention_cell_input_fn')

            return attn_projection(array_ops.concat([inputs, attention], -1))

        cell = AttentionWrapper(
            cell=cell,
            attention_mechanism=self.attention_mechanism,
            attention_layer_size=self.hidden_units,
            alignment_history=alignment_history,
            cell_input_fn=cell_input_fn,
            name='Attention_Wrapper')

        decoder_initial_state = cell.zero_state(batch_size, tf.float32)

        # Seed the wrapper's zero state with the encoder's final state.
        decoder_initial_state = decoder_initial_state.clone(cell_state=encoder_state)

        return cell, decoder_initial_state

    """构建解码器"""

    def build_decoder(self, encoder_outputs, encoder_state):
        """Build the decoder.

        Train mode: teacher-forced decoding producing three losses —
        plain cross-entropy (`loss`), reward-weighted (`loss_rewards`) and
        loss + externally fed add_loss (`loss_add`).
        Decode mode: greedy or beam-search inference.
        """

        with tf.variable_scope('decoder') as decoder_scope:
            (
                self.decoder_cell,
                self.decoder_initial_state
            ) = self.build_decoder_cell(encoder_outputs, encoder_state)

            # Decoder embedding: shared with the encoder, loaded from a
            # pretrained matrix via an assign op, or trained from scratch.
            with tf.device(_get_embed_device(self.target_vocab_size)):
                if self.share_embedding:
                    self.decoder_embeddings = self.encoder_embeddings
                elif self.pretrain_embedding:
                    self.decoder_embeddings = tf.Variable(
                        tf.constant(0.0, shape=(self.target_vocab_size, self.embedding_size)),
                        trainable=True,
                        name='embeddings'
                    )

                    self.decoder_embeddings_placeholder = tf.placeholder(
                        tf.float32,
                        (self.target_vocab_size, self.embedding_size)
                    )
                    # Run once with the placeholder fed to load pretrained vectors.
                    self.decoder_embeddings_init = self.decoder_embeddings.assign(self.decoder_embeddings_placeholder)
                else:
                    self.decoder_embeddings = tf.get_variable(
                        name='embeddings',
                        shape=(self.target_vocab_size, self.embedding_size),
                        initializer=self.initializer,
                        dtype=tf.float32
                    )

            # Projects RNN outputs to vocabulary logits.
            self.decoder_output_projection = layers.Dense(self.target_vocab_size, dtype=tf.float32, use_bias=False,
                                                          name='decoder_output_projection')

            if self.mode == 'train':
                # Teacher forcing: embed the START-prefixed target sequence.
                self.decoder_inputs_embeded = tf.nn.embedding_lookup(params=self.decoder_embeddings,
                                                                     ids=self.decoder_inouts_train)

                inputs = self.decoder_inputs_embeded

                if self.time_major:
                    # (batch, time, dim) -> (time, batch, dim)
                    inputs = tf.transpose(inputs, (1, 0, 2))

                training_helper = seq2seq.TrainingHelper(
                    inputs=inputs,
                    sequence_length=self.decoder_inputs_length,
                    time_major=self.time_major,
                    name='training_helper'
                )

                training_decoder = seq2seq.BasicDecoder(
                    cell=self.decoder_cell,
                    helper=training_helper,
                    initial_state=self.decoder_initial_state
                )

                max_decoder_length = tf.reduce_max(self.decoder_inputs_length)

                (
                    outputs,
                    self.final_state,
                    _
                ) = seq2seq.dynamic_decode(
                    decoder=training_decoder,
                    output_time_major=self.time_major,
                    impute_finished=True,
                    maximum_iterations=max_decoder_length,
                    parallel_iterations=self.parallel_iterations,
                    swap_memory=True,
                    scope=decoder_scope
                )

                # Here the projection is applied outside the decoder (the
                # inference branch instead passes it as output_layer).
                self.decoder_logits_train = self.decoder_output_projection(outputs.rnn_output)

                # Mask padding positions out of the losses.
                self.masks = tf.sequence_mask(lengths=self.decoder_inputs_length, maxlen=max_decoder_length,
                                              dtype=tf.float32, name='masks')

                decoder_logits_train = self.decoder_logits_train
                if self.time_major:
                    # Back to batch-major for the loss computations below.
                    decoder_logits_train = tf.transpose(decoder_logits_train, (1, 0, 2))

                # Greedy predictions on the training logits (for inspection).
                self.decoder_prd_train = tf.argmax(decoder_logits_train, axis=-1, name='decoder_prd_train')

                self.train_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.decoder_inputs,
                                                                                    logits=decoder_logits_train)

                # Reward-weighted mask for the policy-gradient style loss.
                self.masks_rewards = self.masks * self.rewards

                self.loss_rewards = seq2seq.sequence_loss(
                    logits=decoder_logits_train,
                    targets=self.decoder_inputs,
                    weights=self.masks_rewards,
                    average_across_timesteps=True,
                    average_across_batch=True
                )

                self.loss = seq2seq.sequence_loss(
                    logits=decoder_logits_train,
                    targets=self.decoder_inputs,
                    weights=self.masks,
                    average_across_timesteps=True,
                    average_across_batch=True
                )

                #self.add_loss = self.loss + self.add_loss  # changed 2020-06-12
                # Combined loss: cross-entropy plus the externally fed
                # add_loss placeholder (see train(add_loss=...)).
                self.loss_add = self.loss + self.add_loss

            elif self.mode == 'decode':

                # Every sample/beam starts from the START token.
                start_tokens = tf.tile(
                    [WordSequence.START],
                    [self.batch_size]
                )
                end_token = WordSequence.END

                def embed_and_input_proj(inputs):
                    # Embedding lookup used by the inference helper/decoder.
                    return tf.nn.embedding_lookup(
                        self.decoder_embeddings,
                        inputs
                    )

                if not self.use_beamsearch_decode:
                    # Greedy decoding: always take the argmax token.
                    decoding_helper = seq2seq.GreedyEmbeddingHelper(
                        start_tokens=start_tokens,
                        end_token=end_token,
                        embedding=embed_and_input_proj
                    )

                    inference_decoder = seq2seq.BasicDecoder(
                        cell=self.decoder_cell,
                        helper=decoding_helper,
                        initial_state=self.decoder_initial_state,
                        output_layer=self.decoder_output_projection
                    )

                else:
                    inference_decoder = BeamSearchDecoder(
                        cell=self.decoder_cell,
                        embedding=embed_and_input_proj,
                        start_tokens=start_tokens,
                        end_token=end_token,
                        initial_state=self.decoder_initial_state,
                        beam_width=self.beam_width,
                        output_layer=self.decoder_output_projection
                    )

                if self.max_decode_step is not None:
                    max_decoder_step = self.max_decode_step
                else:
                    # Heuristic cap: four times the longest source length.
                    max_decoder_step = tf.round(tf.reduce_max(self.encoder_inputs_length) * 4)

                (
                    self.decoder_outputs_decode,
                    self.final_state,
                    self.final_sequence_lengths
                ) = (seq2seq.dynamic_decode(
                    decoder=inference_decoder,
                    output_time_major=self.time_major,
                    maximum_iterations=max_decoder_step,
                    parallel_iterations=self.parallel_iterations,
                    swap_memory=True,
                    scope=decoder_scope
                ))

                if not self.use_beamsearch_decode:
                    # Modified 2020-06-10
                    """
                    dod = self.decoder_outputs_decode
                    self.decoder_prd_decode = tf.transpose(self.decoder_prd_decode, (1, 0))
                    """
                    # NOTE(review): the greedy path stores predictions as
                    # `decoder_pred_decode` while the beam path below uses
                    # `decoder_prd_decode` — callers must match the spelling.
                    dod = self.decoder_outputs_decode
                    self.decoder_pred_decode = dod.sample_id

                    if self.time_major:
                        # (time, batch) -> (batch, time)
                        self.decoder_pred_decode = tf.transpose(self.decoder_pred_decode, (1, 0))


                else:
                    self.decoder_prd_decode = self.decoder_outputs_decode.predicted_ids

                    if self.time_major:
                        self.decoder_prd_decode = tf.transpose(self.decoder_prd_decode, (1, 0, 2))

                    # Reorder predictions to (batch, beam, time).
                    self.decoder_prd_decode = tf.transpose(self.decoder_prd_decode, perm=[0, 2, 1])
                    dod = self.decoder_outputs_decode
                    self.beam_prob = dod.beam_search_decoder_output.scores

    def save(self, sess, save_epoch, save_path='model.ckpt'):
        self.saver.save(sess, save_path=save_path, global_step=save_epoch, write_meta_graph=False)

    def load(self, sess, save_path='model.ckpt'):
        print('try load model from', save_path)
        self.saver.restore(sess, save_path)

    def finetune(self, sess, meta_path, checkpoint_path):
        """Import a saved meta graph and restore the latest checkpoint.

        Replaces self.saver with the saver from `meta_path`, then restores
        the newest checkpoint found under `checkpoint_path`.
        """
        self.saver = tf.train.import_meta_graph(meta_path)
        self.saver.restore(sess, tf.train.latest_checkpoint(checkpoint_path))

    def init_optimizer(self):
        """Create the optimizer and update ops for the three training losses.

        Builds `updates` (plain loss), `updates_rewards` (reward-weighted
        loss) and `updates_add` (loss + add_loss), each with global-norm
        gradient clipping. Note each apply_gradients increments global_step,
        matching the original behavior.
        """
        # Exponentially decayed learning rate schedule.
        learning_rate = tf.train.exponential_decay(
            self.learning_rate,
            self.global_step,
            self.decay_steps,
            decay_rate=0.95
        )
        self.current_learning_rate = learning_rate

        trainable_params = tf.trainable_variables()

        name = self.optimizer.lower()
        if name == 'adadelta':
            self.opt = tf.train.AdadeltaOptimizer(
                learning_rate=learning_rate
            )
        elif name == 'adam':
            self.opt = tf.train.AdamOptimizer(
                learning_rate=learning_rate
            )
        elif name == 'rmsprop':
            self.opt = tf.train.RMSPropOptimizer(
                learning_rate=learning_rate
            )
        elif name == 'momentum':
            # Bug fix: MomentumOptimizer requires a `momentum` argument;
            # the original call omitted it and raised TypeError.
            self.opt = tf.train.MomentumOptimizer(
                learning_rate=learning_rate,
                momentum=0.9
            )
        elif name == 'sgd':
            self.opt = tf.train.GradientDescentOptimizer(
                learning_rate=learning_rate
            )

        def clipped_update(loss):
            # Compute gradients, clip by global norm, and apply.
            gradients = tf.gradients(loss, trainable_params)
            clip_gradients, _ = tf.clip_by_global_norm(gradients, self.max_gradient_norm)
            return self.opt.apply_gradients(zip(clip_gradients, trainable_params),
                                            global_step=self.global_step)

        # Update op for the plain cross-entropy loss.
        self.updates = clipped_update(self.loss)
        # Update op for the reward-weighted loss.
        self.updates_rewards = clipped_update(self.loss_rewards)
        # Update op for loss + externally fed add_loss.
        self.updates_add = clipped_update(self.loss_add)

    def check_feeds(self, encoder_inputs, encoder_inputs_length, decoder_inputs, decoder_inputs_length, decode):
        input_batch_size = encoder_inputs.shape[0]
        if input_batch_size != encoder_inputs_length.shape[0]:
            raise ValueError("%d 与 %d 的第一个维度必须相等" % (input_batch_size, encoder_inputs_length.shape[0]))
        if not decode:
            target_batch_size = decoder_inputs.shape[0]
            if target_batch_size != input_batch_size:
                raise ValueError("%d 与 %d 的第一个维度必须相等" % (input_batch_size, target_batch_size))
            if target_batch_size != decoder_inputs_length.shape[0]:
                raise ValueError("%d 与 %d 的第一个维度必须相等" % (target_batch_size, decoder_inputs_length.shape[0]))
        input_feed = {}

        input_feed[self.encoder_inputs.name] = encoder_inputs
        input_feed[self.encoder_inputs_length.name] = encoder_inputs_length

        if not decode:
            input_feed[self.decoder_inputs.name] = decoder_inputs
            input_feed[self.decoder_inputs_length.name] = decoder_inputs_length

        return input_feed

    def train(self, sess, encoder_inputs, encoder_inputs_length, decoder_inputs, decoder_inputs_length, rewards=None,
              return_lr=False, loss_only=False, add_loss=None):
        """Run one training step on a batch.

        Args:
            sess: the TF session.
            encoder_inputs / encoder_inputs_length: source batch and lengths.
            decoder_inputs / decoder_inputs_length: target batch and lengths.
            rewards: optional per-example rewards; selects the
                reward-weighted loss/update.
            return_lr: also return the current learning rate.
            loss_only: compute and return the loss without updating.
            add_loss: optional extra loss value; selects the
                loss + add_loss update.

        Returns:
            cost, or (cost, lr) when return_lr is True.
        """
        # 输入
        input_feed = self.check_feeds(encoder_inputs, encoder_inputs_length, decoder_inputs, decoder_inputs_length,
                                      False)
        # 设置dropout
        input_feed[self.keep_prob_placeholder.name] = self.keep_prob

        if loss_only:
            # Evaluate the loss without applying any update.
            return sess.run(self.loss, input_feed)

        # Select the update op and the loss tensor to report; the branch
        # order (add_loss before rewards) matches the original behavior.
        if add_loss is not None:
            input_feed[self.add_loss.name] = add_loss
            # Bug fix: fetch the combined loss tensor `loss_add` rather than
            # the `add_loss` placeholder, which would merely echo the fed
            # value back (the 2020-6-12 rename missed this fetch).
            output_feed = [self.updates_add, self.loss_add, self.current_learning_rate]
        elif rewards is not None:
            input_feed[self.rewards.name] = rewards
            output_feed = [self.updates_rewards, self.loss_rewards, self.current_learning_rate]
        else:
            output_feed = [self.updates, self.loss, self.current_learning_rate]

        _, cost, lr = sess.run(output_feed, input_feed)

        if return_lr:
            return cost, lr
        return cost

    def predict(self, sess, encoder_inputs, encoder_inputs_length, attention=False):
        """Decode a batch of source sequences.

        Args:
            sess: the TF session.
            encoder_inputs / encoder_inputs_length: source batch and lengths.
            attention: also return the stacked alignment history
                (greedy decoding only).

        Returns:
            Predicted token ids; with attention=True, (pred, alignments).
        """
        input_feed = self.check_feeds(encoder_inputs, encoder_inputs_length, None, None, True)

        # No dropout at inference time.
        input_feed[self.keep_prob_placeholder.name] = 1.0

        if attention:
            assert not self.use_beamsearch_decode, "无法打开BeamSearch"

            # Bug fix: the greedy decode path defines `decoder_pred_decode`
            # (not `decoder_prd_decode`), and AttentionWrapperState names the
            # field `alignment_history`; the original two typos made this
            # branch raise AttributeError.
            pred, atten = sess.run(
                [self.decoder_pred_decode, self.final_state.alignment_history.stack()],
                input_feed)

            return pred, atten

        if self.use_beamsearch_decode:
            # Beam search stores predictions under `decoder_prd_decode`.
            pred, beam_prob = sess.run([self.decoder_prd_decode, self.beam_prob], input_feed)
            # NOTE(review): beam_prob is averaged but never returned —
            # preserved as-is; confirm whether it should be exposed.
            beam_prob = np.mean(beam_prob, axis=1)
            pred = pred[0]

            return pred

        pred = sess.run([self.decoder_pred_decode], input_feed)
        pred = pred[0]

        return pred
