# -*- coding: utf-8 -*-
# @Author: lidongdong
# @time  : 19-1-24 上午10:07
# @file  : model.py

import tensorflow as tf
from tensorflow.contrib.lookup.lookup_ops import MutableHashTable
from tensorflow.python.framework import constant_op
from tensorflow.contrib.seq2seq import dynamic_decode
from tensorflow.contrib.seq2seq import BasicDecoder
from tensorflow.contrib.seq2seq import BeamSearchDecoder
from tensorflow.contrib.seq2seq import GreedyEmbeddingHelper
from tensorflow.contrib.seq2seq import TrainingHelper
from tensorflow.contrib.seq2seq import BahdanauAttention
from tensorflow.contrib.seq2seq import AttentionWrapper
from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple
from encoder_helper import resnet_v2_101_mine
from tensorflow.contrib.seq2seq import tile_batch

# Integer ids of the special tokens (must stay in sync with the lookup-table
# contents fed to CaptionModel via word2index_value / index2word_value).
SOS_ID = 0
EOS_ID = 1
UNK_ID = 2
PAD_ID = 3

# Surface forms of the special tokens.
SOS = "<sos>"
EOS = "<eos>"
UNK = "<unk>"
PAD = "<pad>"
# Many reference implementations set PAD to id 0, apparently because of how
# the seq2seq helper pads sequences; here SOS is 0 and PAD is 3 instead.
# NOTE(review): verify this id layout against the padding behavior of the
# helpers actually used below.


class CaptionModel(object):
    """Image-captioning model: ResNet-v2-101 encoder + attentional LSTM decoder.

    Built on the TF 1.x contrib seq2seq API.  The whole graph is constructed
    eagerly at __init__ time via build_model().  ``mode`` selects the decoding
    strategy: "train" uses a TrainingHelper (teacher forcing), "val" uses
    greedy decoding, and "infer" uses beam search.
    """

    def __init__(self,
                 vocab_size,
                 embed_dim,
                 session,
                 word2index_value,
                 index2word_value,
                 rnn_layers=2,
                 hidden_units=256,
                 keep_prob_placeholder=0.9,
                 beam_width=3,
                 mode="train"):
        """Build the full caption-model graph.

        :param vocab_size: size of the target vocabulary (rows of the
            word-embedding matrix).
        :param embed_dim: word-embedding dimension.
        :param session: tf.Session used to populate the two lookup tables;
            when falsy the tables are created but left empty.
        :param word2index_value: pair (keys, values) of word strings and
            their int64 ids, used to fill the word->index table.
        :param index2word_value: pair (keys, values) of int64 ids and their
            word strings, used to fill the index->word table.
        :param rnn_layers: number of stacked LSTM layers in the decoder.
        :param hidden_units: LSTM hidden-state size.
        :param keep_prob_placeholder: dropout keep probability applied to
            each decoder cell's output (a plain float here, despite the name).
        :param beam_width: an python integer, beam size used in "infer" mode.
        :param mode: choose one from train/val/infer
        """
        # parameters
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim
        self.mode = mode                    # mode can be set to train, val, infer
        self.rnn_layers = rnn_layers
        self.hidden_units = hidden_units
        self.keep_prob_placeholder = keep_prob_placeholder
        self.beam_width = beam_width

        # placeholders: 224x224 RGB images, string-token captions, and the
        # per-example caption lengths
        self.image_inputs = tf.placeholder(tf.float32, [None, 224, 224, 3], "image_inputs")
        self.caption_targets = tf.placeholder(tf.string, [None, None], "captions")
        self.caption_lens = tf.placeholder(tf.int32, [None, ], "caption_lens")

        # mutable lookup tables for token <-> id conversion; filled below
        self.word2index = MutableHashTable(key_dtype=tf.string,
                                           value_dtype=tf.int64,
                                           default_value=UNK_ID,
                                           shared_name="in_table",
                                           name="in_table",
                                           checkpoint=True)

        # NOTE(review): the default "_unk" differs from the UNK surface form
        # "<unk>" defined at module level — confirm which one downstream
        # consumers expect.
        self.index2word = MutableHashTable(key_dtype=tf.int64,
                                           value_dtype=tf.string,
                                           default_value="_unk",
                                           shared_name="out_table",
                                           name="out_table",
                                           checkpoint=True)
        if session:
            # Populate both tables eagerly so lookups work from the first run.
            session.run(self.word2index.insert(constant_op.constant(word2index_value[0]),
                                               constant_op.constant(word2index_value[1], dtype=tf.int64)))
            session.run(self.index2word.insert(constant_op.constant(index2word_value[0], dtype=tf.int64),
                                               constant_op.constant(index2word_value[1])))

        self.embedding = tf.get_variable("word_embed", [self.vocab_size, self.embed_dim], tf.float32)

        # tensor-valued sizes derived from the caption placeholder at run time
        self.batch_size = tf.shape(self.caption_targets)[0]
        self.caption_maxlen = tf.shape(self.caption_targets)[1]
        self.feature_maps = None
        self.build_model()

    def build_model(self):
        """Wire up encoder, caption preprocessing, decoder and (train/val) loss."""
        print "[*] encode"
        # NOTE(review): is_training is hard-coded to True even when
        # self.mode == "infer" — confirm this is intended for batch-norm etc.
        self.feature_maps = self.encode(self.image_inputs, is_training=True)

        print "[*] feature_maps_2_encoder_features"
        self.feature_maps_2_encoder_features()

        print "[*] caption preparing"
        # Decoder inputs are the targets shifted right by one with <sos>
        # prepended (teacher forcing); the last target column is dropped.
        self.target_word_indexs = self.word2index.lookup(self.caption_targets)
        self.input_word_indexs = tf.concat([tf.ones([self.batch_size, 1], dtype=tf.int64) * SOS_ID,
                                            tf.split(self.target_word_indexs, [self.caption_maxlen - 1, 1], 1)[0]], 1)

        print "[*] caption embedding"
        self.input_word_embedding = tf.nn.embedding_lookup(self.embedding, self.input_word_indexs)
        self.target_word_embedding = tf.nn.embedding_lookup(self.embedding, self.target_word_indexs)

        print "[*] decode"
        self.decode()

        print self.target_word_indexs
        print self.final_outputs
        # loss (only defined for train/val; infer produces beam outputs only)
        # NOTE(review): in "val" mode the greedy decoder's time dimension is
        # bounded by maximum_iterations=100 and is not guaranteed to equal
        # caption_maxlen, so the logits/labels shapes may disagree — verify.
        if self.mode == "train" or self.mode == "val":
            self.cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.cast(self.target_word_indexs, tf.int32),
                                                                                logits=self.output_logits)
        else:
            self.cross_entropy = None

    def encode(self, inputs, is_training=True):
        """Encoder component: uses a truncated ResNet-v2-101 backbone to
        produce the final 7 * 7 * 2048 feature maps.
        # TODO load the weights from a pretrained ResNet checkpoint

        # Arguments:
            inputs: a tensor shaped of [batch_size, height, width, channel]
            is_training: boolean, True if training else False(infer)

        # Returns:
            feature_maps: [None, 7, 7, 2048]
        """
        with tf.variable_scope("encoder"):
            feature_maps = resnet_v2_101_mine(inputs, is_training=is_training)
        return feature_maps

    def feature_maps_2_encoder_features(self):
        """Convert the encoder feature maps into attention memory
        (encoder_outputs) and an initial decoder state (encoder_states).

        TODO(original author): unfinished — the state tuple is constructed by
        hand because a two-layer cell is used.
        """
        # Flatten the 7x7 spatial grid into 49 attention positions:
        # [batch_size, 49, 2048]; this is also called the "memory".
        self.encoder_outputs = tf.reshape(self.feature_maps, (self.batch_size, -1, 2048))

        # Mean-pool the spatial positions to seed the decoder state.
        encoder_final_states = tf.reduce_mean(self.encoder_outputs, axis=1)
        with tf.variable_scope("hidden_state_compute"):
            # Project the 2048-d pooled feature down to self.hidden_units.
            state_matrix = tf.get_variable("state_matrix", shape=[2048, self.hidden_units], dtype=tf.float32,
                                           initializer=tf.random_normal_initializer(stddev=0.02))
            encoder_states = tf.matmul(encoder_final_states, state_matrix)

        # LSTMStateTuple carries the (c, h) pair; both start from the projection.
        lstm_state_tuple = LSTMStateTuple(encoder_states, encoder_states)

        # Replicate per layer so the tuple matches the MultiRNNCell state shape.
        self.encoder_states = (lstm_state_tuple, ) * self.rnn_layers

    def decode(self):
        """decoder, add attention mechanism and dynamic decoder

        # Arguments:
            encoder_outputs: a tensor of shape [batch_size, height, width, channel]
            encoder_states: a tensor of shape [batch_size, channel]; the average of encoder outputs
            encoder_lens: a tensor of shape [batch_size, 1]
            targets: a tensor of shape [batch_size, sequence_max_len]
        """
        with tf.variable_scope("decoder"):
            if self.mode == "train" or self.mode == "val":
                self.batch_size_ = self.batch_size
                # NOTE(review): caption_lens is used as the attention
                # memory_sequence_length, but the memory has 49 spatial
                # positions, not caption-length many — confirm intended.
                attention_mechanism = BahdanauAttention(128, self.encoder_outputs, self.caption_lens)
            elif self.mode == "infer":
                # Beam search: tile 1.memory 2.lengths 3.initial state by
                # beam_width, and scale the effective batch size accordingly.
                self.encoder_outputs = tf.contrib.seq2seq.tile_batch(self.encoder_outputs, self.beam_width)
                self.encoder_states = tf.contrib.seq2seq.tile_batch(self.encoder_states, self.beam_width)
                self.caption_lens_ = tf.contrib.seq2seq.tile_batch(self.caption_lens, self.beam_width)
                self.batch_size_ = self.batch_size * self.beam_width
                attention_mechanism = BahdanauAttention(128, self.encoder_outputs, self.caption_lens_)
            else:
                raise ValueError("{} is not in train/val/infer".format(self.mode))

            cell = self.create_rnn_cell()
            wrapped_cell = AttentionWrapper(cell, attention_mechanism, attention_layer_size=256)

            # Convert the hand-built state tuple into an AttentionWrapperState
            # by cloning the wrapper's zero state with our cell state.
            decoder_initial_state = wrapped_cell.zero_state(self.batch_size_, tf.float32).\
                clone(cell_state=self.encoder_states)
            self.decoder_initial_state = decoder_initial_state
            projection_layer = tf.layers.Dense(self.vocab_size, use_bias=False, name="projection_layer")

            if self.mode == "train" or self.mode == "val":
                if self.mode == "train":
                    # Teacher forcing: feed the ground-truth (shifted) embeddings.
                    helper = TrainingHelper(self.input_word_embedding, self.caption_lens)
                else:
                    # Validation: greedy decoding from <sos> until <eos>.
                    helper = GreedyEmbeddingHelper(self.embedding, start_tokens=tf.ones(self.batch_size_, dtype=tf.int32) * SOS_ID, end_token=EOS_ID)

                decoder = BasicDecoder(wrapped_cell, helper, initial_state=decoder_initial_state, output_layer=projection_layer)
                final_outputs, final_state, final_sequence_lengths = dynamic_decode(decoder=decoder, maximum_iterations=100)
                # BasicDecoderOutput: (rnn_output, sample_id)
                self.final_outputs = final_outputs
                # logits
                self.output_logits = self.final_outputs[0]
                # argmax index
                self.sample_id = self.final_outputs[1]

            elif self.mode == "infer":
                # Inputs were tiled by tile_batch above; build the beam decoder.
                # NOTE(review): decoder_initial_state here was built with
                # batch_size * beam_width, while BeamSearchDecoder is given the
                # untiled batch_size as start tokens — confirm the contrib API
                # contract for the expected initial-state batch dimension.
                decoder = BeamSearchDecoder(wrapped_cell,
                                            self.embedding,
                                            tf.fill([self.batch_size, ], SOS_ID),
                                            EOS_ID,
                                            decoder_initial_state,
                                            self.beam_width,
                                            output_layer=projection_layer)

                final_outputs, final_states, final_sequence_lengths = dynamic_decode(decoder, maximum_iterations=100)
                # predicted_ids[bs, beams, sqe_len], beam_search_decoder_output(scores, predicted_ids, parent_ids)
                self.final_outputs = final_outputs
                self.predicted_ids = self.final_outputs[0]

            else:
                raise ValueError("{} is not in train/val/infer".format(self.mode))

    def create_rnn_cell(self):
        """Build the stacked LSTM decoder cell with output dropout."""
        def single_rnn_cell():
            # Each layer MUST come from this factory function: placing one
            # shared cell object in MultiRNNCell's list would make the layers
            # share variables and break the model.
            single_cell = tf.contrib.rnn.LSTMCell(self.hidden_units)
            cell = tf.contrib.rnn.DropoutWrapper(single_cell, output_keep_prob=self.keep_prob_placeholder)
            return cell

        stacked_cell = tf.contrib.rnn.MultiRNNCell([single_rnn_cell() for _ in range(self.rnn_layers)])
        return stacked_cell
