# -*- coding: utf-8 -*-
# @Author: lidongdong
# @time  : 19-1-25 下午2:35
# @file  : model_back.py

import tensorflow as tf
from tensorflow.contrib.lookup.lookup_ops import MutableHashTable
from tensorflow.python.framework import constant_op
from tensorflow.contrib.seq2seq import dynamic_decode
from tensorflow.contrib.seq2seq import BasicDecoder
from tensorflow.contrib.seq2seq import CustomHelper
from tensorflow.contrib.seq2seq import TrainingHelper

from tensorflow.contrib import slim
from tensorflow.contrib.slim.python.slim.nets.resnet_v2 import *
from encoder_helper import load_encoder_fn, resnet_v2_101_mine

# Integer ids of the special tokens.  These must agree with the id
# assignment used to build the word2index/index2word vocab arrays that are
# fed into CaptionModel (SOS_ID is also hard-wired as the decoder's first
# input token, and UNK_ID is the hash table's default value).
SOS_ID = 0
EOS_ID = 1
UNK_ID = 2
PAD_ID = 3

# Literal string forms of the same special tokens.
SOS = "<sos>"
EOS = "<eos>"
UNK = "<unk>"
PAD = "<pad>"


class CaptionModel(object):
    """Image-captioning model: ResNet-v2-101 encoder + attentional LSTM decoder.

    Raw 224x224 RGB images are encoded into a 7x7x2048 feature grid; a
    512-unit LSTM decoder with additive attention over that grid emits word
    ids.  String captions are mapped to/from integer ids via mutable hash
    tables populated from the supplied vocabulary at construction time.
    """

    def __init__(self, vocab_size, embed_dim, session, word2index_value, index2word_value):
        """Create placeholders, vocab tables and build the full graph.

        # Arguments:
            vocab_size: int, number of words in the vocabulary.
            embed_dim: int, dimensionality of the word embeddings.
            session: tf.Session used to run the table-insert ops eagerly.
            word2index_value: (keys, values) pair of word -> id arrays.
            index2word_value: (keys, values) pair of id -> word arrays.
        """
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim
        self.image_inputs = tf.placeholder(tf.float32, [None, 224, 224, 3], "image_inputs")
        self.caption_targets = tf.placeholder(tf.string, [None, None], "captions")
        self.caption_lens = tf.placeholder(tf.int32, [None, ], "caption_lens")

        # word -> id; out-of-vocabulary words fall back to UNK_ID
        self.word2index = MutableHashTable(key_dtype=tf.string,
                                           value_dtype=tf.int32,
                                           default_value=UNK_ID,
                                           shared_name="in_table",
                                           name="in_table",
                                           checkpoint=True)

        # id -> word; unknown ids fall back to the literal "_unk"
        self.index2word = MutableHashTable(key_dtype=tf.int32,
                                           value_dtype=tf.string,
                                           default_value="_unk",
                                           shared_name="out_table",
                                           name="out_table",
                                           checkpoint=True)

        session.run(self.word2index.insert(constant_op.constant(word2index_value[0]),
                                           constant_op.constant(word2index_value[1])))
        session.run(self.index2word.insert(constant_op.constant(index2word_value[0]),
                                           constant_op.constant(index2word_value[1])))

        self.embedding = tf.get_variable("word_embed", [self.vocab_size, self.embed_dim], tf.float32)
        # dynamic shapes derived from whatever caption batch is fed in
        self.batch_size = tf.shape(self.caption_targets)[0]
        self.caption_maxlen = tf.shape(self.caption_targets)[1]
        self.featuremaps = None
        self.build_model()

    def build_model(self):
        """Wire the encoder, the decoder and the training loss together."""
        self.featuremaps = self.encode(self.image_inputs, is_training=True)

        # integer ids of the ground-truth caption tokens
        self.target_word_indexs = self.word2index.lookup(self.caption_targets)
        # Decoder inputs are the targets shifted right by one, with <sos>
        # prepended.  (Fix: the original concatenated an int32 SOS column with
        # the raw *string* targets — a dtype mismatch; shift the looked-up
        # integer ids instead.)
        sos_column = tf.ones([self.batch_size, 1], dtype=tf.int32) * SOS_ID
        self.input_word_indexs = tf.concat([sos_column, self.target_word_indexs[:, :-1]], 1)
        self.input_word_embedding = tf.nn.embedding_lookup(self.embedding, self.input_word_indexs)
        self.target_word_embedding = tf.nn.embedding_lookup(self.embedding, self.target_word_indexs)
        self.final_outputs = self.decode()

        # Loss.  (Fix: the original passed string labels and the whole
        # BasicDecoderOutput namedtuple to softmax_cross_entropy_with_logits.
        # Use the sparse variant on the integer ids and the .rnn_output
        # logits, masked so padding steps do not contribute.)
        logits = self.final_outputs.rnn_output               # [batch, T, vocab]
        decode_steps = tf.shape(logits)[1]
        labels = self.target_word_indexs[:, :decode_steps]
        mask = tf.sequence_mask(self.caption_lens, decode_steps, dtype=tf.float32)
        crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
        self.loss = tf.reduce_sum(crossent * mask) / tf.to_float(self.batch_size)

    def encode(self, inputs, is_training=True):
        """Encoder component: a partial ResNet-v2-101 network.

        # Arguments:
            inputs: a tensor shaped [batch_size, height, width, channel].
            is_training: boolean, True when training, False for inference.

        # Returns:
            the feature maps produced by resnet_v2_101_mine
            (expected shape [None, 7, 7, 2048]).
        """
        featuremaps = resnet_v2_101_mine(inputs, is_training=is_training)
        return featuremaps

    def decode(self, is_training=True):
        """Attentional LSTM decoder driven by a seq2seq CustomHelper.

        # Arguments:
            is_training: boolean; teacher forcing when True, greedy argmax
                sampling when False.

        # Returns:
            the BasicDecoderOutput from dynamic_decode (logits in .rnn_output).
        """
        # Flatten the spatial grid so attention sees [batch, pixels, 2048].
        # (Fix: the original handed the 4-D feature map straight to
        # attention(), which indexes dim 1 as the pixel count.)
        annotations = tf.reshape(self.featuremaps, [self.batch_size, -1, 2048])
        encoder_states = tf.reduce_mean(annotations, axis=1)  # mean-pooled image feature

        def initial_fn():
            # captions of length 0 are finished before the first step
            initial_elements_finished = (0 >= self.caption_lens)
            # first decoder input: <sos> embedding + global image feature
            initial_input = tf.concat((self.input_word_embedding[:, 0, :], encoder_states), 1)
            return initial_elements_finished, initial_input

        def sample_fn_inference(time, outputs, state):
            # greedy decoding: argmax word id over the [batch, vocab] logits
            return tf.to_int32(tf.argmax(outputs, axis=1))

        def sample_fn_training(time, outputs, state):
            # Teacher forcing: the input for step time+1 is the ground-truth
            # target at step `time` (inputs are the targets shifted right by
            # one).  (Fix: the original returned input_word_indexs[:, time],
            # i.e. the token the decoder just consumed — off by one.)  Clamp
            # so already-finished sequences never index past the last column.
            safe_time = tf.minimum(time, self.caption_maxlen - 1)
            return self.target_word_indexs[:, safe_time]

        def next_inputs_fn(time, outputs, state, sample_ids):
            # embed the sampled token and append the attention context
            finished = (time > self.caption_lens)
            # (Fix: attention expects a [batch, 512] hidden state; the LSTM
            # state is an LSTMStateTuple, so pass its .h member.)
            atten_content = attention(state.h, annotations)
            # (Fix: gather_nd with a 1-D id vector is not a per-row lookup;
            # embedding_lookup fetches one embedding row per sample id.)
            inputs = tf.concat([tf.nn.embedding_lookup(self.embedding, sample_ids), atten_content], 1)
            return finished, inputs, state

        with tf.variable_scope("hidden_state_compute"):
            # project the 2048-d pooled image feature to the 512-d LSTM state
            state_matrix = tf.get_variable("state_matrix", shape=[2048, 512], dtype=tf.float32,
                                           initializer=tf.random_normal_initializer(stddev=0.02))
            initial_h = tf.matmul(encoder_states, state_matrix)

        cell = tf.contrib.rnn.LSTMCell(num_units=512)
        # (Fix: an LSTMCell's state is an LSTMStateTuple, not a bare tensor.)
        initial_state = tf.contrib.rnn.LSTMStateTuple(c=tf.zeros_like(initial_h), h=initial_h)
        # (Fix: CustomHelper takes the initialize *callable*, not its result.)
        custom_helper = CustomHelper(initial_fn,
                                     sample_fn_training if is_training else sample_fn_inference,
                                     next_inputs_fn)
        projection_layer = tf.layers.Dense(self.vocab_size, use_bias=False, name="projection_layer")
        decoder = BasicDecoder(cell, custom_helper, initial_state=initial_state, output_layer=projection_layer)
        final_outputs, final_state, final_sequence_lengths = dynamic_decode(decoder=decoder, maximum_iterations=100)

        return final_outputs


def attention(hidden_state, encoder_outputs):
    """Additive attention over the encoder feature grid.

    # Arguments:
        hidden_state: a tensor shaped [batch_size, 512], the decoder state.
        encoder_outputs: a tensor shaped [batch_size, pixels_num, 2048].

    # Returns:
        a [batch_size, 2048] context vector, the attention-weighted sum of
        the encoder outputs.
    """
    # NOTE(review): the score is purely linear (no tanh between the
    # projection sum and v, unlike classic Bahdanau attention) — confirm
    # this is intentional before training.
    with tf.variable_scope("attention", reuse=tf.AUTO_REUSE):
        W1 = tf.get_variable("w1", shape=[512, 128], dtype=tf.float32,
                             initializer=tf.random_normal_initializer(stddev=0.02))
        W2 = tf.get_variable("w2", shape=[2048, 128], dtype=tf.float32,
                             initializer=tf.random_normal_initializer(stddev=0.02))
        # projected decoder state, [batch, 1, 128], broadcast over pixels
        query = tf.expand_dims(tf.matmul(hidden_state, W1), axis=1)

        batch_size = tf.shape(encoder_outputs)[0]
        pixels_num = tf.shape(encoder_outputs)[1]
        # projected pixel features, [batch, pixels, 128]
        flat_features = tf.reshape(encoder_outputs, [-1, 2048])
        keys = tf.reshape(tf.matmul(flat_features, W2), shape=[batch_size, pixels_num, -1])

        # reduce each 128-d score vector to one scalar per pixel
        v = tf.get_variable("v", shape=[128, 1], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=0.02))
        scores = tf.matmul(tf.reshape(query + keys, [-1, 128]), v)
        scores = tf.reshape(scores, [batch_size, pixels_num])
        attn = tf.nn.softmax(scores)             # [batch, pixels]

        # (Fix: the original multiplied the *flattened* [batch*pixels, 2048]
        # features by a [batch, pixels, 1] weight tensor — an incompatible
        # broadcast.  Weight the 3-D features instead, then sum over pixels.)
        attn = tf.expand_dims(attn, axis=-1)     # [batch, pixels, 1]
        context = tf.reduce_sum(encoder_outputs * attn, axis=1)

    return context


if __name__ == '__main__':
    # TODO: the resnet_v2_101 checkpoint seems incompatible with this model;
    # possibly caused by a TensorFlow version mismatch?

    # NOTE(review): the string literal below is dead Python-2 scratch code
    # (print statements) that compared this graph's trainable variables
    # against the names stored in the pretrained ResNet checkpoint.  It is
    # never executed; kept only for reference.
    """
    inputs = tf.placeholder(dtype=tf.float32, shape=[None, 224, 224, 3])
    featuremaps = encoder(inputs)
    trainable_variables = tf.trainable_variables()
    for var in trainable_variables:
        print var.name, var.shape

    session = tf.InteractiveSession()
    saver = tf.train.Saver()

    from tensorflow.python import pywrap_tensorflow
    checkpoint_path = "/dl_data/gans/mscoco_image_caption/checkpoints/resnet_v2_101.ckpt"
    reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
    var_to_shape_map = reader.get_variable_to_shape_map()

    keys={}
    for key in var_to_shape_map:
        print key
        keys[key] = 1

    for var in trainable_variables:
        tag = "N"
        if var.name[:-2] in keys:
            tag = "Y"
        print "[{}] {}".format(tag, var.name)

    print featuremaps
    """