# -*- coding: UTF-8 -*-
import tensorflow as tf
from tensorflow.contrib import rnn
import math


class VisLstmModel():
    """VIS+LSTM-style model for visual question answering (TF 1.x graph mode).

    The fc7 image feature is projected into the word-embedding space and
    appended as one extra "token" after the question embeddings; the joint
    sequence runs through a stacked LSTM whose final hidden state is
    classified over the answer vocabulary.
    """

    def __init__(self, config, mode=0):
        # mode 0 = training (dropout enabled in get_lstm_cell);
        # any other value disables dropout (inference/eval).
        self.mode = mode
        self.init_var(config=config)
        self.init()

    def init_weight(self, dim_in, dim_out, name=None, stddev=1.0):
        """Return a [dim_in, dim_out] Variable, truncated-normal initialized
        with stddev scaled by 1/sqrt(dim_in) (fan-in scaling)."""
        return tf.Variable(tf.truncated_normal([dim_in, dim_out], stddev=stddev / math.sqrt(float(dim_in))),
                           name=name)

    def init_bias(self, dim_out, name=None):
        """Return a zero-initialized bias Variable of length dim_out."""
        return tf.Variable(tf.zeros([dim_out]), name=name)

    def init_var(self, config):
        """Copy the hyper-parameters the graph needs from `config`."""
        self.hidden_size = config.rnn_size
        self.lstm_layer_size = config.num_lstm_layers
        self.embedding_size = config.embedding_size
        self.fc7_feat_length = config.fc7_feat_length
        # +1 presumably reserves an index for padding/OOV — TODO confirm
        # against the vocabulary builder.
        self.ans_vocab_size = config.ans_vocab_size + 1
        self.max_ques_length = config.max_ques_length
        self.q_vocab_size = config.q_vocab_size + 1

    def init(self):
        """Create the LSTM stack, its zero state, and the word-embedding table."""
        self.batch_size = tf.placeholder(tf.int32, [])
        self.lstm_multi_layers = rnn.MultiRNNCell([self.get_lstm_cell() for i in range(self.lstm_layer_size)],
                                                  state_is_tuple=True)
        self.init_state = self.lstm_multi_layers.zero_state(batch_size=self.batch_size, dtype=tf.float32)
        self.embedding_space = self.init_weight(self.q_vocab_size, self.embedding_size)

    def get_lstm_cell(self):
        """Build one LSTM cell; wrap with output dropout (keep=0.5) when training."""
        lstm_cell = rnn.BasicLSTMCell(num_units=self.hidden_size, forget_bias=1.0, state_is_tuple=True)
        if self.mode == 0:
            lstm_cell = rnn.DropoutWrapper(cell=lstm_cell, input_keep_prob=1.0, output_keep_prob=0.5)
        return lstm_cell

    def build_model(self):
        """Assemble the full graph: inputs, embeddings, dynamic RNN, softmax
        classifier, loss, accuracy, and prediction ops.

        Defines placeholders self.img_feat, self.ques, self.answer and
        tensors self.answer_probab, self.loss, self.accuracy,
        self.predictions.
        """
        with tf.name_scope('input'):
            self.img_feat = tf.placeholder(tf.float32, [None, self.fc7_feat_length], name='img_feat')
            self.ques = tf.placeholder(tf.int32, [None, self.max_ques_length], name="ques")
            # One-hot (or soft) answer distribution over the answer vocabulary.
            self.answer = tf.placeholder(tf.float32, [None, self.ans_vocab_size], name="answer")

        with tf.name_scope('word_embed'):
            word_emb = tf.nn.embedding_lookup(self.embedding_space, self.ques)

        with tf.name_scope('img_embed'):
            img_feat = tf.nn.relu(self.img_feat, "vis_relu")
            img_W = self.init_weight(self.fc7_feat_length, self.embedding_size, name="img_W")
            img_b = self.init_bias(self.embedding_size, "img_b")
            # Project the image feature into the word-embedding space so it
            # can be fed to the LSTM as one extra sequence step.
            img_embed = tf.nn.tanh(tf.nn.bias_add(tf.matmul(img_feat, img_W), img_b))
            img_embed = tf.reshape(img_embed, shape=[-1, 1, self.embedding_size])

        with tf.name_scope("dynamic_rnn"):
            # Image embedding is appended AFTER the question tokens.
            embed = tf.concat([word_emb, img_embed], axis=1)
            output, state = tf.nn.dynamic_rnn(cell=self.lstm_multi_layers, inputs=embed,
                                              initial_state=self.init_state, time_major=False)
            # Final hidden state (h) of the topmost LSTM layer; with
            # state_is_tuple=True each layer's state is (c, h).
            lstm_output = state[-1][1]

        with tf.name_scope("softmax"):
            # FIX: lstm_output has width hidden_size (rnn_size), not
            # embedding_size — the original [embedding_size, ans_vocab_size]
            # weight only worked when the two happened to be equal.
            ans_W = self.init_weight(self.hidden_size, self.ans_vocab_size, name="ans_W")
            ans_b = self.init_bias(self.ans_vocab_size, name="ans_b")
            logits = tf.nn.bias_add(tf.matmul(lstm_output, ans_W), ans_b)
            self.answer_probab = tf.nn.softmax(logits, name='answer_probab')

        with tf.name_scope("cross_entropy"):
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.answer, logits=logits,
                                                                    name='ce')
        with tf.name_scope('loss'):
            self.loss = tf.reduce_sum(cross_entropy, name='loss')
        with tf.name_scope('accuracy'):
            # FIX: tf.argmx -> tf.argmax (AttributeError at graph-build time).
            correct_predictions = tf.equal(tf.argmax(self.answer_probab, 1), tf.argmax(self.answer, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
        with tf.name_scope('pred'):
            self.predictions = tf.argmax(self.answer_probab, 1)
        print("build complete...")
