from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import ast
import time

import numpy as np
import tensorflow as tf

from properties import Properties

class Seq2SeqModel(object):
    """Bucketed attention sequence-to-sequence model on TF1 ``legacy_seq2seq``.

    Construct with hyper-parameters read from a ``Properties`` object, then
    call :meth:`build_graph` to assemble placeholders, the RNN, the bucketed
    losses and (unless ``forward_only``) the SGD training ops.
    """

    def __init__(self, forward_only, cell, props):
        """Read hyper-parameters from ``props`` and store model settings.

        :param forward_only: bool, if True only the forward pass is built
            (no backward pass / parameter updates — decode mode).
        :param cell: RNN cell class (e.g. a ``tf.contrib.rnn`` cell type)
            instantiated once per layer in :meth:`_inference`.
        :param props: Properties object exposing ``getProperties(key)``.
        """
        print('>> initialize model settings')
        self.fw_only = forward_only
        self.props = props
        self.modcell = cell
        self.mode = props.getProperties('Mode.mode')
        # literal_eval instead of eval: the bucket list in the config file
        # is data (e.g. "[(5, 10), (10, 15)]"), not code to execute.
        self.buckets = ast.literal_eval(props.getProperties('Attrs.buckets'))
        self.batch_size = int(props.getProperties('Attrs.batch_size'))
        self.lr_decay_factor = float(props.getProperties('Attrs.learning_rate_decay_factor'))
        self.learning_rate = tf.Variable(float(props.getProperties('Attrs.learning_rate')), trainable=False)
        # Running this op multiplies the learning rate by the decay factor.
        self.learning_rate_decay_op = self.learning_rate.assign(self.learning_rate * self.lr_decay_factor)
        self.num_samples = int(props.getProperties('Attrs.num_samples'))
        self.dec_vocab_size = int(props.getProperties('Attrs.dec_vocab_size'))
        self.hidden_size = int(props.getProperties('Attrs.layer_size'))
        self.source_vocab_size = int(props.getProperties('Attrs.src_vocab_size'))
        self.target_vocab_size = int(props.getProperties('Attrs.tar_vocab_size'))
        self.num_layers = int(props.getProperties('Attrs.num_layers'))
        self.max_grad_norm = float(props.getProperties('Attrs.max_gradient_norm'))

    def _create_placeholders(self):
        """Create encoder/decoder feed placeholders sized for the largest bucket."""
        print('>> create placeholders')
        enc_size, dec_size = self.buckets[-1]
        self.encoder_inputs = [tf.placeholder(tf.int32, shape=[None], name='encoder{}'.format(i))
                               for i in range(enc_size)]
        # One extra decoder slot so the shifted targets below stay full-length.
        self.decoder_inputs = [tf.placeholder(tf.int32, shape=[None], name='decoder{}'.format(i))
                               for i in range(dec_size + 1)]
        self.decoder_masks = [tf.placeholder(tf.float32, shape=[None], name='mask{}'.format(i))
                              for i in range(dec_size + 1)]
        # Our targets are decoder inputs shifted by one (to ignore <GO> symbol)
        self.targets = self.decoder_inputs[1:]

    def _inference(self):
        """Build the output projection, softmax loss function and the RNN cell."""
        print('>> create inference')
        # Defaults: full softmax and no projection. These are valid inputs to
        # embedding_attention_seq2seq / model_with_buckets, so the model still
        # builds when sampled softmax is disabled (the original code left both
        # attributes unset in that case and crashed in _create_losses).
        self.output_projection = None
        self.softmax_loss_func = None
        if 0 < self.num_samples < self.dec_vocab_size:
            w = tf.get_variable("proj_w", [self.hidden_size, self.target_vocab_size])
            b = tf.get_variable("proj_b", [self.target_vocab_size])
            self.output_projection = (w, b)
            w_t = tf.transpose(w)

            def sampled_loss(labels, logits):
                labels = tf.reshape(labels, [-1, 1])
                # sampled_softmax_loss expects weights of shape
                # [num_classes, dim], hence the transposed matrix.
                return tf.nn.sampled_softmax_loss(w_t, b, inputs=logits,
                                                  labels=labels, num_sampled=self.num_samples,
                                                  num_classes=self.target_vocab_size)

            def eval_loss(labels, logits):
                # Project hidden states to full-vocabulary logits:
                # [batch, hidden] x [hidden, vocab] -> [batch, vocab].
                # (The original multiplied by w_t, whose shapes do not line up.)
                logits = tf.nn.bias_add(tf.matmul(logits, w), b)
                labels_one_hot = tf.one_hot(labels, self.target_vocab_size)
                return tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_one_hot, logits=logits)

            if self.mode == 'train':
                self.softmax_loss_func = sampled_loss
            elif self.mode == 'eval':  # was: self.moed (AttributeError)
                self.softmax_loss_func = eval_loss
        # Build a fresh cell per layer: [cell] * n would reuse ONE cell object
        # (and therefore its weights) for every layer of the stack.
        self.cell = tf.contrib.rnn.MultiRNNCell(
            [self.modcell(self.hidden_size) for _ in range(self.num_layers)])

    def _create_losses(self):
        """Build per-bucket outputs and losses via ``model_with_buckets``."""
        print('>> creating loss.. ')
        start = time.time()

        def _seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
            # model_with_buckets deep-copies the seq2seq closure per bucket;
            # make cells "copy" as themselves so variables are not duplicated.
            setattr(self.modcell, '__deepcopy__', lambda self, _: self)
            setattr(tf.contrib.rnn.MultiRNNCell, '__deepcopy__', lambda self, _: self)
            return tf.contrib.legacy_seq2seq.embedding_attention_seq2seq(
                encoder_inputs, decoder_inputs, self.cell,
                num_encoder_symbols=self.source_vocab_size,
                num_decoder_symbols=self.target_vocab_size,
                embedding_size=self.hidden_size,
                output_projection=self.output_projection,
                feed_previous=do_decode)

        # The two original branches differed only in the feed_previous flag:
        # forward-only (decode) mode feeds each previous output back in.
        self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
            self.encoder_inputs,
            self.decoder_inputs,
            self.targets,
            self.decoder_masks,
            self.buckets,
            lambda x, y: _seq2seq_f(x, y, self.fw_only),
            softmax_loss_function=self.softmax_loss_func)

        # If we use output projection, we need to project outputs for decoding.
        if self.fw_only and self.output_projection:
            for bucket in range(len(self.buckets)):
                self.outputs[bucket] = [
                    tf.matmul(output, self.output_projection[0]) + self.output_projection[1]
                    for output in self.outputs[bucket]]
        print('>> created loss, time:', time.time() - start)

    def _created_optimizer(self):
        """Create the SGD optimizer, per-bucket clipped train ops and the saver."""
        print('>> create optimizer..')
        with tf.variable_scope('training'):
            self.global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')

            if not self.fw_only:
                self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
                trainables = tf.trainable_variables()
                self.gradient_norms = []
                self.train_ops = []
                start = time.time()
                for bucket in range(len(self.buckets)):
                    # Global-norm clipping stabilises RNN training.
                    clipped_gradients, norm = tf.clip_by_global_norm(
                        tf.gradients(self.losses[bucket], trainables), self.max_grad_norm)
                    self.gradient_norms.append(norm)
                    self.train_ops.append(self.optimizer.apply_gradients(
                        zip(clipped_gradients, trainables), global_step=self.global_step))
                    print('>> creating opt for bucket {} took {} seconds'.format(bucket, time.time() - start))
                    start = time.time()
        self.saver = tf.train.Saver(tf.global_variables())
        print('>> created optimizer')

    def build_graph(self):
        """Assemble the full graph: placeholders, network, losses, optimizer."""
        self._create_placeholders()
        self._inference()
        self._create_losses()
        self._created_optimizer()

