#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author coldwind
"""

import tensorflow as tf


class TextFast:
    """FastText-style text model: average the word embeddings, apply dropout,
    then a single linear output layer.

    Supports two modes selected by ``model_type``: ``'clf'`` (softmax
    classification) and ``'reg'`` (regression via the max score). Built with
    the TF1 graph API; all inputs are fed through placeholders.
    """

    def __init__(self,
                 model_type,
                 sequence_length,
                 number_class,
                 word_embedding,
                 vocab_size,
                 embedding_size,
                 l2_reg_lambda):
        """Build the computation graph.

        Args:
            model_type: 'clf' for classification, 'reg' for regression.
            sequence_length: fixed token length of each input sample.
            number_class: number of output units (columns of input_y).
            word_embedding: pre-trained embedding matrix (array-like,
                presumably shaped [vocab_size, embedding_size] — confirm with
                caller), or None to train an embedding from scratch.
            vocab_size: vocabulary size (used only when word_embedding is None).
            embedding_size: embedding dimension.
            l2_reg_lambda: weight of the L2 penalty on the output layer.
        """
        self.input_x = tf.placeholder(shape=[None, sequence_length], dtype=tf.int32, name='input_x')
        self.input_y = tf.placeholder(shape=[None, number_class], dtype=tf.float32, name='input_y')
        self.dropout_keep_prob = tf.placeholder(dtype=tf.float32, name='dropout_keep_prob')
        self.learn_rate = tf.placeholder(dtype=tf.float32, name='learn_rate')

        # L2 regularization loss accumulator (only the output layer contributes).
        self.l2_loss = tf.constant(value=0.0, dtype=tf.float32)

        # Embedding layer, pinned to CPU.
        with tf.device('/cpu:0'), tf.name_scope(name='embedding'):
            # BUG FIX: `if word_embedding:` raises ValueError for a numpy
            # array (ambiguous truth value); test against None explicitly.
            if word_embedding is not None:
                self.Embedding = tf.get_variable(initializer=word_embedding, name='word_embedding')
            else:
                self.Embedding = tf.Variable(
                    tf.random_uniform(shape=[vocab_size, embedding_size], minval=-1.0, maxval=1.0), trainable=True,
                    name='word_embedding')
            # (batch_size, sequence_length, embedding_size)
            self.embedded_words = tf.nn.embedding_lookup(self.Embedding, self.input_x)

        # Average-pooling layer over the sequence axis.
        with tf.name_scope(name='avg_pooling'):
            # (batch_size, embedding_size)
            self.output = tf.reduce_mean(self.embedded_words, axis=1)

        # Dropout on the pooled representation.
        with tf.name_scope(name='dropout'):
            # (batch_size, embedding_size)
            self.h_dropout = tf.nn.dropout(self.output, keep_prob=self.dropout_keep_prob)

        # Final (unnormalized) scores and predictions.
        with tf.name_scope(name='output'):
            W = tf.get_variable(dtype=tf.float32, shape=[embedding_size, number_class],
                                initializer=tf.contrib.layers.xavier_initializer(), name='W')
            b = tf.Variable(tf.constant(value=0.1, shape=[number_class]), name='b')

            self.l2_loss += tf.nn.l2_loss(W)
            self.l2_loss += tf.nn.l2_loss(b)
            # (batch_size, number_class)
            self.scores = tf.nn.xw_plus_b(x=self.h_dropout, weights=W, biases=b)

            if model_type == 'clf':
                # Index of the highest score per sample -> (batch_size,)
                self.predictions = tf.argmax(self.scores, axis=1, name='predictions')
            elif model_type == 'reg':
                # Highest score value per sample, expanded to (batch_size, 1)
                # so it aligns with input_y in the loss below.
                self.predictions = tf.reduce_max(self.scores, axis=1, name='predictions')
                self.predictions = tf.expand_dims(self.predictions, -1)

        # Mean cross-entropy loss (clf) or root-mean-square error (reg),
        # plus the L2 penalty in both cases.
        with tf.name_scope('loss'):
            if model_type == 'clf':
                # (batch_size,)
                losses = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.scores, labels=self.input_y)
                self.loss = tf.reduce_mean(losses) + l2_reg_lambda * self.l2_loss
            elif model_type == 'reg':
                losses = tf.sqrt(tf.losses.mean_squared_error(predictions=self.predictions, labels=self.input_y))
                self.loss = tf.reduce_mean(losses) + l2_reg_lambda * self.l2_loss

        # Accuracy (classification only; regression reports a constant 0).
        with tf.name_scope('accuracy'):
            if model_type == 'clf':
                # BUG FIX: the label argmax needs axis=1 (per-sample label);
                # the original omitted it, so TF reduced over axis 0 (the
                # batch) and the comparison with (batch_size,) predictions
                # was wrong. correct_predictions is boolean, so cast before
                # reduce_mean.
                correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, axis=1))
                self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, dtype=tf.float32), name='accuracy')
            elif model_type == 'reg':
                self.accuracy = tf.constant(value=0.0, name='accuracy')


if __name__ == '__main__':
    # Smoke test: build the graph once.
    # BUG FIX: TextFast checks model_type == 'clf', but 'cls' was passed
    # here; with 'cls' neither branch matches, so self.predictions and
    # self.loss were never created. 'clf': classification; 'reg': regression.
    text_fast = TextFast(model_type='clf',
                         sequence_length=10,
                         number_class=2,
                         word_embedding=None,
                         vocab_size=100,
                         embedding_size=128,
                         l2_reg_lambda=0.1)
