#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author coldwind
"""

import tensorflow as tf


class TextCNN:
    """A CNN for sentence-level text classification or regression (Kim, 2014).

    Architecture: embedding lookup -> parallel conv + max-over-time pooling
    branches (one per filter size) -> concat -> dropout -> linear output layer.
    Built as a TF 1.x static graph; inputs are fed through placeholders.
    """

    def __init__(self,
                 model_type,
                 sequence_length,
                 number_class,
                 word_embedding,
                 vocab_size,
                 embedding_size,
                 filter_sizes,
                 number_filters,
                 l2_reg_lambda=0.0):
        """Build the computation graph.

        Args:
            model_type: 'clf' for classification, 'reg' for regression
                (for regression set number_class to 1).
            sequence_length: fixed length of each input token-id sequence.
            number_class: number of output units (classes, or 1 for regression).
            word_embedding: pre-trained embedding matrix of shape
                (vocab_size, embedding_size), or None to learn embeddings
                from scratch.
            vocab_size: vocabulary size (used only when word_embedding is None).
            embedding_size: dimensionality of the word vectors.
            filter_sizes: iterable of convolution window heights, e.g. [2, 3, 4].
            number_filters: number of filters per filter size.
            l2_reg_lambda: L2 regularization strength applied to the output
                layer weights and bias.
        """
        # Placeholders: data and hyper-parameters supplied at session run time.
        self.input_x = tf.placeholder(shape=[None, sequence_length], dtype=tf.int32, name='input_x')
        self.input_y = tf.placeholder(shape=[None, number_class], dtype=tf.float32, name='input_y')
        self.dropout_keep_prob = tf.placeholder(dtype=tf.float32, name='dropout_keep_prob')
        self.learning_rate = tf.placeholder(dtype=tf.float32, name='learning_rate')

        # Accumulator for the L2 regularization term.
        self.l2_loss = tf.constant(value=0.0)

        with tf.device('/cpu:0'), tf.name_scope('embedding'):
            # BUGFIX: `if word_embedding:` raises "truth value of an array is
            # ambiguous" for a NumPy matrix — compare against None explicitly.
            if word_embedding is not None:
                # BUGFIX: tf.get_variable has no `initial_value` argument
                # (that keyword belongs to tf.Variable), so the original
                # raised TypeError whenever pre-trained vectors were given.
                # Initialize a trainable Variable from the provided matrix.
                self.Embedding = tf.Variable(
                    initial_value=tf.constant(word_embedding, dtype=tf.float32),
                    trainable=True, name='word_embedding')
            else:
                # No pre-trained vectors: learn embeddings from a uniform init.
                self.Embedding = tf.Variable(
                    tf.random_uniform(shape=[vocab_size, embedding_size], minval=-1, maxval=1, dtype=tf.float32),
                    trainable=True, name='word_embedding')
            # (batch_size, sequence_length, embedding_size)
            self.embedded_words = tf.nn.embedding_lookup(self.Embedding, self.input_x, name='embedded_words')
            # Add a channel dimension for conv2d:
            # (batch_size, sequence_length, embedding_size, 1)
            self.embedded_words_expand = tf.expand_dims(self.embedded_words, -1)

        pooled_outputs = []
        # Create a convolution + max-pool branch for each filter size.
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope('conv-maxpool-%s' % filter_size):
                # Filter shape: (height, width, in_channels, out_channels).
                filter_shape = [filter_size, embedding_size, 1, number_filters]
                # truncated_normal keeps initial weights within 2 stddevs.
                W = tf.Variable(tf.truncated_normal(shape=filter_shape, stddev=0.1, dtype=tf.float32), name='W')
                b = tf.Variable(tf.constant(0.1, shape=[number_filters], dtype=tf.float32), name='b')
                # VALID padding -> (batch_size, sequence_length - filter_size + 1, 1, number_filters)
                conv = tf.nn.conv2d(self.embedded_words_expand,
                                    filter=W,
                                    strides=[1, 1, 1, 1],
                                    padding='VALID',
                                    name='conv')
                h = tf.nn.relu(tf.nn.bias_add(value=conv, bias=b), name='relu')

                # Max-over-time pooling -> (batch_size, 1, 1, number_filters)
                pooled = tf.nn.max_pool(value=h,
                                        ksize=[1, sequence_length - filter_size + 1, 1, 1],
                                        strides=[1, 1, 1, 1],
                                        padding='VALID',
                                        name='pooled')
                pooled_outputs.append(pooled)

        # Combine all the pooled features along the channel axis.
        number_features_total = number_filters * len(filter_sizes)
        # (batch_size, 1, 1, number_features_total)
        self.h_pool = tf.concat(values=pooled_outputs, axis=3, name='concat')
        # (batch_size, number_features_total)
        self.h_pool_flatten = tf.reshape(self.h_pool, shape=[-1, number_features_total], name='pool_flatten')

        # Dropout on the pooled feature vector.
        with tf.name_scope('dropout'):
            self.h_drop = tf.nn.dropout(self.h_pool_flatten, keep_prob=self.dropout_keep_prob, name='dropout')

        # Final (unnormalized) scores and predictions.
        with tf.name_scope('output'):
            W = tf.get_variable(shape=[number_features_total, number_class], dtype=tf.float32,
                                initializer=tf.contrib.layers.xavier_initializer(), name='W')
            b = tf.Variable(tf.constant(0.1, shape=[number_class]), name='b')

            # Only the output layer is L2-regularized.
            self.l2_loss += tf.nn.l2_loss(W)
            self.l2_loss += tf.nn.l2_loss(b)

            self.scores = tf.nn.xw_plus_b(x=self.h_drop, weights=W, biases=b, name='scores')
            if model_type == 'clf':
                self.predictions = tf.argmax(self.scores, axis=1, name='predictions')
            elif model_type == 'reg':
                # With number_class == 1 this is just the raw score,
                # reshaped back to (batch_size, 1).
                self.predictions = tf.reduce_max(self.scores, axis=1, name='predictions')
                self.predictions = tf.expand_dims(self.predictions, -1)

        # Mean cross-entropy loss (clf) or root-mean-square error loss (reg).
        with tf.name_scope('loss'):
            if model_type == 'clf':
                losses = tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.input_y, logits=self.scores)
                self.loss = tf.reduce_mean(losses) + self.l2_loss * l2_reg_lambda
            elif model_type == 'reg':
                # BUGFIX: the original compared predictions against scores —
                # both model outputs — so the ground truth never entered the
                # loss and it was degenerate. RMSE must be measured against
                # the label placeholder input_y.
                losses = tf.sqrt(tf.losses.mean_squared_error(labels=self.input_y, predictions=self.scores))
                self.loss = tf.reduce_mean(losses) + self.l2_loss * l2_reg_lambda

        # Accuracy (meaningful for classification only).
        with tf.name_scope('accuracy'):
            if model_type == 'clf':
                correct_predictions = tf.equal(tf.argmax(self.input_y, axis=1), self.predictions)
                self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, dtype=tf.float32), name='accuracy')
            elif model_type == 'reg':
                # Placeholder constant so training code can fetch `accuracy`
                # uniformly for both model types.
                self.accuracy = tf.constant(0.0, name="accuracy")


if __name__ == '__main__':
    # Smoke test: build the graph once with small dimensions.
    # model_type: 'clf' = classification task; 'reg' = regression task
    # (set number_class to 1 for regression).
    # BUGFIX: the original passed 'cls', which the class does not recognize
    # ('clf'/'reg' are the only handled values), so loss/predictions/accuracy
    # were silently never created.
    text_cnn = TextCNN(model_type='clf',
                       sequence_length=10,
                       number_class=2,
                       word_embedding=None,
                       vocab_size=100,
                       embedding_size=128,
                       filter_sizes=[2, 3],
                       number_filters=64,
                       l2_reg_lambda=0.1)