# -- encoding:utf-8 --

import tensorflow as tf
from tensorflow.contrib import slim


class Network(object):
    """TextCNN text classifier (Kim-style: parallel region-size convolutions
    followed by global max pooling and a linear projection to logits).

    Pipeline: project token embeddings to ``input_dimensions`` with a tanh
    dense layer, run one conv branch per region size, global-max-pool each
    branch, concatenate the pooled features, and project to ``num_labels``
    confidence scores.
    """

    def __init__(self, inputs, targets, num_labels,
                 name='TEXT_CNN', input_dimensions=128,
                 num_filters=(128, 128, 128), region_sizes=(2, 3, 4)):
        """
        :param inputs: float Tensor ``[N, T, E]`` — batch of token embedding
            sequences (assumed rank-3; confirm against the caller).
        :param targets: int Tensor ``[N]`` — class ids in ``[0, num_labels)``.
        :param num_labels: number of output classes (int).
        :param name: variable scope wrapping all model parameters.
        :param input_dimensions: width the inputs are projected to before the
            convolutions (the conv kernel spans this full width).
        :param num_filters: filters per conv branch; one entry per region size.
        :param region_sizes: conv window heights (words merged per region),
            one parallel branch each.
        :raises ValueError: if ``num_filters`` and ``region_sizes`` lengths differ.
        """
        if len(num_filters) != len(region_sizes):
            raise ValueError(
                "num_filters (%d) and region_sizes (%d) must have the same length"
                % (len(num_filters), len(region_sizes)))
        self.inputs = inputs  # Tensor, [N,T,E]
        self.targets = targets  # Tensor, [N,]
        self.num_labels = num_labels  # int
        self.name = name
        self.input_dimensions = input_dimensions
        self.num_filters = num_filters
        self.region_sizes = region_sizes
        # Outputs populated by interface()/build_network()/build_loss():
        self.logits = None  # [N,num_labels]
        self.probabilities = None  # [N,num_labels]
        self.predictions = None  # [N]
        self.per_example_loss = None  # [N,]
        self.loss = None  # [] scalar

    def interface(self):
        """Build the full graph (network + loss) and return all output tensors.

        :return: tuple ``(loss, per_example_loss, logits, probabilities,
            predictions)``.
        """
        self.build_network()
        self.build_loss()
        return self.loss, self.per_example_loss, self.logits, self.probabilities, self.predictions

    def build_network(self):
        """Construct the forward graph and set logits/probabilities/predictions."""
        with tf.variable_scope(self.name):
            with tf.variable_scope("input"):
                # Project each token embedding to a fixed width so the conv
                # kernels below can span the full feature axis.
                conv_input = slim.fully_connected(self.inputs, num_outputs=self.input_dimensions,
                                                  activation_fn=tf.nn.tanh)
                # Add a channel axis for conv2d: [N,T,E] --> [N,T,E,1]
                conv_input = tf.expand_dims(conv_input, axis=-1)

            outputs = []
            with tf.variable_scope("cnn"):
                for idx, region_size in enumerate(self.region_sizes):
                    with tf.variable_scope("conv-max-pooling-{}".format(idx)):
                        # The convolution treats each window of region_size
                        # consecutive words as one unit and fuses their feature
                        # vectors. With VALID padding the kernel spans the full
                        # feature width, so the result is [N, T', 1, C] with
                        # T' = sequence_length - region_size + 1.
                        # FIX: the original relied on slim's default 'SAME'
                        # padding, which keeps the E axis and pools over
                        # zero-padded positions — 'VALID' matches the intended
                        # TextCNN shape documented here.
                        conv = slim.conv2d(
                            conv_input,  # [N,T,E,1]
                            num_outputs=self.num_filters[idx],  # C, e.g. 2
                            kernel_size=(region_size, self.input_dimensions),  # (h,w), e.g. (3,E)
                            padding='VALID'
                        )
                        # Each filter is treated as a feature detector; take the
                        # strongest activation over the sequence (global max
                        # pooling): [N,T',1,C] --> [N,1,1,C]
                        pooled = tf.reduce_max(conv, axis=[1, 2], keep_dims=True)
                        # Drop the now-degenerate spatial axes: --> [N,C]
                        output = tf.squeeze(pooled, axis=[1, 2])
                        outputs.append(output)

            with tf.variable_scope("merge"):
                # Concatenate the pooled features of all branches: [N, sum(C_i)]
                features = tf.concat(outputs, axis=-1)

            with tf.variable_scope("project"):
                # [N,num_labels] — per-class confidence scores
                score = slim.fully_connected(features, num_outputs=self.num_labels, activation_fn=None)
                self.logits = tf.identity(score, "logits")
                self.probabilities = tf.nn.softmax(self.logits, name='probabilities')
                self.predictions = tf.argmax(self.logits, axis=-1, name='predictions')

    def build_loss(self):
        """Compute the cross-entropy loss from logits and integer targets."""
        with tf.variable_scope("loss"):
            # Per-sample loss; targets are sparse class ids, not one-hot.
            self.per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.targets,
                                                                                   logits=self.logits)
            # Mean over the batch.
            self.loss = tf.reduce_mean(self.per_example_loss)
