# coding:utf-8
'''
TensorFlow 1.x CNN regression model: multi-width conv/max-pool branches
feeding a small dense head, trained with mean absolute error.

author: wangyi
'''

import tensorflow as tf


class CNN:
    """Multi-width convolutional regressor over fixed-length sequences (TF1 graph mode).

    Builds one conv + max-pool branch per filter size, concatenates the pooled
    features, and regresses a single non-negative value per example through a
    small fully connected head. Trained with mean absolute error and Adam.
    """

    def __init__(self, filter_sizes, feature_dim, filter_nums, max_seq_length, learning_rate):
        """Create placeholders and build the full graph (via ``self.cnn()``).

        Args:
            filter_sizes: iterable of int convolution heights (n-gram widths).
            feature_dim: size of each per-timestep feature vector.
            filter_nums: number of feature maps produced per filter size.
            max_seq_length: fixed time length of ``input_x``.
            learning_rate: Adam learning rate.
        """
        self.filter_sizes = filter_sizes
        self.feature_dim = feature_dim
        self.filter_nums = filter_nums
        self.max_seq_length = max_seq_length
        self.learning_rate = learning_rate

        # (batch, seq, dim) features; target is one scalar per example.
        self.input_x = tf.placeholder(dtype=tf.float32,
                                      shape=[None, self.max_seq_length, self.feature_dim],
                                      name='input_x')
        self.input_y = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='input_y')
        # NOTE(review): `pad`/`pads` fed a manual-padding path that is no longer
        # built; kept so existing feed_dicts that supply them keep working.
        self.pad = tf.placeholder(dtype=tf.float32,
                                  shape=[None, 1, self.feature_dim], name='pad')
        self.pads = tf.expand_dims(self.pad, -1)
        # Add a trailing channel axis so conv2d sees (batch, seq, dim, 1).
        self.input_xs = tf.expand_dims(self.input_x, -1)
        self.cnn()

    def cnn(self):
        """Assemble the conv/pool branches, dense head, loss, and train op."""
        with tf.name_scope('conv-pool'):
            pools = []
            # `filter_size` (not `filter`) to avoid shadowing the builtin.
            for filter_size in self.filter_sizes:
                # Kernel (height, width, in_ch, out_ch) spans the entire
                # feature dimension, so the output width collapses to 1.
                F = tf.Variable(tf.truncated_normal(
                    shape=[filter_size, self.feature_dim, 1, self.filter_nums],
                    stddev=0.01))
                B = tf.Variable(tf.constant(0.0, shape=[self.filter_nums], dtype=tf.float32))
                conv = tf.nn.relu(
                    tf.nn.conv2d(self.input_xs, F, strides=[1, 1, 1, 1], padding='VALID') + B)
                # Max over every valid time position -> (batch, 1, 1, filter_nums).
                pool = tf.nn.max_pool(conv,
                                      ksize=[1, self.max_seq_length - filter_size + 1, 1, 1],
                                      strides=[1, 1, 1, 1], padding='VALID')
                pools.append(tf.squeeze(pool, [1, 2]))
            # (batch, len(filter_sizes) * filter_nums)
            self.h = tf.concat(pools, axis=1)

        with tf.variable_scope('logits'):
            hidden_size = 64
            # Fix: `w0` previously used truncated_normal's default stddev=1.0,
            # inconsistent with the 0.01 used for the conv filters and likely
            # to saturate/kill the ReLU units; initialize it the same way.
            w0 = tf.Variable(tf.truncated_normal(
                shape=[len(self.filter_sizes) * self.filter_nums, hidden_size],
                stddev=0.01))
            b0 = tf.Variable(tf.constant(0.0, shape=[hidden_size]), dtype=tf.float32)
            h = tf.nn.relu(tf.matmul(self.h, w0) + b0)
            w = tf.get_variable('w', shape=[hidden_size, 1], dtype=tf.float32,
                                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.0, shape=[1], dtype=tf.float32), name='b')
            # ReLU output head: predictions are clamped to be non-negative.
            self.y_preds = tf.nn.relu(tf.matmul(h, w) + b)

        with tf.name_scope('loss'):
            # Mean absolute error, minimized with Adam.
            self.loss_v = tf.reduce_mean(tf.abs(self.y_preds - self.input_y))
            self.op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss_v)






