# coding:utf-8
'''
Model / network architecture definitions.
@author: wangyi
'''
import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer
from tensorflow.contrib.layers import batch_norm
from configs import config
class Model(object):
    """Text-classification graph builder.

    Depending on ``model_name``, builds either a TextCNN or a bidirectional
    RNN (GRU/LSTM) classifier in the default TensorFlow 1.x graph.
    """

    def __init__(self, model_name, embedding_size, seq_length, filter_sizes, filter_nums,
                 num_class, vocab_size, inital_learning_rate, l2_lamda,
                 decay_steps, decay_rate, num_units=0):
        """Store hyper-parameters, declare placeholders/embeddings and build the graph.

        Args:
            model_name: one of 'text_cnn', 'gru', 'lstm'.
            embedding_size: word-embedding dimensionality.
            seq_length: maximum sentence length (token count).
            filter_sizes: convolution kernel heights (TextCNN only).
            filter_nums: number of kernels per entry of `filter_sizes`.
            num_class: number of target labels.
            vocab_size: vocabulary size.
            inital_learning_rate: initial Adam learning rate.
            l2_lamda: L2 regularisation coefficient.
            decay_steps: step interval of the exponential learning-rate decay.
            decay_rate: multiplicative decay factor.
            num_units: hidden units (FC layer for TextCNN, cell size for RNNs).
        """
        self.model_name = model_name          # model selector
        self.embedding_size = embedding_size  # word-vector dimension
        self.vocab_size = vocab_size          # vocabulary size
        self.seq_length = seq_length          # max sentence length / token count
        self.filter_sizes = filter_sizes      # convolution kernel structure
        self.filter_nums = filter_nums        # kernels per kernel size
        self.num_class = num_class            # number of labels
        self.inital_learning_rate = inital_learning_rate  # initial learning rate
        self.l2_lamda = l2_lamda              # regularisation coefficient
        self.num_units = num_units            # hidden-unit count
        self.input_x = tf.placeholder(dtype=tf.int32, shape=[None, self.seq_length])
        self.input_y = tf.placeholder(dtype=tf.float32, shape=[None, self.num_class])
        self.keep_drop_prob = tf.placeholder(dtype=tf.float32)
        self.sequence_length = tf.placeholder(tf.int32, [None])  # true sequence lengths
        self.batch_size = tf.placeholder(tf.int32, [])  # actual batch size
        self.istraining = tf.placeholder(tf.bool)
        self.decay_steps = decay_steps
        self.decay_rate = decay_rate
        with tf.name_scope('embedding'):
            # Trainable word-embedding lookup table, uniformly initialised in [-1, 1).
            embedding = tf.Variable(
                tf.random_uniform([self.vocab_size, self.embedding_size], -1, 1, dtype=tf.float32),
                name='embedding')
            self.word_embedding = tf.nn.embedding_lookup(embedding, self.input_x)
            # Extra channel dimension for conv2d (TextCNN path only).
            self.word_embedding_expand = tf.expand_dims(self.word_embedding, -1)
        if self.model_name == 'text_cnn':
            self.text_cnn()
        if self.model_name == 'gru' or self.model_name == 'lstm':
            self.rnn(self.word_embedding)

    def text_cnn(self):
        """Build the TextCNN graph: conv + max-pool per kernel size, FC layers, softmax."""
        pool_out = []
        for i, filter_size in enumerate(self.filter_sizes):
            with tf.variable_scope('conv-pooling-' + str(i)):
                # Convolution spanning the full embedding width.
                W_filter = tf.get_variable(
                    name='conv' + str(i),
                    shape=[filter_size, self.embedding_size, 1, self.filter_nums[i]],
                    initializer=xavier_initializer())
                b_filter = tf.Variable(tf.constant(0.0, shape=[self.filter_nums[i]]), name='bias')
                conv = tf.nn.conv2d(self.word_embedding_expand, W_filter,
                                    strides=[1, 1, 1, 1], padding='VALID')
                conv = tf.nn.relu(tf.add(conv, b_filter))
                # Global max pooling over the time axis.
                pool = tf.nn.max_pool(conv, ksize=[1, self.seq_length - filter_size + 1, 1, 1],
                                      strides=[1, 1, 1, 1], padding='VALID')
                pool_out.append(pool)
        pool_out = tf.concat(pool_out, 3)
        flat_dim = pool_out.get_shape()[1].value * pool_out.get_shape()[3].value
        h = tf.reshape(pool_out, [-1, flat_dim])
        # Fully connected layers.
        with tf.name_scope('fully-connection'):
            self.W_fc = tf.get_variable(name='w_fc', shape=[flat_dim, self.num_units],
                                        initializer=xavier_initializer())
            # BUG FIX: the name must be a keyword argument; passed positionally it
            # lands in tf.Variable's `trainable` parameter.
            self.b_fc = tf.Variable(tf.constant(0.0, shape=[self.num_units]), name='b_fc')
            self.fc = tf.nn.relu(tf.matmul(h, self.W_fc) + self.b_fc)
            # BUG FIX: dropout is applied to the hidden representation, not to the
            # logits, so predictions/probabilities are computed from the full logits.
            self.fc = tf.nn.dropout(self.fc, self.keep_drop_prob)
            self.W = tf.get_variable(name='w', shape=[self.num_units, self.num_class],
                                     initializer=xavier_initializer())
            self.b = tf.Variable(tf.constant(0.0, shape=[self.num_class]), name='b')
            # L2 regularisation on the output layer only.
            self.l2_loss = tf.constant(0.0)
            self.l2_loss += tf.nn.l2_loss(self.W)
            self.l2_loss += tf.nn.l2_loss(self.b)
            self.logits = tf.add(tf.matmul(self.fc, self.W), self.b, name='logits')
            self.pred_prob = tf.nn.softmax(self.logits)
            # Predicted label indices.
            self.pred = tf.argmax(self.logits, 1)
        with tf.name_scope('loss'):
            self.loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y,
                                                                logits=self.logits)
            self.loss = tf.reduce_mean(self.loss + self.l2_loss * self.l2_lamda)
        with tf.name_scope('optimizer'):
            # BUG FIX: global_step must be non-trainable and passed to minimize();
            # otherwise exponential_decay never advances and the LR stays constant.
            global_step = tf.Variable(0, trainable=False)
            self.learning_rate = tf.train.exponential_decay(
                self.inital_learning_rate, global_step=global_step,
                decay_steps=self.decay_steps, decay_rate=self.decay_rate)
            self.optim = tf.train.AdamOptimizer(self.learning_rate).minimize(
                self.loss, global_step=global_step)
        # Accuracy against the one-hot labels.
        with tf.name_scope('acc'):
            self.acc = tf.reduce_mean(
                tf.cast(tf.equal(self.pred, tf.argmax(self.input_y, 1)), tf.float32))

    def chunk_max_pooling(self, conv, k):
        """Chunk-max pooling: split the time axis into `k` chunks and max-pool each.

        Returns the per-chunk maxima concatenated along the time axis.
        NOTE(review): relies on config['batch_size'] for the slice bound — assumes
        every batch is exactly that size; verify against the input pipeline.
        """
        chunk_num = int(conv.get_shape()[1].value / k)
        convs = []
        for i in range(k):
            start = i * chunk_num
            # Clamp the final chunk to the tensor's actual time length.
            end = min((i + 1) * chunk_num, conv.get_shape()[1].value)
            conv_sub = tf.strided_slice(
                conv, [0, start, 0, 0],
                [config['batch_size'], end, conv.get_shape()[2].value, conv.get_shape()[3].value])
            pooling_sub = tf.nn.max_pool(conv_sub, [1, conv_sub.get_shape()[1].value, 1, 1],
                                         strides=[1, 1, 1, 1], padding='VALID')
            convs.append(pooling_sub)
        return tf.concat(convs, axis=1)

    def rnn(self, inputs):
        """Build a bidirectional RNN (GRU or LSTM) classifier over `inputs`.

        Raises:
            ValueError: if `model_name` is neither 'gru' nor 'lstm'.
        """
        with tf.name_scope("basic_rnn"):
            if self.model_name == 'gru':
                fw_cell = tf.nn.rnn_cell.GRUCell(num_units=self.num_units)
                bw_cell = tf.nn.rnn_cell.GRUCell(num_units=self.num_units)
            elif self.model_name == 'lstm':
                fw_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=self.num_units)
                bw_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=self.num_units)
            else:
                raise ValueError("please choose a rnn model(gru/lstm)")
        with tf.name_scope("bi-rnn"):
            fw_cell = tf.nn.rnn_cell.DropoutWrapper(fw_cell, self.keep_drop_prob)
            bw_cell = tf.nn.rnn_cell.DropoutWrapper(bw_cell, self.keep_drop_prob)
            # BUG FIX: RNNCell.zero_state requires a dtype argument.
            initial_state_fw = fw_cell.zero_state(self.batch_size, tf.float32)
            initial_state_bw = bw_cell.zero_state(self.batch_size, tf.float32)
            out_put, states = tf.nn.bidirectional_dynamic_rnn(
                fw_cell, bw_cell, inputs, sequence_length=self.sequence_length,
                initial_state_fw=initial_state_fw, initial_state_bw=initial_state_bw)
            # Concatenate forward/backward outputs -> [batch, time, 2*num_units].
            out_put = tf.concat(out_put, 2)
            # BUG FIX: the outputs are 3-D and cannot be multiplied by a 2-D weight
            # matrix; gather each sequence's last *valid* timestep instead.
            batch_idx = tf.range(self.batch_size)
            last_idx = tf.stack([batch_idx, self.sequence_length - 1], axis=1)
            last_output = tf.gather_nd(out_put, last_idx)
        with tf.variable_scope("fully-connected"):
            self.w = tf.get_variable(name='w', shape=[2 * self.num_units, self.num_class],
                                     initializer=tf.contrib.layers.xavier_initializer())
            # Biases are conventionally initialised to zero, not Xavier.
            self.b = tf.get_variable(name='b', shape=[self.num_class],
                                     initializer=tf.zeros_initializer())
            # Dropout on the hidden representation, then project to class logits.
            last_output = tf.nn.dropout(last_output, self.keep_drop_prob)
            self.logits = tf.matmul(last_output, self.w) + self.b
            self.pred_prob = tf.nn.softmax(self.logits)
            self.pred = tf.argmax(self.logits, 1)
        with tf.name_scope("loss"):
            # BUG FIX: input_y is one-hot single-label, so softmax cross-entropy is
            # the correct loss (sigmoid treats the task as multi-label) and matches
            # the TextCNN path and the argmax-based accuracy below.
            self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.input_y))
        with tf.name_scope('optimizer'):
            # BUG FIX: global_step must be non-trainable and passed to minimize()
            # so the exponential decay schedule actually advances.
            global_step = tf.Variable(0, trainable=False)
            self.learning_rate = tf.train.exponential_decay(
                self.inital_learning_rate, global_step=global_step,
                decay_steps=self.decay_steps, decay_rate=self.decay_rate)
            self.optim = tf.train.AdamOptimizer(self.learning_rate).minimize(
                self.loss, global_step=global_step)
        with tf.name_scope("acc"):
            self.acc = tf.reduce_mean(
                tf.cast(tf.equal(self.pred, tf.argmax(self.input_y, 1)), tf.float32))











