#coding: UTF-8

import tensorflow as tf
import numpy as np
import sys

'''
This model fuses three pieces of prior work into one network: the classic
CNN text classifier, Lao Yu's SimNet, and Zhou Bowen's CNN.
'''

class SimnetCNN(object):
    """Siamese text-matching CNN (SimNet-style).

    Two token-id sequences (q1, q2) share one embedding table and one set of
    convolution filters.  Each branch is convolved, max-pooled over time,
    projected through a shared fully-connected layer, and the final score is
    the cosine similarity between the two projected vectors.  Training
    minimizes the squared error between that cosine score and ``input_y``.

    Written against the TensorFlow 1.x graph API (placeholders / sessions).
    """

    def __init__(
        self,
        sequence_length,   # tokens per input sequence
        batch_size,        # unused here; kept for caller compatibility
        vocab_size,        # rows of the embedding table
        embedding_size,    # columns of the embedding table
        filter_sizes,      # iterable of convolution window heights
        num_filters,       # filters per window size
        l2_reg_lambda      # L2 weight (see l2_loss note below)
    ):

        #device_name = '/cpu:0'
        device_name = '/gpu:0'
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
        # q1/q2 token-id inputs and the target similarity label.
        self.q1vec = tf.placeholder(tf.int32, [None, sequence_length], name="q1vec")
        self.q2vec = tf.placeholder(tf.int32, [None, sequence_length], name="q2vec")
        self.input_y = tf.placeholder(tf.float32, [None, 1], name = "input_y")

        # Width of the shared fully-connected projection.
        fc_out = 300

        # L2 accumulator.  NOTE(review): nothing is ever added to it, so
        # l2_reg_lambda currently has no effect on the loss — confirm whether
        # the conv / FC weights were meant to be accumulated here.
        l2_loss = tf.constant(0.0)

        # Embedding layer, shared by both branches.
        with tf.device(device_name), tf.name_scope("embedding_layer-zxm"):
            # BUG FIX: the original called
            #   tf.random_normal([vocab_size, embedding_size], -1.0, 1.0, name="W")
            # which (a) passed the variable name to the initializer instead of
            # to tf.Variable, and (b) fed -1.0 / 1.0 as mean / stddev, centering
            # every embedding at -1.  The (-1.0, 1.0) pair is the standard
            # uniform(minval, maxval) embedding initialization.
            W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W"
            )

            char_q1 = tf.nn.embedding_lookup(W, self.q1vec)
            char_q2 = tf.nn.embedding_lookup(W, self.q2vec)

            # Add a channel dimension for conv2d: [batch, seq, embed, 1].
            self.embedd_q1 = tf.expand_dims(char_q1, -1)
            self.embedd_q2 = tf.expand_dims(char_q2, -1)

        # Convolution + max-pool-over-time; W and b are shared between the
        # q1 and q2 branches within each filter size.
        pooled_outputs_q1 = []
        pooled_outputs_q2 = []
        for filter_size in filter_sizes:
            with tf.device(device_name), tf.name_scope("conv_maxpol_layer_%s-zxm" % filter_size):
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.01), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")

                # --- q1 branch ---
                conv = tf.nn.conv2d(
                    self.embedd_q1, W,
                    strides=[1, 1, 1, 1],
                    padding="VALID", name="conv-q1-zxm"
                )
                # NOTE(review): the activation is elu although the node is
                # still named "tanh-*"; the name is kept byte-identical so
                # existing checkpoints / graph references keep working.
                h = tf.nn.elu(tf.nn.bias_add(conv, b), name="tanh-q1-zxm")
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="pool-q1-zxm"
                )
                pooled_outputs_q1.append(pooled)

                # --- q2 branch (same W / b) ---
                conv = tf.nn.conv2d(
                    self.embedd_q2, W,
                    strides=[1, 1, 1, 1],
                    padding="VALID", name="conv-q2-zxm"
                )
                h = tf.nn.elu(tf.nn.bias_add(conv, b), name="tanh-q2-zxm")
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="pool-q2-zxm"
                )
                pooled_outputs_q2.append(pooled)

        num_filters_total = num_filters * len(filter_sizes)

        with tf.device(device_name):
            # Concatenate over the filter axis and flatten to
            # [batch, num_filters_total].
            pooled_reshape_q1 = tf.reshape(tf.concat(pooled_outputs_q1, 3), [-1, num_filters_total])
            pooled_reshape_q2 = tf.reshape(tf.concat(pooled_outputs_q2, 3), [-1, num_filters_total])

            # Shared bias-free projection + elu + dropout.
            W1 = tf.Variable(tf.truncated_normal([num_filters_total, fc_out], stddev=0.1), name="W1")
            pooled_flat_q1 = tf.nn.dropout(tf.nn.elu(tf.matmul(pooled_reshape_q1, W1)), self.dropout_keep_prob)
            pooled_flat_q2 = tf.nn.dropout(tf.nn.elu(tf.matmul(pooled_reshape_q2, W1)), self.dropout_keep_prob)

            # Per-example vector norms and dot product, shape [batch].
            pooled_len_q1 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_q1, pooled_flat_q1), 1))
            pooled_len_q2 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_q2, pooled_flat_q2), 1))
            pooled_mul_q12 = tf.reduce_sum(tf.multiply(pooled_flat_q1, pooled_flat_q2), 1)

        with tf.device(device_name), tf.name_scope("output-zxm"):
            # Batched cosine similarity between the projected vectors.
            # NOTE(review): divides by the product of norms with no epsilon;
            # an all-zero vector (possible after dropout) yields NaN.
            self.cos_12 = tf.div(pooled_mul_q12, tf.multiply(pooled_len_q1, pooled_len_q2), name="scores-zxm")

        with tf.device(device_name), tf.name_scope("loss-zxm"):
            # BUG FIX: cos_12 has shape [batch] while input_y is [batch, 1];
            # subtracting them directly broadcast to a [batch, batch] matrix,
            # so the loss averaged errors over all mismatched pairs.  Flatten
            # the labels to [batch] so every error is per-example.
            flat_y = tf.reshape(self.input_y, [-1])
            # Absolute error, kept as an attribute for external consumers.
            self.losses1 = tf.abs(tf.subtract(self.cos_12, flat_y))
            # Squared error between the cosine score and the label.
            diff = tf.subtract(self.cos_12, flat_y)
            self.losses = tf.multiply(diff, diff)
            self.loss = tf.reduce_mean(self.losses) + l2_reg_lambda * l2_loss
            print('loss ', self.loss)

        # "Accuracy" proxy: an example counts as correct when its squared
        # error is below the hard-coded 0.22 threshold.
        with tf.device(device_name), tf.name_scope("accuracy-zxm"):
            self.correct = tf.less(self.losses, 0.22)
            self.cos_12_res = self.cos_12
        