# -*- coding: utf-8 -*-
"""
Created on Tue Nov 21 16:26:47 2017

@author: xuanlei

"""

import pandas as pd
import tensorflow as tf
import numpy as np

#==============================================================================

# Batch Normalization

#==============================================================================

def batch_norm_layer(x, train_phase, scope_bn):
    """Batch-normalize ``x`` over all axes except the last (channel) axis.

    While training (``train_phase`` is True) the current batch statistics
    are used and folded into an exponential moving average; at inference
    time the stored moving averages are used instead.

    x: input tensor; the last dimension is treated as channels.
    train_phase: scalar bool tensor selecting train vs. inference statistics.
    scope_bn: variable-scope name holding the learned beta/gamma parameters.
    """
    with tf.variable_scope(scope_bn):
        depth = x.shape[-1]
        beta = tf.Variable(tf.constant(0.0, shape=[depth]), name='beta', trainable=True)
        gamma = tf.Variable(tf.constant(1.0, shape=[depth]), name='gamma', trainable=True)

        reduce_axes = np.arange(len(x.shape) - 1)
        batch_mean, batch_var = tf.nn.moments(x, reduce_axes, name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.5)

        def _use_batch_stats():
            # Update the moving averages, then hand back the batch statistics.
            update_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([update_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        def _use_moving_averages():
            return ema.average(batch_mean), ema.average(batch_var)

        mean, var = tf.cond(train_phase, _use_batch_stats, _use_moving_averages)
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
    return normed

def xavier_init(fan_in, fan_out, constant=1):
    """Xavier/Glorot uniform weight initialization.

    Returns a ``(fan_in, fan_out)`` float32 tensor drawn uniformly from
    ``[-limit, limit]`` where ``limit = constant * sqrt(6 / (fan_in + fan_out))``.

    fan_in: input node number.
    fan_out: output node number.
    constant: optional scale factor applied to the Xavier limit.
    """
    # Use a float literal: under Python 2, 6/(fan_in+fan_out) is integer
    # division and truncates to 0 whenever fan_in+fan_out > 6, collapsing
    # the init range to a single point.
    limit = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out), minval=-limit, maxval=limit,
                             dtype=tf.float32)

#==============================================================================

# CNN Structure

#==============================================================================

class CNN():
    """Three conv blocks plus one fully-connected layer for fixed-length
    captcha recognition (TensorFlow 1.x graph-mode API).

    The input ``xs`` is a flattened ``height * width`` grayscale image; the
    output ``pred`` is a ``len_char * len_char_set`` logit vector read as
    ``len_char`` independent one-hot character predictions.
    """

    def __init__(self, input_size, output_size, height, width, len_char,
                 len_char_set, c1_size, c2_size, c3_size, h1_size, LR,
                 batch_size, reg=0.001, resize=10240, transfer=tf.nn.relu):
        """Build the whole graph: placeholders, layers, cost, accuracy, train op.

        input_size: flattened image length (height * width).
        output_size: logit length (len_char * len_char_set).
        height, width: image dimensions used to un-flatten ``xs``.
        len_char: number of characters per captcha.
        len_char_set: size of the character alphabet.
        c1_size, c2_size, c3_size: channel counts of the three conv layers.
        h1_size: width of the fully-connected hidden layer.
        LR: Adam learning rate.
        batch_size: stored on the instance; not used while building the graph.
        reg: L2 coefficient, kept for the (disabled) regularized cost variant.
        resize: flattened size of the conv3 output fed to the dense layer —
            must equal ceil(height/8) * ceil(width/8) * c3_size after the
            three 2x2 poolings (TODO confirm against callers).
        transfer: activation applied after each conv and the hidden layer.
        """
        self.input_size = input_size
        self.output_size = output_size
        self.height = height
        self.width = width
        self.len_char = len_char
        self.len_char_set = len_char_set
        self.c1_size = c1_size
        self.c2_size = c2_size
        self.c3_size = c3_size
        self.resize = resize
        self.h1_size = h1_size
        self.batch_size = batch_size
        self.LR = LR
        self.reg = reg
        self.transfer = transfer
        self.w_alpha = 0.01  # scale factor for the random-normal conv weight init

        with tf.name_scope('inputs'):
            self.xs = tf.placeholder(tf.float32, [None, input_size], name='xs')
            self.ys = tf.placeholder(tf.float32, [None, output_size], name='ys')
            self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
            # Reserved for the (currently disabled) batch-norm layers.
            self.train_phase = tf.placeholder(tf.bool, name='train_phase')

        with tf.name_scope('cnn_1'):
            self.add_cnn1_layer()

        with tf.name_scope('cnn_2'):
            self.add_cnn2_layer()

        with tf.name_scope('cnn_3'):
            self.add_cnn3_layer()

        with tf.name_scope('h1'):
            self.add_h1_layer()

        with tf.name_scope('out'):
            self.add_output_layer()

        with tf.name_scope('cost'):
            self.compute_cost()

        with tf.name_scope('acc'):
            self.compute_acc()

        with tf.name_scope('train'):
            self.train_op = tf.train.AdamOptimizer(learning_rate=self.LR).minimize(self.cost)

    def add_cnn1_layer(self):
        """Conv block 1: 3x3 conv (1 -> c1_size), activation, 2x2 max-pool, dropout.

        Reshapes the flat input into an NHWC image and leaves the block
        output in ``self.conv1_d``.
        """
        with tf.name_scope('cnn_layer_1'):
            c1_x = tf.reshape(self.xs, [-1, self.height, self.width, 1], name='c1_x_input')
            self.w_c1 = tf.Variable(self.w_alpha * tf.random_normal([3, 3, 1, self.c1_size]))
            b_c1 = tf.Variable(0.1 * tf.random_normal([self.c1_size]))
            conv1 = self.transfer(tf.nn.bias_add(
                tf.nn.conv2d(c1_x, self.w_c1, strides=[1, 1, 1, 1], padding='SAME'), b_c1))
            conv1_p = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
            self.conv1_d = tf.nn.dropout(conv1_p, self.keep_prob)

            tf.summary.histogram('conv1_w', self.w_c1)
            tf.summary.histogram('conv1_b', b_c1)
            tf.summary.histogram('conv1_out', self.conv1_d)

    def add_cnn2_layer(self):
        """Conv block 2: 3x3 conv (c1_size -> c2_size), activation, 2x2 max-pool, dropout.

        Consumes ``self.conv1_d`` and leaves the output in ``self.conv2_d``.
        """
        with tf.name_scope('cnn_layer_2'):
            # BUG FIX: the kernel's input-channel count was hard-coded to 32;
            # it must match conv1's output channels (self.c1_size), exactly as
            # add_cnn3_layer uses self.c2_size. Identical when c1_size == 32.
            self.w_c2 = tf.Variable(self.w_alpha * tf.random_normal([3, 3, self.c1_size, self.c2_size]))
            b_c2 = tf.Variable(0.1 * tf.random_normal([self.c2_size]))
            conv2 = self.transfer(tf.nn.bias_add(
                tf.nn.conv2d(self.conv1_d, self.w_c2, strides=[1, 1, 1, 1], padding='SAME'), b_c2))
            conv2_p = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
            self.conv2_d = tf.nn.dropout(conv2_p, self.keep_prob)

            tf.summary.histogram('conv2_w', self.w_c2)
            tf.summary.histogram('conv2_b', b_c2)
            tf.summary.histogram('conv2_out', self.conv2_d)

    def add_cnn3_layer(self):
        """Conv block 3: 3x3 conv (c2_size -> c3_size), activation, 2x2 max-pool, dropout.

        Consumes ``self.conv2_d`` and leaves the output in ``self.conv3_d``.
        """
        with tf.name_scope('cnn_layer_3'):
            self.w_c3 = tf.Variable(self.w_alpha * tf.random_normal([3, 3, self.c2_size, self.c3_size]))
            b_c3 = tf.Variable(0.1 * tf.random_normal([self.c3_size]))
            conv3 = self.transfer(tf.nn.bias_add(
                tf.nn.conv2d(self.conv2_d, self.w_c3, strides=[1, 1, 1, 1], padding='SAME'), b_c3))
            conv3_p = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
            self.conv3_d = tf.nn.dropout(conv3_p, self.keep_prob)

            tf.summary.histogram('conv3_w', self.w_c3)
            tf.summary.histogram('conv3_b', b_c3)
            tf.summary.histogram('conv3_out', self.conv3_d)

    def add_h1_layer(self):
        """Fully-connected hidden layer: flatten conv3 to ``resize`` features,
        apply an Xavier-initialized dense layer plus activation into ``self.h1_y``.
        """
        with tf.name_scope('h1_layer'):
            h1_x = tf.reshape(self.conv3_d, [-1, self.resize])
            self.Ws_h1 = tf.Variable(xavier_init(self.resize, self.h1_size), name="W1")
            bs_h1 = tf.Variable(0.1 * tf.random_normal([self.h1_size]))
            self.h1_y = self.transfer(tf.matmul(h1_x, self.Ws_h1) + bs_h1)

            tf.summary.histogram('h1_w', self.Ws_h1)
            tf.summary.histogram('h1_b', bs_h1)
            tf.summary.histogram('h1_out', self.h1_y)

    def add_output_layer(self):
        """Linear output layer producing raw logits in ``self.pred``
        (no activation — the cost applies sigmoid cross-entropy itself).
        """
        with tf.name_scope('output_layer'):
            self.Ws_out = tf.Variable(xavier_init(self.h1_size, self.output_size), name="W_out")
            bs_out = tf.Variable(tf.zeros([self.output_size, ]))
            self.pred = tf.add(tf.matmul(self.h1_y, self.Ws_out), bs_out)

            tf.summary.histogram('out_w', self.Ws_out)
            tf.summary.histogram('out_b', bs_out)
            tf.summary.histogram('out_out', self.pred)

    def compute_acc(self):
        """Per-character accuracy: reshape logits and labels to
        ``(batch, len_char, len_char_set)``, argmax over the alphabet axis,
        and average the per-character matches into ``self.accuracy``.
        """
        predict = tf.reshape(self.pred, [-1, self.len_char, self.len_char_set])
        max_idx_p = tf.argmax(predict, 2)
        max_idx_l = tf.argmax(tf.reshape(self.ys, [-1, self.len_char, self.len_char_set]), 2)
        correct_pred = tf.equal(max_idx_p, max_idx_l)
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

        tf.summary.scalar('accuracy', self.accuracy)

    # ==========================================================================
    # Cost computation
    # ==========================================================================

    def compute_cost(self):
        """Mean element-wise sigmoid cross-entropy between the raw logits
        ``self.pred`` and the multi-hot labels ``self.ys``; stored in
        ``self.cost``.
        """
        with tf.name_scope('loss'):
            self.cost = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=self.pred, labels=self.ys))
            tf.summary.scalar('result_cost', self.cost)