#!/usr/bin/python2.7
# coding: utf-8

from __future__ import print_function
from tensorflow.contrib.layers.python.layers import batch_norm              ###################################
import tensorflow as tf
import random,csv
import numpy as np


def next_batch(feature_list, label_list, size):
    """Draw a random mini-batch of matching (feature, label) pairs.

    Samples `size` distinct indices without replacement so each selected
    feature row keeps its corresponding label row.

    Args:
        feature_list: sequence of feature rows.
        label_list: sequence of label rows, parallel to `feature_list`.
        size: batch size; must be <= len(feature_list).

    Returns:
        (feature_batch, label_batch): two lists of length `size`.
    """
    indices = random.sample(range(len(feature_list)), size)
    # Comprehensions instead of two append-loops over the same indices.
    feature_batch = [feature_list[i] for i in indices]
    label_batch = [label_list[i] for i in indices]
    return feature_batch, label_batch

def weight_variable(shape, layer_name):
    """Create a weight Variable of the given shape.

    Initialised from a truncated normal (stddev 0.1) and logged to
    TensorBoard as a histogram under `<layer_name>_Weights`.
    """
    scope = layer_name + '_Weights'
    with tf.name_scope(scope):
        Weights = tf.Variable(tf.truncated_normal(shape, stddev=0.1), name='W')
    tf.summary.histogram(scope, Weights)
    return Weights

def bias_variable(shape, layer_name):
    """Create a bias Variable of the given shape.

    Initialised to the constant 0.1 and logged to TensorBoard as a
    histogram under `<layer_name>_biases`.
    """
    scope = layer_name + '_biases'
    with tf.name_scope(scope):
        biases = tf.Variable(tf.constant(0.1, shape=shape), name='b')
    tf.summary.histogram(scope, biases)
    return biases

def conv2d(x, W, layer_name, padding='SAME'):
    """2-D convolution with stride 1 in every dimension.

    Args:
        x: input tensor.
        W: filter/kernel tensor.
        layer_name: prefix for the TensorBoard name scope.
        padding: 'SAME' or 'VALID' border handling.
    """
    with tf.name_scope(layer_name + '_h_conv2d'):
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding)

def max_pool_2x2(x, layer_name, strides=[1, 2, 2, 1], padding='VALID', ksize=[1, 2, 2, 1]):
    """Max pooling with a (default) 2x2 window and 2x2 stride.

    Args:
        x: input tensor.
        layer_name: prefix for the TensorBoard name scope.
        strides: [1, x_stride, y_stride, 1].
        padding: 'SAME' or 'VALID' border handling.
        ksize: [1, window_x, window_y, 1] pooling window size.
    """
    with tf.name_scope(layer_name + '_h_maxpool'):
        return tf.nn.max_pool(x, ksize=ksize, strides=strides, padding=padding)

def avg_pool_2x2(x, layer_name, strides=[1, 2, 2, 1], padding='VALID', ksize=[1, 2, 2, 1]):
    """Average pooling with a (default) 2x2 window and 2x2 stride.

    Args:
        x: input tensor.
        layer_name: prefix for the TensorBoard name scope.
        strides: [1, x_stride, y_stride, 1].
        padding: 'SAME' or 'VALID' border handling.
        ksize: [1, window_x, window_y, 1] pooling window size.
    """
    with tf.name_scope(layer_name + '_h_avgpool'):
        return tf.nn.avg_pool(x, ksize=ksize, strides=strides, padding=padding)

def _read_kdd_csv(file_path, feature_cols=36, label_col=41, num_classes=23):
    """Parse one preprocessed KDD-99 CSV file into features and one-hot labels.

    Each row's first `feature_cols` entries are kept as the feature vector
    (left as strings, as in the original code — TF feeds coerce them), and
    column `label_col` holds the integer class id, one-hot encoded over
    `num_classes` classes.

    Returns:
        (features, labels): two parallel lists.
    """
    features = []
    labels = []
    with open(file_path, 'r') as data_from:
        for row in csv.reader(data_from):
            one_hot = [0] * num_classes
            one_hot[int(row[label_col])] = 1
            features.append(row[:feature_cols])
            labels.append(one_hot)
    return features, labels

def load_data():
    """Load both KDD-99 datasets into module-level globals.

    Populates `feature`/`label` from the 10% subset and
    `feature_full`/`label_full` from the full dataset.
    """
    global feature, label, feature_full, label_full
    # 10% subset (used as the test set by the training script).
    feature, label = _read_kdd_csv(
        '/data/DingNan/kdd99/kddcup.data_10_percent_corrected_handled2.cvs')
    # Full dataset (used as the training set).
    feature_full, label_full = _read_kdd_csv(
        '/data/DingNan/kdd99/kddcup.data.corrected_handled2.cvs')

if __name__ == '__main__':
    # load_data() populates the module-level globals feature/label
    # (10% subset) and feature_full/label_full (full dataset).
    load_data()
    # The 10% subset serves as the test set, the full set as training data.
    feature_test = feature
    feature_train = feature_full
    label_test = label
    label_train = label_full

    # Input placeholders.
    with tf.name_scope('inputs'):
        xs = tf.placeholder(tf.float32, [None, 36], name='pic_data')    # 36 features, viewed as a 6x6 image
        ys = tf.placeholder(tf.float32, [None, 23], name='pic_label')   # one-hot over 23 classes
        keep_prob = tf.placeholder(tf.float32, name='keep_prob')        # dropout keep probability
        is_training = tf.placeholder(tf.bool, name='is_training')       # batch-norm train/inference switch
        # -1 leaves the batch dimension unconstrained; 1 channel (greyscale).
        x_image = tf.reshape(xs, [-1, 6, 6, 1])

    ## First convolutional layer ##
    with tf.name_scope('conv1_layer'):
        W_conv1 = weight_variable([3, 3, 1, 16], layer_name='conv1')    # 3x3 window, 1 -> 16 channels
        b_conv1 = bias_variable([16], layer_name='conv1')
        conv_1 = conv2d(x_image, W_conv1, layer_name='conv1') + b_conv1
        bn_1 = batch_norm(conv_1, decay=0.9, updates_collections=None, is_training=is_training)
        h_conv1 = tf.nn.sigmoid(bn_1)                                   # input size: 6x6x1
        h_pool1 = max_pool_2x2(h_conv1, layer_name='conv1', strides=[1, 1, 1, 1], ksize=[1, 2, 2, 1], padding='VALID')  # output size: 5x5x16

    ## Second convolutional layer ##
    with tf.name_scope('conv2_layer'):
        W_conv2 = weight_variable([2, 2, 16, 32], layer_name='conv2')   # 2x2 window, 16 -> 32 channels
        b_conv2 = bias_variable([32], layer_name='conv2')
        conv_2 = conv2d(h_pool1, W_conv2, padding='SAME', layer_name='conv2') + b_conv2
        bn_2 = batch_norm(conv_2, decay=0.9, updates_collections=None, is_training=is_training)
        h_conv2 = tf.nn.sigmoid(bn_2)                                   # input size: 5x5x16
        h_pool2 = max_pool_2x2(h_conv2, layer_name='conv2', strides=[1, 1, 1, 1], ksize=[1, 2, 2, 1], padding='VALID')  # output size: 4x4x32

    ## First fully connected layer (with dropout) ##
    with tf.name_scope('fc1_layer'):
        W_fc1 = weight_variable([4 * 4 * 32, 1024], layer_name='fc1')
        b_fc1 = bias_variable([1024], layer_name='fc1')
        with tf.name_scope('reshape'):
            h_pool2_flat = tf.reshape(h_pool2, [-1, 4 * 4 * 32])
        with tf.name_scope('sigmoid'):
            h_fc1 = tf.nn.sigmoid(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        with tf.name_scope('dropout'):
            h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    ## Second fully connected layer ##
    with tf.name_scope('fc2_layer'):
        W_fc2 = weight_variable([1024, 23], layer_name='fc2')
        b_fc2 = bias_variable([23], layer_name='fc2')
        with tf.name_scope('softmax'):
            prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

    # Loss: cross-entropy. Clip the softmax output away from 0 so tf.log
    # never produces -inf/NaN when a class probability underflows.
    with tf.name_scope('loss'):
        cross_entropy = -tf.reduce_sum(ys * tf.log(tf.clip_by_value(prediction, 1e-10, 1.0)))
    tf.summary.scalar('loss', cross_entropy)
    # Accuracy: fraction of rows where argmax(prediction) == argmax(label).
    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(ys, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', accuracy)
    # Adam optimizer; run batch-norm moving-average updates before each step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    with tf.Session() as sess:
        # Initialise all variables.
        sess.run(tf.global_variables_initializer())
        # Write the graph for TensorBoard.
        writer = tf.summary.FileWriter("/output", sess.graph)
        # Separate writers so train and test summary curves are distinct.
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter("/output/train", sess.graph)
        test_writer = tf.summary.FileWriter("/output/test", sess.graph)

        for epoch in range(3):
            print('#####The trainning of epoch %d #####' % epoch)
            # Float division: under Python 2 the original integer '/'
            # floored first, making np.ceil a no-op and dropping the
            # final partial batch of steps.
            for step in range(int(np.ceil(len(feature_train) / 1000.0))):
                if step % 10 == 0:
                    print('The %d -th step is training...' % step)
                # Stochastic training on a random batch of 1000 samples.
                feature_train_batch, label_train_batch = next_batch(feature_train, label_train, 1000)

                sess.run(train_step, feed_dict={xs: feature_train_batch, ys: label_train_batch, keep_prob: 0.8, is_training: True})

                if step % 100 == 0 and step != 0:
                    print('Validation on step %d :' % step)
                    train_writer.add_summary(sess.run(merged, feed_dict={
                            xs: feature_train_batch, ys: label_train_batch, keep_prob: 1, is_training: False}), step)
                    test_writer.add_summary(sess.run(merged, feed_dict={
                            xs: feature_test, ys: label_test, keep_prob: 1, is_training: False}), step)
                    # is_training must be fed here too: accuracy depends on
                    # batch_norm via prediction, so omitting it raised an
                    # InvalidArgumentError at runtime in the original.
                    print('The accuracy on test is :')
                    print(sess.run(accuracy, feed_dict={xs: feature_test, ys: label_test, keep_prob: 1, is_training: False}))
                    print('The accuracy on train is :')
                    print(sess.run(accuracy, feed_dict={xs: feature_train_batch, ys: label_train_batch, keep_prob: 1, is_training: False}))