# encoding: utf-8
'''
Created on 2018年5月31日

@author: mengqiang.song
'''
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2' #屏蔽掉tensorFlow的一些警告

import tensorflow as tf
from app.common.traindata import TrainData
"""
    1.定义session图
    
    2.从文件中读取session
    
    3.训练session
    
    4.保存session
    
    5.验证session准确性
    
    6.提供session调用接口
    
"""

# Create a weight Variable initialized from a truncated normal (stddev 0.1).
def weight_variable(shape):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

# Create a bias Variable filled with the constant 0.1.
def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

# 2-D convolution, stride 1 in every dimension, zero-padded to keep the
# spatial size ('SAME').
def conv2d(x, W):
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME')

# 2x2 max-pooling with stride 2 — halves the height and width.
def max_pool(x):
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')

x1 = tf.placeholder(tf.float32, [None,5184])  # first input batch: flattened 72*72 gray images
x2 = tf.placeholder(tf.float32, [None,5184])  # second input batch: flattened 72*72 gray images
y_actual = tf.placeholder(tf.float32,[None,2])  # 2-class one-hot labels; presumably same/different pairs — TODO confirm
keep_prob = tf.placeholder("float")  # dropout keep probability (1.0 = no dropout)

def crpd2(x):
    """Shared conv/pool/dense tower of the two-input (Siamese-style) network.

    Args:
        x: float32 tensor of shape [batch, 5184] — a flattened 72x72 image.
    Returns:
        float32 tensor of shape [batch, 1024]: ReLU features after dropout
        (rate controlled by the module-level `keep_prob` placeholder).

    BUG FIX: the original built its variables with tf.Variable, so every
    call created a fresh, independent set of weights and the two towers
    (crpd2(x1) and crpd2(x2)) did not share parameters.  Siamese-style
    branches are now shared via tf.get_variable under AUTO_REUSE.
    """
    with tf.variable_scope('crpd2', reuse=tf.AUTO_REUSE):
        w_init = tf.truncated_normal_initializer(stddev=0.1)
        b_init = tf.constant_initializer(0.1)

        x_img = tf.reshape(x, [-1, 72, 72, 1])

        # conv kernel shape: [height, width, in_channels, out_channels]
        w_conv1 = tf.get_variable('w_conv1', [5, 5, 1, 32], initializer=w_init)
        b_conv1 = tf.get_variable('b_conv1', [32], initializer=b_init)
        # convolve + bias + ReLU, then 2x2 max-pool: 72x72 -> 36x36
        h_conv1 = tf.nn.relu(conv2d(x_img, w_conv1) + b_conv1)
        h_pool1 = max_pool(h_conv1)

        w_conv2 = tf.get_variable('w_conv2', [5, 5, 32, 64], initializer=w_init)
        b_conv2 = tf.get_variable('b_conv2', [64], initializer=b_init)
        # second conv block: 36x36 -> 18x18 after pooling
        h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
        h_pool2 = max_pool(h_conv2)

        # fully connected layer: 18*18*64 -> 1024
        w_fc1 = tf.get_variable('w_fc1', [18 * 18 * 64, 1024], initializer=w_init)
        b_fc1 = tf.get_variable('b_fc1', [1024], initializer=b_init)
        h_pool2_flat = tf.reshape(h_pool2, [-1, 18 * 18 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)

        # dropout to reduce overfitting
        return tf.nn.dropout(h_fc1, keep_prob)

drop1 = crpd2(x1)  # [batch, 1024] features of the first image
drop2 = crpd2(x2)  # [batch, 1024] features of the second image

# Concatenate the two branch feature vectors -> [batch, 2048]
drop3 = tf.concat([drop1, drop2], 1)

# Second fully connected layer: 2048 -> 1024, with dropout
W_fc2 = weight_variable([2048, 1024])
b_fc2 = bias_variable([1024])
y_1024 = tf.nn.relu(tf.matmul(drop3, W_fc2) + b_fc2)
y_1024_drop = tf.nn.dropout(y_1024, keep_prob)

# Third fully connected layer: 1024 -> 512, with dropout
W_fc3 = weight_variable([1024, 512])
b_fc3 = bias_variable([512])
y_512 = tf.nn.relu(tf.matmul(y_1024_drop, W_fc3) + b_fc3)
y_512_drop = tf.nn.dropout(y_512, keep_prob)

# Output layer: 512 -> 2 class logits
W_fc4 = weight_variable([512, 2])
b_fc4 = bias_variable([2])
logits = tf.matmul(y_512_drop, W_fc4) + b_fc4
y_2 = tf.nn.softmax(logits)
# BUG FIX: the original applied dropout to the softmax OUTPUT and then fed
# it to tf.log, which destroys the probability distribution and yields
# log(0) = NaN whenever dropout zeroes a unit.  Keep the y_2_rop name for
# downstream code, but make it the plain softmax output.
y_2_rop = y_2

# Numerically stable cross-entropy computed from the logits.
cross_entropy = tf.reduce_sum(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_actual, logits=logits))
train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy)  # gradient descent

correct_prediction = tf.equal(tf.argmax(y_2_rop, 1), tf.argmax(y_actual, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # batch accuracy, used for validation

fileBasePath = os.path.abspath("../temp")  # root directory holding the training data


def trainSession():
    """Train the graph defined above on batches yielded by TrainData.

    Every 50 steps, prints the mini-batch accuracy both before and after
    the corresponding training step.
    """
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        # BUG FIX: os.path.join instead of a hard-coded Windows "\\" separator.
        train_dir = os.path.join(fileBasePath, "train_data")
        # BUG FIX: the original loop had an unconditional `break` before the
        # counter increment, so only the first batch was ever trained on and
        # `i` never advanced.  Iterate all batches and count with enumerate.
        for i, (px1, px2, py) in enumerate(TrainData(train_dir, 5)):
            eval_feed = {x1: px1, x2: px2, y_actual: py, keep_prob: 1.0}
            if i % 50 == 0:
                train_acc = accuracy.eval(feed_dict=eval_feed)
                print('第', i, '次训练前的准确率：', train_acc)
            train_step.run(feed_dict={x1: px1, x2: px2, y_actual: py, keep_prob: 0.5})
            if i % 50 == 0:
                train_acc = accuracy.eval(feed_dict=eval_feed)
                print('第', i, '次训练后的准确率：', train_acc)


trainSession()