# -*- coding: UTF-8 -*-
import pandas as pd
import numpy as np
import tensorflow as tf
import os
import gp_db

# One-hot encoding helper.
def dense_to_one_hot(labels_dense, num_classes):
    """Expand a dense integer label array into a one-hot matrix.

    Returns an array of shape (len(labels_dense), num_classes) in which each
    row contains a single 1 at the column given by the corresponding label.
    """
    flat = labels_dense.ravel()
    one_hot = np.zeros((flat.shape[0], num_classes))
    one_hot[np.arange(flat.shape[0]), flat] = 1
    return one_hot

# Parameter-construction helpers for the network layers.
def weight_variable(shape):
    """Create a weight Variable drawn from a truncated normal (stddev 0.1)."""
    init_values = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_values)
def bias_variable(shape):
    """Create a bias Variable initialised to 0.1 (non-zero to avoid dead ReLU units)."""
    init_values = tf.constant(0.1, shape=shape)
    return tf.Variable(init_values)
# Thin wrapper around TensorFlow's 2D convolution.
def conv2d(x, W):
    """Stride-1, SAME-padded 2D convolution: spatial output size equals input size."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME')
# Thin wrapper around TensorFlow's max pooling.
def max_pool_2x2(x):
    """2x2 max pooling with stride 2: halves each spatial dimension."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')



def tf_cnn_train(data, label_data, model_name, times):
    """Train a two-conv-layer CNN classifier and save it under /plpy/model_file/<model_name>.

    Args:
        data: 2-D float array, one flattened image per row.
              NOTE(review): the graph reshapes each row to 28x28x1, so
              data.shape[1] must be 784 — confirm for non-MNIST inputs.
        label_data: 1-D integer class labels aligned with `data`.
        model_name: directory name used when saving the trained model.
        times: number of training epochs.

    Returns:
        A progress string (currently always empty; kept for interface compatibility).
    """
    # Derive the number of classes from the labels and one-hot encode them.
    labels_count = np.unique(label_data).shape[0]
    data_len = data.shape[1]
    print('结果的种类=> {0}'.format(labels_count))
    labels = dense_to_one_hot(label_data, labels_count)
    labels = labels.astype(np.uint8)
    print('结果的数量： ( {0[0]},{0[1]})'.format(labels.shape))

    # Hold out the first 1/20 of the rows as a validation set.
    train_size = data.shape[0]
    VALIDATION_SIZE = int(train_size / 20)
    validation_images = data[:VALIDATION_SIZE]
    validation_labels = labels[:VALIDATION_SIZE]
    train_data = data[VALIDATION_SIZE:]
    train_labels = labels[VALIDATION_SIZE:]

    # Mini-batch bookkeeping (integer division: a short final batch is dropped,
    # matching the original behaviour of int(len/batch_size) iterations).
    batch_size = 100
    n_batch = len(train_data) // batch_size

    # Placeholders for input images and one-hot labels. "input_image" is named
    # so predict() can look it up after the model is restored.
    x = tf.placeholder('float', shape=[None, data_len], name="input_image")
    y = tf.placeholder('float', shape=[None, labels_count])

    # Reshape flat rows into NHWC image tensors (28x28, 1 channel).
    x_image = tf.reshape(x, [-1, 28, 28, 1])

    # First conv layer: 3x3 patches, 1 input channel -> 32 feature maps.
    # Weight shape is [patch_h, patch_w, in_channels, out_channels].
    w_conv1 = weight_variable([3, 3, 1, 32])
    b_conv1 = bias_variable([32])  # bias shape matches the output channels
    h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
    # 2x2 max pooling halves each spatial dimension: 28x28 -> 14x14.
    h_pool1 = max_pool_2x2(h_conv1)

    # Second conv layer: 6x6 patches, 32 -> 64 feature maps.
    w_conv2 = weight_variable([6, 6, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)

    # Second pooling: 14x14 -> 7x7, then flatten for the dense layers.
    h_pool2 = max_pool_2x2(h_conv2)
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])

    # Fully connected layer with 1024 units.
    w_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)

    # Dropout between the dense layers; keep_prob is fed at run time and named
    # so predict() can disable dropout after restore.
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Output layer. BUGFIX: use labels_count instead of a hard-coded 10 so the
    # logits shape matches the `y` placeholder for any number of classes
    # (identical behaviour when there are exactly 10 classes).
    w_fc2 = weight_variable([1024, labels_count])
    b_fc2 = bias_variable([labels_count])
    y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2

    # Mean softmax cross-entropy loss over the batch.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_conv))
    # Optimize with Adadelta.
    train_step = tf.train.AdadeltaOptimizer(learning_rate=0.1).minimize(loss)

    # Accuracy; the argmax op is named 'czcprediction' so predict() can fetch
    # the predicted class indices from the restored graph.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_conv, 1, name='czcprediction'))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Variable initializer.
    init = tf.global_variables_initializer()

    # Saver keeps at most the 4 most recent checkpoints.
    saver = tf.train.Saver(max_to_keep=4)
    strs = ""
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(times):
            for batch in range(n_batch):
                # Slice out the current mini-batch.
                batch_x = train_data[batch * batch_size:(batch + 1) * batch_size]
                batch_y = train_labels[batch * batch_size:(batch + 1) * batch_size]
                # Train with 50% dropout.
                sess.run(train_step, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})
            # Evaluate on the held-out set once per epoch (dropout disabled).
            accuracy_n = sess.run(accuracy, feed_dict={x: validation_images, y: validation_labels, keep_prob: 1.0})
            print("第" + str(epoch + 1) + "轮，准确率为：" + str(accuracy_n))
        # Persist the trained model. BUGFIX: makedirs(exist_ok=True) also creates
        # missing parent directories, where os.mkdir would raise.
        os.makedirs("/plpy/model_file/%s" % model_name, exist_ok=True)
        saver.save(sess, "/plpy/model_file/%s/tf_model" % model_name)
    return strs

def predict(test_data, model_name):
    """Restore the model saved by tf_cnn_train and classify `test_data`.

    Looks up the tensors by the names assigned at training time
    ("input_image", "keep_prob", "czcprediction") and runs a forward
    pass with dropout disabled.

    Args:
        test_data: 2-D float array of flattened images (same layout as training input).
        model_name: directory name the model was saved under.

    Returns:
        Array of predicted class indices.
    """
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Rebuild the graph structure from the meta file, then load the weights.
        saver = tf.train.import_meta_graph('/plpy/model_file/%s/tf_model.meta' % model_name)
        saver.restore(sess, "/plpy/model_file/%s/tf_model" % model_name)
        graph = tf.get_default_graph()
        input_tensor = graph.get_tensor_by_name("input_image:0")
        keep_tensor = graph.get_tensor_by_name("keep_prob:0")
        output_tensor = graph.get_tensor_by_name("czcprediction:0")
        # keep_prob=1.0 turns dropout off for inference.
        return sess.run(output_tensor, feed_dict={input_tensor: test_data, keep_tensor: 1.0})

# ---- Script entry: load the training CSV, preprocess, and train ----
# Column 0 of the CSV is the label; the remaining columns are pixel values.
train_data = pd.read_csv('./data/train.csv')
images = train_data.iloc[:, 1:].values
# Flatten the label column to a 1-D vector.
t_labels = train_data.iloc[:, 0:1].values
labels_flat = t_labels.ravel()

# Scale pixel values into [0, 1].
# BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin float is the documented equivalent (both mean float64 here).
images = images.astype(float)
images = np.multiply(images, 1.0 / 255.0)
print("输入数据的数量：（%g,%g）" % images.shape)
image_size = images.shape[1]
print('输入的数据维度=>{0}'.format(image_size))
# Train for 20 epochs and print the returned progress string.
print(tf_cnn_train(images, labels_flat, 'cnn_model', 20))

# Example: run prediction on the test set (disabled)
# test_data =  pd.read_csv('./data/test.csv').values
# test_data = test_data.astype(np.float)
# test_data = np.multiply(test_data, 1.0 / 255.0)
#
# print("预测结果：")
# print(predict(test_data,"cnn_model"))
