﻿CREATE or replace FUNCTION tf_train(table_name varchar,column_name varchar,model_table varchar,times int)
  RETURNS varchar
AS $$
    import sys
    import pandas as pd
    import numpy as np
    import tensorflow as tf
    import os
    # One-hot encode a dense vector of integer labels
    def dense_to_one_hot(labels_dense, num_classes):
        """Convert a dense vector of integer class labels to a one-hot matrix.

        labels_dense: array of integer class indices (flattened internally).
        num_classes:  total number of classes (width of the output).
        Returns a float (num_labels, num_classes) matrix with a single 1 per row.
        """
        flat = labels_dense.ravel()
        num_labels = labels_dense.shape[0]
        one_hot = np.zeros((num_labels, num_classes))
        # Set position (row, label) to 1 for every row via fancy indexing.
        one_hot[np.arange(num_labels), flat] = 1
        return one_hot
    
    def tf_train(train_data, model_name, times):
        """Train a single-layer softmax classifier on `train_data`.

        train_data: DataFrame whose column 0 is the integer label and whose
                    remaining columns are pixel intensities in [0, 255].
        model_name: directory name under /plpy/model_file/ for the saved model.
        times:      number of training epochs.
        Returns a newline-separated per-epoch accuracy log (latest epoch first).
        """
        # Column 0 holds the label; the remaining columns are the features.
        images = train_data.iloc[:, 1:].values
        t_labels = train_data.iloc[:, 0:1].values
        labels_flat = t_labels.ravel()

        # Scale pixel intensities from [0, 255] into [0, 1].
        images = np.multiply(images, 1.0 / 255.0)
        print("输入数据的数量：（%g,%g）" % images.shape)
        image_size = images.shape[1]
        print('输入的数据维度=>{0}'.format(image_size))

        # One-hot encode the labels; class count is taken from the data.
        labels_count = np.unique(labels_flat).shape[0]
        print('结果的种类=> {0}'.format(labels_count))
        labels = dense_to_one_hot(labels_flat, labels_count)
        labels = labels.astype(np.uint8)
        print('结果的数量： ( {0[0]},{0[1]})'.format(labels.shape))

        # Hold out the first VALIDATION_SIZE rows as a validation set.
        VALIDATION_SIZE = 2000
        validation_images = images[:VALIDATION_SIZE]
        validation_labels = labels[:VALIDATION_SIZE]
        train_images = images[VALIDATION_SIZE:]
        train_labels = labels[VALIDATION_SIZE:]
        image_size = images.shape[1]
        labels_count = labels.shape[1]

        # Mini-batch configuration; FIX: integer division (`//`) so the
        # batch count is not a float.
        batch_size = 100
        n_batch = len(train_images) // batch_size

        # Single-layer softmax network.
        # FIX: weight/bias shapes were hard-coded to [784, 10] (MNIST only);
        # derive them from the actual data dimensions instead.
        weights = tf.Variable(tf.zeros([image_size, labels_count]))
        biases = tf.Variable(tf.zeros([labels_count]))
        x = tf.placeholder('float', shape=[None, image_size], name="input_image")
        result = tf.matmul(x, weights) + biases
        prediction = tf.nn.softmax(result)

        # Cross-entropy loss.
        # FIX: softmax_cross_entropy_with_logits applies softmax internally,
        # so it must receive the raw logits (`result`), not the already
        # softmaxed `prediction` (the original double-softmaxed).
        y = tf.placeholder('float', shape=[None, labels_count])
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=result))

        # Optimize with plain gradient descent.
        train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

        init = tf.global_variables_initializer()

        # Accuracy = fraction of rows whose predicted argmax matches the label.
        czcprediction = tf.argmax(prediction, 1, name='czcprediction')
        correct_prediction = tf.equal(tf.argmax(y, 1), czcprediction)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Saver for persisting the trained model graph + variables.
        saver = tf.train.Saver(max_to_keep=4)
        strs = ""
        with tf.Session() as sess:
            sess.run(init)
            for epoch in range(times):
                for batch in range(int(n_batch)):
                    # Slice out one mini-batch of examples and labels.
                    batch_x = train_images[batch * batch_size:(batch + 1) * batch_size]
                    batch_y = train_labels[batch * batch_size:(batch + 1) * batch_size]
                    sess.run(train_step, feed_dict={x: batch_x, y: batch_y})
                # Evaluate on the held-out set once per epoch; prepend so the
                # most recent epoch appears first in the returned log.
                accuracy_n = sess.run(accuracy, feed_dict={x: validation_images, y: validation_labels})
                strs = ("第" + str(epoch + 1) + "轮，准确率为：" + str(accuracy_n)) + "\n" + strs
            # Persist the model under a per-model directory.
            # NOTE(review): assumes /plpy/model_file already exists — confirm.
            if not os.path.exists("/plpy/model_file/%s" % model_name):
                os.mkdir("/plpy/model_file/%s" % model_name)
            saver.save(sess, "/plpy/model_file/%s/tf_model" % model_name)
            return strs
  
    
    # 查询数据
    rv = plpy.execute("SELECT %s FROM %s" % (table_name,column_name))
    trainrows = []
    for c in rv:
        trainrows.append(c[column_name])
    train_data = pd.DataFrame(trainrows)
    
    # 调用函数
    return tf_train(train_data, model_table, times)
    # (the trained model is saved to disk inside tf_train)
$$ LANGUAGE plpythonu;

-- Train on column "data" of table "image_trains" for 50 epochs; save as "model2".
SELECT tf_train('image_trains','data','model2',50);
