import os
import sys
import numpy as np
import tensorflow as tf
from sklearn.model_selection import  train_test_split
# Root directory of pre-computed bottleneck feature files (one sub-dir per class).
IN_DIR="../../../../../large_data/CV3/_many_files/agriculture/train_bottleneck"
# Output root under which model checkpoints are written.
OUT_DIR = './_save'
# NOTE(review): appears unused in this file — checkpoint cadence is driven by
# the GROUP variable in the training loop below; confirm before removing.
checkpoint_every = 100

def get_data(path):
    """Load bottleneck feature vectors and one-hot labels from a directory tree.

    Expects ``path`` to contain one sub-directory per class; every file inside
    a class sub-directory holds a single comma-separated list of floats.
    Classes are indexed in sorted name order so label indices are stable
    across runs and platforms.

    Args:
        path: root directory whose sub-directories are the class names.

    Returns:
        Tuple ``(x, y)`` of ``np.ndarray``: the feature matrix (one row per
        file) and the matching one-hot label matrix.
    """
    cls_names = sorted(os.listdir(path))
    # One-hot width follows the discovered class count instead of the
    # previously hard-coded 5, so the loader works for any dataset layout
    # (identical output for the original 5-class data).
    eye = np.eye(len(cls_names))
    x_vecs = []
    y_labels = []
    for i, cls_name in enumerate(cls_names):
        sub_path = os.path.join(path, cls_name)
        for vec in os.listdir(sub_path):
            with open(os.path.join(sub_path, vec), 'r') as f:
                vec_str = f.read()
            x_vecs.append([float(v) for v in vec_str.split(',')])
            y_labels.append(eye[i])
    return np.array(x_vecs), np.array(y_labels)

image_data, labels = get_data(IN_DIR)

# 80/10/10 split: 80% train, then the remaining 20% halved into test and
# validation partitions.
train_data, test_data, train_label, test_label = train_test_split(
    image_data, labels, train_size=0.8, shuffle=True)
test_data, val_data, test_label, val_label = train_test_split(
    test_data, test_label, train_size=0.5)

# Sanity-print the resulting partition shapes.
for split_name, split_arr in (
        ('train_data', train_data), ('train_label', train_label),
        ('test_data', test_data), ('test_label', test_label),
        ('val_data', val_data), ('val_label', val_label)):
    print(split_name, split_arr.shape)

# Inputs: 2048-d bottleneck features and 5-class one-hot labels.
X = tf.placeholder(tf.float32, [None, 2048])
Y = tf.placeholder(tf.float32, [None, 5])

with tf.name_scope('final_training_ops'):
    # Single fully-connected layer mapping bottleneck features to class scores.
    logits = tf.layers.dense(X, 5)

# Batch-averaged softmax cross-entropy, minimized with plain SGD (lr=0.001).
per_example_loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)
cross_entropy_mean = tf.reduce_mean(per_example_loss)
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy_mean)

with tf.name_scope('evaluation'):
    # Accuracy: fraction of samples whose argmax prediction matches the label.
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
    evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

batch_size = 64
g_b = 0  # cursor into train_data/train_label for sequential batching


def next_batch(size):
    """Return the next ``size`` training samples and advance the global cursor.

    Slices ``train_data``/``train_label`` starting at the module-level cursor
    ``g_b``; the caller is expected to reset ``g_b`` to 0 at each epoch.
    """
    global g_b
    start, end = g_b, g_b + size
    g_b = end
    return train_data[start:end], train_label[start:end]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Save checkpoints under OUT_DIR/checkpoints, creating the directory on
    # the first run.
    checkpoint_dir = os.path.abspath(os.path.join(OUT_DIR, 'checkpoints'))
    checkpoint_prefix = os.path.join(checkpoint_dir, 'model')
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    # BUG FIX: the Saver used to be created inside the `if not exists` branch,
    # so any run where the checkpoint directory already existed crashed with
    # NameError at saver.save().  Create it unconditionally.
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=6)

    ITERS = 1000  # total training epochs
    GROUP = 10    # evaluate and checkpoint every GROUP epochs
    for epoch in range(ITERS):
        g_b = 0  # rewind the sequential-batch cursor at each epoch start
        num_step = len(train_data) // batch_size
        for _ in range(num_step):
            x, y = next_batch(batch_size)
            sess.run(train_step, feed_dict={X: x, Y: y})
        # Periodic validation + checkpoint (always on the final epoch too).
        if epoch % GROUP == 0 or epoch == ITERS - 1:
            validation_accuracy = sess.run(
                evaluation_step, feed_dict={X: val_data, Y: val_label})
            print("[epoch {}]验证集准确率{:.3f}%".format(
                epoch, validation_accuracy * 100.))

            path = saver.save(sess, checkpoint_prefix, global_step=epoch)
            print('Saved model checkpoint to {}\n'.format(path))

    # Final held-out test accuracy.
    test_accuracy = sess.run(evaluation_step,
                             feed_dict={X: test_data, Y: test_label})
    print("测试集准确率{:.3f}%".format(test_accuracy * 100.))
