import tensorflow as tf
import numpy as np
import os
import sys
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Script-wide configuration constants.
VER = 'v1.0'
BATCH_SIZE = 128
# Directory this script lives in, and its file name (used to namespace saves).
BASE_DIR, FILE_NAME = os.path.split(__file__)
SAVE_DIR = os.path.join(BASE_DIR, '_save', FILE_NAME, VER)
# Relative path to the pre-extracted bottleneck vectors.
# Renamed from `dir` to avoid shadowing the builtin dir().
REL_VEC_DIR = '../../../../large_data/CV3/_many_files/agriculture/train_bottleneck'
VEC_DIR = os.path.join(BASE_DIR, REL_VEC_DIR)


# (1) Dataset loading
# 1) Run transform_learning.py first: it uses transfer learning to produce
#    feature vectors for the 5 crop image classes.
# 2) Those vectors are saved under the path data\bottleneck.
# 3) readData (below) loads the saved feature vectors.
def readData(vec_dir):
    """Load bottleneck feature vectors stored as text files.

    Each immediate sub-directory of ``vec_dir`` is one class; every ``.txt``
    file inside it holds one comma-separated feature vector. Labels are
    consecutive integers assigned in *sorted* sub-directory order, so the
    class -> label mapping is deterministic across runs and filesystems
    (plain ``os.listdir`` order is arbitrary).

    Args:
        vec_dir: directory containing one sub-directory per class.

    Returns:
        (x, y): float32 feature matrix of shape (n_samples, vec_len) and
        int64 label vector of shape (n_samples,).
    """
    x, y = [], []
    yi = 0
    # sorted() makes the label assignment reproducible.
    for sub_dir_name in sorted(os.listdir(vec_dir)):
        sub_dir_path = os.path.join(vec_dir, sub_dir_name)
        if not os.path.isdir(sub_dir_path):
            continue
        for file_name in sorted(os.listdir(sub_dir_path)):
            ext = os.path.splitext(file_name)[1].lower()
            if ext != '.txt':
                continue
            file_path = os.path.join(sub_dir_path, file_name)
            vec = np.loadtxt(file_path, delimiter=',')
            x.append(vec)
            y.append(yi)
        yi += 1
    x = np.asarray(x, dtype=np.float32)
    y = np.asarray(y, dtype=np.int64)
    return x, y


# Load all feature vectors; derive the class count from the distinct
# labels actually present in the data.
x, y = readData(VEC_DIR)
n_cls = len(np.unique(y))

# 4) Split into a training set (0.8) and a test set (0.2); fixed
#    random_state makes the split reproducible.
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=1, shuffle=True)


def dl(x, y, batch_size):
    """Yield successive (x, y) mini-batches of at most ``batch_size`` items.

    The final batch is smaller when len(x) is not a multiple of batch_size.
    """
    for start in range(0, len(x), batch_size):
        stop = start + batch_size
        yield x[start:stop], y[start:stop]


# (2) Model creation and evaluation
# 1) Build a fully-connected network (a single dense layer) for classification.
# Input features: assumes the bottleneck vectors are 2048-dim — confirm
# against the backbone used in transform_learning.py.
ph_x = tf.placeholder(tf.float32, (None, 2048), 'ph_x')
# Integer class labels, shape (batch,).
ph_y = tf.placeholder(tf.int64, (None,), 'ph_y')
# Unnormalized class logits, shape (batch, n_cls).
pred = tf.layers.Dense(n_cls)(ph_x)
# Mean softmax cross-entropy; the sparse variant takes integer labels
# directly (no one-hot needed).
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=ph_y, logits=pred)
)
optim = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
# Fraction of samples whose argmax logit matches the label.
accuracy = tf.reduce_mean(
    tf.cast(
        tf.equal(
            ph_y,
            tf.argmax(pred, axis=1)
        ),
        tf.float32
    )
)


# 2) Train for 500 epochs with self-chosen batching; print the loss every
#    100 epochs.
# 3) Set up a checkpoint, saving the model parameters every 100 epochs.
#    NOTE(review): no tf.train.Saver is created anywhere in this script, so
#    requirement 3 appears unimplemented — confirm and add if required.
def process_data(sess, x, y, label, is_train, epoch=1):
    """Run one full pass over (x, y) in mini-batches of BATCH_SIZE.

    Args:
        sess: active tf.Session used to run the graph.
        x, y: feature matrix and integer label vector.
        label: tag for the pass (currently unused by the body).
        is_train: when True, also run the optimizer on every batch.
        epoch: current epoch number (currently unused by the body).

    Returns:
        (mean loss, mean accuracy) averaged over the batches processed.
    """
    # During training, also fetch the optimizer so weights get updated.
    fetches = [optim, loss, accuracy] if is_train else [loss, accuracy]
    total_loss = 0.0
    total_acc = 0.0
    n_batches = 0
    for bx, by in dl(x, y, BATCH_SIZE):
        results = sess.run(fetches, feed_dict={ph_x: bx, ph_y: by})
        # The last two fetches are always loss and accuracy; the optimizer
        # op's result (None) is discarded when training.
        total_loss += results[-2]
        total_acc += results[-1]
        n_batches += 1
    # max(..., 1) reproduces the original `it + 1` divisor when no batch
    # was yielded (returns 0.0, 0.0 instead of dividing by zero).
    denom = max(n_batches, 1)
    return total_loss / denom, total_acc / denom


with tf.Session() as sess:
    # Initialize the dense layer's weights before training.
    sess.run(tf.global_variables_initializer())

    # n_epochs = int(np.ceil(500 / BATCH_SIZE))
    # n_group = int(np.ceil(100 / BATCH_SIZE))
    # Train for 500 epochs, logging every 100th epoch (and the last one).
    n_epochs = 500
    n_group = 100
    # Per-epoch history for the plots below.
    loss_his, acc_his = [], []
    for epoch in range(n_epochs):
        avg_loss, avg_acc = process_data(sess, x_train, y_train, 'train', True, epoch + 1)
        loss_his.append(avg_loss)
        acc_his.append(avg_acc)
        if epoch % n_group == 0 or epoch == n_epochs - 1:
            print(f'epoch#{epoch + 1}: loss = {avg_loss}, acc = {avg_acc}')

    # 4) Evaluate on the held-out test set and print the test accuracy.
    print('Testing ...')
    avg_loss, avg_acc = process_data(sess, x_test, y_test, 'test', False)
    print(f'Test: loss = {avg_loss}, acc = {avg_acc}')

    # Plot the training loss and accuracy curves side by side.
    spr = 1  # subplot rows
    spc = 2  # subplot columns
    spn = 0  # running subplot index
    plt.figure(figsize=[12, 6])
    spn += 1
    plt.subplot(spr, spc, spn)
    plt.title('loss')
    plt.plot(loss_his)
    plt.grid()
    spn += 1
    plt.subplot(spr, spc, spn)
    plt.title('acc')
    plt.plot(acc_his)
    plt.grid()
    plt.show()
