"""
2.	把inceptionnet-v3预训练模型，作为主干网络，将5种农作物图像数据，映射为2048维向量，然后训练后端全连接网络，进行5种农作物图像分类。
按照下述要求，完成相应操作（40分）
"""
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

# Fix RNG seeds so weight initialization and numpy shuffles are reproducible.
tf.random.set_random_seed(1)
np.random.seed(1)


def sep(label=''):
    """Print a 64-char separator line with *label* embedded in the middle."""
    bar = '-' * 32
    print(f'{bar}{label}{bar}')


# ① Define data parameters: image folder, feature-vector cache directory,
#    inception-v3 model path, and training hyper-parameters.
sep('Define arguments')
VER = 'v1.1'
# Frozen TF1 GraphDef of the pre-trained Inception-v3 network.
MODEL_PATH = 'C4_datasets/model/tensorflow_inception_graph.pb'
# Root folder holding one sub-directory of JPEG images per crop class.
IMG_DIR = 'C4_datasets/data/agriculture'
# Cached 2048-d bottleneck vectors are stored here, mirroring IMG_DIR's layout.
BOTTLE_NECK_DIR = 'C4_datasets/data/bottleneck/' + VER
os.makedirs(BOTTLE_NECK_DIR, exist_ok=True)
# Tensor names inside the imported Inception-v3 graph:
# raw JPEG bytes in, flattened pool_3 bottleneck vector out.
INPUT_PLACEHOLDER_NAME = 'DecodeJpeg/contents:0'
OUTPUT_TENSOR_NAME = 'pool_3/_reshape:0'
FILE_NAME = os.path.basename(__file__)
LOG_DIR = os.path.join('_log', FILE_NAME, VER)
VER02 = 'v1.4'  # version tag for the back-end classifier checkpoints
BATCH_SIZE = 128
ALPHA = 0.001  # Adam learning rate
N_EPOCHS = 1000
print('N_EPOCHS', N_EPOCHS)
SAVE_DIR = os.path.join('_save', FILE_NAME, VER02)
SAVE_PREFIX = os.path.join(SAVE_DIR, 'ckpt')

# ② Build the graph: load the frozen Inception-v3 model and retrieve the
#    data-input tensor and the bottleneck-layer output tensor.
sep('Import inception v3 model')
graphDef = tf.GraphDef()
with open(MODEL_PATH, 'rb') as f:
    model_bin = f.read()
graphDef.ParseFromString(model_bin)
# import_graph_def returns the requested tensors in the order listed:
# the JPEG-bytes input placeholder and the pool_3 bottleneck output.
input_ph, output_tensor = tf.import_graph_def(graphDef, return_elements=[
    INPUT_PLACEHOLDER_NAME,
    OUTPUT_TENSOR_NAME
])

# ③ Open a session, run every image through the network, and cache each
#    feature vector as a text file at the corresponding path.
sep('images to vectors')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    print('Processing image ...')
    cnt = 0
    # One sub-directory per class; mirror the layout under BOTTLE_NECK_DIR.
    for sub_dir in os.listdir(IMG_DIR):
        sub_dir_path = os.path.join(IMG_DIR, sub_dir)
        vec_dir_path = os.path.join(BOTTLE_NECK_DIR, sub_dir)
        os.makedirs(vec_dir_path, exist_ok=True)
        for file in os.listdir(sub_dir_path):
            cnt += 1
            if cnt % 25 == 0:
                print(f'Processing no.{cnt} image ...')
            vec_path = os.path.join(vec_dir_path, file + '.txt')
            # Already cached by a previous run — skip the expensive forward pass.
            if os.path.exists(vec_path):
                continue
            file_path = os.path.join(sub_dir_path, file)
            with open(file_path, 'rb') as f:
                img_bin = f.read()
            # Feed raw JPEG bytes; the imported graph decodes the image itself.
            vec = sess.run(output_tensor, feed_dict={input_ph: img_bin})
            vec = np.squeeze(vec)  # drop the singleton batch dimension
            np.savetxt(vec_path, vec)
    print('Processing over.')

# ④ Load the cached feature vectors, then split into training (0.8),
#    validation (0.1) and test (0.1) sets.
sep('Load data')
yi = 0
x, y, pathes = [], [], []
# Class index <-> directory-name lookup tables.
idx2label, label2idx = {}, {}
# NOTE(review): os.listdir order is filesystem-dependent, so class indices may
# differ between machines/runs — consider sorting for reproducibility.
for sub_dir in os.listdir(BOTTLE_NECK_DIR):
    idx2label[yi] = sub_dir
    label2idx[sub_dir] = yi
    sub_dir_path = os.path.join(BOTTLE_NECK_DIR, sub_dir)
    for vec_file in os.listdir(sub_dir_path):
        vec_path = os.path.join(sub_dir_path, vec_file)
        vec = np.loadtxt(vec_path)
        x.append(vec)
        y.append(yi)
        # Strip the '.txt' suffix to recover the original image's relative path.
        pathes.append(sub_dir + '/' + vec_file[:-4])
    yi += 1
x = np.float32(x)
y = np.int64(y)
# First split off 80% for training, then halve the remainder into test/val.
x_train, x_test_val, y_train, y_test_val, path_train, path_test_val = train_test_split(x, y, pathes, train_size=0.8, random_state=2, shuffle=True)
x_test, x_val, y_test, y_val, path_test, path_val = train_test_split(x_test_val, y_test_val, path_test_val, train_size=0.5, random_state=1, shuffle=True)
print('x_train', x_train.shape)
print('x_test', x_test.shape)
print('x_val', x_val.shape)
print('y_train', y_train.shape)
print('y_test', y_test.shape)
print('y_val', y_val.shape)
print('path_train', np.shape(path_train))
print('path_test', np.shape(path_test))
print('path_val', np.shape(path_val))
vec_len = x_train.shape[1]
print('vector length:', vec_len)
n_cls = len(idx2label)
print('n_cls', n_cls)

# ⑤ Back-end model: a single fully-connected layer mapping the bottleneck
#    vector to n_cls crop classes (softmax applied inside the loss).
ph_x = tf.placeholder(tf.float32, [None, vec_len], 'ph_x')
ph_y = tf.placeholder(tf.int64, [None], 'ph_y')
pred = tf.layers.Dense(n_cls, activation=None)(ph_x)  # raw logits
# Mean cross-entropy over the batch, computed directly from logits.
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=ph_y, logits=pred)
)
optim = tf.train.AdamOptimizer(learning_rate=ALPHA).minimize(loss)
# Fraction of samples whose arg-max logit equals the integer label.
accuracy = tf.reduce_mean(
    tf.cast(
        tf.equal(ph_y, tf.argmax(pred, axis=1)),
        tf.float32
    )
)


# ⑥ Train 1000 epochs with self-chosen batch size; print loss and
#    cross-validation accuracy every 200 steps.
# ⑦ Set up checkpointing: save the back-end model parameters every 200 steps.
def dl_generator(x, y, batch_size):
    """Yield successive (x, y) mini-batches of at most *batch_size* samples."""
    for start in range(0, len(x), batch_size):
        stop = start + batch_size
        yield x[start:stop], y[start:stop]


def process_data(sess, dl_data, label, is_train=False, epoch=1, saver=None):
    """Run one pass over the mini-batches yielded by *dl_data*.

    Args:
        sess: active tf.Session used to run the graph.
        dl_data: iterable of (batch_x, batch_y) pairs.
        label: tag printed with per-batch progress (e.g. 'train', 'val').
        is_train: when True, also run the optimizer and checkpoint via *saver*.
        epoch: 1-based epoch number, used for logging only.
        saver: tf.train.Saver; required when is_train is True.

    Returns:
        (avg_loss, avg_acc) averaged over all batches, or (0.0, 0.0) when
        *dl_data* yields no batches (previously this raised an
        UnboundLocalError because the loop variable was never bound).
    """
    global g_step
    avg_loss, avg_acc = 0., 0.
    n_batches = 0  # explicit counter; never rely on the leftover loop variable
    for i, (bx, by) in enumerate(dl_data):
        if is_train:
            _, lossv, accv = sess.run([optim, loss, accuracy], feed_dict={ph_x: bx, ph_y: by})
            # ⑦ checkpoint every 200 optimizer steps (at steps 1, 201, 401, ...)
            if g_step % 200 == 1:
                os.makedirs(SAVE_DIR, exist_ok=True)
                saver.save(sess, SAVE_PREFIX, global_step=g_step)
            g_step += 1
        else:
            # Evaluation only: no optimizer run, no weight updates.
            lossv, accv = sess.run([loss, accuracy], feed_dict={ph_x: bx, ph_y: by})
        print(f'{label}: epoch#{epoch}: #{i + 1}: loss = {lossv}, acc = {accv}')
        avg_loss += lossv
        avg_acc += accv
        n_batches += 1
    if n_batches == 0:
        return 0., 0.
    return avg_loss / n_batches, avg_acc / n_batches


g_step = 1  # global optimizer-step counter, advanced inside process_data
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=6)

    train = False
    if os.path.exists(SAVE_DIR):
        # A previous run left checkpoints — restore instead of retraining.
        # NOTE(review): if SAVE_DIR exists but contains no checkpoint,
        # latest_checkpoint returns None and restore will fail — verify.
        path = tf.train.latest_checkpoint(SAVE_DIR)
        saver.restore(sess, path)
    else:
        train = True
        loss_his, acc_his, loss_his_val, acc_his_val = [], [], [], []
        for epoch in range(N_EPOCHS):
            sep(epoch + 1)
            # One full pass over the training set (optimizes + checkpoints).
            avg_loss, avg_acc = process_data(sess, dl_generator(x_train, y_train, BATCH_SIZE), 'train', True, epoch + 1, saver)
            print(f'Epoch#{epoch + 1}: loss = {avg_loss}, acc = {avg_acc}')
            # Evaluate on the validation set without updating weights.
            avg_loss_val, avg_acc_val = process_data(sess, dl_generator(x_val, y_val, BATCH_SIZE), 'val', False, epoch + 1)
            print(f'Epoch#{epoch + 1}: loss = {avg_loss}, acc = {avg_acc}, loss_val = {avg_loss_val}, acc_val = {avg_acc_val}')
            loss_his.append(avg_loss)
            acc_his.append(avg_acc)
            loss_his_val.append(avg_loss_val)
            acc_his_val.append(avg_acc_val)
        print('Final g_step:', g_step)

    # ⑧ Report accuracy on the held-out test set.
    sep('Test')
    avg_loss_test, avg_acc_test = process_data(sess, dl_generator(x_test, y_test, BATCH_SIZE), 'test', False)
    print(f'Test: loss_test = {avg_loss_test}, acc_test = {avg_acc_test}')

    # Plot loss/accuracy curves only when this run actually trained.
    if train:
        spr = 1
        spc = 2
        spn = 0
        plt.figure(figsize=[12, 6])

        spn += 1
        plt.subplot(spr, spc, spn)
        plt.title('loss')
        plt.plot(loss_his, label='train')
        plt.plot(loss_his_val, label='val')
        plt.grid()
        plt.legend()

        spn += 1
        plt.subplot(spr, spc, spn)
        plt.title('accuracy')
        plt.plot(acc_his, label='train')
        plt.plot(acc_his_val, label='val')
        plt.grid()
        plt.legend()

        plt.show()

    # 3x3 grid for sample-prediction display.
    spr = 3
    spc = 3
    spn = 0
    plt.figure(figsize=[6, 6])

    # ⑨ Take 9 samples from the test set and predict their classes with the
    #    trained back-end model.
    # NOTE(review): these are the first 9 rows of the (already shuffled) test
    # split, not freshly randomized on every run.
    n = 9
    bx = x_test[:n]
    by = y_test[:n]
    b_label_true = [idx2label[i] for i in by]
    b_path = path_test[:n]
    predv = sess.run(pred, feed_dict={ph_x: bx}).argmax(axis=1)
    b_label_pred = [idx2label[i] for i in predv]
    # 'V' marks a correct prediction, 'X' a wrong one.
    judge = ['V' if i == j else 'X' for i, j in zip(by, predv)]

    # ⑩ Print the predicted vs. true class names of the 9 samples for comparison.
    for i in range(n):
        spn += 1
        plt.subplot(spr, spc, spn)
        path = IMG_DIR + '/' + b_path[i]
        title = f'{b_label_true[i]}=>{b_label_pred[i]} ({judge[i]})'
        print(f'{path} => {title}')
        plt.title(title, color='black' if judge[i] == 'V' else 'red')
        plt.axis('off')
        img = plt.imread(path)
        plt.imshow(img)

# Finally: block until the prediction figure window is closed.
plt.show()
