"""
1.	在计算机视觉任务和自然语言处理任务中，深度学习将预训练的模型作为新模型的起点是一种常用的方法，通常这些预训练的模型在开发神经网络的时候已经
消耗了巨大的时间资源和计算资源，迁移学习可以将已习得的强大技能迁移到相关的问题上。把inceptionnet-v3预训练模型，作为主干网络，将5种农作物图像
数据，映射为2048维向量。按照下述要求，完成相应操作（70分）

【注意】 For Tensorflow 1.x
"""

import tensorflow as tf
import numpy as np
import os
import sys
import re
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import glob

VER = 'v1.0'  # version tag used to namespace save/cache/log directories
RAND_SEED = 1
BATCH_SIZE = 32
ALPHA = 0.001  # learning rate for the Adam optimizer
N_EPOCHS = 10
np.random.seed(RAND_SEED)  # make numpy permutation/shuffling reproducible
tf.random.set_random_seed(RAND_SEED)  # TF1.x graph-level random seed
FILE_NAME = os.path.basename(__file__)  # namespace outputs by script name
SAVE_DIR = os.path.join('_save', FILE_NAME, VER)
os.makedirs(SAVE_DIR, exist_ok=True)
SAVE_WEIGHT_DIR = os.path.join('_save', FILE_NAME, VER, 'weight')
os.makedirs(SAVE_WEIGHT_DIR, exist_ok=True)
SAVE_WEIGHT_PREFIX = os.path.join(SAVE_WEIGHT_DIR, 'epoch')  # checkpoint filename prefix for tf.train.Saver
LOG_DIR = os.path.join('_log', FILE_NAME, VER)  # TensorBoard log directory

# (1) Load the pretrained Inception-v3 model and extract features for the image dataset
# 1) Define data parameters: image folder, feature-vector cache path, inception-v3 model path, etc.
# 2) Build the graph, load the frozen inception-v3 model, and obtain the data input
#    tensor and the bottleneck-layer output tensor
IMG_DIR = '../../../../large_data/CV2/_many_files/flower_photos_liuqilong'  # one sub-directory per class
FEATURE_SAVE_DIR = '../../../../large_data/CV2/_many_files/flower_photos_liuqilong_vec/' + VER  # feature cache, namespaced by VER
os.makedirs(FEATURE_SAVE_DIR, exist_ok=True)
MODEL_PATH = '../../../../large_data/model/inceptionV3/tensorflow_inception_graph.pb'  # frozen Inception-v3 GraphDef


def sep(label=''):
    """Print a visual section separator with an optional label.

    Args:
        label: text to place between the two dash runs (default: empty).

    Note: the parameter was originally misspelled ``lable``; every call
    site in this file passes it positionally, so the rename is safe.
    """
    print('-' * 32, label, '-' * 32)


sep('Import model')
# Read the frozen GraphDef binary and import it into the default graph.
with open(MODEL_PATH, 'rb') as f:
    model_bin = f.read()
graphDef = tf.GraphDef()
graphDef.ParseFromString(model_bin)
# Return handles to two named tensors inside the imported graph:
# the raw-JPEG-bytes input and the bottleneck (feature) output.
input_placeholder, output_tensor = tf.import_graph_def(graphDef, return_elements=[
    'DecodeJpeg/contents:0',  # feed raw JPEG file bytes here
    'pool_3/_reshape:0'  # bottleneck output; squeezed to 2048-d downstream
])

sep('Start session')
with tf.Session() as sess:

    sep('Put graph into tensorboard log')
    # Dump the imported graph so it can be inspected in TensorBoard.
    with tf.summary.FileWriter(LOG_DIR) as fw:
        fw.add_graph(sess.graph)

    sep('Extrat feature')
    # 3) Open a session, read every image, and save each image's mapped
    #    feature vector under the corresponding cache path.
    sub_dirs = os.listdir(IMG_DIR)
    sub_dirs = sorted(sub_dirs)  # sort so class indices are deterministic across runs
    y_v = -1  # running class index; first non-empty sub-directory becomes class 0
    idx2name = {}  # class index -> class (sub-directory) name
    name2idx = {}  # class name -> class index
    ext_set = set(['.jpg', '.jpeg', '.png'])  # accepted image extensions
    for sub_dir in sub_dirs:
        sub_dir_path = os.path.join(IMG_DIR, sub_dir)
        if not os.path.isdir(sub_dir_path):
            continue
        img_names = os.listdir(sub_dir_path)
        if len(img_names) == 0:
            continue

        # Register this sub-directory as the next class.
        y_v += 1
        idx2name[y_v] = sub_dir
        name2idx[sub_dir] = y_v

        feature_sub_dir = os.path.join(FEATURE_SAVE_DIR, sub_dir)
        os.makedirs(feature_sub_dir, exist_ok=True)

        print(f'Processing images in sub-class "{sub_dir}" ...')
        # Skip extraction entirely when cached vectors already exist for this class.
        if len(os.listdir(feature_sub_dir)) > 0:
            print('This sub-class has been processed already, and caches will be used directly. (Change VER and re-run to process again)')
            continue
        cnt = 0
        for img_name in img_names:
            img_path = os.path.join(sub_dir_path, img_name)
            if os.path.isdir(img_path):
                continue
            ext = os.path.splitext(img_name)[1].lower()
            if not ext in ext_set:
                continue
            # Feed the raw JPEG bytes through Inception-v3; output has a
            # leading batch dim of 1 (removed by the squeeze below).
            with open(img_path, 'rb') as f:
                img_bin = f.read()
            vec = sess.run(output_tensor, feed_dict={input_placeholder: img_bin})
            vec = np.squeeze(vec, axis=0)
            # Cache the vector as text, e.g. "rose.jpg" -> "rose.jpg.txt".
            feature_path = os.path.join(feature_sub_dir, img_name + '.txt')
            np.savetxt(feature_path, vec)
            cnt += 1
            if cnt % 20 == 0:
                print(f'{cnt} images processed.')
        if cnt % 20 != 0:
            print(f'{cnt} images processed.')
    print('Feature extraction is over:')
    print(idx2name)
    print(name2idx)

    # (2) Train the rear (classifier) network
    # 1) Split the feature-vector dataset into training, validation, and test sets
    sep('Tidy data')
    x_data = []  # 2048-d feature vectors
    y_data = []  # integer class labels
    path_data = []  # image paths relative to IMG_DIR (kept for display later)
    cls_names = name2idx.keys()
    for cls_name in cls_names:
        cls_idx = name2idx[cls_name]
        feature_sub_dir = os.path.join(FEATURE_SAVE_DIR, cls_name)
        for vec_file in os.listdir(feature_sub_dir):
            vec_path = os.path.join(feature_sub_dir, vec_file)
            vec = np.loadtxt(vec_path)
            x_data.append(vec)
            y_data.append(cls_idx)
            # Drop the '.txt' suffix to recover the original image file name.
            img_rel_path = cls_name + '/' + vec_file[:-4]
            path_data.append(img_rel_path)
    x_data = np.float32(x_data)
    y_data = np.int64(y_data)
    path_data = np.array(path_data)  # ATTENTION If it is not numpy array but just a python list: TypeError: only integer scalar arrays can be converted to a scalar index
    print('x_data', np.shape(x_data))
    print('y_data', np.shape(y_data))
    print('path_data', np.shape(path_data))

    # 80% train; the remaining 20% is split evenly into test and validation.
    x_train, x_test_val, y_train, y_test_val, path_train, path_test_val\
        = train_test_split(x_data, y_data, path_data, train_size=0.8, random_state=RAND_SEED, shuffle=True)
    x_test, x_val, y_test, y_val, path_test, path_val\
        = train_test_split(x_test_val, y_test_val, path_test_val, train_size=0.5, random_state=RAND_SEED, shuffle=True)
    print('x_train', np.shape(x_train))
    print('y_train', np.shape(y_train))
    print('path_train', np.shape(path_train))
    print('x_test', np.shape(x_test))
    print('y_test', np.shape(y_test))
    print('path_test', np.shape(path_test))
    print('x_val', np.shape(x_val))
    print('y_val', np.shape(y_val))
    print('path_val', np.shape(path_val))


    def data_loader(x_data, y_data, path_data, discard_reminder=True):
        M = len(x_data)
        if discard_reminder:
            iters = int(np.floor(M / BATCH_SIZE))
        else:
            iters = int(np.ceil(M / BATCH_SIZE))
        for i in range(iters):
            yield M, x_data[i*BATCH_SIZE:(i + 1)*BATCH_SIZE], y_data[i*BATCH_SIZE:(i + 1)*BATCH_SIZE], path_data[i*BATCH_SIZE:(i + 1)*BATCH_SIZE]


    # 2) Define the fully-connected rear network, the loss, and the accuracy op
    sep('Rear network model')
    N_CLS = len(idx2name.keys())  # number of classes discovered during extraction
    print(f'Number of classes: {N_CLS}')
    x = tf.placeholder(tf.float32, [None, 2048], name='ph_x')  # bottleneck feature input
    y = tf.placeholder(tf.int64, [None], name='ph_y')  # integer class labels
    logits = tf.layers.Dense(N_CLS)(x)  # single dense layer: 2048 -> N_CLS logits
    # Softmax cross-entropy over sparse (integer) labels, averaged over the batch.
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    )
    optim = tf.train.AdamOptimizer(learning_rate=ALPHA).minimize(loss)
    h = tf.nn.softmax(logits)  # class probabilities
    # Accuracy: fraction of samples whose argmax prediction equals the label.
    acc = tf.reduce_mean(
        tf.cast(
            tf.equal(
                y,
                tf.argmax(h, axis=1)
            ),
            tf.float32
        )
    )

    # 3) While training on the training set, save checkpoints and report
    #    validation accuracy. NOTE(review): the task text asks for this every
    #    100 steps; this implementation does it once per epoch instead.
    sep('Train')
    saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=6)  # ATTENTION Saver


    def run_on_data(x_data, y_data, epoch=0, is_training=True, discard_reminder=True, print_interv=None):
        """Run one full pass over (x_data, y_data) in mini-batches.

        Args:
            x_data: feature vectors, fed to placeholder ``x``.
            y_data: integer labels, fed to placeholder ``y``.
            epoch: epoch number, used as the checkpoint global step.
            is_training: when True, run the optimizer and save a checkpoint
                after the pass; when False, only evaluate loss/accuracy.
            discard_reminder: passed through to data_loader (drop the last
                partial batch when True).
            print_interv: print per-batch loss/acc every this many batches
                (None = silent).

        Returns:
            (average loss, average accuracy) over all processed batches.

        Raises:
            ValueError: if the loader yields no batches (dataset smaller
                than one batch with discard_reminder=True). The original
                code crashed here with a NameError on the undefined ``i``.
        """
        loss_sum = 0.
        acc_sum = 0.
        n_done = 0  # batches actually processed; guards the division below
        # n_batches is the first element yielded by data_loader (intended
        # semantics: total batch count, used for the "last batch" print).
        for i, (n_batches, bx, by, _) in enumerate(data_loader(x_data, y_data, [], discard_reminder)):
            if is_training:
                _, loss_v, acc_v = sess.run([optim, loss, acc], feed_dict={x: bx, y: by})
            else:
                loss_v, acc_v = sess.run([loss, acc], feed_dict={x: bx, y: by})
            if print_interv is not None and (i % print_interv == 0 or i == n_batches - 1):
                print(f'#{i + 1}: loss = {loss_v}, acc = {acc_v}')
            loss_sum += loss_v
            acc_sum += acc_v
            n_done = i + 1
        if n_done == 0:
            raise ValueError('run_on_data: no batches produced; is the dataset smaller than one batch?')
        loss_avg = loss_sum / n_done
        acc_avg = acc_sum / n_done

        if is_training:
            # Save a checkpoint after every training epoch, tagged by epoch number.
            saver.save(sess, SAVE_WEIGHT_PREFIX, global_step=epoch)  # ATTENTION Saver

        return loss_avg, acc_avg


    sess.run(tf.global_variables_initializer())
    for i in range(N_EPOCHS):
        # Training pass: updates weights; run_on_data also saves a checkpoint.
        loss_avg, acc_avg = run_on_data(x_train, y_train, i, is_training=True, discard_reminder=True, print_interv=5)
        print(f'epoch {i + 1}: avg loss = {loss_avg}, avg acc = {acc_avg}')
        # Validation pass: no weight updates; keep the final partial batch.
        loss_avg, acc_avg = run_on_data(x_val, y_val, is_training=False, discard_reminder=False, print_interv=None)
        print(f'epoch {i + 1}: val loss = {loss_avg}, val acc = {acc_avg}')


    # 4) After training, compute the accuracy on the test set
    sep('Test')
    loss_avg, acc_avg = run_on_data(x_test, y_test, is_training=False, discard_reminder=False, print_interv=None)
    print(f'Test loss = {loss_avg}, test acc = {acc_avg}')

    # (3) Display the prediction results for 16 images in a figure
    spr = 4  # subplot rows
    spc = 4  # subplot columns
    spn = 0  # current subplot number (1-based for plt.subplot)
    plt.figure(figsize=[13, 6])
    for i, (n_batches, bx, by, bpath) in enumerate(data_loader(x_test, y_test, path_test, discard_reminder=False)):
        h_v = sess.run(h, feed_dict={x: bx})
        h_v = np.argmax(h_v, axis=1)  # predicted class index per sample
        for cls_idx, rel_path, pred_idx in zip(by, bpath, h_v):
            spn += 1
            if spn > spr * spc:
                break  # only the first 16 test images are shown
            plt.subplot(spr, spc, spn)
            # Title format: true_class=>predicted_class (V = correct, X = wrong)
            title = f'{idx2name[cls_idx]}=>{idx2name[pred_idx]} ({"V" if cls_idx == pred_idx else "X"})'
            plt.title(title)
            plt.axis('off')
            img = plt.imread(os.path.join(IMG_DIR, rel_path))
            plt.imshow(img)
        if spn > spr * spc:
            break
    plt.show()

    # 1) Load the parameters from the latest checkpoint file
    sep('Load weight')
    latest_file = tf.train.latest_checkpoint(SAVE_WEIGHT_DIR)  # ATTENTION Saver
    print(f'Restore from {latest_file}')
    saver.restore(sess, latest_file)  # ATTENTION Saver

    # 2) Extract the feature vectors of the given 16 images
    # 3) Print the true and predicted labels of the 16 images and display them
    sep('Test random pictures')
    M_TEST = len(path_test)
    rand_idx = np.random.permutation(M_TEST)  # shuffle test paths to pick 16 at random
    path_test = path_test[rand_idx]
    paths = path_test[:16]

    spr = 4  # subplot rows
    spc = 4  # subplot columns
    spn = 0  # current subplot number (1-based for plt.subplot)
    plt.figure(figsize=[13, 6])
    # Captures the class name (first path component) from "class/img.jpg",
    # accepting either '/' or '\' as the separator.
    regexp = re.compile(r'^([^\\/]+)[\\/][^\\/]+$')
    for rel_path in paths:
        path = os.path.join(IMG_DIR, rel_path)
        spn += 1
        if spn > spr * spc:
            break
        # NOTE(review): 'bin' shadows the builtin of the same name; harmless
        # here but worth renaming in a follow-up.
        with open(path, 'rb') as f:
            bin = f.read()
        vec = sess.run(output_tensor, feed_dict={input_placeholder: bin})  # (1, 2048)
        h_v = sess.run(h, feed_dict={x: vec})
        h_v = np.argmax(h_v, axis=1)
        h_v = h_v[0]  # single image -> scalar predicted class index
        pred_name = idx2name[h_v]

        # Recover the true class name from the relative path; fall back to
        # '?' when the path does not match the expected "class/file" shape.
        matcher = regexp.match(rel_path)
        if matcher is None:
            true_name = '?'
            is_right = '?'
        else:
            true_name = matcher[1]
            is_right = "V" if true_name == pred_name else "X"

        plt.subplot(spr, spc, spn)
        title = f'{true_name}: {pred_name} ({is_right})'
        plt.title(title)
        plt.axis('off')
        img = plt.imread(path)
        plt.imshow(img)

    plt.show()
    print('Over')
