import pickle
import tensorflow as tf
import numpy as np
import os
from sklearn.model_selection import train_test_split

# Tensor names inside the frozen InceptionV3 graph: the raw-JPEG input
# placeholder and the 2048-d bottleneck (penultimate layer) output.
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'
BOTTLENECK_VEC_SIZE = 2048  # width of one bottleneck feature vector
MODEL_PATH = '../../../../../../large_data/model/inceptionV3/tensorflow_inception_graph.pb'  # frozen GraphDef (.pb)
IMG_ROOT_DIR = '../../../../../../large_data/DL1/_many_files/zoo'  # one sub-directory per class
# NOTE(review): these split percentages are never referenced below —
# train_test_split is called with train_size=0.7 instead. Confirm intent
# and either use them or remove them.
PERCENT_TRAIN = 80
PERCENT_TEST = 10
PERCENT_VAL = 10


def sep(label='', cnt=32):
    """Print a section separator: *cnt* dashes, the label, then *cnt* more dashes."""
    rule = '-' * cnt
    print(f'{rule}{label}{rule}')


########################################################################################################################
sep('Prepare')
# Fix RNG seeds for reproducibility (TF1-style seeding API).
tf.random.set_random_seed(777)
np.random.seed(777)

VER = 'v7.0'
FILE_NAME = os.path.basename(__file__)
# Per-script, per-version output locations: checkpoints, cached bottleneck
# vectors, and TensorBoard logs.
CKPT_DIR = os.path.join('_save', FILE_NAME, VER, 'ckpt')
BOTTLENECK_DIR = os.path.join('_save', FILE_NAME, VER, 'bottleneck')
LOG_DIR = os.path.join('_log', FILE_NAME, VER)
# NOTE(review): LOG_DIR is not created here — presumably FileWriter creates
# it; CKPT_DIR is created but never written to in this file. Verify.
os.makedirs(CKPT_DIR, exist_ok=True)
os.makedirs(BOTTLENECK_DIR, exist_ok=True)

########################################################################################################################
sep('Load model')
# Load the frozen InceptionV3 GraphDef from disk, parse it, and import it
# into the default graph, fetching the JPEG-input and bottleneck-output
# tensors by name.
graphDef = tf.GraphDef()
with open(MODEL_PATH, 'rb') as f:
    model_bin = f.read()
graphDef.ParseFromString(model_bin)

input_placeholder, output_tensor = tf.import_graph_def(graphDef, return_elements=[
    JPEG_DATA_TENSOR_NAME,
    BOTTLENECK_TENSOR_NAME,
])
with tf.Session() as sess:
    # Dump the imported graph so it can be inspected in TensorBoard.
    with tf.summary.FileWriter(LOG_DIR) as fw:
        fw.add_graph(sess.graph, global_step=0)
    print('Imported model into tensorboard.')

    ########################################################################################################################
    sep('Extract feature')
    # Run every image through Inception once and cache the 2048-d bottleneck
    # vector on disk, so re-runs skip the expensive forward pass.
    # Directory listings are sorted: os.listdir order is filesystem-dependent,
    # which previously made class/label assignment non-deterministic despite
    # the seeded RNGs.
    cnt = 0
    for sub_dir_name in sorted(os.listdir(IMG_ROOT_DIR)):
        sub_dir_name_bn = os.path.join(BOTTLENECK_DIR, sub_dir_name)
        os.makedirs(sub_dir_name_bn, exist_ok=True)
        sub_dir_path = os.path.join(IMG_ROOT_DIR, sub_dir_name)
        for file_name in sorted(os.listdir(sub_dir_path)):
            cnt += 1
            if cnt % 25 == 1:
                print(f'Processing no. {cnt} pic ...')
            file_path = os.path.join(sub_dir_path, file_name)
            vec_file_path = os.path.join(sub_dir_name_bn, file_name + '.txt')
            if os.path.exists(vec_file_path):
                continue  # bottleneck already cached on a previous run
            with open(file_path, 'rb') as f:
                img_bin = f.read()
            # Feed raw JPEG bytes; the bottleneck output is (1, 2048),
            # squeezed to a flat (2048,) vector before saving.
            feature = sess.run(output_tensor, feed_dict={input_placeholder: img_bin})
            feature = np.squeeze(feature, axis=0)
            np.savetxt(vec_file_path, feature)

    ########################################################################################################################
    sep('Prepare data')
    # Load the cached bottleneck vectors; each sub-directory is one class.
    # NUM_CLASSES is derived from the data instead of being hard-coded to 2,
    # so the one-hot lookup no longer raises IndexError when the image root
    # contains more than two class directories. With exactly two classes the
    # behavior is unchanged.
    label_names = sorted(os.listdir(BOTTLENECK_DIR))
    NUM_CLASSES = len(label_names)
    x = []
    y = []
    for label_idx, sub_dir_name in enumerate(label_names):
        sub_dir_path = os.path.join(BOTTLENECK_DIR, sub_dir_name)
        for file_name in os.listdir(sub_dir_path):
            file_path = os.path.join(sub_dir_path, file_name)
            vec = np.loadtxt(file_path)
            x.append(vec)
            y.append(label_idx)
    x = np.float32(x)
    y = np.int64(y)
    y = np.eye(NUM_CLASSES, dtype=np.int64)[y]  # one-hot encode labels
    print('x', x.shape)
    print('y', y.shape)

    x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, random_state=1, shuffle=True)
    print('x_train', x_train.shape)
    print('y_train', y_train.shape)
    print('x_test', x_test.shape)
    print('y_test', y_test.shape)

    ########################################################################################################################
    sep('Train backend')
    # Softmax classifier (single dense layer) on top of the frozen Inception
    # bottleneck features.
    x_ph = tf.placeholder(tf.float32, [None, BOTTLENECK_VEC_SIZE], 'x_ph')
    y_ph = tf.placeholder(tf.float32, [None, NUM_CLASSES], 'y_ph')
    pred = tf.layers.Dense(NUM_CLASSES)(x_ph)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_ph, logits=pred))
    acc = tf.reduce_mean(tf.cast(
        tf.equal(
            tf.argmax(pred, axis=1),
            tf.argmax(y_ph, axis=1)
        ),
        tf.float32
    ))
    optim = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

    # NOTE(review): this initializes *all* global variables; harmless as long
    # as the imported frozen graph contains constants only — verify if the
    # .pb ever ships variables.
    sess.run(tf.global_variables_initializer())

    # Full-batch training: the bottleneck dataset is small enough to fit in
    # memory, so no mini-batching is needed.
    ITERS = 200
    GROUP = 20
    for i in range(ITERS):
        _, lossv, accv = sess.run([optim, loss, acc], feed_dict={x_ph: x_train, y_ph: y_train})
        if i % GROUP == 0 or i + 1 == ITERS:
            print(f'#{i + 1}: loss = {lossv}, acc = {accv}')

    ########################################################################################################################
    sep('Test')
    # Evaluate once on the held-out split.
    lossv, accv = sess.run([loss, acc], feed_dict={x_ph: x_test, y_ph: y_test})
    print(f'Test loss = {lossv}, acc = {accv}')

    print('over')