import pickle
import tensorflow.compat.v1 as tf
import numpy as np
import os
from sklearn.model_selection import train_test_split


def sep(label='', cnt=32):
    """Print a section divider: *label* sandwiched between two runs of *cnt* dashes."""
    rule = '-' * cnt
    print(f'{rule}{label}{rule}')


# Fix the TF and NumPy seeds so the feature extraction / shuffle / split below
# are reproducible; this script runs in TF1 graph mode.
tf.random.set_random_seed(1)
np.random.seed(1)
tf.disable_eager_execution()

# Version tag and output location for the pickled bottleneck dataset.
VER = 'v7.0'
FILE_NAME = os.path.basename(__file__)
SAVE_DIR = os.path.join('_save', FILE_NAME, VER)
SAVE_NAME = 'bottleneck.pickle'

# Number of nodes in the bottleneck layer of the Inception-v3 model.
BOTTLENECK_TENSOR_SIZE = 2048

# Name of the tensor that holds the bottleneck-layer result in the
# Inception-v3 model. In Google's published Inception-v3 graph this tensor
# is named 'pool_3/_reshape:0'. (A tensor's name can be obtained from
# tensor.name during training.)
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'

# Name of the tensor that receives the raw JPEG image bytes.
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'

# Directory containing the downloaded, Google-pretrained Inception-v3 model.
MODEL_DIR = '../../../../../large_data/model/inceptionV3/'

# File name of the pretrained Inception-v3 graph.
MODEL_FILE = 'tensorflow_inception_graph.pb'

# Because each training image is used many times, the feature vectors computed
# by Inception-v3 can be cached on disk to avoid recomputation.
# This variable defines where those cache files live.
# NOTE(review): CACHE_DIR appears unused in this chunk — confirm against the
# rest of the file before removing.
CACHE_DIR = './images_my/tmp/bottleneck/'

# Root folder of the image data.
# Each sub-folder is one class to distinguish; the sub-folder holds that
# class's images.
path = '../../../../../large_data/CV2/_many_files/flower_photos_liuqilong/'
INPUT_DATA = path

# Percentage of data used for validation.
VALIDATION_PERCENTAGE = 10
# Percentage of data used for testing.
TEST_PERCENTAGE = 10

# Neural-network / training settings.
# NOTE(review): LEARNING_RATE, STEPS, GROUP and BATCH (and the percentage
# constants above) are not referenced in this chunk — presumably consumed by a
# later training script stage; verify.
LEARNING_RATE = 0.01
STEPS = 40
GROUP = 5
BATCH = 100

########################################################################################################################
sep('load model')
# Read the serialized GraphDef of the pretrained Inception-v3 model.
with open(os.path.join(MODEL_DIR, MODEL_FILE), 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
# Import the Inception-v3 graph and fetch the tensor fed with raw JPEG bytes
# and the tensor producing the bottleneck-layer result.
bottleneck_tensor, jpeg_data_tensor = tf.import_graph_def(graph_def,
                                                          return_elements=[
                                                              BOTTLENECK_TENSOR_NAME,
                                                              JPEG_DATA_TENSOR_NAME
                                                          ])

# Fixed log-label typo: was 'bottlenect_tensor'.
print('bottleneck_tensor', bottleneck_tensor.shape)
print('jpeg_data_tensor', jpeg_data_tensor.shape)


########################################################################################################################
sep('load pictures')
# TF1 session used to run the imported Inception graph; closed at script end.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
x_data = []      # bottleneck feature vector per image
y_data = []      # integer class label per image
x_path = []      # relative path 'class_dir/file_name' per image
y = 0            # running class index, one per sub-directory
cnt = 0          # total images processed
idx2label = {}   # class index -> directory (class) name
label2idx = {}   # directory (class) name -> class index
for dir_name in os.listdir(INPUT_DATA):
    dir_path = os.path.join(INPUT_DATA, dir_name)
    # Each sub-directory of INPUT_DATA is one class; skip plain files.
    if not os.path.isdir(dir_path):
        continue
    idx2label[y] = dir_name
    label2idx[dir_name] = y
    for file_name in os.listdir(dir_path):
        file_path = os.path.join(INPUT_DATA, dir_name, file_name)
        if os.path.isdir(file_path):
            continue
        # Only JPEG files are fed to the graph.
        ext = os.path.splitext(file_name)[1].lower()
        if not (ext == '.jpg' or ext == '.jpeg'):
            continue
        with open(file_path, 'br') as f:
            img = f.read()
        # Run the raw JPEG bytes through the graph to get the bottleneck output.
        bottleneck = sess.run(bottleneck_tensor, feed_dict={jpeg_data_tensor: img})
        # NOTE(review): assumes the result carries a leading batch dimension of
        # size 1, i.e. shape (1, BOTTLENECK_TENSOR_SIZE) — confirm.
        bottleneck = np.squeeze(bottleneck, axis=0)
        x_data.append(bottleneck)
        y_data.append(y)
        # Stored with '/' (not os.sep) so the key format is platform-independent.
        relative_path = dir_name + '/' + file_name
        x_path.append(relative_path)
        cnt += 1
        # Progress report every 25 images.
        if cnt % 25 == 0:
            print(f'{cnt} pictures processed.')
    y += 1
# Final count, unless the loop already printed it on an exact multiple of 25.
if cnt % 25 != 0:
    print(f'{cnt} pictures processed.')
x_data = np.float32(x_data)
y_data = np.int32(y_data)
x_path = np.array(x_path)

# Shuffle all three arrays with the same permutation so rows stay aligned.
rand_idx = np.random.permutation(cnt)
x_data = x_data[rand_idx]
y_data = y_data[rand_idx]
x_path = x_path[rand_idx]

# Split into 80% train / 10% validation / 10% test.
# train_test_split shuffles via the global NumPy RNG seeded above, so the
# split is reproducible.
x_train, x_test_val, y_train, y_test_val, x_path_train, x_path_test_val = train_test_split(
    x_data, y_data, x_path, train_size=0.8)
x_test, x_val, y_test, y_val, x_path_test, x_path_val = train_test_split(
    x_test_val, y_test_val, x_path_test_val, train_size=0.5)

# Everything the downstream training script needs: the three feature/label
# splits, their source-image paths, and the label <-> index mappings.
pickle_data = {
    'x_train': x_train,
    'x_path_train': x_path_train,
    'y_train': y_train,
    'x_test': x_test,
    'x_path_test': x_path_test,
    'y_test': y_test,
    'x_val': x_val,
    'x_path_val': x_path_val,
    'y_val': y_val,
    'idx2label': idx2label,
    'label2idx': label2idx
}
print('Saving ....')
# Use a dedicated name instead of clobbering the module-level `path`
# (which holds the INPUT_DATA base directory).
save_path = os.path.join(SAVE_DIR, SAVE_NAME)
os.makedirs(SAVE_DIR, exist_ok=True)
with open(save_path, 'wb') as f:
    pickle.dump(pickle_data, f)
print('Saved.')
sess.close()