import tensorflow as tf
import numpy as np
from PIL import Image
import os

from tensorflow import optimizers

# train_path = 'C:\Temp\TF2.4 training\splited dataset\\train\\'
# train_txt = 'C:\Temp\TF2.4 training\splited dataset\labels-train.txt'
# x_train_savepath = 'C:\Temp\TF2.4 training\splited dataset\\x_train.npy'
# y_train_savepath = 'C:\Temp\TF2.4 training\splited dataset\\y_train.npy'
#
# test_path = 'C:\Temp\TF2.4 training\splited dataset\\test\\'
# test_txt = 'C:\Temp\TF2.4 training\splited dataset\labels-test.txt'
# x_test_savepath = 'C:\Temp\TF2.4 training\splited dataset\\x_test.npy'
# y_test_savepath = 'C:\Temp\TF2.4 training\splited dataset\\y_test.npy'

# Dataset locations on the local machine.
# NOTE(review): these Windows paths rely on Python leaving unrecognized
# escape sequences such as '\T' intact in normal string literals; raw
# strings (r'...') would be safer, but a raw string cannot end with a
# backslash, and the cache filenames below must keep matching any .npy
# files already on disk -- confirm before changing.
train_path = 'C:\Temp\TF2.4 training\\test datasets\\train\\'
train_txt = 'C:\Temp\TF2.4 training\\test datasets\labels-train.txt'
# Cached preprocessed arrays, written/read by the load-or-generate logic below.
x_train_savepath = 'C:\Temp\TF2.4 training\\test datasets\\x_train.npy'
y_train_savepath = 'C:\Temp\TF2.4 training\\test datasets\\y_train.npy'

test_path = 'C:\Temp\TF2.4 training\\test datasets\\test\\'
test_txt = 'C:\Temp\TF2.4 training\\test datasets\labels-test.txt'
x_test_savepath = 'C:\Temp\TF2.4 training\\test datasets\\x_test.npy'
y_test_savepath = 'C:\Temp\TF2.4 training\\test datasets\\y_test.npy'

def generateds(path, txt):
    """Build an image dataset from a directory of images and a label file.

    Each line of *txt* is expected to hold an image filename and its integer
    label separated by whitespace, e.g. ``img_0001.png 3``.

    Args:
        path: Directory prefix prepended to each filename from the label file.
        txt: Path to the whitespace-separated ``filename label`` text file.

    Returns:
        Tuple ``(x, y_)``: ``x`` is a float array of shape (N, 24, 32) of
        grayscale images normalized to [0, 1]; ``y_`` is an int64 label array.
    """
    with open(txt, 'r') as f:           # context manager replaces open/close pair
        contents = f.readlines()
    x, y_ = [], []
    for content in contents:
        value = content.split()
        img_path = path + value[0]      # full path to this sample's image
        img = Image.open(img_path)
        # BUG FIX: the original assigned the resized image to an unused
        # variable ('image') and then converted the *unresized* img, so
        # samples kept their original sizes.  Resize in place so every
        # sample is a uniform 32x24 (width x height) image.
        img = img.resize((32, 24))
        img = np.array(img.convert('L'))  # 'L' mode: weighted-average RGB -> grayscale
        img = img / 255.                  # normalize pixel values to [0, 1]
        x.append(img)
        y_.append(value[1])
        print('loading :' + content)

    x = np.array(x)
    y_ = np.array(y_)
    y_ = y_.astype(np.int64)            # labels as integer class ids
    print(x)
    print(y_)
    return x, y_

# x_train, y_train = generateds(train_path, train_txt)
# x_test, y_test = generateds(test_path, test_txt)

# Reuse the cached .npy arrays when all four exist; otherwise rebuild the
# datasets from the raw images and cache the flattened arrays for next run.
if os.path.exists(x_train_savepath) and os.path.exists(y_train_savepath) and os.path.exists(
        x_test_savepath) and os.path.exists(y_test_savepath):
    print('----------------Load Datasets----------------')
    x_train_save = np.load(x_train_savepath)
    # BUG FIX: labels were loaded into y_train_save/y_test_save only, leaving
    # y_train/y_test undefined in this branch and crashing model.fit below
    # with a NameError.  Load them under the names model.fit actually uses.
    y_train = np.load(y_train_savepath)
    x_test_save = np.load(x_test_savepath)
    y_test = np.load(y_test_savepath)
    # PIL's resize((32, 24)) takes (width, height), so each grayscale image
    # is a 24x32 numpy array.  BUG FIX: the original reshaped to (34, 24)
    # (typo) and rebuilt x_test from x_train_save instead of x_test_save.
    x_train = np.reshape(x_train_save, (len(x_train_save), 24, 32))
    x_test = np.reshape(x_test_save, (len(x_test_save), 24, 32))
else:
    print('----------------Generate Datasets----------------')
    x_train, y_train = generateds(train_path, train_txt)
    x_test, y_test = generateds(test_path, test_txt)

    print('----------------Save Datasets----------------')
    # Flatten to 2-D (N, pixels) before saving.
    x_train_save = np.reshape(x_train, (len(x_train), -1))
    x_test_save = np.reshape(x_test, (len(x_test), -1))  # BUG FIX: was flattening x_train
    np.save(x_train_savepath, x_train_save)
    np.save(y_train_savepath, y_train)
    np.save(x_test_savepath, x_test_save)
    np.save(y_test_savepath, y_test)

# Simple MLP classifier: flatten -> 128-unit ReLU hidden layer -> 10-way
# softmax output.  Built incrementally with .add() instead of a layer list.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))

# from_logits=False because the final layer already applies softmax; labels
# are integer class ids, hence the sparse categorical loss and metric.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])

model.fit(x_train, y_train,
          batch_size=32,
          epochs=5,
          validation_data=(x_test, y_test),
          validation_freq=1)

model.summary()