"""Load CIFAR-10 batch data and train an AlexNet model with tf.estimator."""
import pickle
import numpy as np
import tensorflow as tf
from classification.cnn.alexnet_tensor import alexnet_model_fn

# Path to the first CIFAR-10 training batch; used as the script's default input.
file_path = "D:/ChromeDownload/cifar-10-batches-py/data_batch_1"


def unpickle_cifar_dic(file):
    """Load one pickled CIFAR-10 batch file and return its images and labels.

    Args:
        file: path to a CIFAR-10 batch pickle. The batches were written by
            Python 2, so ``pickle.load(..., encoding='bytes')`` yields a dict
            whose keys are ``bytes``.

    Returns:
        Tuple ``(data, labels)``: the values stored under the batch's
        ``"data"`` and ``"labels"`` keys.
    """
    # Context manager guarantees the file handle is closed even if
    # pickle.load raises (the original leaked the handle on error).
    with open(file, 'rb') as fo:
        batch = pickle.load(fo, encoding='bytes')
    # Decode the bytes keys to str so we can index with plain strings.
    # (Renamed from `dict`, which shadowed the builtin.)
    d_decoded = {key.decode('utf8'): value for key, value in batch.items()}
    return d_decoded["data"], d_decoded["labels"]


def unpickle(file, batch_size):
    """Build a tf.data.Dataset over the five CIFAR-10 training batches.

    Args:
        file: unused legacy argument kept for caller compatibility.
            NOTE(review): callers pass a batch path here, but the function
            reads a hard-coded directory instead — confirm intent.
        batch_size: number of examples per emitted batch.

    Returns:
        A one-epoch tf.data.Dataset of (image, label) batches; images are
        float32 in NHWC layout, labels int32.
    """
    train_files = ["data_batch_" + str(i) for i in range(1, 6)]
    data_dir = "D:/code/pythonCode/PracticeTensorflow/data"

    images = []
    labels = []
    # Loop variable renamed from `file`, which shadowed the parameter.
    for batch_name in train_files:
        filename = data_dir + "/cifar-10-batches-py/" + batch_name
        images_tmp, labels_tmp = unpickle_cifar_dic(filename)
        images.append(images_tmp)
        labels.append(labels_tmp)

    # Rows are stored channel-first (3x32x32) flattened; -1 replaces the
    # hard-coded 50000 so a different number of batch files still works.
    train_data = np.asarray(images, dtype=np.float32).reshape((-1, 3, 32, 32))
    # Swap axes 1 and 3 to get channels-last, matching the original pipeline.
    train_data = np.swapaxes(train_data, 1, 3)
    train_labels = np.asarray(labels, dtype=np.int32).reshape(-1)

    dataset = tf.data.Dataset.from_tensor_slices((train_data, train_labels))
    # Batch first, then prefetch: prefetch counts in units of its input
    # elements, so prefetching after batch() overlaps a whole training
    # step with input preparation (the original prefetched raw examples).
    dataset = dataset.repeat(1).batch(batch_size)
    dataset = dataset.prefetch(1)

    return dataset


def train(file):
    """Train the AlexNet estimator on CIFAR-10 for 1000 steps.

    Args:
        file: path forwarded to the input pipeline (see ``unpickle``).
    """
    model_root = "D:/code/pythonCode/PracticeTensorflow/model/alexnet_tensorflow"
    config = tf.estimator.RunConfig(save_checkpoints_steps=500)

    classifier = tf.estimator.Estimator(
        model_fn=alexnet_model_fn, model_dir=model_root, config=config)

    # Log the graph tensor named "loss" on every step.
    tensors_to_log = {"loss": "loss"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=1)

    # Train the model
    batch_size = 20

    # Bug fix: the original lambda ignored the `file` parameter and read the
    # module-level `file_path`, and passed a duplicated literal 20 instead
    # of the `batch_size` local defined just above.
    classifier.train(
        input_fn=lambda: unpickle(file, batch_size),
        steps=1000,
        hooks=[logging_hook])

    return


if __name__ == "__main__":
    # Script entry point: kick off training with the default batch path.
    train(file_path)
