import tensorflow as tf
import numpy as np
from tf2st.p3.p3_ini_te.cnmodl import CnnModel
import os
import pickle
import time
from tf2st.p3.p3_ini_te.ty1 import get_config
import sys
import random

# Global configuration, loaded once at import time from config.ini.
# (The former `gConfig = {}` initializer was dead code: it was overwritten
# unconditionally on the very next line.)
gConfig = get_config(config_file='config.ini')


def unpickle_patch(file):
    """Deserialize one pickled batch file and return its contents.

    Args:
        file: path to a pickled batch file (e.g. a CIFAR 'data_batch_*' file).

    Returns:
        The unpickled object; for CIFAR-style batches this is a dict with
        bytes keys such as b'data' and b'labels'.
    """
    # 'with' guarantees the handle is closed even on error — the original
    # opened the file and never closed it (resource leak).
    with open(file, 'rb') as patch_bin_file:
        # encoding='bytes' keeps keys/values as bytes; needed for batches
        # that were pickled under Python 2 (as CIFAR's are — see callers
        # indexing with b'data' / b'labels').
        return pickle.load(patch_bin_file, encoding='bytes')


def read_data(dataset_path, im_dim, num_channels, num_files, images_per_file):
    """Load every 'data_batch_*' pickle file under dataset_path into arrays.

    Args:
        dataset_path: directory containing the batch files.
        im_dim: image height/width in pixels.
        num_channels: channels per image.
        num_files: number of batch files expected (sizes the output arrays).
        images_per_file: images stored in each batch file.

    Returns:
        (dataset_array, dataset_labels): float array of shape
        (num_files * images_per_file, im_dim, im_dim, num_channels) and an
        int8 label vector of the same leading length.
    """
    files_name = os.listdir(dataset_path)

    dataset_array = np.zeros(shape=(num_files * images_per_file, im_dim, im_dim, num_channels))
    dataset_labels = np.zeros(shape=(num_files * images_per_file), dtype=np.int8)

    index = 0

    for file_name in files_name:
        # startswith replaces the fragile slice comparison
        # file_name[0:len(file_name)-1] == 'data_batch_', which only matched
        # names exactly one char longer than the prefix.
        if file_name.startswith('data_batch_'):
            print('using ' + str(file_name) + ' ...')
            # os.path.join removes the original requirement that
            # dataset_path end with a path separator.
            data_dict = unpickle_patch(os.path.join(dataset_path, file_name))
            images_data = data_dict[b'data']
            # NOTE(review): this reshape assumes row data is laid out
            # (H, W, C); raw CIFAR batches are channel-first — confirm the
            # batches were preprocessed accordingly.
            images_data_reshaped = np.reshape(images_data, newshape=(len(images_data), im_dim, im_dim, num_channels))
            dataset_array[index * images_per_file:(index + 1) * images_per_file, :, :, :] = images_data_reshaped
            dataset_labels[index * images_per_file:(index + 1) * images_per_file] = data_dict[b'labels']
            index += 1

    # BUG FIX: the original `return` sat inside the loop's if-branch, so the
    # function returned after loading only the FIRST batch file, leaving the
    # remaining (num_files - 1) * images_per_file slots zero-filled.
    return dataset_array, dataset_labels


def create_model():
    """Return a Keras model, preferring: explicit pretrained model, then the
    last checkpoint in the working directory, then a freshly built CnnModel.
    """
    # An explicitly configured pretrained model wins over everything else.
    if 'pretrained_model' in gConfig:
        return tf.keras.models.load_model(gConfig['pretrained_model'])

    work_dir = gConfig['working_directory']
    checkpoints = tf.io.gfile.listdir(work_dir)

    if not checkpoints:
        # No saved state anywhere: build an untrained network from config.
        return CnnModel(gConfig['rate']).create_model()

    # Resume from the last directory entry (listdir order — presumably
    # lexicographic, which makes this the newest checkpoint; verify).
    model_file = os.path.join(work_dir, checkpoints[-1])
    print('reading model parameters from %s' % model_file)
    return tf.keras.models.load_model(model_file)


# Module-level side effect: the full training set is loaded at import time.
# These globals are consumed directly by train() below.
dataset_array, dataset_labels = read_data(dataset_path=gConfig['dataset_path'], im_dim=gConfig['im_dim'],
                                          num_channels=gConfig['num_channels'], num_files=gConfig['num_files'],
                                          images_per_file=gConfig['images_per_file'])

# Scale pixel values from [0, 255] into [0, 1] for training.
dataset_array = dataset_array.astype('float32') / 255
# One-hot encode labels over 10 classes (CIFAR-10-sized label space).
dataset_labels = tf.keras.utils.to_categorical(dataset_labels, 10)


def train(epochs=100):
    """Train the CNN on the module-level dataset and save it to disk.

    Args:
        epochs: number of training passes over the data. Defaults to 100,
            the value previously hard-coded, so existing callers are
            unaffected.

    Returns:
        The Keras History object from model.fit (previously computed but
        discarded).
    """
    model = create_model()
    # Hold out 20% of the data for per-epoch validation.
    history = model.fit(dataset_array, dataset_labels, verbose=1, epochs=epochs, validation_split=0.2)

    # Persist the full model (architecture + weights) where create_model()
    # and predict() look for checkpoints.
    checkpoint_path = os.path.join(gConfig['working_directory'], 'cnn_model.h5')
    model.save(checkpoint_path)
    return history


def predict(data):
    """Classify `data` with the most recently saved model.

    Implements the behavior sketched in the original commented-out stub:
    load the saved checkpoint, run inference, and return the index of the
    highest-probability class for the first sample.

    Args:
        data: batch of preprocessed images (scaled to [0, 1], shaped like
            the training data — TODO confirm against the caller).

    Returns:
        int class index (0-9) for the first element of the batch.
    """
    checkpoint_path = os.path.join(gConfig['working_directory'], 'cnn_model.h5')
    model = tf.keras.models.load_model(checkpoint_path)
    prediction = model.predict(data)
    index = tf.math.argmax(prediction[0]).numpy()
    return index


if __name__ == '__main__':
    # Script entry point: train the CNN and save the checkpoint.
    # (Configuration was already loaded at module import time.)
    train()
