import tensorflow as tf 
import numpy as np
from tensorflow import keras
from tensorflow.keras import datasets, layers, optimizers, Sequential, callbacks
from matplotlib import pyplot as plt
from AConvNets_ATR import AConvNets, AConvNets_BN
import pathlib
import datetime

def load_from_path_label2(all_image_paths, all_image_labels):
    '''Load every image eagerly into one numpy array.

    Args:
        all_image_paths: list of paths to grayscale JPEG images,
            assumed to be 64x64 pixels (TODO confirm upstream).
        all_image_labels: labels aligned with all_image_paths;
            returned unchanged.

    Returns:
        (images, labels): images is a float64 array of shape
        (N, 64, 64, 1) with pixel values scaled to [0, 1];
        labels is all_image_labels as passed in.
    '''
    image_count = len(all_image_paths)
    images = np.zeros((image_count, 64, 64, 1))

    for i, image_path in enumerate(all_image_paths):
        image = tf.io.read_file(image_path)
        # channels=1 guarantees a (H, W, 1) grayscale tensor even if a
        # source file was accidentally saved with 3 channels, so the
        # assignment into the (64, 64, 1) slot cannot fail on shape.
        image = tf.image.decode_jpeg(image, channels=1)
        image = tf.cast(image, dtype=tf.float32) / 255.0  # normalize to [0, 1]
        images[i, :, :, :] = image

    return images, all_image_labels


def preprocess(path, label, num_classes=2):
    '''Read one image file and one-hot encode its label.

    Intended for use with tf.data.Dataset.map, which supplies
    (path, label) pairs; num_classes keeps the previous hard-coded
    one-hot depth of 2 as its default, so existing callers are
    unaffected.

    Args:
        path: scalar string tensor, path to a grayscale JPEG.
        label: scalar integer class index.
        num_classes: one-hot encoding depth (default 2).

    Returns:
        (image, label): image is a float32 (H, W, 1) tensor in [0, 1];
        label is an int32 one-hot vector of length num_classes.
    '''
    image = tf.io.read_file(path)
    # channels=1 forces a single-channel tensor regardless of how the
    # JPEG was saved.
    image = tf.image.decode_jpeg(image, channels=1)
    image = tf.cast(image, dtype=tf.float32) / 255.0  # normalize to [0, 1]
    label = tf.one_hot(label, depth=num_classes)
    label = tf.cast(label, dtype=tf.int32)
    return image, label


def show_image(db, row, col, title, label_names,is_preprocess=True):
    '''Plot a row*col grid of sample images, one row-group per class.

    Iterates the dataset in order and places an image into cell j only
    when its (one-hot) label's argmax equals the class bucket j // col,
    so each group of `col` consecutive cells shows the same class.
    Stops once row*col cells are filled.

    Args:
        db: iterable of (image, label) pairs; label is expected to be
            one-hot encoded (tf.argmax is used to recover the index).
        row, col: grid dimensions.
        title: figure suptitle.
        label_names: class-index -> display-name list.
        is_preprocess: when True, images are assumed normalized to
            [0, 1] and are rescaled by 255 for display.

    NOTE(review): plt.imshow is given the image tensor directly; if the
    decoded image has shape (H, W, 1) matplotlib may reject it — confirm
    the actual shape at the call sites.
    '''
    plt.figure()
    plt.suptitle(title, fontsize=14)
    j = 0  # index of the next grid cell to fill
    for i, (image, label) in enumerate(db):
        if j == row * col :
            break
        # accept this sample only if its class matches the current bucket
        if int(tf.argmax(label)) == int(j / col) :
            if is_preprocess == True :
                image = image * 255  # undo [0, 1] normalization for display
            plt.subplot(row, col, j+1)
            plt.title(label_names[int(tf.argmax(label))], fontsize=8)
            plt.imshow(image, cmap='gray')
            plt.axis('off')
            j = j + 1
    plt.tight_layout()

def get_datasets(path, train=True, augmentation=True):
    '''Build a tf.data pipeline plus an eagerly-loaded numpy copy.

    Directory layout is one subdirectory per class; the subdirectory
    name becomes the class label.

    Args:
        path: dataset root directory.
        train: when True, the returned dataset is shuffled (buffer 4000)
            before batching; the evaluation dataset is only batched.
        augmentation: currently UNUSED — kept for interface
            compatibility. TODO: implement augmentation or drop the
            flag at the call sites.

    Returns:
        (db, images, labels, label_names): db is a batched
        tf.data.Dataset of (image, one-hot label); images/labels are
        the same data loaded eagerly via load_from_path_label2;
        label_names maps class index to directory name.
    '''
    data_path = pathlib.Path(path)
    # Collect every image file (one directory level per class).
    all_image_paths = [str(p) for p in data_path.glob('*/*')]
    image_count = len(all_image_paths)
    # Class names are the subdirectory names; enumerate them to ints.
    label_names = [item.name for item in data_path.glob('*/')]
    label_index = dict((name, index) for index, name in enumerate(label_names))
    print(label_index)
    print(label_names)
    print(image_count)
    # Label of each image is derived from its parent directory name.
    all_image_labels = [label_index[pathlib.Path(p).parent.name]
                        for p in all_image_paths]
    for image, label in zip(all_image_paths[:5], all_image_labels[:5]):
        print(image, ' --->  ', label)
    images, labels = load_from_path_label2(all_image_paths, all_image_labels)

    db = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels))
    db = db.map(preprocess)
    if train:
        show_image(db, 5, 5, '(Train) Raw SAR Image', label_names, False)
        db = db.shuffle(4000).batch(64)
    else:
        show_image(db, 5, 5, '(Test) Raw SAR Image', label_names, False)
        # Shuffling an evaluation set has no effect on metrics; batch only.
        db = db.batch(64)

    return db, images, labels, label_names


def main():
    '''Entry point: load train/validation data, build the model, and
    (when the commented block is re-enabled) train, evaluate, and save
    weights, logging to TensorBoard.'''
    print("Hello MSTAR_ATR")
    train_db, train_images, train_labels, train_label_names = get_datasets('E:/大学课程资料/大四上/毕业设计/MSTAR_Clutter/Train', True, True)
    test_db, test_images, test_labels, test_label_names = get_datasets('E:/大学课程资料/大四上/毕业设计/MSTAR_Clutter/Valid', False, False)
    print("train_images_shape : ", train_images.shape)
    #print("train_labels_shape : ", train_labels.shape)
    print(train_label_names)
    print("test_images_shape : ", test_images.shape)
    #print("test_labels_shape : ", test_labels.shape)

    #model = AConvNets()
    model = AConvNets_BN()
    model.summary()

    # Timestamped log dir so successive runs don't overwrite each other.
    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    log_dir = 'logs/AConvNets_ATR_epoch50_' + current_time
    tb_callback = callbacks.TensorBoard(log_dir=log_dir)

    # `learning_rate` replaces the deprecated `lr` keyword (removed in
    # modern Keras releases). from_logits=True because the model's last
    # layer emits raw scores, not softmax probabilities.
    model.compile(optimizer=optimizers.Adam(learning_rate=0.0001),
                  loss=keras.losses.CategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    #model.fit(train_db, epochs=1, validation_data=test_db,validation_freq=1)

    # Training / evaluation / checkpointing workflow, toggled off by the
    # author; re-enable by removing the surrounding quotes.
    '''
    model.fit(train_db, epochs=50, validation_data=test_db,validation_freq=1,callbacks=[tb_callback])
    #model.load_weights('./checkpoint/AConvNets_SOC_epoch50_weights.ckpt') 
    
    model.evaluate(test_db)

    model.save_weights('./checkpoint/AConvNets_ATR_epoch50_weights.ckpt')
    print('save weights')
    '''
# Standard script entry guard: run only when executed directly.
if __name__ == '__main__':
    main()
    