import tensorflow as tf 
import numpy as np
from tensorflow import keras
from tensorflow.keras import datasets, layers, optimizers, Sequential, callbacks
from matplotlib import pyplot as plt
from AConvNets_pre import AConvNets, AConvNets_BN
import pathlib
import cv2
def load_from_path_label(path, label):
    '''Read one image file and convert its integer label to a one-hot vector.

    path:  path to a JPEG image file.
    label: integer class index in [0, 10).
    Returns (decoded_image, one_hot_label) where the label is an int32
    one-hot vector of depth 10.
    '''
    raw = tf.io.read_file(path)
    decoded = tf.image.decode_jpeg(raw)
    one_hot = tf.cast(tf.one_hot(label, depth=10), dtype=tf.int32)
    return decoded, one_hot

def preprocess(image, label):
    '''Gamma-correct, center-crop to 88x88, and scale pixels to [0, 1].

    image: decoded image tensor.
    label: passed through unchanged.
    Returns the preprocessed (image, label) pair.
    '''
    # Gamma correction (gamma < 1 brightens darker regions).
    gamma_corrected = tf.image.adjust_gamma(image, 0.6)
    # Center crop (or pad) to the network's 88 x 88 input size.
    cropped = tf.image.resize_with_crop_or_pad(gamma_corrected, 88, 88)
    # Normalize pixel values into the [0, 1] range.
    normalized = tf.cast(cropped, dtype=tf.float32) / 255.0
    return normalized, label

def get_datasets(path):
    '''Build a batched tf.data pipeline from a directory of class sub-folders.

    path: root directory whose immediate sub-directories are the class names,
          each containing that class's images.
    Returns (dataset, label_names): `dataset` yields shuffled batches of 128
    (image, one_hot_label) pairs; `label_names` maps label index -> class name.
    '''
    data_root = pathlib.Path(path)
    # Every image file one level below the class directories.
    # (Renamed loop variables so the `path` parameter is no longer shadowed.)
    all_image_paths = [str(p) for p in data_root.glob('*/*')]
    image_count = len(all_image_paths)
    # Class names come from the sub-directory names.
    # NOTE(review): glob order is filesystem-dependent, so label indices may
    # differ between machines; if they must match a trained checkpoint,
    # consider sorting the names — confirm against the training script.
    label_names = [item.name for item in data_root.glob('*/')]
    # Map each class name to a numeric label index.
    label_index = dict((name, index) for index, name in enumerate(label_names))
    print(label_index)
    print(label_names)
    print(image_count)
    # Numeric label of every image = index of its parent directory's name.
    all_image_labels = [label_index[pathlib.Path(p).parent.name]
                        for p in all_image_paths]
    # Show a few (path, label) pairs as a sanity check.
    for image_path, image_label in zip(all_image_paths[:5], all_image_labels[:5]):
        print(image_path, ' --->  ', image_label)
    # Build the dataset: load -> preprocess -> shuffle -> batch.
    db = tf.data.Dataset.from_tensor_slices((all_image_paths, all_image_labels))
    db = db.map(load_from_path_label)
    db = db.map(preprocess)
    db = db.shuffle(1000).batch(128)
    return db, label_names

def load_from_path(path, height=88, width=88, channels=1):
    '''Load every image in *path* into one float array scaled to [0, 1].

    path: sequence of image file paths.
    height, width, channels: expected decoded image shape. Defaults (88, 88, 1)
        preserve the original hard-coded behavior; they are now parameters so
        the loader also works for other input sizes.
    Returns a numpy array of shape (len(path), height, width, channels).
    NOTE(review): unlike preprocess(), no gamma correction or center crop is
    applied here — confirm the test chips are already 88x88 and that the model
    is meant to see un-gamma-corrected inputs.
    '''
    image_count = len(path)
    images = np.zeros((image_count, height, width, channels))
    for i, image_path in enumerate(path):
        raw = tf.io.read_file(image_path)
        image = tf.image.decode_jpeg(raw)
        # Normalize pixel values to [0, 1], matching the training pipeline.
        images[i, :, :, :] = tf.cast(image, dtype=tf.float32) / 255.0
    return images


def get_test_image(path):
    '''Collect all files directly under *path* and load them as an image array.

    path: directory containing the cropped test images (no sub-directories).
    Returns (images, image_count) where `images` is the array produced by
    load_from_path().
    '''
    data_root = pathlib.Path(path)
    # Every file directly inside the directory is treated as a test image.
    all_image_paths = [str(p) for p in data_root.glob('*')]
    for image_path in all_image_paths:
        print(image_path, ' ------target')
    images = load_from_path(all_image_paths)
    return images, len(all_image_paths)



def main():
    '''Classify cropped SAR target chips and annotate them on the scene image.'''
    # The test dataset supplies the label-name ordering used to decode predictions.
    test_db, test_label_names = get_datasets('E:\\大学课程资料\\大四上\\毕业设计\\code\\SARimage\\TEST')

    model = AConvNets_BN()
    model.summary()

    # `learning_rate` replaces the deprecated `lr` keyword of Adam.
    model.compile(optimizer=optimizers.Adam(learning_rate=0.0001),
                  loss=keras.losses.CategoricalCrossentropy(),
                  metrics=['accuracy'])
    model.load_weights('./checkpoint/BN_AConvNets_SOC_epoch50_weights2.ckpt')
    #model.evaluate(test_db)

    # Load the cropped target chips and predict a class name for each.
    images, image_count = get_test_image('E:/大学课程资料/大四上/毕业设计/MSTAR_Clutter/crop_target')
    print(images.shape)
    pre_labels = model.predict(images)
    pre_test_labels = tf.argmax(pre_labels, axis=1)
    pre_name = [test_label_names[i] for i in pre_test_labels]
    print(pre_name)

    # Each row of center_array is one target center; draw a dot, a box and the
    # predicted class name at that position on the full scene image.
    center_array = np.loadtxt('E:/大学课程资料/大四上/毕业设计/MSTAR_Clutter/center_array.txt')
    print(center_array.shape)
    img2 = cv2.imread('C:/Users/wangdi/Desktop/crop4.jpeg')
    for i in range(center_array.shape[0]):
        # NOTE(review): OpenCV point arguments are (x, y) = (column, row), but
        # here the value named `y` is passed first — this only draws correctly
        # if center_array stores (row, col) per row. Confirm against the file
        # that produced center_array.txt.
        y = int(center_array[i][1])
        x = int(center_array[i][0])
        # +32 shifts from chip coordinates to scene coordinates; 44 is half the
        # 88-pixel chip size, so the rectangle outlines the full chip.
        cv2.circle(img2, (y + 32, x + 32), 1, (0, 0, 255), 8)
        cv2.rectangle(img2, (y + 32 - 44, x + 32 - 44), (y + 32 + 44, x + 32 + 44), (0, 255, 255), 1)
        cv2.putText(img2, pre_name[i], (y + 32 - 44, x + 32 - 44), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 255), 2)
    cv2.imshow(' ', img2)
    cv2.imwrite('C:/Users/wangdi/Desktop/target_ATR.jpg', img2)
    cv2.waitKey(0)

if __name__ == '__main__':
    main()