from __future__ import absolute_import,division,print_function,unicode_literals
import tensorflow as tf
import pathlib #读图片路径
import random
from sklearn.model_selection import train_test_split
import IPython.display as display #显示图片
import os
import matplotlib.pyplot as plt

# AUTOTUNE lets tf.data choose buffer sizes / parallelism for background pipeline work.
AUTOTUNE = tf.data.experimental.AUTOTUNE


# Target decode size for every image fed to the network.
# NOTE(review): "HIGHT" and "IMAME" are typos for HEIGHT / IMAGE; kept as-is
# because later parts of this file reference these exact names.
IMAGE_WIDTH = 192
IMAGE_HIGHT = 192
IMAME_CHANNEL = 3

# Dataset root; expected layout is one subdirectory per class:
#   ../datasets/fruits/<class_name>/<image files>
data_root_orig = '../datasets/fruits'
data_root = pathlib.Path(data_root_orig)
print(data_root)

# Collect every image file (one level below each class directory).
all_images_paths = list(data_root.glob('*/*'))

# Convert pathlib.Path objects to plain string paths.
all_images_paths = [str(path) for path in all_images_paths]
# Shuffle the file order so classes are mixed before splitting.
random.shuffle(all_images_paths)

# Class names are the subdirectory names, sorted for a stable index ordering.
# (The original comment mentioned flower labels, but the path points at a
# fruits dataset -- the code depends only on the directory layout, not the domain.)
label_names = sorted(item.name for item in data_root.glob('*/') if item.is_dir())

# Map each class name to an integer index, e.g. {'apple': 0, 'banana': 1, ...}.
label_to_index = dict( (name,index) for index,name in enumerate(label_names)) # name -> integer id

# One integer label per image, derived from the file's parent directory name,
# in the same order as all_images_paths.
all_images_labels = [label_to_index[pathlib.Path(path).parent.name] for path in all_images_paths]

# At this point:
#   all_images_paths:  list of image path strings
#   all_images_labels: matching list of integer labels
# print(all_images_paths[:5])
# print(all_images_labels[:5])

# Split into train/test sets (80/20). train_test_split shuffles again by
# default, which is harmless after the manual shuffle above.
trainx,testx,trainy,testy =  train_test_split(all_images_paths, all_images_labels, test_size=0.2)

# 图片预处理
def preprocess_img(image):
    """Decode a raw encoded-image byte string into a normalized image tensor.

    Returns a float32 tensor of shape (IMAGE_WIDTH, IMAGE_HIGHT, IMAME_CHANNEL)
    with pixel values scaled into [0, 1].
    """
    decoded = tf.image.decode_jpeg(image, channels=IMAME_CHANNEL)  # bytes -> uint8 pixel tensor
    resized = tf.image.resize(decoded, [IMAGE_WIDTH, IMAGE_HIGHT])  # also casts to float32
    return resized / 255.0  # scale pixels into [0, 1]


def load_and_preprocess_image(path):
    """Read one image file from disk and run the preprocessing pipeline on it.

    Keeping decode/resize in a separate function (preprocess_img) makes it easy
    to slot extra augmentation steps in between.
    """
    raw_bytes = tf.io.read_file(path)  # raw encoded file contents, not pixels yet
    return preprocess_img(raw_bytes)


# Build tf.data pipelines: path strings -> decoded/normalized images, plus labels.
trainx_path_ds = tf.data.Dataset.from_tensor_slices(trainx)  # dataset of path strings
trainx_image_ds = trainx_path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)  # decode images in parallel

trainy_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(trainy, tf.int64))

testx_path_ds = tf.data.Dataset.from_tensor_slices(testx)  # dataset of path strings
testx_image_ds = testx_path_ds.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)  # decode images in parallel

testy_label_ds = tf.data.Dataset.from_tensor_slices(tf.cast(testy, tf.int64))

# Pair each image with its label as (image, label) tuples.
train_image_label_ds = tf.data.Dataset.zip((trainx_image_ds, trainy_label_ds))
test_image_label_ds = tf.data.Dataset.zip((testx_image_ds, testy_label_ds))

Batch_size = 32  # images per batch

# Training pipeline: full-dataset shuffle -> infinite repeat -> batch -> prefetch.
train_ds = train_image_label_ds.shuffle(buffer_size=len(trainx))  # buffer covers the whole set: full shuffle
train_ds = train_ds.repeat()  # repeat forever; fit() bounds each epoch via steps_per_epoch
train_ds = train_ds.batch(Batch_size)
train_ds = train_ds.prefetch(buffer_size=AUTOTUNE)  # overlap preprocessing with training

# NOTE(review): shuffle/repeat are unnecessary for evaluation (order does not
# affect metrics); this only works because evaluate() is called with steps=.
test_ds = test_image_label_ds.shuffle(buffer_size=len(testx))
test_ds = test_ds.repeat()
test_ds = test_ds.batch(Batch_size)
test_ds = test_ds.prefetch(buffer_size=AUTOTUNE)


## Model: frozen MobileNetV2 feature extractor + small dense classifier head.
# NOTE(review): Keras MobileNetV2 is documented to expect inputs scaled to
# [-1, 1] (via mobilenet_v2.preprocess_input), while this pipeline feeds
# [0, 1] images. The trainable dense head can adapt, but accuracy may improve
# with the documented preprocessing -- confirm before changing.
mobile_net = tf.keras.applications.MobileNetV2(input_shape=
(IMAGE_WIDTH,IMAGE_HIGHT,IMAME_CHANNEL),include_top=False)
mobile_net.trainable=False # freeze the CNN feature-extractor weights


model = tf.keras.Sequential([
    mobile_net,
    tf.keras.layers.GlobalAveragePooling2D(), # collapse spatial dims to one feature vector
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(1024, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(len(label_names),activation='softmax') # one output unit per class
])


# sparse_categorical_crossentropy matches the integer (non-one-hot) labels.
model.compile(optimizer=tf.keras.optimizers.RMSprop(),
              loss='sparse_categorical_crossentropy',
              metrics=["accuracy"])

model.summary()


# train_ds repeats forever, so steps_per_epoch is what bounds one epoch.
model.fit(train_ds, epochs=5, steps_per_epoch=len(trainx)//Batch_size)

model.evaluate(test_ds,steps=len(testx)//Batch_size)

# NOTE(review): assumes ../model/ already exists -- save() will not create it.
model.save("../model/cnn.h5")

del model

# Reload the saved model and re-evaluate to verify the save/load round trip.
model = tf.keras.models.load_model('../model/cnn.h5')
model.evaluate(test_ds,steps=len(testx)//Batch_size)

