import os
import pandas as pd
import numpy as np
import shutil
import cv2
from PIL import Image, ImageFilter, ImageEnhance
from sklearn.model_selection import train_test_split
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Dense, Input, Conv2D, MaxPool2D, Flatten, AveragePooling2D, add, concatenate
import matplotlib.pyplot as plt
from keras import models, layers
from sklearn.preprocessing import LabelEncoder

# Paths and labels for the raw dataset.
origin_data_path = os.path.join(os.getcwd(), 'origin_data')
img = os.listdir(origin_data_path)
# Sort numerically by the number before the extension ("10.jpg" after "2.jpg").
img.sort(key = lambda x: int(x.split('.')[0]))
label = pd.read_csv(r'./rock_label.csv', encoding = 'gbk')
leibie = np.unique(label.iloc[:, 1])  # unique class names (2nd CSV column)
new_data_path = os.path.join(os.getcwd(), 'new_data')

# Find the smallest image width/height across the whole dataset.
image = cv2.imread('./origin_data/1.jpg')
# BUG FIX: cv2 returns (height, width, channels); the original unpacked this
# as (w, h, d), silently swapping width and height before comparing against
# PIL's (width, height) sizes below.
min_h, min_w, min_d = image.shape
for i in img:
    image_path = os.path.join(origin_data_path, i)
    if image_path.endswith('.jpg') or image_path.endswith('.png'):
        # Open the image and track the minimum width/height seen so far;
        # the context manager guarantees the file handle is closed.
        with Image.open(image_path) as pil_image:
            w, h = pil_image.size  # PIL size is (width, height)
            if w < min_w:
                min_w = w
            if h < min_h:
                min_h = h
print('最小尺寸为（{}，{}）'.format(min_w, min_h))


# Image augmentation helpers
def adjust_brightness(image, brightness_factor):
    """Return a copy of *image* with its brightness scaled by *brightness_factor*."""
    return ImageEnhance.Brightness(image).enhance(brightness_factor)


def enhance_color(image, color_factor):
    """Return a copy of *image* with its color saturation scaled by *color_factor*."""
    return ImageEnhance.Color(image).enhance(color_factor)


# Create one folder per class and split/augment the images.
# Only runs on the first execution (skipped when ./new_data already exists).
if not os.path.exists('./new_data'):
    os.makedirs('./new_data')
    for i in range(len(leibie)):
        file_name = './new_data' + '/' + str(leibie[i])
        os.makedirs(file_name)
    # Group the CSV rows by class and copy each labelled image into its class folder.
    label_dict = label.groupby(by = '样本类别').groups
    for i in label_dict:
        for j in label_dict[i]:
            label_idex = label.iloc[j, 0]
            shutil.copy(origin_data_path + '\\' + str(label_idex) + '.jpg',
                        new_data_path + '\\' + str(i))
        # BUG FIX: the original called len() on the *path string* itself,
        # reporting the number of characters in the path rather than the
        # number of image files in the class folder.
        print('{}图片数：{}'.format(i, len(os.listdir(new_data_path + '\\' + str(i)))))

    # 3x3 grid split
    cut_method_w = 3
    cut_method_h = 3
    # Cell size after center-cropping every image to the common minimum size.
    cut_w = int(min_w // cut_method_w)
    cut_h = int(min_h // cut_method_h)
    per = 1  # file-name prefix: index of the source image

    def _save_variant(img_obj, folder, per, pos, variant):
        # File name encodes source image (per), grid cell (pos) and augmentation id (variant).
        img_obj.save(folder + '\\' +
                     str('%03d' % per) + str('%03d' % pos) + str('%03d' % variant) + '.jpg')

    for i in leibie:
        leibie_path = os.path.join(new_data_path, i)
        for j in os.listdir(leibie_path):
            new_image_path = os.path.join(leibie_path, j)
            new_img = Image.open(new_image_path)
            new_img_size = new_img.size
            # Center-crop the image to (min_w, min_h).
            b1 = int(round((new_img_size[0] - min_w) / 2))
            b2 = b1 + min_w
            a1 = int(round((new_img_size[1] - min_h) / 2))
            a2 = a1 + min_h
            incrse = new_img.crop((b1, a1, b2, a2))
            pos = 1  # file-name middle part: grid-cell index
            for k in range(cut_method_h):
                for h in range(cut_method_w):
                    box = (cut_w * h, cut_h * k, cut_w * (h + 1), cut_h * (k + 1))
                    mesh_cutting = incrse.crop(box)
                    # Original cell plus five augmented variants.
                    _save_variant(mesh_cutting, leibie_path, per, pos, 1)
                    # 180-degree rotation
                    _save_variant(mesh_cutting.rotate(180), leibie_path, per, pos, 2)
                    # Brightness adjustment
                    _save_variant(adjust_brightness(mesh_cutting, 1.1), leibie_path, per, pos, 3)
                    # Color enhancement
                    _save_variant(enhance_color(mesh_cutting, 1.1), leibie_path, per, pos, 4)
                    # Sharpening
                    _save_variant(mesh_cutting.filter(ImageFilter.UnsharpMask(1.1)),
                                  leibie_path, per, pos, 5)
                    # Blurring
                    _save_variant(mesh_cutting.filter(ImageFilter.GaussianBlur(1.1)),
                                  leibie_path, per, pos, 6)

                    pos += 1

            per += 1
            new_img.close()  # release the file handle before deleting
            os.remove(new_image_path)  # the source image is replaced by its augmented cells
        print('{}文件夹切割完成'.format(i))

# Data preparation: load every augmented image at a uniform 128x128 size
# and build the feature / label arrays.
samples = []
targets = []
for cls in leibie:
    cls_dir = os.path.join(new_data_path, str(cls))
    for fname in os.listdir(cls_dir):
        # Context manager closes the file handle after conversion.
        with Image.open(os.path.join(cls_dir, str(fname))) as img_file:
            samples.append(np.array(img_file.resize((128, 128))))
        targets.append(cls)
X = np.array(samples)
Y = np.array(targets)

# Encode the class-name labels as integers 0..n_classes-1.
label_encoder = LabelEncoder()
Y = label_encoder.fit_transform(Y)

train_image, test_image, train_label, test_label = train_test_split(X, Y, test_size = 0.2, random_state = 123,
                                                                    stratify = Y)
# No reshape needed: the arrays are already shaped (-1, 128, 128, 3)
# because every image was resized above.

# Scale pixel values into [0, 1].
train_image_normal, test_image_normal = train_image / 255.0, test_image / 255.0


# With sparse_categorical_crossentropy the integer labels are consumed
# directly, so no one-hot encoding is required.


# Build the model
def VGG16(input_shape = (224, 224, 3), num_classes = 7):
    """Build the classic VGG-16 architecture.

    Generalized from the original hard-coded version: the input shape and the
    number of output classes are now parameters whose defaults reproduce the
    original behavior (224x224 RGB input, 7 softmax classes), so existing
    callers are unaffected.

    Returns a Keras ``Model`` named 'vgg16' with the standard VGG-16 layer
    names (conv1..conv13, pool1..pool5, fc1, fc2, predict).
    """
    img_input = Input(shape = input_shape, name = 'input_img')

    x = img_input
    conv_idx = 1
    # (filters, number of conv layers) for each of the five VGG blocks;
    # every block ends with a 2x2 max-pool.
    for block_num, (filters, n_convs) in enumerate(
            [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)], start = 1):
        for _ in range(n_convs):
            x = Conv2D(filters = filters, kernel_size = (3, 3), strides = (1, 1),
                       activation = 'relu', padding = 'same',
                       name = 'conv{}'.format(conv_idx))(x)
            conv_idx += 1
        x = MaxPool2D(pool_size = (2, 2), strides = (2, 2),
                      name = 'pool{}'.format(block_num))(x)

    # Flatten to a 1-D vector, then the two fully-connected layers and
    # the softmax classification head.
    x = Flatten(name = 'flatten')(x)
    x = Dense(4096, activation = 'relu', name = 'fc1')(x)
    x = Dense(4096, activation = 'relu', name = 'fc2')(x)
    precondition = Dense(num_classes, activation = 'softmax', name = 'predict')(x)
    model = Model(img_input, precondition, name = 'vgg16')

    return model


# model_vgg16=VGG16()
# print(model_vgg16.summary())
# model=model_vgg16
# model.compile(loss='categorical_crossentropy',optimizer='sgd',metrics=['accuracy'])
# # Adjust the learning rate of the current model
# from tensorflow.python.keras import backend
# backend.set_value(model.optimizer.lr, 0.001)

# Model construction: a small 3-conv CNN for 128x128 RGB inputs.
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation = 'relu', input_shape = (128, 128, 3)),
    layers.MaxPooling2D((2, 2)),  # pooling layer
    layers.Conv2D(64, (3, 3), activation = 'relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation = 'relu'),
    layers.Flatten(),  # flatten feature maps to a vector
    layers.Dense(64, activation = 'relu'),  # fully-connected layer
    layers.Dense(7, activation = 'softmax'),  # one output per class
])
model.summary()  # print the architecture
# sparse_categorical_crossentropy works directly on integer labels.
model.compile(optimizer = 'adam',
              loss = 'sparse_categorical_crossentropy',
              metrics = ['accuracy'])

# Training
batch_size = 32
epochs = 1
history = model.fit(train_image_normal, train_label, batch_size = batch_size,
                    epochs = epochs, shuffle = True,
                    validation_data = (test_image_normal, test_label), use_multiprocessing = True)


# 1931s 9s/step - loss: 1.3603 - accuracy: 0.4699 - val_loss: 1.0733 - val_accuracy: 0.5835

## Plot the metrics recorded during training
def show_train_history(train_history, train, val):
    """Plot a training-set metric (*train*) against its validation counterpart (*val*)."""
    for key, marker in ((train, '-.o'), (val, '-.*')):
        plt.plot(train_history.epoch, train_history.history[key], marker)
    plt.title('Train History')
    plt.ylabel(train)
    plt.legend(['train', 'validation'], loc = 'upper left')
    plt.show()


# Visualize accuracy and loss curves for train vs. validation.
for metric, val_metric in (('accuracy', 'val_accuracy'), ('loss', 'val_loss')):
    show_train_history(history, metric, val_metric)
