import pandas as pd
from IPython.core.display_functions import display
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Input
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import warnings

warnings.filterwarnings('ignore')

# --- Data loading ---
# Load the training metadata CSV from the Kaggle paddy-disease-classification
# dataset.
data = pd.read_csv('input/paddy-disease-classification/train.csv')
print(data.shape)
# Inspect the raw table.
print(data)
# Columns (per the dataset description):
#   image_id: image file name inside the train_images directory
#   label:    paddy disease type — 10 classes, including 'normal'
#   variety:  rice variety
#   age:      paddy age in days

# Show the distinct class labels.
print(data.label.unique())
# Number of distinct classes; used later as the size of the softmax head.
num_class = len(data.label.unique())
display("Number of classes:", num_class)

# Every image in the dataset is 256x256 pixels.
img_rows, img_cols = 256, 256
# Mini-batch size shared by the data generators.
batch_size = 128

# --- Exploratory data analysis ---
# Bug fix: DataFrame.info() writes its report to stdout itself and returns
# None, so the original `print(data.info())` emitted a spurious trailing
# "None" line. Call it directly instead.
data.info()

# Force the categorical columns to string dtype so describe() and the plots
# below treat them as categorical rather than attempting numeric summaries.
data['label'] = data['label'].astype('str')
data['variety'] = data['variety'].astype('str')
# Basic statistics: mean paddy age is ~64 days; 75% of samples are <= 70 days.
print(data.describe())
# Sample count per disease class.
print(data['label'].value_counts())

# --- Class and variety distribution plots ---

def _show_histogram(frame, column, title, bins='auto'):
    """Draw a seaborn histogram of `column` from `frame` on a fresh 21x7 figure."""
    fig, ax = plt.subplots(1, 1, figsize=(21, 7))
    sns.histplot(frame, x=column, bins=bins, ax=ax)
    plt.title(title)
    plt.show()

# Disease distribution over the whole training set.
_show_histogram(data, 'label', 'Disease distribution in the dataset', bins=10)
# Observation: the training data is imbalanced across disease classes.

# Rice variety distribution.
_show_histogram(data, 'variety', 'Variety distribution in the dataset')
# Observation: 10 distinct varieties, dominated by ADT45.

# Exclude 'normal' samples to see which varieties are most affected by disease.
diseased_only = data[data['label'] != 'normal']
_show_histogram(diseased_only, 'variety', 'Variety distribution in the dataset')

# Per-variety sample counts among the diseased images.
print(diseased_only['variety'].value_counts())

# --- Preview one sample image per disease class ---
# (file path, class name) pairs — one representative image for each of the
# 10 classes. Note: 'normal' also gets the ' disease' title suffix, matching
# the original labels.
samples = [
    ('input/paddy-disease-classification/train_images/hispa/106590.jpg', 'hispa'),
    ('input/paddy-disease-classification/train_images/tungro/109629.jpg', 'tungro'),
    ('input/paddy-disease-classification/train_images/bacterial_leaf_blight/109372.jpg', 'bacterial_leaf_blight'),
    ('input/paddy-disease-classification/train_images/downy_mildew/102350.jpg', 'downy_mildew'),
    ('input/paddy-disease-classification/train_images/blast/110243.jpg', 'blast'),
    ('input/paddy-disease-classification/train_images/bacterial_leaf_streak/101104.jpg', 'bacterial_leaf_streak'),
    ('input/paddy-disease-classification/train_images/normal/109760.jpg', 'normal'),
    ('input/paddy-disease-classification/train_images/brown_spot/104675.jpg', 'brown_spot'),
    ('input/paddy-disease-classification/train_images/dead_heart/105159.jpg', 'dead_heart'),
    ('input/paddy-disease-classification/train_images/bacterial_panicle_blight/101351.jpg', 'bacterial_panicle_blight'),
]

# One 20x10-inch canvas holding a 5-column grid of subplots.
plt.figure(figsize=(20, 10))
n_cols = 5
n_rows = len(samples) // n_cols + 1
for idx, (path, name) in enumerate(samples, start=1):
    plt.subplot(n_rows, n_cols, idx)
    img = plt.imread(path)
    plt.title(name + ' disease')
    plt.imshow(img)
plt.show()

# --- Data augmentation ---
# Augmentation counters overfitting and compensates for the limited dataset
# size: it generates varied samples from the existing images by applying
# random transformations, increasing the effective number of unique inputs
# so the model generalizes better to the validation set.
aug_gens = ImageDataGenerator(
    rescale=1.0/255.0,
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    validation_split=0.1,
    rotation_range=10,
    shear_range=0.25,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
    vertical_flip=True,
)

# Training subset: images live under train_images/, one sub-folder per class
# (~9,371 images in total after the validation split).
train_loc = 'input/paddy-disease-classification/train_images/'
train_data = aug_gens.flow_from_directory(
    train_loc,
    subset="training",
    seed=2,
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode="categorical")

# Validation subset: the held-out 10% from the same generator/split/seed.
valid_data = aug_gens.flow_from_directory(
    train_loc,
    subset="validation",
    seed=2,
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode="categorical")

# Test images: rescale only — no augmentation at inference time.
# classes=['.'] treats the test_images directory itself as a single unlabeled
# class so the flat directory of files can be loaded; shuffle=False keeps the
# file order stable for building a submission later.
test_loc = 'input/paddy-disease-classification/test_images'
test_data = ImageDataGenerator(rescale=1.0/255).flow_from_directory(
    directory=test_loc,
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    classes=['.'],
    shuffle=False,
)

# Class-name -> index mapping inferred from the directory names.
print(train_data.class_indices)
# Number of classes discovered on disk.
print(len(train_data.class_indices))

# LeakyReLU activation instance — defined here but not wired into the
# baseline model below (nothing in this file uses it).
leakyrelu = tensorflow.keras.layers.LeakyReLU(alpha=0.01)

# --- Baseline CNN ---
# Four conv + max-pool stages (32, 32, 64, 128 filters), then two swish dense
# layers and a softmax head with one unit per disease class.
input_layer = Input((img_rows, img_cols, 3))

# First stage keeps its explicit layer name and strides from the original.
x = Conv2D(filters=32, kernel_size=[3, 3], strides=(1, 1), activation='relu',
           padding="same", name="conv2d_branch1")(input_layer)
x = MaxPool2D(pool_size=[2, 2])(x)

# Remaining conv/pool stages, doubling the filter count as resolution halves.
for n_filters in (32, 64, 128):
    x = Conv2D(filters=n_filters, kernel_size=[3, 3], activation='relu',
               padding="same")(x)
    x = MaxPool2D(pool_size=[2, 2])(x)

x = Flatten()(x)
x = Dense(1024, activation='swish')(x)
x = Dense(128, activation='swish')(x)
output_dense = Dense(num_class, activation="softmax")(x)

model = Model(inputs=[input_layer], outputs=[output_dense])

# --- Transfer-learning model: EfficientNetB2 backbone (optimized model) ---
# NOTE(review): tf.keras EfficientNet* applications expect raw [0, 255] pixel
# inputs (preprocessing is built into the network), while the generators above
# rescale to [0, 1] — confirm before actually training model_2, or feed it an
# unscaled generator.
effectiveNet = tensorflow.keras.applications.EfficientNetB2(include_top=False, pooling='avg', weights='imagenet', input_shape=(img_rows,img_cols,3))

inputs=tensorflow.keras.Input(shape=(img_rows,img_cols,3))
# training=False keeps the backbone's BatchNorm layers in inference mode, but
# its weights remain trainable (there is no `effectiveNet.trainable = False`
# here) — freeze them explicitly if pure feature extraction is intended.
x = effectiveNet(inputs, training=False)
dense = tensorflow.keras.layers.Dense(256,activation='relu')(x)
dropout = tensorflow.keras.layers.Dropout(0.4)(dense)
dense_2 = tensorflow.keras.layers.Dense(256,activation='relu')(dropout)
drop_2 = tensorflow.keras.layers.Dropout(0.4)(dense_2)
# Consistency fix: use num_class (derived from the data, 10 for this dataset)
# instead of a hard-coded 10, matching the baseline model's output layer.
output = tensorflow.keras.layers.Dense(num_class, activation='softmax')(drop_2)
model_2 =  tensorflow.keras.Model(inputs,output)

# Print the layer/parameter summary of the *baseline* model (`model`, not
# `model_2`); the transfer model is built but never compiled or trained below.
model.summary()

# --- Training configuration ---
# One epoch = one full pass over the training data.
EPOCH = 50

# Multiply the learning rate by 0.4 whenever val_loss plateaus for 4 epochs,
# down to a floor of 1e-4.
lr_reduction = ReduceLROnPlateau(monitor='val_loss', patience=4, verbose=1, factor=0.4, min_lr=0.0001)

# Stop when val_loss has not improved by at least 1e-5 for 8 epochs, and
# restore the best weights seen so far.
early_stop = EarlyStopping(monitor='val_loss', min_delta=0.00001, patience=8, mode='auto', restore_best_weights=True)

model.compile(optimizer='adam', loss=tensorflow.losses.CategoricalCrossentropy(), metrics=['accuracy'])
# Bug fix: `batch_size` must not be passed to fit() when the input is a
# generator — the generator already yields whole batches, and tf.keras raises
# a ValueError for generator/dataset inputs with an explicit batch_size.
model_fit = model.fit(train_data,
                      epochs=EPOCH,
                      validation_data=valid_data,
                      verbose=1,
                      callbacks=[early_stop, lr_reduction])


# --- Training-history curves ---
# Preserved from the original: an extra 20x7-inch figure is created before
# the two named figures below.
f = plt.figure(figsize=(20, 7))

def _plot_history(figure_name, train_key, train_label, val_key, val_label,
                  title, ylabel):
    """Plot one training/validation metric pair from the fit history."""
    plt.figure(figure_name)
    plt.plot(model_fit.epoch, model_fit.history[train_key], label=train_label)
    plt.plot(model_fit.epoch, model_fit.history[val_key], label=val_label)
    plt.title(title, fontsize=18)
    plt.xlabel("Epochs", fontsize=15)
    plt.ylabel(ylabel, fontsize=15)
    plt.grid(alpha=0.3)
    plt.legend()
    plt.show()

# Accuracy on the training and validation sets per epoch.
_plot_history('For Accuracy', 'accuracy', 'training accuracy',
              'val_accuracy', 'val_accuracy', 'Accuracy Curve', 'Accuracy')
# Loss on the training and validation sets per epoch.
_plot_history('For Loss', 'loss', 'training loss',
              'val_loss', 'Validation loss', 'Loss Curve', 'Loss')

# Persist the trained baseline model (architecture + weights + optimizer
# state) in the Keras HDF5 format; a separate predict_result.py reloads a
# saved model for inference (see the commented-out section below).
model.save('saved_model/my_model03.h5')


# 下面这一部分移动到predict_result.py文件里
# load_model = tensorflow.keras.models.load_model('saved_model/my_model02.h5')

# evaluate_test = model.evaluate(test_data, verbose=1)
# print("\nAccuracy =", "{:.7f}%".format(evaluate_test[1]*100))
# print("Loss     =" ,"{:.9f}".format(evaluate_test[0]))

# y_predict_max = np.argmax(model.predict(test_data),axis=1)

# inverse_map = {v:k for k,v in train_data.class_indices.items()}

# predictions = [inverse_map[k] for k in y_predict_max]

# files=test_data.filenames

# results=pd.DataFrame({"image_id":files,
#                       "label":predictions})
# results.image_id = results.image_id.str.replace('./', '')
# results.to_csv("submission.csv",index=False)
# print(results.head())

