##
# 使用卷积神经网络模型对鲸鱼和海豚的分类进行训练和预测。具体步骤包括：
# 构建卷积神经网络模型，包括多个卷积层、批量归一化层、激活函数、池化层、Dropout层和全连接层。
# 设置优化器为Adam，并定义损失函数为交叉熵。
# 编译模型，并打印模型摘要。
# 训练模型并保存训练历史。
# 将训练历史转换为DataFrame，并可视化训练过程的准确率和损失曲线。
# 加载测试数据集，并预测测试数据的标签。
# 将预测结果保存到CSV文件中。
# 整体通过卷积神经网络模型训练和预测来实现鲸鱼和海豚分类，同时对模型的训练过程进行了可视化和结果输出。


# 导包:
import numpy as np
import pandas as pd
import os
import random
import shutil
import glob
from sklearn.utils import shuffle

import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib.image as mpimg

from distutils.dir_util import copy_tree

from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Activation, Dropout, BatchNormalization, LeakyReLU
from tensorflow.keras.layers import Conv2D, AveragePooling2D, MaxPooling2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras.callbacks import ReduceLROnPlateau


# Global input path (Kaggle competition data directory):
print(os.listdir('../input/happy-whale-and-dolphin'))

# Paths to the train/test image folders:
train = '../input/happy-whale-and-dolphin/train_images'
test = '../input/happy-whale-and-dolphin/test_images'

# Number of images in the train and test sets:
print(len(os.listdir(train)))
print(len(os.listdir(test)))

print(os.listdir(train)[:5])
print(os.listdir(test)[:5])

# Full paths of every jpg image:
train_jpg = tf.io.gfile.glob(train+'/*.jpg')
test_jpg = tf.io.gfile.glob(test+'/*.jpg')

# Inspect the training metadata (train.csv):
train_data = pd.read_csv('../input/happy-whale-and-dolphin/train.csv', sep = ',')
train_data.head()

# Check for missing values:
train_data.isnull().sum()

# Sample count per species class:
train_data.species.value_counts()

# Load an image file from a path and return it as a float32 RGB tensor.
def Load_Image(path):
    raw = tf.io.read_file(path)
    decoded = tf.image.decode_image(raw, channels = 3)
    return tf.image.convert_image_dtype(decoded, tf.float32)

# Visualization:
# Bar chart of each species annotated with its percentage of the dataset:
plt.figure(figsize = [20,6])

color = sns.color_palette()[0]

sns.countplot(data = train_data, x = 'species', color = color);
plt.title('Simple Plot');
plt.ylabel('count');
plt.xlabel('species');

value_sum = train_data['species'].value_counts().sum()
value = train_data['species'].value_counts()

locs, labels = plt.xticks(rotation = 90)

for loc, label in zip(locs, labels):

    # Annotate each bar with its share of the total sample count.
    count = value[label.get_text()]
    text = '{:0.1f}%'.format(100 * count/value_sum)

    plt.text(loc, count+3, text, ha = 'center', color = 'black');

# Correct known misspellings in the species column so every species
# has a single canonical name.
spelling_fixes = {
    'kiler_whale': 'killer_whale',
    'frasiers_dolphin': 'frasers_dolphin',
    'globis': 'globis_whale',
    'beluga': 'beluga_whale',
    'bottlenose_dolpin': 'bottlenose_dolphin',
}
train_data['species'] = train_data['species'].replace(spelling_fixes)

# Re-check the per-class sample counts after the fix:
train_data.species.value_counts()

# Declare the dolphin and whale species lists and derive a `family`
# column for coarser (dolphin vs whale) analysis.
dolphin = [
    'bottlenose_dolphin',
    'dusky_dolphin',
    'spinner_dolphin',
    'spotted_dolphin',
    'common_dolphin',
    'white_sided_dolphin',
    'pantropic_spotted_dolphin',
    'commersons_dolphin',
    'rough_toothed_dolphin',
    'frasers_dolphin']

whale = [
    'beluga_whale',
    'humpback_whale',
    'blue_whale',
    'false_killer_whale',
    'killer_whale',
    'melon_headed_whale',
    'minke_whale',
    'fin_whale',
    'gray_whale',
    'southern_right_whale',
    'sei_whale',
    'short_finned_pilot_whale',
    'cuviers_beaked_whale',
    'pilot_whale',
    'long_finned_pilot_whale',
    'brydes_whale',
    'globis_whale',
    'pygmy_killer_whale'
]

# Map every known species to its family; anything unmapped becomes ''.
species_to_family = {name: 'dolphin' for name in dolphin}
species_to_family.update({name: 'whale' for name in whale})

train_data['family'] = train_data['species'].map(species_to_family).fillna('')

train_data.sample(5)

# Visualization:
# Family distribution as a pie chart plus a count plot:
plt.figure(figsize = [10, 5])
labels = train_data.family.value_counts()

plt.subplot(1,2,1)
labels.plot(kind = 'pie', autopct = '%1.2f%%', shadow = True, startangle = 180)
plt.title('Simple Pie Plot',fontsize = 15)
plt.ylabel('Family',fontsize = 10);

plt.subplot(1,2,2)
# NOTE: `color` was defined by the earlier species count plot section.
sns.countplot(data = train_data, x = 'family', color = color);
plt.title('Simple Plot');
plt.ylabel('count');
plt.xlabel('Family');

# Visualization:
# Plot a single sample from the training images.
from keras.preprocessing import image
from keras.utils import load_img, img_to_array


plt.figure(figsize = (20, 20))

plt.subplot(1, 4, 1)
# FIX: the original assigned the loaded image to a variable named `image`,
# shadowing the `keras.preprocessing.image` module imported just above.
sample_img = load_img('../input/happy-whale-and-dolphin/train_images/80b5373b87942b.jpg')
plt.imshow(sample_img);

# Visualization:
# Draw a random 4x5 grid of training images:
fig, ax = plt.subplots(4, 5, figsize = (20, 20))

jpg = random.sample(train_jpg, 20)

for idx, name in enumerate(jpg):
    row, col = divmod(idx, 5)
    img = Load_Image(name)
    ax[row, col].imshow(img)
    ax[row, col].set_title('Train image')

# Visualization:
# Draw a random 4x5 grid of test images:
fig, ax = plt.subplots(4, 5, figsize = (20, 20))

jpg = random.sample(test_jpg, 20)

for idx, name in enumerate(jpg):
    row, col = divmod(idx, 5)
    img = Load_Image(name)
    ax[row, col].imshow(img)
    ax[row, col].set_title('Test image')

# Visualization:
# Show a 4x4 grid of random images for a given species.
train_data['path'] = '../input/happy-whale-and-dolphin/train_images/' + train_data['image']


def species_plot(data, variable):
    """Plot 16 randomly chosen images of the species named `variable`."""
    plt.figure(figsize=(12, 12))
    subset = data[data['species'] == variable].reset_index(drop=True)
    plt.suptitle(variable)

    for pos, img_path in enumerate(np.random.choice(subset['path'], 16)):
        plt.subplot(4, 4, pos + 1)
        plt.imshow(Load_Image(img_path))

    plt.tight_layout()
    plt.show()

# Preview the first five species:
for var in train_data['species'].unique()[:5]:
    species_plot(train_data, var)


# Global training settings:
random_state = 42
batch_size = 256
epochs = 3
seed = 42
target_size = (64, 64)        # images are resized to 64x64
input_shape = (64, 64, 3)     # RGB input shape for the CNN

# Shuffle the training metadata:
train_data = shuffle(train_data, random_state = random_state)

# Pixel normalization to [0, 1] plus a 20% validation split:
data_norm = ImageDataGenerator(rescale = 1.0/255, validation_split = 0.20)

# Training data generator (labels one-hot encoded via class_mode='categorical'):
gen_train = data_norm.flow_from_dataframe(train_data,
                                          directory = train,
                                          x_col = 'image',
                                          y_col = 'species',
                                          subset = 'training',
                                          batch_size = batch_size,
                                          class_mode = 'categorical',
                                          seed = seed,
                                          target_size = target_size)

# Validation data generator:
gen_valid = data_norm.flow_from_dataframe(train_data,
                                          directory = train,
                                          x_col = 'image',
                                          y_col = 'species',
                                          subset = 'validation',
                                          batch_size = batch_size,
                                          class_mode = 'categorical',
                                          seed = seed,
                                          target_size = target_size)

# Build the CNN classifier.
# Fixes relative to the original:
#   * `input_shape` was passed to every Conv2D layer; Keras only uses it on
#     the first layer, so the redundant (misleading) copies are removed.
#   * LeakyReLU is itself a layer, so wrapping it as Activation(LeakyReLU())
#     is unnecessary; it is added directly (same activation, default alpha).
#   * The CategoricalCrossentropy object `losses` was created but never used
#     (compile used the string alias); it is now passed to compile.
mod = Sequential()

mod.add(Conv2D(filters = 32, kernel_size = (5, 5), strides = (1, 1), input_shape = input_shape, padding = 'valid'))
mod.add(BatchNormalization())
mod.add(LeakyReLU())

mod.add(Conv2D(filters = 32, kernel_size = (5, 5), strides = (1, 1), padding = 'valid'))
mod.add(BatchNormalization())
mod.add(LeakyReLU())
mod.add(MaxPooling2D(pool_size = (2, 2)))
mod.add(Dropout(0.1))

mod.add(Conv2D(filters = 64, kernel_size = (5, 5), strides = (1, 1), padding = 'valid'))
mod.add(LeakyReLU())
mod.add(BatchNormalization())

mod.add(Conv2D(filters = 128, kernel_size = (5, 5), strides = (1, 1), padding = 'valid'))
mod.add(BatchNormalization())
mod.add(LeakyReLU())
mod.add(AveragePooling2D(pool_size = (2, 2)))

mod.add(Conv2D(filters = 128, kernel_size = (5, 5), strides = (1, 1), padding = 'valid'))
mod.add(BatchNormalization())
mod.add(LeakyReLU())
mod.add(AveragePooling2D(pool_size = (2, 2)))
mod.add(Dropout(0.1))

mod.add(Flatten())

# Output layer: one softmax unit per species.
mod.add(Dense(train_data.species.nunique(), activation = 'softmax'))

# Optimizer:
opt = Adam(learning_rate = 0.0001)

# Loss function:
losses = tf.keras.losses.CategoricalCrossentropy()

# Compile the model (same loss as the original's 'categorical_crossentropy'):
mod.compile(loss = losses, metrics = ['accuracy'], optimizer = opt)

# Print the model summary:
mod.summary()

# Train the model:
fit = mod.fit(gen_train, epochs = epochs, validation_data = gen_valid)
fit

# Print the training history:
fit.history

# Convert the training history to a DataFrame:
df = pd.DataFrame(fit.history)
df

# Visualize accuracy and loss over the training epochs:
plt.figure(figsize = (8, 4))

plt.subplot(1, 2, 1)
plt.plot(df['accuracy'])
plt.plot(df['val_accuracy'])
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(['train', 'validate'], loc = 'lower right');

plt.subplot(1, 2, 2)
plt.plot(df['loss'])
plt.plot(df['val_loss'])
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(['train', 'validate'], loc = 'upper right');

# List the test image file names and set up a DataFrame with an
# empty predictions column to be filled in below.
test_data = pd.DataFrame(
    {'image': os.listdir("../input/happy-whale-and-dolphin/test_images")})
test_data['predictions'] = ''

test_data.head()

# Load the images listed in `data['image']` from the given dataset folder
# into one float array of shape (length, 64, 64, 3).
#
# Parameters:
#   data    -- DataFrame with an 'image' column of file names.
#   length  -- number of rows to allocate (should equal len(data)).
#   dataset -- sub-folder name under the competition input directory.
# Returns: numpy array of the decoded, resized images.
def Loading_Images(data, length, dataset):
    X_train = np.zeros((length, 64, 64, 3))
    # enumerate() replaces the manual `count` counter of the original.
    for idx, fig in enumerate(tqdm(data['image'])):
        # target_size is (height, width); the original passed a 3-tuple,
        # of which keras only uses the first two elements anyway.
        img = load_img("../input/happy-whale-and-dolphin/" + dataset + "/" + fig, target_size = (64, 64))
        X_train[idx] = img_to_array(img)
    return X_train

# Encode a label column into a one-hot matrix.
#
# Parameters:
#   arr -- array-like of labels (e.g. the individual_id column).
# Returns: (one-hot ndarray of shape (n, n_classes), fitted LabelEncoder).
def prepare_labels(arr):
    values = np.array(arr)
    label_encoder = LabelEncoder()
    integer_encoded = label_encoder.fit_transform(values)
    # FIX: the original used OneHotEncoder(sparse = False); the `sparse`
    # keyword was renamed to `sparse_output` in scikit-learn 1.2 and removed
    # in 1.4, so that call crashes on modern versions. An identity-matrix
    # lookup produces the identical dense one-hot matrix (class columns are
    # ordered 0..n_classes-1 in both cases).
    onehot_encoded = np.eye(len(label_encoder.classes_))[integer_encoded]
    return onehot_encoded, label_encoder

from tqdm.autonotebook import tqdm
import gc

# One-hot labels for the training individuals; `labels` is the fitted
# LabelEncoder used below to map class indices back to individual ids.
# (Renamed from `ele`, which the original reused as a loop variable.)
onehot_labels, labels = prepare_labels(train_data['individual_id'])
length = len(test_data)
batch_size = 1000
batch_start = 0
batch_end = batch_size

# Predict the test data in batches to keep memory bounded.
while batch_start < length:
    lim = min(batch_end, length)
    test_batch = test_data.iloc[batch_start:lim]

    x = Loading_Images(test_batch, test_batch.shape[0], "test_images")
    # BUG FIX: the original divided by 225, but training normalized with
    # rescale = 1/255; test images must use the same factor.
    x = x / 255

    pred = mod.predict(np.array(x), verbose=1)

    for row, predict in enumerate(pred):
        # BUG FIX: the original used argsort()[:-5][::-1], which drops the
        # HIGHEST-probability classes and keeps low-probability ones.
        # Take the top-4 class indices in descending probability instead.
        predictions = predict.argsort()[-4:][::-1]
        # Hoist the inverse transform out of the inner loop.
        names = labels.inverse_transform(predictions)

        s1 = ''
        s2 = ''
        # Confident predictions go before the 'new_individual' fallback,
        # the rest after it.
        for rank, i in enumerate(predictions):
            if predict[i] > 0.5:
                s1 = s1 + ' ' + names[rank]
            else:
                s2 = s2 + ' ' + names[rank]

        s = s1.strip() + ' new_individual ' + s2.strip()
        s = s.strip()

        test_data.loc[batch_start + row, 'predictions'] = s

    batch_start += batch_size
    batch_end += batch_size

    # Free the batch arrays before loading the next one.
    del x
    del test_batch
    del pred
    gc.collect()

# Save the predictions to a submission CSV:
test_data.to_csv('submission.csv', index = False)
test_data