from download import download
import mindspore.dataset as ds
import mindspore.dataset.vision as vision
import matplotlib
import matplotlib.pyplot as plt
import mindspore as ms

dataset_url = "https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/intermediate/Canidae_data.zip"
# download(dataset_url, local_dir, kind="zip", replace=False)

batch_size = 18  # batch size
image_size = 224  # spatial size of the training images
num_epochs = 10  # number of training epochs
lr = 0.001  # learning rate
momentum = 0.9  # optimizer momentum
workers = 4  # number of parallel data-loading workers

matplotlib.use('TkAgg')

# Dataset directory paths
local_dir = "../../datasets/datasets-Canidae"
data_path_train = local_dir + "/data/Canidae/train/"
data_path_val = local_dir + "/data/Canidae/val/"


# 创建训练数据集
def create_dataset_canidae(dataset_path, usage):
    """Build the Canidae image pipeline for one split.

    Args:
        dataset_path: root directory whose sub-folders are the classes.
        usage: "train" selects the random-augmentation pipeline; anything
            else selects the deterministic evaluation pipeline.

    Returns:
        A batched mindspore dataset yielding 'image' and 'label' columns.
    """
    dataset = ds.ImageFolderDataset(dataset_path,
                                    num_parallel_workers=workers,
                                    shuffle=True, )

    # ImageNet normalization constants expressed on the 0-255 pixel scale.
    norm_mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    norm_std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
    resize_margin = 32  # extra pixels kept before the eval center crop

    if usage == "train":
        # Training: random crop+decode+resize and horizontal flip.
        ops = [
            vision.RandomCropDecodeResize(size=image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            vision.RandomHorizontalFlip(prob=0.5),
            vision.Normalize(mean=norm_mean, std=norm_std),
            vision.HWC2CHW(),
        ]
    else:
        # Evaluation: deterministic decode, resize, and center crop.
        ops = [
            vision.Decode(),
            vision.Resize(image_size + resize_margin),
            vision.CenterCrop(image_size),
            vision.Normalize(mean=norm_mean, std=norm_std),
            vision.HWC2CHW(),
        ]

    # Apply the transform chain to the image column, then batch.
    dataset = dataset.map(operations=ops, input_columns='image', num_parallel_workers=workers)
    return dataset.batch(batch_size)


# Build train/val pipelines and record steps per epoch.
dataset_train = create_dataset_canidae(data_path_train, "train")
step_size_train = dataset_train.get_dataset_size()
dataset_val = create_dataset_canidae(data_path_val, "val")
step_size_val = dataset_val.get_dataset_size()

# Pull one batch to sanity-check tensor shapes and labels.
data = next(dataset_train.create_dict_iterator())
images = data["image"]
labels = data["label"]
print("Tensor of image", images.shape)
print("Labels:", labels)

import matplotlib.pyplot as plt
import numpy as np

# class_name maps label -> class name; ImageFolderDataset assigns labels by
# folder name in ascending string order.
class_name = {0: "dogs", 1: "wolves"}

# plt.figure(figsize=(5, 5))
# for i in range(4):
#     # Fetch an image and its corresponding label
#     data_image = images[i].asnumpy()
#     data_label = labels[i]
#     # Un-normalize the image for display
#     data_image = np.transpose(data_image, (1, 2, 0))
#     mean = np.array([0.485, 0.456, 0.406])
#     std = np.array([0.229, 0.224, 0.225])
#     data_image = std * data_image + mean
#     data_image = np.clip(data_image, 0, 1)
#     # Show the image
#     plt.subplot(2, 2, i + 1)
#     plt.imshow(data_image)
#     plt.title(class_name[int(labels[i].asnumpy())])
#     plt.axis("off")
#
# plt.show()

from mindspore import nn, train
import mindspore as ms
from resnet import resnet50


# ResNet-50 with pretrained weights (project-local resnet.py).
network = resnet50(pretrained=True)
# Per-step cosine-decay schedule for the whole run.
# NOTE(review): this rebinds the scalar `lr` defined above to a list of
# per-step learning rates.
lr = nn.cosine_decay_lr(min_lr=0.00001, max_lr=0.001, total_step=step_size_train * num_epochs,
                        step_per_epoch=step_size_train, decay_epoch=num_epochs)
# Optimizer and loss function.
# NOTE(review): this optimizer (and the grad_fn below) are recreated later,
# after the classifier head is replaced.
opt = nn.Momentum(params=network.trainable_params(), learning_rate=lr, momentum=0.9)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')


def forward_fn(inputs, targets):
    """Return the loss for one batch (reads module-level `network`/`loss_fn`)."""
    predictions = network(inputs)
    return loss_fn(predictions, targets)

# Differentiate forward_fn w.r.t. the optimizer's parameter list (no input grads).
grad_fn = ms.value_and_grad(forward_fn, None, opt.parameters)


def train_step(inputs, targets):
    """Run one optimization step on a batch and return its loss."""
    loss, gradients = grad_fn(inputs, targets)
    opt(gradients)
    return loss


# Input size of the fully-connected classifier head.
in_channels = network.fc.in_channels
# Output channels = 2 classes (dogs vs. wolves).
head = nn.Dense(in_channels, 2)
# Replace the classifier head.
network.fc = head

# Average-pooling layer with kernel size 7.
avg_pool = nn.AvgPool2d(kernel_size=7)
# Replace the average-pooling layer.
network.avg_pool = avg_pool

# Re-create optimizer and loss so the new head's parameters are included.
# NOTE(review): `lr` is now the cosine-decay list built above.
opt = nn.Momentum(params=network.trainable_params(), learning_rate=lr, momentum=momentum)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

# Model wrapper used for accuracy evaluation.
model = train.Model(network, loss_fn, opt, metrics={"Accuracy": train.Accuracy()})

# Rebuild grad_fn against the new optimizer's parameter list.
grad_fn = ms.value_and_grad(forward_fn, None, opt.parameters)

# 创建迭代器
# Tuple iterator over the training set ((image, label) per batch).
data_loader_train = dataset_train.create_tuple_iterator(num_epochs=num_epochs)

# Best-model checkpoint path.
best_ckpt_dir = "../model/res50-trans/BestCheckpoint"
best_ckpt_path = best_ckpt_dir + "/resnet50-best.ckpt"

import os
import time

# 开始循环训练
# print("Start Training Loop ...")
# best_acc = 0
# for epoch in range(num_epochs):
#     losses = []
#     network.set_train()
#
#     epoch_start = time.time()
#     # 为每轮训练读入数据
#     for i, (images, labels) in enumerate(data_loader_train):
#         labels = labels.astype(ms.int32)
#         loss = train_step(images, labels)
#         losses.append(loss)
#
#     # 每个epoch结束后，验证准确率
#     acc = model.eval(dataset_val)['Accuracy']
#
#     epoch_end = time.time()
#     epoch_seconds = (epoch_end - epoch_start) * 1000
#     step_seconds = epoch_seconds / step_size_train
#
#     print("-" * 20)
#     print("Epoch: [%3d/%3d], Average Train Loss: [%5.3f], Accuracy: [%5.3f]" % (
#         epoch + 1, num_epochs, sum(losses) / len(losses), acc
#     ))
#     print("epoch time: %5.3f ms, per step time: %5.3f ms" % (
#         epoch_seconds, step_seconds
#     ))
#
#     if acc > best_acc:
#         best_acc = acc
#         if not os.path.exists(best_ckpt_dir):
#             os.mkdir(best_ckpt_dir)
#         ms.save_checkpoint(network, best_ckpt_path)
# print("=" * 80)
# print(f"End of validation the best Accuracy is: {best_acc: 5.3f}, "
#       f"save the best ckpt file in {best_ckpt_path}", flush=True)
#
# def visualize_model(best_ckpt_path, val_ds):
#     net = resnet50()
#     # 全连接层输入层的大小
#     in_channels = net.fc.in_channels
#     # 输出通道数大小为狼狗分类数2
#     head = nn.Dense(in_channels, 2)
#     # 重置全连接层
#     net.fc = head
#     # 平均池化层kernel size为7
#     avg_pool = nn.AvgPool2d(kernel_size=7)
#     # 重置平均池化层
#     net.avg_pool = avg_pool
#     # 加载模型参数
#     param_dict = ms.load_checkpoint(best_ckpt_path)
#     ms.load_param_into_net(net, param_dict)
#     model = train.Model(net)
#     # 加载验证集的数据进行验证
#     data = next(val_ds.create_dict_iterator())
#     images = data["image"].asnumpy()
#     labels = data["label"].asnumpy()
#     class_name = {0: "dogs", 1: "wolves"}
#     # 预测图像类别
#     output = model.predict(ms.Tensor(data['image']))
#     pred = np.argmax(output.asnumpy(), axis=1)
    # # 显示图像及图像的预测值
    # plt.figure(figsize=(5, 5))
    # for i in range(4):
    #     plt.subplot(2, 2, i + 1)
    #     # 若预测正确，显示为蓝色；若预测错误，显示为红色
    #     color = 'blue' if pred[i] == labels[i] else 'red'
    #     plt.title('predict:{}'.format(class_name[pred[i]]), color=color)
    #     picture_show = np.transpose(images[i], (1, 2, 0))
    #     mean = np.array([0.485, 0.456, 0.406])
    #     std = np.array([0.229, 0.224, 0.225])
    #     picture_show = std * picture_show + mean
    #     picture_show = np.clip(picture_show, 0, 1)
    #     plt.imshow(picture_show)
    #     plt.axis('off')
    #
    # plt.show()


# 使用固定特征进行训练的时候，需要冻结除最后一层之外的所有网络层。通过设置 requires_grad == False 冻结参数，以便不在反向传播中计算梯度。
print("固定特征进行训练")
net_work = resnet50(pretrained=True)

# 全连接层输入层的大小
in_channels = net_work.fc.in_channels
# 输出通道数大小为狼狗分类数2
head = nn.Dense(in_channels, 2)
# 重置全连接层
net_work.fc = head

# 平均池化层kernel size为7
avg_pool = nn.AvgPool2d(kernel_size=7)
# 重置平均池化层
net_work.avg_pool = avg_pool

# 冻结除最后一层外的所有参数
for param in net_work.get_parameters():
    if param.name not in ["fc.weight", "fc.bias"]:
        param.requires_grad = False

# 定义优化器和损失函数
opt = nn.Momentum(params=net_work.trainable_params(), learning_rate=lr, momentum=0.5)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')


def forward_fn(inputs, targets):
    """Return the loss for one batch through the frozen-feature `net_work`."""
    return loss_fn(net_work(inputs), targets)


# Differentiate forward_fn w.r.t. the (fc-only) trainable parameters.
grad_fn = ms.value_and_grad(forward_fn, None, opt.parameters)


def train_step(inputs, targets):
    """Apply one optimizer update for a batch; return the batch loss."""
    step_loss, step_grads = grad_fn(inputs, targets)
    opt(step_grads)
    return step_loss


# Model wrapper for the frozen-feature network (accuracy metric).
model1 = train.Model(net_work, loss_fn, opt, metrics={"Accuracy": train.Accuracy()})

dataset_train = create_dataset_canidae(data_path_train, "train")
step_size_train = dataset_train.get_dataset_size()

dataset_val = create_dataset_canidae(data_path_val, "val")
step_size_val = dataset_val.get_dataset_size()

# NOTE(review): extended to 15 epochs, but the cosine lr list above was sized
# for 10 epochs worth of steps — confirm the schedule covers the full run.
num_epochs = 15

# Iterators for training and validation.
data_loader_train = dataset_train.create_tuple_iterator(num_epochs=num_epochs)
data_loader_val = dataset_val.create_tuple_iterator(num_epochs=num_epochs)
best_ckpt_dir = "../model/res50-trans/BestCheckpoint"
best_ckpt_path = best_ckpt_dir + "/resnet50-best-freezing-param.ckpt"

# Main training loop for the fixed-feature run.
print("Start Training Loop ...")

best_acc = 0

for epoch in range(num_epochs):
    losses = []
    net_work.set_train()

    epoch_start = time.time()

    # Iterate over the training batches for this epoch.
    for images, labels in data_loader_train:
        labels = labels.astype(ms.int32)
        loss = train_step(images, labels)
        losses.append(loss)

    # Validate accuracy at the end of each epoch.
    acc = model1.eval(dataset_val)['Accuracy']

    epoch_end = time.time()
    epoch_ms = (epoch_end - epoch_start) * 1000  # wall-clock epoch time, ms
    step_ms = epoch_ms / step_size_train

    print("-" * 20)
    print("Epoch: [%3d/%3d], Average Train Loss: [%5.3f], Accuracy: [%5.3f]" % (
        epoch + 1, num_epochs, sum(losses) / len(losses), acc
    ))
    print("epoch time: %5.3f ms, per step time: %5.3f ms" % (
        epoch_ms, step_ms
    ))
    if epoch == 10:
        print("解除参数冻结")
        # NOTE(review): set_grad(True) toggles the cell's grad flag but does
        # NOT reset the per-parameter requires_grad=False flags set during
        # freezing, and the optimizer was built only from the then-trainable
        # (fc) parameters — confirm this actually unfreezes anything.
        net_work.set_grad(True)

    if acc > best_acc:
        best_acc = acc
        # makedirs (not mkdir) so missing parent directories of the nested
        # checkpoint path are created as well; exist_ok avoids a TOCTOU race.
        os.makedirs(best_ckpt_dir, exist_ok=True)
        ms.save_checkpoint(net_work, best_ckpt_path)

print("=" * 80)
print(f"End of validation the best Accuracy is: {best_acc: 5.3f}, "
      f"save the best ckpt file in {best_ckpt_path}", flush=True)
