import logging
import os

import numpy as np

import mindspore as ms
import mindspore.dataset as ds
import mindspore.dataset.vision as vision
import mindspore.nn as nn


ms.set_context(mode=ms.GRAPH_MODE)

logging.basicConfig(level=logging.INFO, filename="train.log",
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S')

batch_size = 32  # 批量大小
image_size = 224  # 训练图像空间大小
num_epochs = 15  # 训练周期数
lr = 0.001  # 学习率
momentum = 0.9  # 动量
workers = 4  # 并行线程个数

local_dir = "/Users/xuyi/.cache/modelscope/hub/datasets/AiguLiu/plants/master/data_files/extracted"
data_path_test = local_dir + "/0d19feef58332241f0c6ea9c7b438161d0684ec54c9dfb715eb4cba45fd1573d/test"


# 创建训练数据集
def create_dataset_plant(dataset_path, usage):
    """数据加载"""

    data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=workers, shuffle=True)

    # 数据增强操作
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
    scale = 32

    if usage == "train":
        # Define map operations for training dataset
        trans = [
            vision.RandomCropDecodeResize(size=image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            vision.RandomHorizontalFlip(prob=0.5),
            vision.Normalize(mean=mean, std=std),
            vision.HWC2CHW()
        ]
    else:
        # Define map operations for inference dataset
        trans = [
            vision.Decode(),
            vision.Resize(image_size + scale),
            vision.CenterCrop(image_size),
            vision.Normalize(mean=mean, std=std),
            vision.HWC2CHW()
        ]

    # 数据映射操作
    data_set = data_set.map(operations=trans, input_columns='image', num_parallel_workers=workers)
    # 批量操作
    data_set = data_set.batch(batch_size)

    return data_set


from resnet import resnet50

# 模型训练与评估
# 此处我们展示了5个epochs的训练过程，如果想要达到理想的训练效果，建议训练80个epochs。
# 定义ResNet50网络
model = resnet50(pretrained=True)
# 输出通道数大小为30
model.fc = nn.Dense(model.fc.in_channels, 30)

dataset_test = create_dataset_plant(data_path_test, "test")
step_size_test = dataset_test.get_dataset_size()

# 创建迭代器
data_loader_test = dataset_test.create_tuple_iterator(num_epochs=num_epochs)
model_dir = "../../model/res50-plant/resnet50-best-param.ckpt"
param_dict = ms.load_checkpoint(model_dir)

param_not_load, _ = ms.load_param_into_net(model, param_dict)
print(param_not_load)

## 测试数据集
# for i, (images, labels) in enumerate(data_loader_test):
#     labels = labels.astype(ms.int32)
#     model.set_train(mode=False)
#     output = model.construct(images)
#     print(output)

# 每个epoch结束后，验证准确率
# acc = model.eval(dataset_test)['Accuracy']
# logging.info("Test Accuracy is: %5.3f", acc)
from PIL import Image
from mindspore.dataset import transforms


def traverse_dir(path):
    for file in os.listdir(path):
        file_name = os.path.join(path, file)
        if os.path.isdir(file_name):
            print("文件夹：", file_name)
            traverse_dir(file_name)
        else:
            # Define map operations for inference dataset
            # 数据增强操作
            mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
            std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
            scale = 32
            img_ori = Image.open(file_name).convert("RGB")
            print("Image.type: {}, Image.shape: {}".format(type(img_ori), img_ori.size))
            # composed = transforms.Compose(
            #     [
            #         vision.Decode(),
            #         vision.Resize(image_size + scale),
            #         vision.CenterCrop(image_size),
            #         vision.Normalize(mean=mean, std=std),
            #         vision.HWC2CHW()
            #     ]
            # )
            op1 = vision.Decode()
            op2 = vision.Resize(image_size + scale)
            op3 = vision.CenterCrop(image_size)
            op4 = vision.Normalize(mean=mean, std=std)
            op5 = vision.HWC2CHW()

            img = op1(img_ori)
            img = op2(img)
            img = op3(img)
            img = op4(img)
            img = op5(img)
            y = model.construct(img)
            softmax = nn.Softmax(axis=1)

            pred_probab = softmax(y)
            print("文件：", file_name, pred_probab)


traverse_dir(data_path_test)