import os
import json
import pickle
import random
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import torch


def read_split_data(root: str, val_rate: float = 0.2):
    """Split an image-classification dataset into train/validation path lists.

    Expects ``root`` to contain one sub-directory per class (the directory
    name is the label), each holding that class's image files.

    Args:
        root: dataset root directory.
        val_rate: fraction of each class randomly sampled for validation.

    Returns:
        Tuple ``(train_data, val_data, every_class_num)`` where the first two
        map class index -> list of image paths and the last maps
        class index -> total image count for that class.

    Side effects:
        Writes ``class_indices.json`` (``{"0": "daisy", ...}``) to the cwd.
    """
    random.seed(0)  # fixed seed so the split is reproducible across runs
    assert os.path.exists(root), "dataset root: {} does not exist".format(root)

    # One sub-directory per class; filter with isdir so stray files at the
    # root level are ignored (the original checked os.path.exists(root),
    # which is always true and filtered nothing).
    flower_class = [cla for cla in os.listdir(root)
                    if os.path.isdir(os.path.join(root, cla))]
    # Sort so the class -> index mapping is stable across platforms.
    flower_class.sort()
    # Map class name -> numeric index, e.g. {"daisy": 0}.
    class_indices = {name: idx for idx, name in enumerate(flower_class)}
    # Persist the inverse mapping ({"0": "daisy"}) for later lookups.
    json_str = json.dumps({val: key for key, val in class_indices.items()}, indent=4)
    with open("class_indices.json", 'w') as json_file:
        json_file.write(json_str)

    train_data = {}       # class index -> training image paths
    val_data = {}         # class index -> validation image paths
    every_class_num = {}  # class index -> number of images in the class
    supported = [".jpg", ".JPG", ".png", ".PNG"]  # accepted file extensions

    for cla in flower_class:
        cla_path = os.path.join(root, cla)
        # Collect all supported image paths; sort for deterministic order.
        images = sorted(os.path.join(cla_path, i) for i in os.listdir(cla_path)
                        if os.path.splitext(i)[-1] in supported)
        # Numeric index for this class.
        class_id = class_indices[cla]
        # Record the sample count for this class.
        every_class_num[class_id] = len(images)
        # Randomly sample the validation subset for this class.
        val_path = random.sample(images, k=int(len(images) * val_rate))
        val_set = set(val_path)
        # Keep list order instead of using a set difference: set iteration
        # order depends on PYTHONHASHSEED for strings, which would make the
        # training list non-reproducible despite the fixed random seed.
        train_path = [img for img in images if img not in val_set]
        val_data[class_id] = val_path
        train_data[class_id] = train_path

    print("{} images were found in the dataset.".format(sum(every_class_num.values())))
    print("{} images for training.".format(sum(len(num) for num in train_data.values())))
    print("{} images for validation.".format(sum(len(num) for num in val_data.values())))

    plot_image = False
    if plot_image:
        # Bar chart of per-class image counts.
        plt.bar(range(len(flower_class)), every_class_num.values(), align='center')
        # Replace tick positions 0..N-1 with the class names.
        plt.xticks(range(len(flower_class)), flower_class)
        # Annotate each bar with its count.
        for i, v in every_class_num.items():
            plt.text(x=i, y=v + 5, s=str(v), ha='center')
        plt.xlabel('image class')
        plt.ylabel('number of images')
        plt.title('flower class distribution')
        # savefig must come before show(): show() finalizes the current
        # figure, and a later savefig would write a blank image.
        plt.savefig("flower_class_distribution.png")
        plt.show()
    return train_data, val_data, every_class_num


# helper function to show an image
# (used in the `plot_classes_preds` function below)
# helper function to show an image
# (used in the `plot_classes_preds` function below)
def matplotlib_imshow(img, one_channel=False):
    """Render an image tensor with matplotlib and return it for reuse.

    NOTE(review): the two branches return different things — the grayscale
    branch returns just the (averaged) tensor, while the color branch returns
    a ``(tensor, figure)`` tuple. Callers must handle both shapes.

    Args:
        img: image tensor; presumably CHW and normalized with the ImageNet
             mean/std used below — TODO confirm against callers.
        one_channel: if True, collapse channels to a single grayscale image.
    """
    # Created unconditionally, but only returned by the color branch;
    # in the one_channel branch this figure is never closed or returned.
    fig = plt.figure()
    if one_channel:
        # Average over the channel dimension to get a single-channel image.
        img = img.mean(dim=0)
        plt.imshow(img.numpy(), cmap="Greys")
        # plt.show()
        return img

    else:
        # CHW -> HWC for matplotlib.
        img = img.numpy().transpose(1, 2, 0)
        # Undo normalization (std * x + mean) and scale to 0-255.
        unnorm_img = (img * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]) * 255
        # NOTE(review): this casts the still-normalized float image (values
        # roughly in [-2, 2]) to uint8, which truncates/wraps — looks
        # suspicious; verify the returned tensor is what callers expect.
        img = img.astype('uint8')
        unnorm_img = unnorm_img.astype('uint8')
        # Back to a CHW tensor built from the uint8-cast normalized image.
        norm_image = torch.Tensor(img).permute(2,0,1)
        plt.imshow(unnorm_img)
        plt.savefig("train_images.jpg")
        # plt.show()
        return norm_image, fig


def plot_data_loader_image(data_loader):
    """Show up to 5 images per batch from ``data_loader`` with their labels.

    Reads the index -> class-name mapping from ``./class_indices.json``
    (written by ``read_split_data``) and blocks on ``plt.show()`` for each
    batch until the window is closed.

    Args:
        data_loader: a torch DataLoader yielding ``(images, labels)`` batches
            of normalized CHW tensors.
    """
    batch_size = data_loader.batch_size
    # Never plot more than 5 images per row.
    plot_num = min(batch_size, 5)

    json_path = './class_indices.json'
    assert os.path.exists(json_path), json_path + " does not exist."
    # Use a context manager so the file handle is closed promptly
    # (the original left it open).
    with open(json_path, 'r') as json_file:
        class_indices = json.load(json_file)

    for data in data_loader:
        images, labels = data
        for i in range(plot_num):
            # [C, H, W] -> [H, W, C]
            img = images[i].numpy().transpose(1, 2, 0)
            # Undo the Normalize transform (ImageNet mean/std) back to 0-255.
            img = (img * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]) * 255
            label = labels[i].item()
            plt.subplot(1, plot_num, i+1)
            # Label each subplot with its class name (JSON keys are strings).
            plt.xlabel(class_indices[str(label)])
            plt.xticks([])  # hide x-axis ticks
            plt.yticks([])  # hide y-axis ticks
            plt.imshow(img.astype('uint8'))
        plt.show()



def plot_class_preds(net,
                     images_dir: str,
                     transform,
                     num_plot: int = 5,
                     device="cpu"):
    """Run ``net`` on labeled sample images and plot predictions vs. truth.

    Expects ``images_dir`` to contain a ``label.txt`` file with lines of
    ``file_name class_name``, plus the referenced image files. Each plotted
    title shows the predicted class and confidence, colored green when the
    prediction matches the true label and red otherwise.

    Args:
        net: model already on ``device``; called under ``torch.no_grad()``.
        images_dir: directory with sample images and ``label.txt``.
        transform: preprocessing callable applied to each PIL image.
        num_plot: maximum number of images to plot.
        device: device the input batch is moved to.

    Returns:
        The matplotlib Figure, or ``None`` when the directory, label file,
        or any usable labeled image is missing.
    """
    if not os.path.exists(images_dir):
        print("not found {} path, ignore add figure.".format(images_dir))
        return None

    label_path = os.path.join(images_dir, "label.txt")
    if not os.path.exists(label_path):
        print("not found {} file, ignore add figure".format(label_path))
        return None

    # read class_indict; use a context manager so the handle is closed
    # (the original leaked the open file).
    json_label_path = './class_indices.json'
    assert os.path.exists(json_label_path), "not found {}".format(json_label_path)
    with open(json_label_path, 'r') as json_file:
        # {"0": "daisy"}
        flower_class = json.load(json_file)
    # Inverse mapping: {"daisy": "0"}
    class_indices = dict((v, k) for k, v in flower_class.items())

    # reading label.txt file
    label_info = []
    with open(label_path, "r") as rd:
        for line in rd.readlines():
            line = line.strip()
            if len(line) > 0:
                split_info = [i for i in line.split(" ") if len(i) > 0]
                assert len(split_info) == 2, "label format error, expect file_name and class_name"
                image_name, class_name = split_info
                image_path = os.path.join(images_dir, image_name)
                # Skip entries whose image file is missing.
                if not os.path.exists(image_path):
                    print("not found {}, skip.".format(image_path))
                    continue
                # Skip entries whose class is not in the known class set.
                if class_name not in class_indices.keys():
                    print("unrecognized category {}, skip".format(class_name))
                    continue
                label_info.append([image_path, class_name])

    if len(label_info) == 0:
        return None

    # get first num_plot info
    if len(label_info) > num_plot:
        label_info = label_info[:num_plot]

    num_imgs = len(label_info)
    images = []
    labels = []
    for img_path, class_name in label_info:
        # read img
        img = Image.open(img_path).convert("RGB")
        label_index = int(class_indices[class_name])

        # preprocessing
        img = transform(img)
        images.append(img)
        labels.append(label_index)

    # batching images
    images = torch.stack(images, dim=0).to(device)

    # inference
    with torch.no_grad():
        output = net(images)
        # max over softmax gives both the top probability and its index.
        probs, preds = torch.max(torch.softmax(output, dim=1), dim=1)
        probs = probs.cpu().numpy()
        preds = preds.cpu().numpy()

    # width, height
    fig = plt.figure(figsize=(num_imgs * 2.5, 3), dpi=100)
    for i in range(num_imgs):
        # 1 row, num_imgs columns, drawing subplot i+1.
        ax = fig.add_subplot(1, num_imgs, i+1, xticks=[], yticks=[])

        # CHW -> HWC
        npimg = images[i].cpu().numpy().transpose(1, 2, 0)

        # Undo normalization back to 0-255.
        # mean:[0.485, 0.456, 0.406], std:[0.229, 0.224, 0.225]
        npimg = (npimg * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]) * 255
        plt.imshow(npimg.astype('uint8'))

        title = "{}, {:.2f}%\n(label: {})".format(
            flower_class[str(preds[i])],  # predict class
            probs[i] * 100,  # predict probability
            flower_class[str(labels[i])]  # true class
        )
        # Green title when the prediction is correct, red otherwise.
        ax.set_title(title, color=("green" if preds[i] == labels[i] else "red"))

    return fig


def write_pickle(list_info: list, file_name: str):
    """Persist *list_info* to *file_name* via pickle serialization."""
    with open(file_name, mode='wb') as sink:
        pickle.dump(list_info, sink)


def read_pickle(file_name: str) -> list:
    """Load and return the pickled object stored at *file_name*."""
    with open(file_name, mode='rb') as source:
        return pickle.load(source)

if __name__ == '__main__':
    # Smoke test: run the split on a local dataset copy.
    # NOTE(review): the path is machine-specific (Windows drive) and will
    # raise an AssertionError elsewhere — consider taking it from argv.
    read_split_data(root=r"D:\dataset\flow_data\train")


