# 首先导入包
import pickle
import pandas as pd
import json
from PIL import Image, ImageEnhance
import random
from tqdm import tqdm
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import matplotlib.pyplot as plt

# Peek at the label file (expected columns: 'image', 'label').
# NOTE: this runs at import time and reads a fixed relative path; several
# functions below depend on this module-level DataFrame.
labels_data_frame = pd.read_csv('../data/train.csv')
print(labels_data_frame.head())


# Build and persist the label<->index conversion dictionaries as JSON files.
def save_cls_num(cla_path, num_path, df=None):
    """Derive class<->index mappings from the labels and save them as JSON.

    Args:
        cla_path: output path for the class-name -> index mapping.
        num_path: output path for the index -> class-name mapping.
        df: optional DataFrame with a 'label' column; defaults to the
            module-level ``labels_data_frame`` (backward compatible).

    NOTE: JSON object keys are always strings, so the num->class file
    stores the integer indices as strings ("0", "1", ...); readers must
    account for that.
    """
    if df is None:
        df = labels_data_frame
    # Deduplicate and sort so index assignment is deterministic.
    leaves_labels = sorted(set(df['label']))
    # Map each label to a sequential index.
    class_to_num = {label: i for i, label in enumerate(leaves_labels)}
    # Inverse mapping, used at prediction time to recover class names.
    num_to_class = {v: k for k, v in class_to_num.items()}

    with open(cla_path, 'w') as f:
        json.dump(class_to_num, f)
    with open(num_path, 'w') as f:
        json.dump(num_to_class, f)


# Single-image data augmentation.
def enhance_image(image_path, resize_size=(224, 224), flip_left_right_prob=0.5,
                  flip_top_bottom_prob=0.5, color_factor_range=(0.5, 1.5)):
    """Load one image and apply random augmentations.

    Args:
        image_path: path of the image to load.
        resize_size: (width, height) every image is resized to.
        flip_left_right_prob: probability of a horizontal flip.
        flip_top_bottom_prob: probability of a vertical flip.
        color_factor_range: (low, high) range for the random saturation
            factor (1.0 = unchanged); defaults to the previous hard-coded
            (0.5, 1.5).

    Returns:
        The augmented PIL image.
    """
    img = Image.open(image_path)

    # Normalize the size first so all outputs match.
    img = img.resize(resize_size)

    # Random horizontal flip.
    if random.random() < flip_left_right_prob:
        img = img.transpose(Image.Transpose.FLIP_LEFT_RIGHT)

    # Random vertical flip.
    if random.random() < flip_top_bottom_prob:
        img = img.transpose(Image.Transpose.FLIP_TOP_BOTTOM)

    # Random relative saturation adjustment.
    color_enhance_factor = random.uniform(*color_factor_range)
    enhancer = ImageEnhance.Color(img)
    img = enhancer.enhance(color_enhance_factor)

    return img


# Batch-augment all images, save them, and write new train/valid CSV splits.
def image_enhancement(img_path='../data/', enhance_times=2):
    """Augment every image listed in ``labels_data_frame`` and write splits.

    Each source image produces ``enhance_times`` augmented copies saved under
    '../data/enhance_img/'. The augmented rows are appended to the label
    table, which is then split 80/20 into train2.csv / valid2.csv.

    Args:
        img_path: directory prefix prepended to each CSV image path.
        enhance_times: number of augmented copies per source image.
    """
    df = labels_data_frame
    # Continue numbering after the existing images.
    num = len(labels_data_frame)
    # Accumulate new rows in a list: DataFrame.append was removed in
    # pandas 2.0, and one concat at the end avoids quadratic re-copying.
    new_rows = []
    for i in tqdm(range(len(labels_data_frame))):
        image_path, label = labels_data_frame.iloc[i]
        final_path = img_path + image_path
        # Produce enhance_times augmented versions of this image.
        for _ in range(enhance_times):
            new_img = enhance_image(final_path)
            new_img.save('../data/enhance_img/' + str(num) + '.jpg')
            # NOTE(review): files are saved under 'enhance_img/' but recorded
            # under 'images/' — presumably moved later; confirm with the
            # downstream loading code.
            new_rows.append({'image': 'images/' + str(num) + '.jpg',
                             'label': label})
            num += 1
    df = pd.concat([df, pd.DataFrame(new_rows)], ignore_index=True)

    # 80/20 random split into train/validation.
    split_point = int(df.shape[0] * 0.8)

    # Randomly sample 80% of the rows for training.
    df1 = df.sample(n=split_point, random_state=1)

    # The remaining rows form the validation set.
    df2 = df.drop(df1.index)
    df1.to_csv('../data/train2.csv', index=False)
    df2.to_csv('../data/valid2.csv', index=False)


# Simple in-memory Dataset implementation.
class MyDataset(Dataset):
    """Minimal dataset wrapping parallel feature/label sequences."""

    def __init__(self, data, labels):
        self.data = data
        self.labels = labels

    def __len__(self):
        # The two sequences are parallel, so either length works.
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index], self.labels[index]


# Build the training or validation dataset and pickle it in chunks.
def make_data_set(data_csv, save_name, batch_size=128):
    """Convert a CSV of (image, label) rows into pickled MyDataset chunks.

    Args:
        data_csv: CSV filename under '../data/' with 'image' and 'label'
            columns.
        save_name: prefix for the output pickle files
            ('<save_name><chunk_number>.pkl').
        batch_size: rows per pickled chunk (default 128, the previous
            hard-coded step).
    """
    # Read the CSV into a DataFrame.
    data = pd.read_csv('../data/' + data_csv)

    # Load the label -> index mapping once; it is invariant across chunks
    # (previously re-read from disk on every iteration).
    with open('../data/train_files/class_to_num.txt', 'r') as f:
        class_to_num = json.loads(f.read())

    length = len(data)
    # Real chunk count for progress display (was hard-coded to 144).
    total_chunks = (length + batch_size - 1) // batch_size
    num = 1
    for start in range(0, length, batch_size):
        chunk = data.iloc[start:start + batch_size]
        features, labels = chunk['image'], chunk['label']
        # Load and resize each image to a uniform 224x224.
        features = [Image.open('../data/' + str(x)).resize((224, 224)) for x in features]
        # Normalize pixel values to [0, 1].
        features = [np.array(x).astype(np.float32) / 255.0 for x in features]
        # Map string class labels to integer indices.
        labels = [class_to_num[x] for x in labels]
        # Stack features and labels into tensors.
        features = torch.stack([torch.from_numpy(x) for x in features])
        labels = torch.from_numpy(np.array(labels))
        # Wrap in a Dataset and pickle this chunk.
        dataset = MyDataset(features, labels)
        with open(save_name + str(num) + ".pkl", "wb") as f:
            pickle.dump(dataset, f)
        print('{}/{}'.format(num, total_chunks))
        num += 1


def main():
    """Entry point: build and pickle the train/validation datasets."""
    # One-off preprocessing steps, already performed:
    # save_cls_num('../data/train_files/class_to_num.txt', '../data/train_files/num_to_class.txt')
    # image_enhancement()

    # Build and save the training dataset chunks.
    make_data_set('train2.csv', 'train_data_set')
    # Build and save the validation dataset chunks.
    make_data_set('valid2.csv', 'valid_data_set')


# Run the pipeline only when executed as a script, not on import.
if __name__ == '__main__':
    main()
