# Dataset splitting
import math
import os
import shutil
from collections import Counter
def reorg_dog_data(data_dir, label_file, train_dir, test_dir, input_dir,
                   valid_ratio):
    """Split a labeled image set into train / valid / train_valid / test folders.

    Reads ``label_file`` (a CSV whose first column is the image id and whose
    second column is the label; the header row is skipped) and copies every
    file into per-label subdirectories under ``data_dir/input_dir``:

    * ``train_valid/<label>`` -- every image from ``data_dir/train_dir``;
    * ``valid/<label>``       -- the first ``floor(min_class_count * valid_ratio)``
                                 images encountered for each label;
    * ``train/<label>``       -- the remaining training images;
    * ``test/unknown``        -- every file from ``data_dir/test_dir``.

    NOTE(review): the lookup id is the file name without its extension, so the
    CSV's first column must match the image file names -- verify this holds for
    the metadata file actually passed in (HAM10000_metadata.csv puts lesion_id
    first, not image_id).
    """
    # Map image id -> label from the CSV, skipping the header line.
    with open(os.path.join(data_dir, label_file), 'r') as f:
        lines = f.readlines()[1:]
        tokens = [(line.split(',')[0], line.split(',')[1].strip()) for line in lines]
        idx_label = dict(tokens)

    # Size of the rarest class; the per-class validation quota is derived from
    # it so every class contributes the same number of validation images.
    min_num_train_per_label = min(Counter(idx_label.values()).values())
    num_valid_per_label = math.floor(min_num_train_per_label * valid_ratio)
    label_count = {}

    def _copy_into(src, *dst_parts):
        # Copy src into the destination folder, creating it on first use.
        dst = os.path.join(*dst_parts)
        os.makedirs(dst, exist_ok=True)
        shutil.copy(src, dst)

    # Split the training images: every file goes to train_valid; each class
    # fills its validation quota first, then the rest go to train.
    for train_file in os.listdir(os.path.join(data_dir, train_dir)):
        idx = train_file.split('.')[0]
        label = idx_label[idx]
        src = os.path.join(data_dir, train_dir, train_file)
        _copy_into(src, data_dir, input_dir, 'train_valid', label)
        if label_count.get(label, 0) < num_valid_per_label:
            _copy_into(src, data_dir, input_dir, 'valid', label)
            label_count[label] = label_count.get(label, 0) + 1
        else:
            _copy_into(src, data_dir, input_dir, 'train', label)

    # Copy the unlabeled test images into a single 'unknown' class folder.
    for test_file in os.listdir(os.path.join(data_dir, test_dir)):
        _copy_into(os.path.join(data_dir, test_dir, test_file),
                   data_dir, input_dir, 'test', 'unknown')

# Dataset locations and split configuration.
data_dir = "D:\\ai\\code\\data\\archive"  # root directory of the dataset
label_file = 'HAM10000_metadata.csv'  # CSV file name (with extension) in the root directory
train_dir = 'HAM10000_images_part_1'  # training-image folder inside the root
test_dir = 'HAM10000_images_part_2'  # test-image folder inside the root
input_dir = 'train_valid_test'  # output folder for the split data; created automatically
batch_size = 4  # number of samples per batch sent to training
valid_ratio = 0.5  # fraction of each class's training images held out for validation
#
# reorg_dog_data(data_dir, label_file, train_dir, test_dir, input_dir,
#                    valid_ratio)

# Work around the "duplicate OpenMP runtime" crash some torch builds hit on Windows.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

from torch.utils.data import DataLoader,Dataset
import matplotlib.pyplot as plt
import torch
from torchvision import transforms, datasets
from PIL import Image
import pandas as pd
import numpy as np
# Suppress all warning messages
import warnings
warnings.filterwarnings("ignore")
# Training-time augmentation pipeline.
transform_train = transforms.Compose([
    # Randomly crop a region covering 0.08~1x of the original area with aspect
    # ratio between 3/4 and 4/3, then resize it to a 28x28 image.
    transforms.RandomResizedCrop(28, scale=(0.08, 1.0),
                                 ratio=(3.0/4.0, 4.0/3.0)),
    # Horizontally flip with probability 0.5.
    transforms.RandomHorizontalFlip(),
    # Randomly jitter brightness, contrast and saturation.
    transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
    transforms.ToTensor(),
    # Standardize each channel; (0.485, 0.456, 0.406) / (0.229, 0.224, 0.225)
    # are the per-channel means and stds computed on ImageNet.
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # ImageNet mean and std
])

# Test-time pipeline: deterministic operations only (no random augmentation).
transform_test = transforms.Compose([
    transforms.Resize(28),
    # Crop out the central 28x28 square of the image.
    transforms.CenterCrop(28),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
import torchvision
# Load the split dataset from the folders produced by reorg_dog_data.
new_data_dir="D:\\ai\\code\\data\\archive\\train_valid_test"

train_ds = datasets.ImageFolder(root=os.path.join(new_data_dir, 'train'),
                                transform=transform_train)
# BUG FIX: validation must use the deterministic test-time transforms, not the
# random training augmentation, so validation metrics are reproducible.
valid_ds = datasets.ImageFolder(root=os.path.join(new_data_dir, 'valid'),
                                transform=transform_test)

# Use the batch_size constant defined above instead of a hard-coded 4.
train_loader = torch.utils.data.DataLoader(train_ds, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_ds, batch_size=batch_size, shuffle=True)
import matplotlib.pyplot as plt
import numpy as np

# Display an image tensor.
def imshow(img):
    """Show a (C, H, W) image tensor after undoing the ImageNet normalization.

    BUG FIX: the original ``img / 2 + 0.5`` only inverts Normalize(mean=0.5,
    std=0.5); the pipelines above use the ImageNet mean/std, so the correct
    inversion is ``img * std + mean``.
    """
    mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
    std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
    img = img * std + mean  # invert transforms.Normalize
    npimg = img.numpy()
    # Matplotlib expects channels last: (C, H, W) -> (H, W, C).
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()
# Fetch one random batch of training data (swap the loader here to preview
# another split).
dataiter = iter(train_loader)
images, labels = next(dataiter)
# Show the batch as a single image grid.
imshow(torchvision.utils.make_grid(images))
# Print the labels. Unpacking prints identically for a full batch of 4 and,
# unlike indexing labels[0..3], does not crash on a shorter final batch.
print(*labels)