# Data loading and preprocessing
"""
Dataset utilities for few-shot learning:
- FewShotLearningDatasetParallel builds task sets for training and evaluation.
- Image augmentation/transform helpers such as augment_image, applied during training.
- Logic for splitting data into train, validation and test sets.
- Supports several datasets, e.g. CIFAR-10, CIFAR-100, Omniglot and ImageNet.
"""

import json
import os
from collections import defaultdict

import numpy as np
from PIL import Image
from torch.utils.data import Dataset, DataLoader
import tqdm
import concurrent.futures
import pickle
import torch
from torchvision import transforms
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

from utils.parser_utils import get_args

# Image rotation helpers
class rotate_image(object):
    """Callable that rotates a numpy image by k * 90 degrees counter-clockwise.

    When ``channels == 1`` and the input carries a trailing channel axis,
    only channel 0 is kept (as a single-channel axis) before rotating.
    """

    def __init__(self, k, channels):
        self.k = k
        self.channels = channels

    def __call__(self, image):
        ndim = len(image.shape)
        if self.channels == 1:
            if ndim == 3:
                # (H, W, C) -> keep channel 0 as (H, W, 1)
                image = np.expand_dims(image[:, :, 0], axis=2)
            elif ndim == 4:
                # (B, H, W, C) -> keep channel 0 as (B, H, W, 1)
                image = np.expand_dims(image[:, :, :, 0], axis=3)
        # copy() detaches the result from np.rot90's view of the input
        return np.rot90(image, k=self.k).copy()


class torch_rotate_image(object):
    """Callable that applies a torchvision RandomRotation of up to k * 90 degrees.

    The input numpy image is round-tripped through PIL; a squeezed
    single-channel input comes back with its channel axis restored.
    """

    def __init__(self, k, channels):
        self.k = k
        self.channels = channels

    def __call__(self, image):
        # Drop a trailing singleton channel so PIL can ingest the array.
        if image.shape[-1] == 1:
            image = image[:, :, 0]
        pil_image = Image.fromarray(image)
        # NOTE: RandomRotation samples an angle in [-k*90, k*90] at call time.
        rotated = transforms.RandomRotation(degrees=self.k * 90)(pil_image)
        result = np.array(rotated)
        if len(result.shape) == 2:
            result = np.expand_dims(result, axis=2)
        return result

# Image augmentation function
def augment_image(image, k, channels, augment_bool, args, dataset_name):
    """Apply the dataset-specific transform pipeline to one image or a stack.

    :param image: A single image array (<= 3 dims) or a batch of images (> 3 dims).
    :param k: Rotation multiple forwarded to the transform factory.
    :param channels: Number of image channels (unused here, kept for API parity).
    :param augment_bool: When exactly True, use the training transforms;
        otherwise use the evaluation transforms.
    :param args: Configuration object forwarded to the transform factory.
    :param dataset_name: Name used to pick the transform set.
    :return: Transformed image, or a torch-stacked batch of transformed images.
    """
    transform_train, transform_evaluation = get_transforms_for_dataset(
        dataset_name=dataset_name, args=args, k=k)
    pipeline = transform_train if augment_bool is True else transform_evaluation

    if len(image.shape) > 3:
        # Batched input: transform each image independently, then stack.
        transformed = []
        for single in image:
            for transform_step in pipeline:
                single = transform_step(single)
            transformed.append(single)
        return torch.stack(transformed)

    for transform_step in pipeline:
        image = transform_step(image)
    return image

# Per-dataset torchvision transform pipelines
def get_transforms_for_dataset(dataset_name, args, k):
    """Return the (train, evaluation) transform lists for the named dataset.

    :param dataset_name: Dataset identifier; matched by substring
        ("cifar10"/"cifar100"/"FC100", "omniglot", "imagenet"/"NEU").
    :param args: Configuration object (currently unused by the active branches).
    :param k: Rotation multiple; currently unused — kept for the disabled
        omniglot rotation variant and for API compatibility.
    :return: (transform_train, transform_evaluate) lists of callables.
    :raises ValueError: If dataset_name matches no supported dataset.
        (Previously an unknown name crashed with UnboundLocalError.)
    """
    if "cifar10" in dataset_name or "cifar100" in dataset_name or "FC100" in dataset_name:
        # CIFAR-style statistics for normalization.
        transform_train = [
            transforms.ToTensor(),
            transforms.Normalize((0.5071, 0.4847, 0.4408), (0.2675, 0.2565, 0.2761))]

        transform_evaluate = [
            transforms.ToTensor(),
            transforms.Normalize((0.5071, 0.4847, 0.4408), (0.2675, 0.2565, 0.2761))]

    elif 'omniglot' in dataset_name:
        transform_train = [transforms.ToTensor()]
        transform_evaluate = [transforms.ToTensor()]
        # Disabled variant kept for reference:
        # transform_train = [rotate_image(k=k, channels=args.image_channels), transforms.ToTensor()]

    elif 'imagenet' in dataset_name or 'NEU' in dataset_name:
        # Standard ImageNet mean/std normalization.
        transform_train = [transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])]

        transform_evaluate = [transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])]

    else:
        raise ValueError("Unsupported dataset name: {}".format(dataset_name))

    return transform_train, transform_evaluate


def load_datapaths_from_path(self, dataset_path):
    """
    Loads a dataset's dictionary files from a specific path.

    NOTE(review): module-level function that takes ``self`` explicitly — it
    appears to be a legacy shim delegating to a provider's ``load_datapaths``.
    :param dataset_path: Path to the dataset
    :return: data_image_paths, index_to_label_name_dict_file, label_to_index
    """
    return self.load_datapaths(dataset_path=dataset_path)


class FewShotLearningDatasetParallel(Dataset):
    """Few-shot learning data provider.

    Builds N-way K-shot tasks on demand. The train and validation splits are
    carved out of the dataset at ``args.train_dataset_path`` (80/20 per
    class, fixed RNG seed); the test split is loaded from the separate
    ``args.test_dataset_path``.
    """

    def __init__(self, args):
        """
        :param args: Configuration object carrying dataset and task
            hyperparameters (paths, image sizes, way/shot counts, seeds, ...).
        """
        self.args = args
        self.dataset_name = args.train_dataset_name
        self.num_classes_per_set = args.num_classes_per_set  # classes per task (N-way)
        self.num_samples_per_class = args.num_samples_per_class  # support samples per class (K-shot)
        self.data_loaded_in_memory = getattr(args, 'load_into_memory', False)

        # Image parameters
        self.image_height = args.image_height
        self.image_width = args.image_width
        self.image_channel = args.image_channels

        # Dataset configuration
        self.indexes_of_folders_indicating_class = args.indexes_of_folders_indicating_class
        self.reverse_channels = args.reverse_channels
        self.labels_as_int = args.labels_as_int
        self.current_set_name = "train"  # Initial active dataset
        self.num_target_samples = args.num_target_samples
        self.reset_stored_filepaths = args.reset_stored_filepaths
        # Safe default so __getitem__ works even when set_augmentation()
        # was never called (previously raised AttributeError).
        self.augment_images = False

        # Seed management: per-split base seeds from which task seeds derive.
        self.init_seed = {
            "train": args.train_seed,
            "val": args.val_seed,
            "test": args.test_seed
        }
        self.seed = self.init_seed.copy()

        # Load dataset splits
        self.datasets = self.load_dataset()

        # Per-split {class_index: sample_count} mappings and total counts.
        self.dataset_size_dict = {
            "train": {k: len(v) for k, v in self.datasets["train"].items()},
            "val": {k: len(v) for k, v in self.datasets["val"].items()},
            "test": {k: len(v) for k, v in self.datasets["test"].items()}
        }
        self.data_length = {
            name: sum(len(c) for c in splits.values())
            for name, splits in self.datasets.items()
        }

        self.classes = {
            "train": list(self.datasets["train"].keys()),
            "val": list(self.datasets["val"].keys()),
            "test": list(self.datasets["test"].keys())
        }

        print(f"Dataset sizes: {self.data_length}")

    def load_dataset(self):
        """Load and split the data into train/val/test.

        Train and validation come from the same source directory and are
        split 80/20 per class with a fixed RNG seed; test has its own path.
        :return: dict mapping split name -> {class_index: [paths or arrays]}
        """
        print("Loading and splitting datasets...")

        # Load base dataset for train/val splits
        train_val_data, train_idx_to_label, train_label_to_idx = self.load_datapaths_from_path(
            self.args.train_dataset_path
        )

        # Split train/validation (80/20 per class)
        train_data, val_data = {}, {}
        split_ratio = 0.8
        rng = np.random.RandomState(42)  # Fixed seed for reproducibility

        for cls, paths in train_val_data.items():
            shuffled = rng.permutation(paths).tolist()
            split_idx = int(len(shuffled) * split_ratio)
            train_data[cls] = shuffled[:split_idx]
            val_data[cls] = shuffled[split_idx:]

        # Load separate test dataset
        test_data, test_idx_to_label, test_label_to_idx = self.load_datapaths_from_path(
            self.args.test_dataset_path
        )

        # Save label mappings
        self._save_label_mappings(
            train_idx_to_label, train_label_to_idx,
            self.args.train_dataset_path
        )
        self._save_label_mappings(
            test_idx_to_label, test_label_to_idx,
            self.args.test_dataset_path
        )

        # Optionally preload all images into RAM. Use the attribute computed
        # in __init__ (with a safe default) instead of reading args again,
        # which raised AttributeError when the option was absent.
        if self.data_loaded_in_memory:
            print("Loading data into RAM...")
            return {
                "train": self._load_to_memory(train_data),
                "val": self._load_to_memory(val_data),
                "test": self._load_to_memory(test_data)
            }

        return {
            "train": train_data,
            "val": val_data,
            "test": test_data
        }

    def load_datapaths_from_path(self, dataset_path):
        """Scan a directory tree for images and build label mappings.

        The class name is taken from each image's parent directory (see
        get_label_from_path).
        :param dataset_path: Root directory to walk.
        :return: ({class_index: [filepaths]}, {index: label}, {label: index})
        """
        data = defaultdict(list)
        labels = set()

        for root, _, files in os.walk(dataset_path):
            for file in files:
                if file.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp')):
                    path = os.path.join(root, file)
                    label = self.get_label_from_path(path)
                    data[label].append(path)
                    labels.add(label)

        # Create deterministic label <-> index mappings.
        sorted_labels = sorted(labels)
        idx_to_label = {i: lbl for i, lbl in enumerate(sorted_labels)}
        label_to_idx = {lbl: i for i, lbl in enumerate(sorted_labels)}

        # Convert to standard dict format keyed by class index.
        return (
            {label_to_idx[lbl]: paths for lbl, paths in data.items()},
            idx_to_label,
            label_to_idx
        )

    def _save_label_mappings(self, idx_to_label, label_to_idx, base_path):
        """Persist both label mapping dicts as JSON next to the dataset.

        :param idx_to_label: {index: label-name} mapping.
        :param label_to_idx: {label-name: index} mapping.
        :param base_path: Directory in which to write the JSON files.
        """
        idx_file = os.path.join(base_path, "index_to_label_name.json")
        label_file = os.path.join(base_path, "label_to_index.json")

        with open(idx_file, 'w') as f:
            json.dump(idx_to_label, f)
        with open(label_file, 'w') as f:
            json.dump(label_to_idx, f)

    def _load_to_memory(self, data_dict):
        """Load every image of every class into memory.

        :param data_dict: {class_index: [filepaths]}
        :return: {class_index: [image arrays]}
        """
        return {
            cls: [self.load_image(p, self.image_channel) for p in paths]
            for cls, paths in data_dict.items()
        }

    def save_to_json(self, filename, dict_to_store):
        """Serialize a dict to a JSON file at the given (relative) path."""
        with open(os.path.abspath(filename), 'w') as f:
            json.dump(dict_to_store, fp=f)

    def load_from_json(self, filename):
        """Deserialize and return the dict stored in a JSON file."""
        with open(filename, mode="r") as f:
            load_dict = json.load(fp=f)

        return load_dict

    def load_test_image(self, filepath):
        """
        Tests whether a target filepath contains an uncorrupted image. If image is corrupted, attempt to fix.
        :param filepath: Filepath of image to be tested
        :return: Return filepath of image if image exists and is uncorrupted (or attempt to fix has succeeded),
        else return None
        """
        image = None
        try:
            image = Image.open(filepath)
        except RuntimeWarning:
            # NOTE(review): PIL normally signals truncated files via OSError,
            # not RuntimeWarning, so this repair branch is likely unreachable
            # unless warnings are escalated to errors — confirm intent.
            # The shell-out assumes ImageMagick's `convert` is installed and
            # that filepath contains no shell metacharacters.
            os.system("convert {} -strip {}".format(filepath, filepath))
            print("converting")
            image = Image.open(filepath)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate.
            print("Broken image")

        if image is not None:
            return filepath
        else:
            return None

    def get_data_paths(self):
        """
        Method that scans the dataset directory and generates class to image-filepath list dictionaries.

        NOTE(review): legacy method — it reads ``self.data_path``, which is
        never assigned anywhere in this class, so calling it raises
        AttributeError. Kept for reference; the active scanner is
        load_datapaths_from_path().
        :return: data_image_paths: dict containing class to filepath list pairs.
                 index_to_label_name_dict_file: dict containing numerical indexes mapped to the human understandable
                 string-names of the class
                 label_to_index: dictionary containing human understandable string mapped to numerical indexes
        """
        print("Generating data paths...")
        print("Get images from", self.data_path)
        data_image_path_list_raw = []
        labels = set()
        for subdir, dir, files in os.walk(self.data_path):
            for file in files:
                if (".jpeg") in file.lower() or (".png") in file.lower() or (".jpg") in file.lower() or (".bmp") in file.lower():
                    filepath = os.path.abspath(os.path.join(subdir, file))
                    label = self.get_label_from_path(filepath)
                    data_image_path_list_raw.append(filepath)
                    labels.add(label)

        labels = sorted(labels)
        idx_to_label_name = {idx: label for idx, label in enumerate(labels)}
        label_name_to_idx = {label: idx for idx, label in enumerate(labels)}
        data_image_path_dict = {idx: [] for idx in list(idx_to_label_name.keys())}
        with tqdm.tqdm(total=len(data_image_path_list_raw)) as pbar_error:
            with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
                # Validate files in parallel across the process pool.
                for image_file in executor.map(self.load_test_image, (data_image_path_list_raw)):
                    pbar_error.update(1)
                    if image_file is not None:
                        label = self.get_label_from_path(image_file)
                        data_image_path_dict[label_name_to_idx[label]].append(image_file)

        return data_image_path_dict, idx_to_label_name, label_name_to_idx

    def get_label_set(self):
        """Return the set of label keys for the currently active split.

        Reads the index_to_label_name.json written by _save_label_mappings;
        train/val share the train path, test uses the test path.
        """
        if self.current_set_name in ("train", "val"):
            map_file = os.path.join(
                self.args.train_dataset_path,
                "index_to_label_name.json"
            )
        else:
            map_file = os.path.join(
                self.args.test_dataset_path,
                "index_to_label_name.json"
            )

        with open(map_file, 'r') as f:
            mapping = json.load(f)

        return set(mapping.keys())

    def get_index_from_label(self, label):
        """
        Given a class's (human understandable) string, returns the numerical index of that class.

        NOTE(review): ``self.label_name_to_map_dict_file`` is not assigned by
        any live code path in this class — calling this raises AttributeError.
        :param label: A string of a human understandable class contained in the dataset
        :return: An int containing the numerical index of the given class-string
        """
        label_to_index = self.load_from_json(filename=self.label_name_to_map_dict_file)
        return label_to_index[label]

    def get_label_from_index(self, index):
        """
        Given an index return the human understandable label mapping to it.

        NOTE(review): ``self.index_to_label_name_dict_file`` is not assigned
        by any live code path in this class — see get_index_from_label.
        :param index: A numerical index (int)
        :return: A human understandable label (str)
        """
        index_to_label_name = self.load_from_json(filename=self.index_to_label_name_dict_file)
        return index_to_label_name[index]

    def get_label_from_path(self, filepath):
        """Derive a class label from an image filepath.

        Example: for parts ["dataset_root", "images", "n01532829", "img.jpg"]
        the second-to-last component ("n01532829") is the class name.
        :param filepath: Path whose parent directory names the class.
        :return: Label string, or int when labels_as_int is set.
        """
        normalized = os.path.normpath(filepath)
        parts = normalized.split(os.sep)

        # The class is the immediate parent directory of the file.
        label = parts[-2]

        if self.labels_as_int:
            # Labels are expected to be purely numeric directory names here.
            return int(label)
        else:
            return label

    def load_image(self, image_path, channels):
        """
        Given an image filepath and the number of channels to keep, load and preprocess the image.

        :param image_path: The image's filepath (or the preloaded array when
            data is already in memory).
        :param channels: The number of channels to keep.
        :return: An image array of shape (h, w, channels). RGB images are
            scaled to [0, 1]; NOTE(review): the omniglot branch does not
            divide by 255 — confirm that is intended.
        """
        if not self.data_loaded_in_memory:
            image = Image.open(image_path)
            if 'omniglot' in self.dataset_name:
                image = image.resize((self.image_height, self.image_width), resample=Image.LANCZOS)
                image = np.array(image, dtype=np.float32)
                if channels == 1:
                    image = np.expand_dims(image, axis=2)  # (H, W) -> (H, W, 1)
                    image = np.repeat(image, 3, axis=2)  # (H, W, 1) -> (H, W, 3)
            else:
                image = image.resize((self.image_height, self.image_width)).convert('RGB')
                image = np.array(image, np.float32)
                image = image / 255.0
        else:
            # Data was preloaded: image_path already is the image array.
            image = image_path

        return image

    def load_batch(self, batch_image_paths):
        """
        Load a batch of images, given a list of filepaths.
        :param batch_image_paths: A list of filepaths (or preloaded arrays).
        :return: A numpy array of images of shape (batch, height, width, channels).
        """
        image_batch = []

        if self.data_loaded_in_memory:
            for image_path in batch_image_paths:
                image_batch.append(image_path)
            image_batch = np.array(image_batch, dtype=np.float32)
        else:
            image_batch = [self.load_image(image_path=image_path, channels=self.image_channel)
                           for image_path in batch_image_paths]
            image_batch = np.array(image_batch, dtype=np.float32)
            image_batch = self.preprocess_data(image_batch)

        return image_batch

    def load_parallel_batch(self, inputs):
        """
        Load a batch of images for one class (worker-friendly signature).
        :param inputs: Tuple of (class_label, list of filepaths).
        :return: (class_label, numpy array of shape (batch, height, width, channels))
        """
        class_label, batch_image_paths = inputs
        image_batch = []

        if self.data_loaded_in_memory:
            for image_path in batch_image_paths:
                # Copy so callers can mutate without touching the cache.
                image_batch.append(np.copy(image_path))
            image_batch = np.array(image_batch, dtype=np.float32)
        else:
            image_batch = [self.load_image(image_path=image_path, channels=self.image_channel)
                           for image_path in batch_image_paths]
            image_batch = np.array(image_batch, dtype=np.float32)
            image_batch = self.preprocess_data(image_batch)

        return class_label, image_batch

    def preprocess_data(self, x):
        """
        Preprocesses data such that their shapes match the specified structures.
        Flattens leading dims to (N, H, W, C), optionally reverses the channel
        order (e.g. RGB <-> BGR), then restores the original shape.
        :param x: A data batch to preprocess
        :return: A preprocessed data batch
        """
        x_shape = x.shape
        x = np.reshape(x, (-1, x_shape[-3], x_shape[-2], x_shape[-1]))
        if self.reverse_channels is True:
            # Reverse the channel axis with a slice instead of the old
            # element-wise copy into an np.ones buffer, which silently
            # upcast float32 batches to float64.
            x = x[..., ::-1]
        x = x.reshape(x_shape)
        return x

    def reconstruct_original(self, x):
        """
        Applies the reverse operations that preprocess_data() applies such that the data returns to their original form
        :param x: A batch of data to reconstruct
        :return: A reconstructed batch of data
        """
        x = x * 255.0
        return x

    def shuffle(self, x, rng):
        """
        Shuffles the data batch along its first axis.
        :param x: A data batch
        :param rng: A numpy RandomState used for the permutation
        :return: A shuffled data batch
        """
        indices = np.arange(len(x))
        rng.shuffle(indices)
        x = x[indices]
        return x

    def get_set(self, dataset_name, seed, augment_images=False):
        """
        Generates a task-set to be used for training or evaluation.
        :param dataset_name: The name of the split to use, e.g. "train", "val", "test".
        :param seed: Seed for this task's RNG (deterministic task generation).
        :param augment_images: Whether to apply training-time augmentation.
        :return: (support images, target images, support labels, target labels, seed)
        """
        rng = np.random.RandomState(seed)
        # NOTE(review): replace=True allows the same class to be drawn twice,
        # in which case k_dict / class_to_episode_label collapse duplicates —
        # confirm whether replace=False (distinct N-way classes) was intended.
        selected_classes = rng.choice(list(self.dataset_size_dict[dataset_name].keys()),
                                      size=self.num_classes_per_set, replace=True)
        rng.shuffle(selected_classes)
        # One rotation multiple per class, shared by all its samples.
        k_list = rng.randint(0, 4, size=self.num_classes_per_set)
        k_dict = {selected_class: k_item for (selected_class, k_item) in zip(selected_classes, k_list)}
        episode_labels = [i for i in range(self.num_classes_per_set)]
        class_to_episode_label = {selected_class: episode_label for (selected_class, episode_label) in
                                  zip(selected_classes, episode_labels)}

        x_images = []
        y_labels = []

        for class_entry in selected_classes:
            # Draw support + target samples for this class without replacement.
            choose_samples_list = rng.choice(self.dataset_size_dict[dataset_name][class_entry],
                                             size=self.num_samples_per_class + self.num_target_samples, replace=False)
            class_image_samples = []
            class_labels = []
            for sample in choose_samples_list:
                choose_samples = self.datasets[dataset_name][class_entry][sample]
                x_class_data = self.load_batch([choose_samples])[0]
                k = k_dict[class_entry]
                x_class_data = augment_image(image=x_class_data, k=k,
                                             channels=self.image_channel, augment_bool=augment_images,
                                             dataset_name=self.dataset_name, args=self.args)
                class_image_samples.append(x_class_data)
                class_labels.append(int(class_to_episode_label[class_entry]))
            class_image_samples = torch.stack(class_image_samples)
            x_images.append(class_image_samples)
            y_labels.append(class_labels)

        x_images = torch.stack(x_images)
        y_labels = np.array(y_labels, dtype=np.float32)

        # First num_samples_per_class samples per class form the support set,
        # the remainder the target (query) set.
        support_set_images = x_images[:, :self.num_samples_per_class]
        support_set_labels = y_labels[:, :self.num_samples_per_class]
        target_set_images = x_images[:, self.num_samples_per_class:]
        target_set_labels = y_labels[:, self.num_samples_per_class:]

        return support_set_images, target_set_images, support_set_labels, target_set_labels, seed

    def __len__(self):
        # Length of the currently active split (switch with switch_set()).
        total_samples = self.data_length[self.current_set_name]
        return total_samples

    def length(self, set_name):
        """Switch to the given split and return its length."""
        self.switch_set(set_name=set_name)
        return len(self)

    def set_augmentation(self, augment_images):
        """Enable/disable training-time augmentation for __getitem__."""
        self.augment_images = augment_images

    def switch_set(self, set_name, current_iter=None):
        """Switch the active split; optionally advance the train seed.

        :param set_name: "train", "val" or "test".
        :param current_iter: When resuming training, offsets the train seed so
            task generation continues deterministically from that iteration.
        """
        self.current_set_name = set_name
        if set_name == "train" and current_iter is not None:
            self.seed["train"] = self.init_seed["train"] + current_iter

    def update_seed(self, dataset_name, seed=100):
        """Override the base seed of one split."""
        self.seed[dataset_name] = seed

    def __getitem__(self, idx):
        # Each index deterministically maps to one task via seed + idx.
        support_set_images, target_set_image, support_set_labels, target_set_label, seed = \
            self.get_set(self.current_set_name, seed=self.seed[self.current_set_name] + idx,
                         augment_images=self.augment_images)

        return support_set_images, target_set_image, support_set_labels, target_set_label, seed

    def reset_seed(self):
        """Restore all split seeds to their initial values.

        Copies the dict: assigning ``self.init_seed`` directly would alias the
        two dicts, so later per-split seed updates would silently corrupt the
        saved initial seeds.
        """
        self.seed = self.init_seed.copy()


class MetaLearningSystemDataLoader(object):
    def __init__(self, args, current_iter=0):
        """Wrap FewShotLearningDatasetParallel with DataLoader-based batch generators.

        :param args: Configuration object with all hyperparameters.
        :param current_iter: Current training iteration, used to restore state.
        """
        self.args = args
        self.num_of_gpus = args.num_of_gpus
        self.batch_size = args.batch_size
        self.samples_per_iter = args.samples_per_iter
        self.num_workers = args.num_dataprovider_workers

        # Build the underlying dataset and start in training mode.
        self.dataset = FewShotLearningDatasetParallel(args=args)
        self.dataset.switch_set("train")

        # Batch bookkeeping: tasks produced so far and full split lengths.
        self.total_train_iters_produced = current_iter * (self.num_of_gpus * self.batch_size * self.samples_per_iter)
        self.full_data_length = {
            split: self.dataset.data_length[split]
            for split in ("train", "val", "test")
        }

    def get_dataloader(self, mode):
        """Return a DataLoader over the requested split.

        :param mode: Split name ("train"/"val"/"test").
        :return: A configured DataLoader instance.
        """
        self.dataset.switch_set(mode)
        tasks_per_iter = self.num_of_gpus * self.batch_size * self.samples_per_iter
        return DataLoader(
            self.dataset,
            batch_size=tasks_per_iter,
            shuffle=False,
            num_workers=self.num_workers,
            drop_last=True
        )

    def switch_set(self, mode):
        """Explicitly switch the active split ("train"/"val"/"test")."""
        self.dataset.switch_set(mode)

    def get_batches(self, mode, total_batches=-1, augment_images=False):
        """Generic batch generator for any split.

        :param mode: Split name ("train"/"val"/"test").
        :param total_batches: Number of batches to yield; -1 means the full split.
        :param augment_images: Whether to apply data augmentation.
        :yield: One batch of task data at a time.
        """
        # Size the split for this pass: full length or a fixed batch budget.
        if total_batches == -1:
            self.dataset.data_length[mode] = self.full_data_length[mode]
        else:
            self.dataset.data_length[mode] = total_batches * self.batch_size

        self.dataset.set_augmentation(augment_images)

        loader = self.get_dataloader(mode)

        for batch_index, batch in enumerate(loader):
            if total_batches != -1 and batch_index >= total_batches:
                break
            yield batch

    def get_train_batches(self, total_batches=-1, augment_images=True):
        """Training batches (augmentation enabled by default)."""
        return self.get_batches("train", total_batches, augment_images)

    def get_val_batches(self, total_batches=-1, augment_images=False):
        """Validation batches."""
        return self.get_batches("val", total_batches, augment_images)

    def get_test_batches(self, total_batches=-1, augment_images=False):
        """Test batches."""
        return self.get_batches("test", total_batches, augment_images)

    def continue_from_iter(self, current_iter):
        """Restore the produced-iteration counter when resuming training.

        :param current_iter: Iteration to resume from.
        """
        self.total_train_iters_produced = current_iter * (self.num_of_gpus * self.batch_size * self.samples_per_iter)

