import json
import os
import torch
import timm
from torch.utils.data import DataLoader
from ylcls.dataset import CLSDataset, MNISTCLSDataset
from ylcls.models import ClsModel
from ylcls.utils import same_seeds
# Allow disabling DataLoader pin_memory via the PIN_MEMORY env var; defaults to enabled.
PIN_MEMORY = os.getenv('PIN_MEMORY', 'True').lower() == 'true'
def get_save_path(config):
    """Compute a fresh experiment directory name and write it back to ``config.output_dir``.

    The name is ``<model_cfg_name>_<n>`` where ``n`` is one larger than the
    highest existing run index for the same model config under the original
    ``config.output_dir`` (0 when no prior run exists).  The base directory is
    created if missing; the per-run directory itself is NOT created here.
    """
    output_dir = config.output_dir
    if config.model_args['model_type'] == 'mymodel':
        # Custom model: config is a file path; use its basename without extension.
        model_cfg_name = os.path.split(config.model_args['model_config'])[-1].split('.')[0]
    else:
        # timm-style model: config is a dict carrying the model name.
        model_cfg_name = config.model_args['model_config']['model_name']
    os.makedirs(output_dir, exist_ok=True)  # exist_ok: no race with concurrent runs
    exp_nums = []
    for entry in os.listdir(output_dir):  # `entry`, not `dir` — avoid shadowing the builtin
        if model_cfg_name in entry:
            try:
                exp_nums.append(int(entry.split('_')[-1]))
            except ValueError:
                # Skip matching entries whose suffix is not a run number
                # (the original code crashed on these with ValueError).
                pass
    cur = max(exp_nums) + 1 if exp_nums else 0
    config.output_dir = os.path.join(output_dir, f'{model_cfg_name}_{cur}')
def get_model(config):
    """Instantiate the classification model described by ``config.model_args``
    and attach it to ``config.model``.

    Raises:
        ValueError: if ``model_type`` is not a supported kind.  (Previously an
            unknown type silently left ``config.model`` unset, surfacing later
            as a confusing AttributeError.)
    """
    model_type = config.model_args['model_type']
    if model_type == 'mymodel':
        # Project-local model: built from a config file path plus class count.
        config.model = ClsModel(config.model_args['model_config'], config.nc)
    elif model_type == 'timmmodel':
        # timm factory: model_config is a kwargs dict (must include model_name).
        config.model = timm.models.create_model(**config.model_args['model_config'])
    else:
        raise ValueError(
            f"Unsupported model_type: {model_type!r} (expected 'mymodel' or 'timmmodel')")
def get_data_list(data_path, data_list_file_path):
    """Parse a whitespace-separated ``<relative_image_path> <int_label>`` list file.

    Args:
        data_path: dataset root; joined onto each relative image path and onto
            ``data_list_file_path``.
        data_list_file_path: list-file path relative to ``data_path``.

    Returns:
        ``(images, labels)``: joined image paths and their integer labels.
    """
    labels, images = [], []
    file_path = os.path.join(data_path, data_list_file_path)
    with open(file_path, "r", encoding="utf-8") as f:
        for line in f:  # stream line-by-line instead of readlines()
            parts = line.split()
            if not parts:
                # Skip blank/whitespace-only lines — a trailing newline in the
                # list file crashed the original with IndexError.
                continue
            images.append(os.path.join(data_path, parts[0]))
            labels.append(int(parts[1]))
    return images, labels
def get_dataloader(config):
    """Build train/valid datasets and DataLoaders and attach them to ``config``.

    Raises:
        ValueError: if ``config.dataset_name`` is not a known dataset class.
    """
    same_seeds(config.seed)  # fix the random seed so experiments are reproducible
    generator = torch.Generator()
    generator.manual_seed(6148914691236517205 + config.seed)
    # Explicit dispatch instead of eval(config.dataset_name): safer (no arbitrary
    # code execution from config) and fails with a clear message.
    dataset_classes = {'CLSDataset': CLSDataset, 'MNISTCLSDataset': MNISTCLSDataset}
    try:
        dataset_cls = dataset_classes[config.dataset_name]
    except KeyError:
        raise ValueError(f'Unknown dataset_name: {config.dataset_name!r}; '
                         f'expected one of {sorted(dataset_classes)}') from None
    config.train_dataset = dataset_cls(config, mode="train", mosaic=config.mosaic,
                                       transforms=config.transforms, max_nums=config.max_nums)
    config.train_dataloader = DataLoader(config.train_dataset, batch_size=config.train_batch_size,
                                         shuffle=True, drop_last=True, pin_memory=PIN_MEMORY,
                                         num_workers=config.workers, generator=generator)
    # NOTE(review): validation uses shuffle=True and drop_last=True, which silently
    # drops up to (eval_batch_size - 1) samples from every evaluation — confirm intended.
    config.valid_dataset = dataset_cls(config, mode="valid", mosaic=False,
                                       transforms=config.transforms, max_nums=config.max_nums)
    config.valid_dataloader = DataLoader(config.valid_dataset, batch_size=config.eval_batch_size,
                                         shuffle=True, drop_last=True, pin_memory=PIN_MEMORY,
                                         num_workers=config.workers, generator=generator)
def get_image_path_dataloader(config):
    """Read the train/valid/test list files onto ``config`` and build the dataloaders."""
    splits = (('train', config.train_txt),
              ('valid', config.valid_txt),
              ('test', config.test_txt))
    for split, list_file in splits:
        image_paths, label_ids = get_data_list(config.dataset_path, list_file)
        setattr(config, f'{split}_images_file_list', image_paths)
        setattr(config, f'{split}_labels_list', label_ids)
    get_dataloader(config)
def get_index2label(config):
    """Load the index->label JSON mapping and attach it to ``config.index2label``.

    The JSON object maps string indices to label names; keys are converted to
    int.  The original double inversion (index->label, invert, invert back)
    silently dropped an entry whenever two indices shared a label; the direct
    comprehension keeps every entry and is equivalent for the normal
    one-to-one case.
    """
    with open(config.index2label_file_path, "r", encoding="utf-8") as f:
        raw_mapping = json.load(f)
    config.index2label = {int(index): label for index, label in raw_mapping.items()}
def get_config(config_dict, config_type):
    """Build a fully-initialised config: save path, model, label map, dataloaders.

    ``config_type`` is instantiated from ``config_dict``; each setup step then
    mutates the config object in place, in order.
    """
    cfg = config_type(**config_dict)
    for setup_step in (get_save_path, get_model, get_index2label, get_image_path_dataloader):
        setup_step(cfg)
    return cfg