import argparse
import datetime
import gc
import logging
import math
import os
import random
import time
from os import path as osp

import numpy as np
import torch
from tqdm import tqdm

from basicsr.data import create_dataloader, create_dataset
from basicsr.data.data_sampler import EnlargedSampler
from basicsr.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher
from basicsr.models import create_model
from basicsr.utils import (MessageLogger, check_resume, get_env_info,
                           get_root_logger, get_time_str, init_tb_logger,
                           init_wandb_logger, make_exp_dirs, mkdir_and_rename,
                           set_random_seed)
from basicsr.utils.dist_util import get_dist_info, init_dist
from basicsr.utils.options import dict2str, parse
from basicsr.utils.nano import apply_conv_n_deconv, psf2otf
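
# Training entry script: parses options, builds the train/val dataloaders,
# simulates the optical measurement from the precomputed PSF on the fly,
# and runs the iteration-based training loop.
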
def parse_options(is_train=True):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-opt', type=str, required=True, help='Path to option YAML file.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm'],
        default='none',
        help='job launcher')
    parser.add_argument(
        '--name',
        default=None,
        help='experiment name; overrides the name in the option file')
    # Accept both spellings of the local rank flag; different PyTorch
    # distributed launcher versions pass different forms.
    parser.add_argument('--local-rank', type=int, default=0)
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    opt = parse(args.opt, is_train=is_train, name=args.name or None)

    # Distributed settings.
    if args.launcher == 'none':
        opt['dist'] = False
        print('Disable distributed.', flush=True)
    else:
        opt['dist'] = True
        if args.launcher == 'slurm' and 'dist_params' in opt:
            init_dist(args.launcher, **opt['dist_params'])
        else:
            init_dist(args.launcher)
        print(f'Init dist with launcher: {args.launcher}', flush=True)

    opt['rank'], opt['world_size'] = get_dist_info()

    # Random seed: draw one if not specified, then offset by rank so every
    # process gets a distinct seed.
    seed = opt.get('manual_seed')
    if seed is None:
        seed = random.randint(1, 10000)
        opt['manual_seed'] = seed
    set_random_seed(seed + opt['rank'])

    return opt


def init_loggers(opt):
    log_file = osp.join(opt['path']['log'],
                        f"train_{opt['name']}_{get_time_str()}.log")
    logger = get_root_logger(
        logger_name='basicsr', log_level=logging.INFO, log_file=log_file)
    logger.info(get_env_info())
    logger.info(dict2str(opt))

    # Initialize the wandb logger before the tensorboard logger so wandb
    # can sync the tensorboard files.
    if (opt['logger'].get('wandb')
            is not None) and (opt['logger']['wandb'].get('project')
                              is not None) and ('debug' not in opt['name']):
        assert opt['logger'].get('use_tb_logger') is True, (
            'should turn on tensorboard when using wandb')
        init_wandb_logger(opt)
    tb_logger = None
    if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name']:
        tb_logger = init_tb_logger(log_dir=osp.join('tb_logger', opt['name']))
    return logger, tb_logger

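# Build the train/val dataloaders. Training is iteration-based: the number
# of epochs is derived from total_iter and the per-epoch iteration count.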
def create_train_val_dataloader(opt, logger):
    train_loader, train_sampler, val_loader = None, None, None
    total_epochs, total_iters = 0, 0
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1)
            train_set = create_dataset(dataset_opt)
            train_sampler = EnlargedSampler(train_set, opt['world_size'],
                                            opt['rank'], dataset_enlarge_ratio)
            train_loader = create_dataloader(
                train_set,
                dataset_opt,
                num_gpu=opt['num_gpu'],
                dist=opt['dist'],
                sampler=train_sampler,
                seed=opt['manual_seed'],
            )

            num_iter_per_epoch = math.ceil(
                len(train_set) * dataset_enlarge_ratio /
                (dataset_opt['batch_size_per_gpu'] * opt['world_size']))
            total_iters = int(opt['train']['total_iter'])
            total_epochs = math.ceil(total_iters / num_iter_per_epoch)
            logger.info(
                'Training statistics:'
                f'\n\tNumber of train images: {len(train_set)}'
                f'\n\tDataset enlarge ratio: {dataset_enlarge_ratio}'
                f'\n\tBatch size per gpu: {dataset_opt["batch_size_per_gpu"]}'
                f'\n\tWorld size (gpu number): {opt["world_size"]}'
                f'\n\tRequired iters per epoch: {num_iter_per_epoch}'
                f'\n\tTotal epochs: {total_epochs}; iters: {total_iters}.')

        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(
                val_set,
                dataset_opt,
                num_gpu=opt['num_gpu'],
                dist=opt['dist'],
                sampler=None,
                seed=opt['manual_seed'],
            )
            logger.info(
                f'Number of val images/folders in {dataset_opt["name"]}: '
                f'{len(val_set)}')

        else:
            raise ValueError(f'Dataset phase {phase} is not recognized.')

    return train_loader, train_sampler, val_loader, total_epochs, total_iters


def main():
    opt = parse_options(is_train=True)
    torch.backends.cudnn.benchmark = True

    # Automatic resume: pick the latest training state saved for this
    # experiment, if any.
    state_folder_path = 'experiments/{}/training_states/'.format(opt['name'])
    try:
        states = [f for f in os.listdir(state_folder_path)
                  if f.endswith('.state')]
    except FileNotFoundError:
        states = []
    resume_state = None
    if len(states) > 0:
        max_state_file = '{}.state'.format(max(int(x[:-6]) for x in states))
        resume_state = os.path.join(state_folder_path, max_state_file)
        opt['path']['resume_state'] = resume_state

    # Load the resume state (if found) onto the current device.
    if opt['path'].get('resume_state'):
        device_id = torch.cuda.current_device()
        resume_state = torch.load(
            opt['path']['resume_state'],
            map_location=lambda storage, loc: storage.cuda(device_id))
    else:
        resume_state = None

    # mkdir for experiments and loggers
    if resume_state is None:
        make_exp_dirs(opt)
        if (opt['logger'].get('use_tb_logger') and 'debug' not in opt['name']
                and opt['rank'] == 0):
            mkdir_and_rename(osp.join('tb_logger', opt['name']))

    logger, tb_logger = init_loggers(opt)

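    # `ks` is a log-spaced grid of M scalar values (presumably per-level
    # deconvolution/noise parameters consumed by apply_conv_n_deconv),
    # reshaped for broadcasting over the batch and spatial dimensions.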
    ks_params = opt['train'].get('ks', None)
    if not ks_params:
        raise NotImplementedError(
            "'ks' must be specified under 'train' in the option file.")
    M = ks_params['num']
    ks = torch.logspace(ks_params['start'], ks_params['end'], M)
    ks = ks.view(1, M, 1, 1, 1, 1).to('cuda')

    # Create the model, resuming training state if available.
    if resume_state:
        check_resume(opt, resume_state['iter'])
        model = create_model(opt)
        model.resume_training(resume_state)
        logger.info(f"Resuming training from epoch: {resume_state['epoch']}, "
                    f"iter: {resume_state['iter']}.")
        start_epoch = resume_state['epoch']
        current_iter = resume_state['iter']
    else:
        model = create_model(opt)
        start_epoch = 0
        current_iter = 0

    result = create_train_val_dataloader(opt, logger)
    train_loader, train_sampler, val_loader, total_epochs, total_iters = result

    msg_logger = MessageLogger(opt, current_iter, tb_logger)

    # Dataloader prefetcher: CPU (default) or CUDA.
    prefetch_mode = opt['datasets']['train'].get('prefetch_mode')
    if prefetch_mode is None or prefetch_mode == 'cpu':
        prefetcher = CPUPrefetcher(train_loader)
    elif prefetch_mode == 'cuda':
        prefetcher = CUDAPrefetcher(train_loader, opt)
        logger.info(f'Use {prefetch_mode} prefetch dataloader')
        if opt['datasets']['train'].get('pin_memory') is not True:
            raise ValueError('Please set pin_memory=True for CUDAPrefetcher.')
    else:
        raise ValueError(f'Wrong prefetch_mode {prefetch_mode}. '
                         "Supported ones are: None, 'cuda', 'cpu'.")

    logger.info(
        f'Start training from epoch: {start_epoch}, iter: {current_iter}')
    data_time, iter_time = time.time(), time.time()
    start_time = time.time()

    epoch = start_epoch
    # Progress bar over total iterations; fast-forward it when resuming.
    pbar = tqdm(total=total_iters + 1)
    pbar.update(current_iter)

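    # Load the measured PSF stack and precompute its OTF once. The OTF is
    # built at 3x the PSF size, presumably to leave room for padding so the
    # FFT-based convolution in apply_conv_n_deconv acts as a linear rather
    # than circular convolution.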
    psf = torch.tensor(np.load('./psf.npy')).to('cuda')
    psf_n, psf_h, psf_w, _ = psf.shape
    psf_n_row = int(psf_n ** 0.5)  # PSFs are assumed to tile a square grid
    sensor_h = opt['datasets']['train'].get('sensor_size')
    otf = psf2otf(psf, h=psf_h * 3, w=psf_w * 3, permute=True)[None]

    gt_size = opt['datasets']['train']['gt_size']
    val_conv = opt['val'].get('apply_conv', True)

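    # Main loop: each iteration synthesizes the measurement from the GT patch
    # (convolution with the OTF plus approximate deconvolution) to form the
    # (lq, gt) training pair, then takes one optimization step.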
    while current_iter <= total_iters:
        train_sampler.set_epoch(epoch)
        prefetcher.reset()
        train_data = prefetcher.next()

        while train_data is not None:
            data_time = time.time() - data_time

            # Synthesize the degraded input from the GT on the fly.
            gt = train_data['gt'].to('cuda')
            padding = train_data['padding']
            padding = torch.stack(padding).T
            lq, gt = apply_conv_n_deconv(
                gt, otf, padding, M, gt_size, ks=ks, ph=psf_h,
                num_psf=psf_n_row, sensor_h=sensor_h)

            current_iter += 1
            if current_iter > total_iters:
                break

            model.update_learning_rate(
                current_iter, warmup_iter=opt['train'].get('warmup_iter', -1))

            model.feed_train_data({'lq': lq, 'gt': gt})
            model.optimize_parameters(current_iter)

            iter_time = time.time() - iter_time

            # log
            if current_iter % opt['logger']['print_freq'] == 0:
                log_vars = {'epoch': epoch, 'iter': current_iter}
                log_vars.update({'lrs': model.get_current_learning_rate()})
                log_vars.update({'time': iter_time, 'data_time': data_time})
                log_vars.update(model.get_current_log())
                msg_logger(log_vars)

            # save models and training states
            if current_iter % opt['logger']['save_checkpoint_freq'] == 0:
                logger.info('Saving models and training states.')
                model.save(epoch, current_iter)

            # validation
            if opt.get('val') is not None and (
                    current_iter % opt['val']['val_freq'] == 0):
                rgb2bgr = opt['val'].get('rgb2bgr', True)
                use_image = opt['val'].get('use_image', True)
                model.validation(val_loader, current_iter, tb_logger, False,
                                 rgb2bgr, use_image, psf=otf, ks=ks,
                                 val_conv=val_conv)
                gc.collect()
                torch.cuda.empty_cache()

            data_time = time.time()
            iter_time = time.time()
            train_data = prefetcher.next()
            pbar.update(1)
        # end of iter

        epoch += 1
    # end of epoch

    pbar.close()
    consumed_time = str(
        datetime.timedelta(seconds=int(time.time() - start_time)))
    logger.info(f'End of training. Time consumed: {consumed_time}')
    logger.info('Save the latest model.')
    model.save(epoch=-1, current_iter=-1)  # -1 stands for the latest

    # Final full validation with the latest model.
    if opt.get('val') is not None:
        rgb2bgr = opt['val'].get('rgb2bgr', True)
        use_image = opt['val'].get('use_image', True)
        psnr, others = model.validation(val_loader, current_iter, tb_logger,
                                        True, rgb2bgr, use_image, psf=otf,
                                        ks=ks, val_conv=val_conv)
        print('==================')
        print(f"Test results: PSNR: {psnr:.2f}, "
              f"SSIM: {others['ssim']:.4f}, LPIPS: {others['lpips']:.4f}\n")

    if tb_logger:
        tb_logger.close()


if __name__ == '__main__':
    main()