from collections import OrderedDict
import torch
import torch.nn.functional as F
from einops import rearrange
from basicsr.utils import get_root_logger
from basicsr.utils.registry import MODEL_REGISTRY
from basicsr.archs import build_network
from basicsr.losses import build_loss
from basicsr.archs.arch_util import flow_warp, resize_flow
from .video_recurrent_model import VideoRecurrentModel


@MODEL_REGISTRY.register()
class KEEPGANModel(VideoRecurrentModel):
"""KEEPGAN Model.
"""
def init_training_settings(self):
self.net_g.train()
train_opt = self.opt['train']
logger = get_root_logger()
# # load pretrained VQGAN models
# load_path = self.opt['path'].get('pretrain_network_vqgan', None)
# if load_path is not None:
# param_key = self.opt['path'].get('param_key_vqgan', 'params')
# self.load_network(self.net_g, load_path, False, param_key)
self.ema_decay = train_opt.get('ema_decay', 0)
if self.ema_decay > 0:
logger.info(
f'Use Exponential Moving Average with decay: {self.ema_decay}')
# define network net_g with Exponential Moving Average (EMA)
# net_g_ema is used only for testing on one GPU and saving
# There is no need to wrap with DistributedDataParallel
self.net_g_ema = build_network(
self.opt['network_g']).to(self.device)
# load pretrained model
load_path = self.opt['path'].get('pretrain_network_g', None)
if load_path is not None:
self.load_network(self.net_g_ema, load_path, self.opt['path'].get(
'strict_load_g', True), 'params_ema')
else:
self.model_ema(0) # copy net_g weight
self.net_g_ema.eval()
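            # model_ema() comes from basicsr's BaseModel: it blends the
            # current net_g weights into net_g_ema, roughly
            # p_ema = decay * p_ema + (1 - decay) * p,
            # so model_ema(0) above copies net_g verbatim.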
# define network net_d
self.net_d = build_network(self.opt['network_d'])
self.net_d = self.model_to_device(self.net_d)
self.print_network(self.net_d)
# load pretrained weights
load_path = self.opt['path'].get('pretrain_network_d', None)
if load_path is not None:
self.load_network(self.net_d, load_path,
self.opt['path'].get('strict_load_d', True))
self.net_d.train()
# define losses.
self.hq_feat_loss = train_opt.get('use_hq_feat_loss', False)
self.feat_loss_weight = train_opt.get('feat_loss_weight', 1.0)
self.cross_entropy_loss = train_opt.get('cross_entropy_loss', False)
self.entropy_loss_weight = train_opt.get('entropy_loss_weight', 0.5)
if self.cross_entropy_loss:
self.generate_idx_gt = True
            assert self.opt.get('network_vqgan', None) is not None, \
                'Should have network_vqgan config or pre-calculated latent code.'
self.hq_vqgan_fix = build_network(
self.opt['network_vqgan']).to(self.device)
self.hq_vqgan_fix.eval()
for param in self.hq_vqgan_fix.parameters():
param.requires_grad = False
# load_path = self.opt['path'].get('pretrain_network_vqgan', None)
# assert load_path != None, "Should load pre-trained VQGAN"
# self.load_network(self.hq_vqgan_fix, load_path, strict=False)
else:
self.generate_idx_gt = False
logger.info(f'Need to generate latent GT code: {self.generate_idx_gt}')
if train_opt.get('pixel_opt'):
self.cri_pix = build_loss(train_opt['pixel_opt']).to(self.device)
else:
self.cri_pix = None
if train_opt.get('perceptual_opt'):
self.perceptual_type = train_opt['perceptual_opt']['type']
self.cri_perceptual = build_loss(
train_opt['perceptual_opt']).to(self.device)
else:
self.cri_perceptual = None
if train_opt.get('temporal_opt'):
self.temporal_type = train_opt.get('temporal_warp_type', 'GT')
self.cri_temporal = build_loss(
train_opt['temporal_opt']).to(self.device)
else:
self.cri_temporal = None
if train_opt.get('gan_opt'):
self.cri_gan = build_loss(train_opt['gan_opt']).to(self.device)
            # Note: only net_d_start_iter is consulted in optimize_parameters
            # below; net_g_start_iter and net_d_iters are read but unused.
            self.net_g_start_iter = train_opt.get('net_g_start_iter', 0)
            self.net_d_iters = train_opt.get('net_d_iters', 1)
            self.net_d_start_iter = train_opt.get('net_d_start_iter', 0)
# set up optimizers and schedulers
self.setup_optimizers()
self.setup_schedulers()

    def setup_optimizers(self):
train_opt = self.opt['train']
logger = get_root_logger()
        optim_names, frozen_names = [], []
# optimizer g
optim_params_g = []
for k, v in self.net_g.named_parameters():
if v.requires_grad:
optim_params_g.append(v)
optim_names.append(k)
else:
                frozen_names.append(k)
        logger.warning('--------------- Optimizing Params ---------------')
        for k in optim_names:
            logger.warning(f'Params {k} will be optimized.')
        logger.warning('--------------- Frozen Params ---------------')
        for k in frozen_names:
            logger.warning(f'Params {k} will be frozen.')
optim_type = train_opt['optim_g'].pop('type')
self.optimizer_g = self.get_optimizer(
optim_type, optim_params_g, **train_opt['optim_g'])
self.optimizers.append(self.optimizer_g)
# optimizer d
optim_type = train_opt['optim_d'].pop('type')
self.optimizer_d = self.get_optimizer(
optim_type, self.net_d.parameters(), **train_opt['optim_d'])
self.optimizers.append(self.optimizer_d)

    def optimize_parameters(self, current_iter):
# optimize net_g
for p in self.net_d.parameters():
p.requires_grad = False
self.optimizer_g.zero_grad()
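        # When no pre-computed latent GT is available, derive it on the fly:
        # encode the HQ frames with the frozen VQGAN and quantize to the
        # nearest codebook entries; the resulting indices serve as targets
        # for the codebook and cross-entropy losses below.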
if self.generate_idx_gt:
with torch.no_grad():
b, f, c, h, w = self.gt.shape
x = self.hq_vqgan_fix.encoder(self.gt.reshape(-1, c, h, w))
_, _, quant_stats = self.hq_vqgan_fix.quantize(x)
min_encoding_indices = quant_stats['min_encoding_indices']
self.idx_gt = min_encoding_indices.view(b*f, -1)
if self.hq_feat_loss or self.cross_entropy_loss:
self.output, logits, lq_feat, gen_feat_dict = self.net_g(
self.lq, detach_16=True, early_feat=True)
else:
self.output, gen_feat_dict = self.net_g(
self.lq, detach_16=True, early_feat=False)
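        # If the network returned no intermediate features, fall back to
        # applying the temporal loss directly on the final output.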
if len(gen_feat_dict) == 0:
gen_feat_dict['HR'] = self.output
l_g_total = 0
loss_dict = OrderedDict()
# hq_feat_loss
        if self.hq_feat_loss:  # codebook loss
            # requires self.idx_gt (generated above from the frozen HQ VQGAN)
            code_h = lq_feat.shape[-1]
            quant_feat_gt = self.get_bare_model(self.net_g).quantize.get_codebook_feat(
                self.idx_gt, shape=[lq_feat.shape[0], code_h, code_h, 256])
            l_feat_encoder = torch.mean(
                (quant_feat_gt.detach() - lq_feat)**2) * self.feat_loss_weight
            l_g_total += l_feat_encoder
            loss_dict['l_feat_encoder'] = l_feat_encoder
# cross_entropy_loss
if self.cross_entropy_loss:
            # logits: (b*f, h*w, n_codes) -> (b*f, n_codes, h*w) so that
            # F.cross_entropy sees class scores on dim 1, matching
            # idx_gt of shape (b*f, h*w)
            cross_entropy_loss = F.cross_entropy(logits.permute(
                0, 2, 1), self.idx_gt) * self.entropy_loss_weight
l_g_total += cross_entropy_loss
loss_dict['l_cross_entropy'] = cross_entropy_loss
# Temporal consistency loss
if self.cri_temporal:
            assert len(gen_feat_dict) != 0, \
                'Empty features for temporal regularization.'
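            # Flow for the warping loss can come from the GT clip ('GT'), from
            # the network output ('HR'), or from both ('Diff', which compares
            # features warped by GT flow against features warped by HR flow).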
            with torch.no_grad():
                bare_net_g = self.get_bare_model(self.net_g)
                if self.temporal_type == 'GT':
                    flows = bare_net_g.get_flow(self.gt).detach()
                    flows = rearrange(flows, "b f c h w -> (b f) c h w")
                elif self.temporal_type == 'HR':
                    flows = bare_net_g.get_flow(self.output).detach()
                    flows = rearrange(flows, "b f c h w -> (b f) c h w")
                elif self.temporal_type == 'Diff':
                    gt_flows = bare_net_g.get_flow(self.gt).detach()
                    gt_flows = rearrange(gt_flows, "b f c h w -> (b f) c h w")
                    hr_flows = bare_net_g.get_flow(self.output).detach()
                    hr_flows = rearrange(hr_flows, "b f c h w -> (b f) c h w")
                else:
                    raise ValueError(
                        f'Unsupported temporal mode: {self.temporal_type}.')
l_temporal = 0
            for f_size, feat in gen_feat_dict.items():
                b, f, c, h, w = feat.shape
                if self.temporal_type in ('GT', 'HR'):
                    flow = resize_flow(flows, 'shape', [h, w])  # B*(T-1) 2 H W
                    flow = rearrange(flow, "b c h w -> b h w c")
                    prev_feat = feat[:, :-1, ...].reshape(-1, c, h, w)
                    curr_feat = feat[:, 1:, ...].reshape(-1, c, h, w)
                    # warp frame t-1 features onto frame t and penalize the gap
                    warp_feat = flow_warp(prev_feat, flow)
                    l_temporal += self.cri_temporal(curr_feat, warp_feat)
                elif self.temporal_type == 'Diff':
                    gt_flow = resize_flow(gt_flows, 'shape', [h, w])  # B*(T-1) 2 H W
                    gt_flow = rearrange(gt_flow, "b c h w -> b h w c")
                    hr_flow = resize_flow(hr_flows, 'shape', [h, w])  # B*(T-1) 2 H W
                    hr_flow = rearrange(hr_flow, "b c h w -> b h w c")
                    prev_feat = feat[:, :-1, ...].reshape(-1, c, h, w)
                    # compare the same features warped by GT flow vs. HR flow
                    gt_warp_feat = flow_warp(prev_feat, gt_flow)
                    hr_warp_feat = flow_warp(prev_feat, hr_flow)
                    l_temporal += self.cri_temporal(gt_warp_feat, hr_warp_feat)
l_g_total += l_temporal
loss_dict['l_temporal'] = l_temporal
# pixel loss
if self.cri_pix:
l_pix = self.cri_pix(self.output, self.gt)
l_g_total += l_pix
loss_dict['l_pix'] = l_pix
# perceptual loss
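        # Perceptual/LPIPS losses operate per frame, so the clip is flattened
        # from (B, T, C, H, W) to (B*T, C, H, W) before comparison.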
if self.cri_perceptual:
B, T, C, H, W = self.gt.shape
if self.perceptual_type == 'PerceptualLoss':
l_percep, l_style = self.cri_perceptual(
self.output.view(-1, C, H, W), self.gt.view(-1, C, H, W))
if l_percep is not None:
l_g_total += l_percep
loss_dict['l_percep'] = l_percep
if l_style is not None:
l_g_total += l_style
loss_dict['l_style'] = l_style
elif self.perceptual_type == 'LPIPSLoss':
l_percep = self.cri_perceptual(
self.output.view(-1, C, H, W), self.gt.view(-1, C, H, W))
l_g_total += l_percep
loss_dict['l_percep'] = l_percep
# gan loss
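        # Generator adversarial term: the output is labelled real with
        # is_disc=False (the non-saturating generator loss) and only kicks in
        # once the discriminator has started training.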
if current_iter > self.net_d_start_iter:
fake_g_pred = self.net_d(self.output)
l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False)
l_g_total += l_g_gan
loss_dict['l_g_gan'] = l_g_gan
l_g_total.backward()
self.optimizer_g.step()
if self.ema_decay > 0:
self.model_ema(decay=self.ema_decay)
# optimize net_d
if current_iter > self.net_d_start_iter:
for p in self.net_d.parameters():
p.requires_grad = True
self.optimizer_d.zero_grad()
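            # Real and fake batches are backpropagated separately; their
            # gradients accumulate in net_d before the single optimizer step.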
# real
real_d_pred = self.net_d(self.gt)
l_d_real = self.cri_gan(real_d_pred, True, is_disc=True)
loss_dict['l_d_real'] = l_d_real
loss_dict['out_d_real'] = torch.mean(real_d_pred.detach())
l_d_real.backward()
# fake
fake_d_pred = self.net_d(self.output.detach())
l_d_fake = self.cri_gan(fake_d_pred, False, is_disc=True)
loss_dict['l_d_fake'] = l_d_fake
loss_dict['out_d_fake'] = torch.mean(fake_d_pred.detach())
l_d_fake.backward()
self.optimizer_d.step()
self.log_dict = self.reduce_loss_dict(loss_dict)

    def save(self, epoch, current_iter):
if hasattr(self, 'net_g_ema'):
self.save_network([self.net_g, self.net_g_ema], 'net_g',
current_iter, param_key=['params', 'params_ema'])
else:
self.save_network(self.net_g, 'net_g', current_iter)
self.save_network(self.net_d, 'net_d', current_iter)
self.save_training_state(epoch, current_iter)
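
# Like other basicsr models, this class is typically instantiated through the
# registry rather than constructed directly. A hedged usage sketch (assumes a
# parsed `opt` dict with opt['model_type'] = 'KEEPGANModel'):
#
#   from basicsr.models import build_model
#   model = build_model(opt)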