import math
import paddle.fluid as fluid
from helm.dynamic.engine.callback import Events
from helm.dynamic.engine.sr import create_trainer, create_evaluator
from helm.dynamic.engine.sr.metrics import PSNR
from paddle.fluid.optimizer import Adam
from paddle.fluid.io import DataLoader

from helm.dynamic.engine.metrics import Loss
from helm.datasets import train_test_split
from helm.datasets.div2k import DIV2K, CachedDataset
from helm.datasets.srtest import SRTest
from helm.dynamic import set_seed, set_device
from helm.dynamic.lr_scheduler import CosineLR
from helm.dynamic.models.sr.rcan import RCAN
from helm.transforms import Compose
from helm.transforms.sr import RandomCrop, RandomHorizontalFlip, RandomVerticalFlip, RandomRot90, ToTensor

# ---------------------------------------------------------------------------
# RCAN x2 super-resolution training: DIV2K (10% split) for training,
# Set5 for per-epoch evaluation. Runs eagerly in Paddle dygraph mode.
# ---------------------------------------------------------------------------

DEVICE = 'cpu'
set_seed(0)                      # fix RNG state before any random op (model init, shuffling)
run_place = set_device(DEVICE)
fluid.enable_dygraph(run_place)

# Dataset locations — absolute paths on the author's machine.
LR_ROOT = "/Users/hrvvi/Downloads/DIV2K_train_LR_bicubic/X2"
HR_ROOT = "/Users/hrvvi/Downloads/DIV2K_train_HR"
TEST_ROOT = '/Users/hrvvi/Downloads/SR_testing_datasets/Set5'
SCALE = 2

# Paired random augmentations for training; evaluation only tensorizes.
augment = Compose([
    RandomCrop(96, SCALE),
    RandomHorizontalFlip(),
    RandomVerticalFlip(),
    RandomRot90(),
    ToTensor(),
])
eval_transform = Compose([
    ToTensor(),
])

# Take the second split (10% of DIV2K) and cache decoded samples in memory.
full_set = DIV2K(LR_ROOT, HR_ROOT, SCALE)
subset = train_test_split(full_set, 0.1)[1]
train_set = CachedDataset(subset, augment)
eval_set = CachedDataset(SRTest(TEST_ROOT, SCALE), eval_transform)

# RCAN with per-channel input normalization; bn disabled as in the reference.
rgb_mean = [114.444, 111.4605, 103.02]
rgb_std = (1.0, 1.0, 1.0)
model = RCAN(rgb_mean, rgb_std, 2, 4, 16, SCALE, bn=False)

criterion = fluid.L1Loss()

BATCH_SIZE = 16
EPOCHS = 20
steps_per_epoch = math.ceil(len(train_set) / BATCH_SIZE)
# Cosine schedule from 1e-4 down to a 1e-7 floor across the whole run.
lr_schedule = CosineLR(0.0001, steps_per_epoch, EPOCHS, 1e-7, 0.0001)
optimizer = Adam(lr_schedule, 0.9, 0.999, parameter_list=model.parameters())

metrics = [Loss()]
test_metrics = [Loss(), PSNR(SCALE + 6)]

# Loader options shared by both splits; only batch size / shuffling differ.
loader_common = dict(
    num_workers=2, places=run_place, return_list=True, use_shared_memory=False)
train_loader = DataLoader(
    train_set, batch_size=BATCH_SIZE, shuffle=True, **loader_common)
test_loader = DataLoader(
    eval_set, batch_size=1, shuffle=False, **loader_common)

LOG_FREQ = 1
EVAL_FREQ = 1

trainer = create_trainer(model, criterion, optimizer, metrics, LOG_FREQ)
evaluator = create_evaluator(model, criterion, test_metrics, LOG_FREQ)

# Run one evaluation pass over Set5 at the end of every epoch.
trainer.call_on(
    Events.EPOCH_END, lambda _: evaluator.run(test_loader, 1), EVAL_FREQ)

trainer.run(train_loader, EPOCHS)