from functools import partial

import albumentations as A
import cv2
import torch
from albumentations.pytorch import ToTensorV2
from rainbowneko.ckpt_manager import CkptManagerPKL
from rainbowneko.evaluate import MetricGroup, MetricContainer, Evaluator
from rainbowneko.models.wrapper import SingleWrapper
from rainbowneko.parser import make_base, CfgWDModelParser
from rainbowneko.train.data import BaseDataset
from rainbowneko.train.data import FixedCropBucket
from rainbowneko.train.data.handler import HandlerChain, ImageHandler, LoadImageHandler
from rainbowneko.train.data.source import UnLabelSource
from rainbowneko.train.loss import LossContainer
from torchmetrics.image import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure

from cfgs.py.train import train_base, tuning_base
from loss import CDCLoss
from model import HourGlassNetMultiScaleFormer

import PIL

# Raise Pillow's decompression-bomb guard so very large training images load
# without raising DecompressionBombError (default limit is ~178 MP).
PIL.Image.MAX_IMAGE_PIXELS = 900000000

hr_size = 320  # high-resolution crop size (pixels) fed to the model as ground truth
sr_scale = 2   # super-resolution upscale factor; LR inputs are hr_size // sr_scale

def make_cfg():
    """Build the training configuration for the CDC-Former-L super-resolution run.

    Returns the config ``dict`` consumed by the rainbowneko config parser:
    model/optimizer/scheduler settings, train- and eval-time datasets with
    their augmentation pipelines, and PSNR/SSIM metrics.
    """
    # BUG FIX: the original built this dict but never returned it, so
    # make_cfg() yielded None and the parser received no configuration.
    return dict(
        _base_=make_base(train_base, tuning_base),
        exp_dir=f'exps/CDC-Former-L-v1',
        mixed_precision='fp16',

        model_part=CfgWDModelParser([
            dict(
                lr=1e-4,
                layers=[''],  # train all layers
            )
        ]),

        # func(_partial_=True, ...) same as partial(func, ...)
        ckpt_manager=CkptManagerPKL(_partial_=True, saved_model=(
            {'model':'model', 'trainable':False},
        )),

        train=dict(
            train_epochs=30,
            workers=4,
            max_grad_norm=None,  # gradient clipping disabled
            save_step=2000,

            # CDC loss consumes the model's multi-scale SR outputs and the HR target.
            loss=LossContainer(CDCLoss(), key_map=('pred.sr_list -> sr_list', 'inputs.hr -> hr')),

            optimizer=partial(torch.optim.AdamW, weight_decay=1e-2),

            scale_lr=False,
            scheduler=dict(
                name='cosine',
                num_warmup_steps=1000,
            ),

            # Metrics compare the last (finest) SR output against the HR target.
            # data_range matches the [-1, 1] normalization applied by the handlers.
            metrics=MetricGroup(metric_dict=dict(
                psnr=MetricContainer(PeakSignalNoiseRatio(data_range=(-1.0, 1.0)),
                                     key_map=('pred.sr_list.3 -> 0', 'inputs.hr -> 1')),
                ssim=MetricContainer(StructuralSimilarityIndexMeasure(data_range=(-1.0, 1.0)),
                                     key_map=('pred.sr_list.3 -> 0', 'inputs.hr -> 1')),
            )),
        ),

        model=dict(
            name='HG-Former',
            wrapper=partial(SingleWrapper,
                        key_map_in=('lr -> x',),  # feed the LR tensor as model input x
                        model=HourGlassNetMultiScaleFormer(upscale=sr_scale, HG_stage=(2, 3, 4)),
                        key_map_out=('0 -> sr_list', '1 -> sr_map'),
                    )
        ),

        data_train=dict(
            dataset1=partial(BaseDataset, batch_size=8, loss_weight=1.0,
                source=dict(
                    data_source1=UnLabelSource(
                        img_root='/mnt/data1/dzy/datas/CDC_max_face_size/train',
                    ),
                ),
                handler=HandlerChain(handlers=dict(
                    load_hr=LoadImageHandler(),
                    bucket=FixedCropBucket.handler,  # the bucket ships its own processing modules
                    # Geometric augmentation applied to the HR crop (before LR derivation).
                    image_hr=ImageHandler(transform=A.Compose([
                            A.HorizontalFlip(p=0.5),
                            A.VerticalFlip(p=0.5),
                        ]),
                    ),
                    # Degrade HR -> LR: random-interpolation downscale + noise, then normalize to [-1, 1].
                    image_lr=ImageHandler(transform=A.Compose([
                            A.OneOf([
                                A.Resize(hr_size // sr_scale, hr_size // sr_scale, interpolation=cv2.INTER_LINEAR),
                                A.Resize(hr_size // sr_scale, hr_size // sr_scale, interpolation=cv2.INTER_CUBIC),
                            ], p=1),
                            A.GaussNoise(var_limit=(3, 20), p=0.3),
                            A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                            ToTensorV2(),
                        ]),
                        key_map_out=('image -> lr',),
                    ),
                    # HR target tensor, normalized the same way as the LR input.
                    hr_tensor=ImageHandler(transform=A.Compose([
                            A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                            ToTensorV2(),
                        ]),
                    ),
                ), key_map_out=('image -> hr', 'lr -> lr')),
                bucket=FixedCropBucket(target_size=hr_size),
            )
        ),

        evaluator=partial(Evaluator,
            interval=1000,
            metric=MetricGroup(metric_dict=dict(
                psnr=MetricContainer(PeakSignalNoiseRatio(data_range=(-1.0, 1.0)),
                                     key_map=('pred.sr_list.3 -> 0', 'inputs.hr -> 1')),
                ssim=MetricContainer(StructuralSimilarityIndexMeasure(data_range=(-1.0, 1.0)),
                                     key_map=('pred.sr_list.3 -> 0', 'inputs.hr -> 1')),
            )),
            # NOTE(review): eval uses the same img_root as training — looks like a
            # copy-paste; confirm whether a held-out split was intended.
            dataset=dict(
                dataset1=partial(BaseDataset, batch_size=8, loss_weight=1.0,
                    source=dict(
                        data_source1=UnLabelSource(
                            img_root='/mnt/data1/dzy/datas/CDC_max_face_size/train',
                        ),
                    ),
                    handler=HandlerChain(handlers=dict(
                        load_hr=LoadImageHandler(),
                        bucket=FixedCropBucket.handler,  # the bucket ships its own processing modules
                        # Deterministic LR derivation for evaluation: cubic downscale only, no noise.
                        image_lr=ImageHandler(transform=A.Compose([
                                A.Resize(hr_size // sr_scale, hr_size // sr_scale, interpolation=cv2.INTER_CUBIC),
                                A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                                ToTensorV2(),
                            ]),
                            key_map_out=('image -> lr',),
                        ),
                        hr_tensor=ImageHandler(transform=A.Compose([
                                A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                                ToTensorV2(),
                            ]),
                        ),
                    ), key_map_out=('image -> hr', 'lr -> lr')),
                    bucket=FixedCropBucket(target_size=hr_size),
                )
            )
        ),
    )
