from fastai.vision.all import Callback, NoneReduce, reduce_loss, \
    tensor, Normalize, unsqueeze, store_attr, PILImage, RandTransform, L
import torch
from torch.distributions.beta import Beta
import random
from cassava_leaf.utils import tuple_tensor_to_float
import albumentations
import numpy as np


class RandomCutMixUp(Callback):
    """Per-batch augmentation callback that randomly applies CutMix or MixUp.

    On each training batch a coin flip (p=0.5) chooses between:
      * CutMix — paste a random rectangular patch from a shuffled copy of
        the batch into the original images;
      * MixUp — linearly interpolate the batch with a shuffled copy.

    While fitting, the learner's loss function is replaced by `lf`, which
    lerps the loss computed against the shuffled targets (`self.yb1`) and
    the original targets by the mixing coefficient `self.lam`.
    Runs only during training (`run_valid = False`) and after `Normalize`.
    """
    run_after, run_valid = [Normalize], False

    def __init__(self, cutmix_alpha=1., mixup_alpha=0.4):
        # Symmetric Beta distributions supplying the mixing coefficient
        # for each augmentation flavour.
        self.cutmix_dist = Beta(tensor(cutmix_alpha), tensor(cutmix_alpha))
        self.mixup_dist = Beta(tensor(mixup_alpha), tensor(mixup_alpha))

    def before_fit(self):
        """Swap the learner's loss function for the mixing-aware `lf`."""
        # self.stack_y = getattr(self.learn.loss_func, 'y_int', False)
        self.stack_y = True
        if self.stack_y:
            self.old_lf, self.learn.loss_func = self.learn.loss_func, self.lf
        self.learn._logger.info(f'RandomCutMixUp: stack_y: {self.stack_y}, lf: {self.learn.loss_func}')

    def after_fit(self):
        """Restore the learner's original loss function."""
        if self.stack_y:
            self.learn.loss_func = self.old_lf

    def lf(self, pred, *yb):
        """Loss lerped between the shuffled and the original targets.

        Falls back to the wrapped loss outside training or before the
        first mixed batch has set `self.yb1`.
        """
        if not self.training: return self.old_lf(pred, *yb)
        if hasattr(self, 'yb1'):
            with NoneReduce(self.old_lf) as lf:
                loss = torch.lerp(lf(pred, *self.yb1), lf(pred, *yb), self.lam)
            return reduce_loss(loss, getattr(self.old_lf, 'reduction', 'mean'))
        else:
            return self.old_lf(pred, *yb)

    def rand_bbox(self, W, H, lam):
        """Sample a random box covering roughly a (1 - lam) fraction of the image.

        Returns `(x1, y1, x2, y2)` where x-coordinates lie along the width
        axis (clamped to [0, W]) and y-coordinates along the height axis
        (clamped to [0, H]).
        """
        cut_rat = torch.sqrt(1. - lam)
        cut_w = (W * cut_rat).type(torch.long)
        cut_h = (H * cut_rat).type(torch.long)
        # Uniformly sampled box centre.
        cx = torch.randint(0, W, (1,)).to(self.x.device)
        cy = torch.randint(0, H, (1,)).to(self.x.device)
        x1 = torch.clamp(cx - cut_w // 2, 0, W)
        y1 = torch.clamp(cy - cut_h // 2, 0, H)
        x2 = torch.clamp(cx + cut_w // 2, 0, W)
        y2 = torch.clamp(cy + cut_h // 2, 0, H)
        return x1, y1, x2, y2

    def _lerp_targets(self):
        """Mix original and shuffled targets on the learner.

        Only reached when `stack_y` is False (i.e. the loss itself is not
        doing the mixing via `lf`).
        """
        ny_dims = len(self.y.size())
        weight = unsqueeze(self.lam, n=ny_dims - 1)
        self.yb1 = tuple_tensor_to_float(self.yb1)
        self.yb = tuple_tensor_to_float(self.yb)
        self.learn.yb = tuple(
            L(self.yb1, self.yb).map_zip(torch.lerp, weight=weight))

    def before_batch(self):
        """Apply CutMix or MixUp (coin flip) to the current training batch."""
        if not self.training:
            return
        if random.random() <= 0.5:  # do cutmix
            # Batch tensor is NCHW: dim 2 is height, dim 3 is width.
            W, H = self.xb[0].size(3), self.xb[0].size(2)
            lam = self.cutmix_dist.sample((1,)).squeeze().to(self.x.device)
            lam = torch.stack([lam, 1 - lam])
            self.lam = lam.max().float()  # keep lam >= 0.5
            shuffle = torch.randperm(self.y.size(0)).to(self.x.device)
            xb1, self.yb1 = tuple(L(self.xb).itemgot(shuffle)), tuple(L(self.yb).itemgot(shuffle))
            x1, y1, x2, y2 = self.rand_bbox(W, H, self.lam)
            # y-coordinates index the height axis (dim 2) and x-coordinates
            # the width axis (dim 3).  The previous [:, :, x1:x2, y1:y2]
            # swapped the axes, which is only correct for square images.
            self.learn.xb[0][:, :, y1:y2, x1:x2] = xb1[0][:, :, y1:y2, x1:x2]
            # Recompute lam from the actual (clamped) patch area.
            self.lam = (1 - ((x2 - x1) * (y2 - y1)) / float(W * H)).item()
            if not self.stack_y:
                self._lerp_targets()
        else:  # do mixup
            lam = self.mixup_dist.sample((self.y.size(0),)).squeeze().to(self.x.device)
            lam = torch.stack([lam, 1 - lam], 1)
            self.lam = lam.max(1)[0].float()  # per-sample lam >= 0.5
            shuffle = torch.randperm(self.y.size(0)).to(self.x.device)
            xb1, self.yb1 = tuple(L(self.xb).itemgot(shuffle)), tuple(L(self.yb).itemgot(shuffle))
            nx_dims = len(self.x.size())
            self.learn.xb = tuple(L(xb1, self.xb).map_zip(torch.lerp, weight=unsqueeze(self.lam, n=nx_dims - 1)))
            if not self.stack_y:
                self._lerp_targets()


class AlbumentationsTransform(RandTransform):
    "Dispatches each item to a train-time or valid-time `albumentations` pipeline"
    split_idx, order = None, 2

    def __init__(self, train_aug, valid_aug):
        store_attr()

    def before_call(self, b, split_idx):
        # Remember which dataset split is being processed (0 == train).
        self.idx = split_idx

    def encodes(self, img: PILImage):
        # Pick the pipeline for the current split, run it on the raw
        # array, and wrap the augmented result back into a PILImage.
        aug = self.train_aug if self.idx == 0 else self.valid_aug
        return PILImage.create(aug(image=np.array(img))['image'])


def get_train_aug(size=512):
    """Build the training-time augmentation pipeline.

    Args:
        size: side length in pixels of the square output image; defaults
            to 512, the previously hard-coded value, so existing callers
            are unaffected.

    Returns:
        An `albumentations.Compose` applying a random resized crop and a
        random vertical flip.
    """
    return albumentations.Compose([
        albumentations.RandomResizedCrop(size, size),
        albumentations.VerticalFlip(p=0.5),
        # Previously trialled transforms, kept for reference:
        #     albumentations.Transpose(p=0.5),
        #     albumentations.ShiftScaleRotate(p=0.5),
        #     albumentations.HueSaturationValue(
        #         hue_shift_limit=0.2,
        #         sat_shift_limit=0.2,
        #         val_shift_limit=0.2,
        #         p=0.5),
        #     albumentations.CoarseDropout(p=0.5),
        #     albumentations.Cutout(p=0.5)
    ])


def get_valid_aug(size=512):
    """Build the validation-time augmentation pipeline.

    Args:
        size: side length in pixels of the square output image; defaults
            to 512, the previously hard-coded value, so existing callers
            are unaffected.

    Returns:
        An `albumentations.Compose` that centre-crops then resizes to
        `size` x `size` (the resize is a no-op after an exact crop but
        guards the output shape).
    """
    return albumentations.Compose([
        albumentations.CenterCrop(size, size, p=1.),
        albumentations.Resize(size, size)
    ], p=1.)
