"""
Created by Kostas Triaridis (@kostino)
in August 2023 @ ITI-CERTH
"""
from collections import defaultdict
from typing import Dict, Tuple
import torchvision.transforms.functional as TF
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from clusterings.misc import get_model, to_one_hot
from models.modules import clusterings
from torchvision.transforms.functional import to_tensor, to_pil_image, normalize


class AverageMeter(object):
    """Tracks a weighted running average alongside the most recent value."""

    def __init__(self):
        # No statistics exist until the first update() arrives.
        self.initialized = False
        self.val = None
        self.avg = None
        self.sum = None
        self.count = None

    def initialize(self, val, weight):
        """Seed the meter with its first observation."""
        self.val = val
        self.avg = val
        self.sum = val * weight
        self.count = weight
        self.initialized = True

    def update(self, val, weight=1):
        """Record one observation with an optional weight."""
        if self.initialized:
            self.add(val, weight)
        else:
            self.initialize(val, weight)

    def add(self, val, weight):
        """Fold a new observation into the running statistics."""
        self.val = val
        self.sum = self.sum + val * weight
        self.count = self.count + weight
        self.avg = self.sum / self.count

    def value(self):
        """Most recent value passed to update()."""
        return self.val

    def average(self):
        """Weighted mean of every value seen so far."""
        return self.avg


def add_bn(model):
    """Recursively append a batch-norm layer after every Conv2d,
    ConvTranspose2d (BatchNorm2d) and Linear (BatchNorm1d) child module.

    Each matched child is replaced in place by ``nn.Sequential(child, norm)``.
    Based on a utility by Kai Zhang, 11/Jan/2019.

    Args:
        model: an ``nn.Module`` whose children are patched in place.
    """
    for k, m in list(model.named_children()):
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            b = nn.BatchNorm2d(m.out_channels, momentum=0.1, eps=1e-05, affine=True)
        elif isinstance(m, nn.Linear):
            # Bug fix: Linear has `out_features` (not `out_channels`) and its
            # 2-D activations need BatchNorm1d, not BatchNorm2d.
            b = nn.BatchNorm1d(m.out_features, momentum=0.1, eps=1e-05, affine=True)
        else:
            # Not a normalisable leaf: recurse into its own children.
            add_bn(m)
            continue
        b.weight.data.fill_(1)
        model._modules[k] = nn.Sequential(model._modules[k], b)


class Fore(nn.Module):
    """Unsupervised salient-foreground mask extractor (SelfMask-style voting).

    Spectral-clusters self-supervised features (MoCo v2, SwAV, DINO) at several
    cluster counts to produce candidate masks per image, then selects the
    candidate with the highest cumulative IoU against the other candidates.
    """

    def __init__(self):
        super().__init__()
        # selfmask
        self.i = 1  # forward-call counter; incremented in forward(), never read back
        self.clusterer: callable = clusterings.SpectralClustering(use_gpu=True)
        # Each (feature type, cluster size) pair contributes k candidate masks.
        self.cluster_sizes = (2, 3, 4)
        self.feature_types = ["mocov2", "swav", "dino"]
        # NOTE(review): hard-coded second GPU — confirm this matches deployment.
        self.device = torch.device("cuda:1")

    def mask_to_bbox(self, mask: np.ndarray) -> Dict[int, Tuple[int, int, int, int]]:
        """Map each binary mask to its bounding box (ymin, ymax, xmin, xmax).

        Accepts a single h x w mask or an n x h x w stack; masks without any
        foreground pixels are omitted from the returned dict.
        """
        mask_index_to_bbox = dict()
        if len(mask.shape) == 2:
            mask = mask[None]  # promote h x w to 1 x h x w

        for mask_index, m in enumerate(mask):
            y_coords, x_coords = np.where(m)
            try:
                ymin, ymax, xmin, xmax = np.min(y_coords), np.max(y_coords), np.min(x_coords), np.max(x_coords)
            except ValueError:  # a mask which does not predict anything.
                continue
            mask_index_to_bbox[mask_index] = (ymin.item(), ymax.item(), xmin.item(), xmax.item())
        return mask_index_to_bbox

    def filter_masks(
            self,
            dt_masks: torch.Tensor,
            mask_index_to_bbox: dict,
            remove_long_masks: bool = True,
            remove_small_large_masks: bool = False
    ) -> Tuple[torch.Tensor, Dict[int, int]]:
        """Drop implausible candidate masks.

        remove_long_masks: discard masks whose bbox spans the full image height
        or width (likely background). remove_small_large_masks: discard masks
        covering < 5% of the image, or whose bbox covers > 95% of it.

        Returns the kept masks (stacked, n' x h x w) and a mapping from new
        index to original index. If every mask was filtered, the input masks
        are returned unchanged with an identity mapping.
        """
        list_filtered_masks: list = list()
        new_index_to_prev_index: dict = dict()
        h, w = dt_masks.shape[-2:]
        new_index = 0

        for mask_index, bbox in mask_index_to_bbox.items():
            ymin, ymax, xmin, xmax = bbox
            if remove_long_masks:
                if ymin == 0 and ymax + 1 == h:
                    continue
                elif xmin == 0 and xmax + 1 == w:
                    continue

            if remove_small_large_masks:
                if dt_masks[mask_index].sum() < 0.05 * h * w:
                    continue
                elif (xmax - xmin) * (ymax - ymin) > 0.95 * h * w:
                    continue
            list_filtered_masks.append(dt_masks[mask_index])
            new_index_to_prev_index[new_index] = mask_index
            new_index += 1
        try:
            return torch.stack(list_filtered_masks, dim=0), new_index_to_prev_index
        except RuntimeError:  # rare case where all predictions are filtered.
            return dt_masks, {i: i for i in range(len(dt_masks))}

    def pad_input_image(self, image: torch.Tensor, total_stride: int):
        """Right/bottom-pad a b x c x h x w image so h, w divide total_stride.

        Returns the padded image plus the resulting feature-map height/width.
        """
        assert len(image.shape) == 4
        h_image, w_image = image.shape[-2:]
        pad_w = (total_stride - w_image % total_stride) % total_stride
        pad_h = (total_stride - h_image % total_stride) % total_stride
        image: torch.Tensor = TF.pad(image, [0, 0, pad_w, pad_h])

        h_padded_image, w_padded_image = h_image + pad_h, w_image + pad_w
        h_feat, w_feat = h_padded_image // total_stride, w_padded_image // total_stride
        return image, h_feat, w_feat

    def vote_mask(
            self,
            batch_pred_masks,
            remove_long_masks: bool = True,
            remove_small_large_masks: bool = False
    ):
        """Pick the candidate mask with the highest total IoU vs. all others.

        batch_pred_masks: 1 x n x h x w tensor of binary candidate masks.
        Returns (best_mask, best_mask_index, new_index_to_prev_index); the
        index refers to the *filtered* mask stack.
        """
        mask_index_to_bbox = self.mask_to_bbox(batch_pred_masks.squeeze(dim=0).cpu().numpy())
        masks, new_index_to_prev_index = self.filter_masks(
            dt_masks=batch_pred_masks.squeeze(dim=0),
            mask_index_to_bbox=mask_index_to_bbox,
            remove_long_masks=remove_long_masks,
            remove_small_large_masks=remove_small_large_masks
        )  # n x h x w
        masks_t = masks.permute(1, 2, 0)
        try:
            intersection = torch.logical_and(masks[..., None], masks_t[None]).sum(dim=(1, 2))  # n x n
            union = torch.logical_or(masks[..., None], masks_t[None]).sum(dim=(1, 2))  # n x n
        except RuntimeError:
            # Retry on CPU when the broadcasted n x n op fails (presumably OOM).
            masks = masks.cpu()
            masks_t = masks_t.cpu()
            intersection = torch.logical_and(masks[..., None], masks_t[None]).sum(dim=(1, 2))  # n x n
            union = torch.logical_or(masks[..., None], masks_t[None]).sum(dim=(1, 2))  # n x n

        iou_table = intersection / (union + 1e-7)  # n x n
        ious = iou_table.sum(dim=1)  # n
        sorted_index = torch.argsort(ious, descending=True)
        best_mask_index = sorted_index[0].cpu().item()
        best_mask = masks[best_mask_index]
        return best_mask, best_mask_index, new_index_to_prev_index

    def extract_candidate_masks(self, p_images) -> Dict[str, np.ndarray]:
        """Generate candidate masks for a batch of (normalised) images.

        For every extractor in self.feature_types and every k in
        self.cluster_sizes, clusters the 2x-upsampled feature map, one-hot
        encodes the cluster assignments into masks, and upsamples them back to
        image resolution. Keys of the returned dict are the stringified batch
        indices; values are (num_candidates, h, w) uint8 arrays.
        """
        filename_to_candidate_masks: Dict[str, list] = defaultdict(list)
        for feature_type in self.feature_types:
            print(f"========== Generating candidate masks with {feature_type} ==========")
            # load a model
            network = get_model(
                arch={"mocov2": "resnet50", "swav": "resnet50", "dino": "vit_small"}[feature_type],
                training_method=feature_type,
                patch_size=16  # for dino  16
            ).to(self.device)
            network.eval()

            # dataloader = DataLoader(dataset=CustomDataset(image_paths=p_images, image_size=512), batch_size=1,
            #                         shuffle=False)
            # for dict_data in tqdm(dataloader):
            for index in range(p_images.shape[0]):
                filename = str(index)
                batch_imgs: torch.Tensor = p_images[index].unsqueeze(0).to(self.device)  # b (=1) x 3 x H x W
                # print(batch_imgs.shape)
                # print("+++++++++++")
                # print(batch_imgs.shape)
                h_image, w_image = batch_imgs.shape[-2:]

                # filename: str = dict_data["filename"][0]
                # extract features from a given model
                if feature_type in ["mocov2", "swav"]:
                    # dilated resnet50
                    total_stride: int = 8
                    batch_imgs, h_feat, w_feat = self.pad_input_image(batch_imgs, total_stride=total_stride)
                    features: torch.Tensor = network(batch_imgs)[-1]
                else:
                    # ViT-S/16
                    total_stride: int = 16
                    batch_imgs, h_feat, w_feat = self.pad_input_image(batch_imgs, total_stride=total_stride)
                    try:
                        batch_tokens = network(batch_imgs, layer="layer12")
                    except AttributeError:
                        batch_tokens = network.encoder(batch_imgs, layer="layer12")

                    # Drop the CLS token, keep patch tokens only.
                    batch_patch_tokens = batch_tokens[:, 1:, :]  # b (=1) x (h_feat * w_feat) x n_dims

                    # batch_patch_tokens
                    batch_patch_tokens = batch_patch_tokens.view(batch_imgs.shape[0], h_feat, w_feat, -1)
                    features = batch_patch_tokens.permute(0, 3, 1, 2)

                # upsample by 2 before clustering
                features = F.interpolate(features, scale_factor=2, mode="bilinear", align_corners=True)
                # iterate over a list of given cluster sizes
                for k in self.cluster_sizes:
                    # clustering
                    # batch_clusters: b (=1) x h x w
                    batch_clusters: np.ndarray = self.clusterer(features, k)

                    # batch_one_hot_masks: b (=1) x k x h x w -> k x h x w
                    batch_one_hot_masks: np.ndarray = to_one_hot(batch_clusters[0])

                    # Upsample masks back towards image resolution (features
                    # were already 2x upsampled, hence stride // 2).
                    batch_one_hot_masks: torch.Tensor = F.interpolate(
                        batch_one_hot_masks[None],
                        scale_factor=(total_stride // 2, total_stride // 2),
                        mode="nearest"
                    )[0]

                    # Crop away the padding added by pad_input_image.
                    batch_one_hot_masks: torch.Tensor = batch_one_hot_masks[..., :h_image, :w_image]
                    one_hot_masks: np.ndarray = batch_one_hot_masks.numpy().astype(np.uint8)
                    filename_to_candidate_masks[filename].append(one_hot_masks)
        # concatenate a list of masks for each image into a numpy array
        for filename, candidate_mask in filename_to_candidate_masks.items():
            filename_to_candidate_masks[filename]: np.ndarray = np.concatenate(candidate_mask, axis=0)
        return filename_to_candidate_masks
    def forward(self, image):
        """Return a salient-foreground mask per input image.

        Args:
            image: batch of RGB images (b x 3 x h x w); ImageNet-normalised
                internally before feature extraction.

        Returns:
            Tensor of shape (b, 3, h, w): the winning binary mask of each
            image tiled across three channels.
        """
        # selfmask
        mean: Tuple[float, float, float] = (0.485, 0.456, 0.406)
        std: Tuple[float, float, float] = (0.229, 0.224, 0.225)
        img = normalize(image, mean=mean, std=std)
        # print(img.shape)
        # img = img.unsqueeze(0)   # c h w

        filename_to_candidate_masks: Dict[str, np.ndarray] = self.extract_candidate_masks(img)
        self.i = self.i + 1
        result = []
        for filename, candidate_masks in filename_to_candidate_masks.items():
            candidate_masks: torch.Tensor = torch.tensor(candidate_masks).to(self.device)[None]  # 1 x k x h x w

            # salient_mask: torch.Tensor, h x w
            salient_mask, _, _ = self.vote_mask(
                candidate_masks,
                remove_long_masks=True,
                remove_small_large_masks=False
            )
            salient_mask = salient_mask.unsqueeze(0)
            out = torch.tile(salient_mask, (3, 1, 1))
            out = out.unsqueeze(0)
            result.append(out.float())
        result = torch.cat(result,0)


        return result  # (b, 3, h, w)

class SRMFilter(nn.Module):
    """Fixed (non-learnable) SRM noise-residual filter bank.

    Applies three classic steganalysis high-pass kernels to each RGB channel
    and clips the responses to [-2, 2] via Hardtanh.
    """

    def __init__(self):
        super().__init__()
        # Second-order residual kernel, normalised by 4.
        second_order = [[0, 0, 0, 0, 0],
                        [0, -1, 2, -1, 0],
                        [0, 2, -4, 2, 0],
                        [0, -1, 2, -1, 0],
                        [0, 0, 0, 0, 0]]
        # Third-order residual kernel, normalised by 12.
        third_order = [[-1, 2, -2, 2, -1],
                       [2, -6, 8, -6, 2],
                       [-2, 8, -12, 8, -2],
                       [2, -6, 8, -6, 2],
                       [-1, 2, -2, 2, -1]]
        # First-order horizontal kernel, normalised by 2.
        first_order = [[0, 0, 0, 0, 0],
                       [0, 0, 0, 0, 0],
                       [0, 1, -2, 1, 0],
                       [0, 0, 0, 0, 0],
                       [0, 0, 0, 0, 0]]
        divisors = torch.tensor([[4.], [12.], [2.]]).unsqueeze(-1).unsqueeze(-1)
        # Each output channel applies the same kernel to all three inputs.
        bank = torch.tensor(
            [[second_order] * 3, [third_order] * 3, [first_order] * 3],
            dtype=torch.float,
        ) / divisors
        self.register_buffer('filters', bank)
        self.truc = nn.Hardtanh(-2, 2)

    def forward(self, x):
        """Convolve x (N x 3 x H x W) with the SRM bank; clip to [-2, 2]."""
        residual = F.conv2d(x, self.filters, padding='same', stride=1)
        return self.truc(residual)


class BayarConv2d(nn.Module):
    """Constrained convolution (Bayar & Stamm, IH&MMSec 2016).

    Each kernel's centre tap is fixed to -1 and the remaining taps are
    renormalised to sum to 1 on every forward pass, so the layer behaves as a
    learnable prediction-error (high-pass) filter.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: side length of the square kernel (default 5).
        stride: convolution stride (default 1).
        padding: convolution padding (default 0).
    """

    def __init__(self, in_channels, out_channels, kernel_size=5, stride=1, padding=0):
        # Bug fix: Module.__init__ must run before any attribute assignment;
        # assigning first only worked by accident of nn.Module.__setattr__.
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        # Fixed centre tap. Kept as a plain tensor (not a registered buffer)
        # so the state_dict layout stays compatible with existing checkpoints;
        # it is moved to the kernel's device on demand in bayarConstraint().
        self.minus1 = (torch.ones(self.in_channels, self.out_channels, 1) * -1.000)
        # only (kernel_size ** 2 - 1) trainable params as the center element is always -1
        self.kernel = nn.Parameter(torch.rand(self.in_channels, self.out_channels, kernel_size ** 2 - 1),
                                   requires_grad=True)

    def bayarConstraint(self):
        """Renormalise the free taps to sum to 1 and splice in the -1 centre.

        Returns:
            The full (out_channels, in_channels, k, k) weight tensor.
        """
        self.kernel.data = self.kernel.data.div(self.kernel.data.sum(-1, keepdim=True))
        ctr = self.kernel_size ** 2 // 2
        real_kernel = torch.cat((self.kernel[:, :, :ctr], self.minus1.to(self.kernel.device), self.kernel[:, :, ctr:]), dim=2)
        # NOTE(review): this reshape relabels the (in, out) axes instead of
        # transposing them — harmless when in_channels == out_channels, but
        # worth confirming for asymmetric configurations.
        real_kernel = real_kernel.reshape((self.out_channels, self.in_channels, self.kernel_size, self.kernel_size))
        return real_kernel

    def forward(self, x):
        """Apply the constrained convolution to x."""
        x = F.conv2d(x, self.bayarConstraint(), stride=self.stride, padding=self.padding)
        return x


def nchw_to_nlc(x):
    """Reshape a [N, C, H, W] tensor into [N, L, C] with L = H * W.

    Args:
        x (Tensor): input of shape [N, C, H, W].

    Returns:
        Tensor: contiguous tensor of shape [N, H*W, C].
    """
    assert x.dim() == 4
    n, c, h, w = x.shape
    return x.reshape(n, c, h * w).permute(0, 2, 1).contiguous()


def nlc_to_nchw(x, hw_shape):
    """Reshape a [N, L, C] tensor back into [N, C, H, W].

    Args:
        x (Tensor): input of shape [N, L, C].
        hw_shape (Sequence[int]): target (H, W) of the feature map.

    Returns:
        Tensor: contiguous tensor of shape [N, C, H, W].
    """
    h, w = hw_shape
    assert x.dim() == 3
    batch, seq_len, channels = x.shape
    assert seq_len == h * w, 'The seq_len does not match H, W'
    return x.permute(0, 2, 1).reshape(batch, channels, h, w).contiguous()


if __name__ == "__main__":
    # Ad-hoc smoke test: run the SRM filter, Bayar conv and Fore extractor on
    # one local image. Requires cuda:1 (see Fore.__init__) and the hard-coded
    # dataset path below; not part of the library API.
    filter = SRMFilter()
    fore = Fore()
    srm = BayarConv2d(3,3,2)
    from PIL import Image

    path = '/home/wc/disk1/datasets/COVER/tampered/95t.tif'
    img = Image.open(path).convert('RGB')
    print(np.array(img))
    print(type(img))
    print(img.size)
    # img.show()
    img_tensor = to_tensor(img)  # c x h x w float tensor in [0, 1]
    # out = filter(img_tensor)
    out_srm = srm(img_tensor)
    # NOTE(review): Fore.forward indexes a batch dimension (p_images[index]),
    # but img_tensor is 3-D here — confirm whether .unsqueeze(0) is missing.
    out = fore(img_tensor)
    out = torch.tile(out, (3, 1, 1))
    print(out)
    # print(out)
    print(out.shape)
    # out = (out + 2) / 4.
    # out_img = to_pil_image(out[2])
    # out_img.show()
    # print(filter.filters.size())
