#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use 
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

import torch
from typing import Tuple, List
import torch.nn.functional as F
from torch.fft import fft2, fftshift, ifft2, ifftshift
from torch import Tensor
import torchvision.transforms.functional as TF
import os, torchvision, cv2
from kornia import create_meshgrid

def mse(img1, img2):
    """Per-image mean squared error.

    Flattens every image in the batch and averages the squared pixel
    difference, yielding one value per batch element ([N, 1] tensor).
    """
    diff = img1 - img2
    return diff.pow(2).view(img1.shape[0], -1).mean(dim=1, keepdim=True)

def psnr(img1, img2):
    """Per-image PSNR in dB, assuming pixel values in [0, 1] (peak = 1.0).

    Returns an [N, 1] tensor with one PSNR value per batch element.
    Identical images give +inf (log10 of a division by zero MSE).
    """
    # Renamed from `mse` so the local no longer shadows the module-level
    # mse() helper defined above.
    err = ((img1 - img2) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True)
    return 20 * torch.log10(1.0 / torch.sqrt(err))

def img2fft(X):
    """Map a spatial image to its centered 2-D spectrum (DC bin moved to the middle)."""
    spectrum = fft2(X)
    return fftshift(spectrum)

def fft2img(X):
    """Inverse of img2fft: un-center the spectrum, inverse-transform, keep the real part."""
    spatial = ifft2(ifftshift(X))
    return spatial.real

def soft_linear_split_freq(X: Tensor, r: int, buffer_width: float = 5.0):
    """Split an image into low/high-frequency parts with a soft circular
    mask (linear feathering) in the Fourier domain.

    ## Parameters
        - X: Tensor in shape [C, H, W]. Input image.
        - r: int, radius of the frequency-map region kept as pure low freq.
        - buffer_width: width of the linear transition band from low to high.
    ## Returns
        - X_freqs: List[Tensor]. [low_freq_image, high_freq_image]; since the
          two masks are complementary, the parts sum back to X (up to FFT
          round-off).
    """
    def get_mask(H, W, r, buffer_width):
        # Outer radius where the linear ramp reaches zero.
        R = r + buffer_width

        # Pixel coordinates relative to the spectrum center, built with
        # plain torch (previously kornia's create_meshgrid; same values).
        # NOTE(review): the "+1" puts the mask center at (W//2-1, H//2-1),
        # one pixel off the fftshift DC bin at (W//2, H//2) — kept as-is to
        # preserve original behavior; confirm whether intended.
        xs = torch.arange(W, dtype=torch.float32).view(1, W).expand(H, W)
        ys = torch.arange(H, dtype=torch.float32).view(H, 1).expand(H, W)
        dx = (xs - W // 2 + 1).abs()
        dy = (ys - H // 2 + 1).abs()
        dis = torch.sqrt(dx ** 2 + dy ** 2)

        # 1 inside radius r, linear ramp down across (r, R], 0 beyond R.
        mask = torch.ones((H, W))
        mask[dis > r] = (R - dis[dis > r]) / (R - r)
        mask[dis > R] = 0.0
        return mask

    H, W = X.shape[-2:]
    mask = get_mask(H, W, r, buffer_width)
    # Centered spectrum (equivalent to the img2fft helper).
    freq = fftshift(fft2(X))
    low_freq = freq * mask
    high_freq = freq * (1 - mask)

    # Back to image space (equivalent to the fft2img helper).
    low_img = ifft2(ifftshift(low_freq)).real
    high_img = ifft2(ifftshift(high_freq)).real
    return [low_img, high_img]
    
def soft_exp_split_freqs(X: Tensor, scale: float = 0.2) -> List[Tensor]:
    '''
    Split an image into a low- and a high-frequency part by attenuating the
    centered spectrum with the radial weight 1 / dis**scale.

    ## Parameters
    - X: Tensor in shape [C, H, W]. Input image.
    - scale: float, optional. Attenuation exponent; larger values suppress
      high frequencies more strongly, scale=0 leaves X entirely in the low
      part. (Docstring previously described a nonexistent `scale_w: int`.)
    ## Returns
    - X_freqs: List[Tensor]. [low_freq_image, X - low_freq_image]; the two
      parts sum back to X exactly by construction.
    '''

    def generate_mask(H, W, scale):
        # Radial distance from the (near-)center of the shifted spectrum,
        # built with plain torch (previously kornia's create_meshgrid; same
        # values). NOTE(review): the "+1" offsets the center by one pixel
        # from the fftshift DC bin — kept as-is to preserve behavior.
        xs = torch.arange(W, dtype=torch.float32).view(1, W).expand(H, W)
        ys = torch.arange(H, dtype=torch.float32).view(H, 1).expand(H, W)
        dis = torch.sqrt((xs - W // 2 + 1) ** 2 + (ys - H // 2 + 1) ** 2)
        # Avoid a division by zero at the center bin.
        dis[dis == 0] = 1.0
        return 1 / dis ** scale

    H, W = X.shape[-2:]
    mask = generate_mask(H, W, scale)

    # Attenuate the centered spectrum, back to image space
    # (equivalent to the img2fft / fft2img helpers).
    freq = fftshift(fft2(X))
    low_img = ifft2(ifftshift(freq * mask)).real
    # The residual carries everything the soft mask removed.
    return [low_img, X - low_img]

def mean_blur(X, blur_method='mean', k=5):
    """Blur X with OpenCV and return [blurred, residual] as CHW tensors.

    X may be a torch Tensor or a numpy array; a CHW layout (3 leading
    channels) is converted to the HWC layout OpenCV expects before
    filtering.

    - blur_method: one of 'mean' (box filter), 'gaussian', or 'mid'
      (median filter); anything else raises ValueError.
    - k: kernel size of the chosen filter.
    """

    def _chw(arr):
        # HWC numpy array -> CHW torch tensor.
        return torch.from_numpy(arr).permute(2, 0, 1)

    if isinstance(X, torch.Tensor):
        X = X.numpy()
    if X.shape[0] == 3:
        X = X.transpose(1, 2, 0)

    blurs = {
        'mean': lambda img: cv2.blur(img, (k, k)),
        'gaussian': lambda img: cv2.GaussianBlur(img, (k, k), 2),
        'mid': lambda img: cv2.medianBlur(img, k),
    }
    if blur_method not in blurs:
        raise ValueError(f"{blur_method} is not supported ! ")
    blurred = blurs[blur_method](X)

    residual = X - blurred
    return [_chw(blurred), _chw(residual)]
    
def soft_combine_freqs_torch(imgs: List[Tensor]) -> Tensor:
    """Recombine frequency-split images by summing their centered spectra
    and inverse-transforming once.

    By linearity of the FFT this equals summing the images directly (up to
    round-off); it is kept in the frequency domain for symmetry with the
    split helpers. Return annotation fixed: a single Tensor is returned,
    not a List[Tensor].

    ## Parameters
    - imgs: List[Tensor]. Frequency-split parts, all of the same shape.
    ## Returns
    - Tensor: the recombined image.
    """
    total = 0
    for img in imgs:
        # Centered spectrum of each part (equivalent to the img2fft helper).
        total = total + fftshift(fft2(img))
    # Back to image space (equivalent to the fft2img helper).
    return ifft2(ifftshift(total)).real
