#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use 
# under the terms of the LICENSE.md file.
#
# For inquiries contact  george.drettakis@inria.fr
#

import torch
from typing import Tuple, List
import torch.nn.functional as F
from torch.fft import fft2, fftshift, ifft2, ifftshift
from torch import Tensor
import torchvision.transforms.functional as TF
import numpy as np
import os, torchvision
from kornia import create_meshgrid
from scipy import fftpack


def mse(img1, img2):
    """Per-image mean squared error.

    Treats dim 0 as the batch dimension and averages over all remaining
    elements, returning a tensor of shape [B, 1].
    """
    diff = img1 - img2
    return diff.pow(2).reshape(img1.shape[0], -1).mean(dim=1, keepdim=True)

def psnr(img1, img2):
    """Per-image peak signal-to-noise ratio (in dB), assuming a peak value of 1.0.

    Treats dim 0 as the batch dimension; returns a tensor of shape [B, 1].
    Returns +inf for identical images (MSE == 0).
    """
    # Renamed the local from `mse`: it shadowed the module-level mse() helper.
    mse_val = ((img1 - img2) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True)
    return 20 * torch.log10(1.0 / torch.sqrt(mse_val))

def split_freqs_torch(X:Tensor, n_bands:int=4, scale_w:float=0.1, method='cumulative') -> List[Tensor]:
    '''
    Split an image into frequency bands using centred box masks on the shifted 2D spectrum.

    ## Parameters
    - X: Tensor in shape [C, H, W]. Input image.
    - n_bands: int, optional. How many frequency bands to split to.
    - scale_w: float, optional. Balancing factor for cut-points, larger value biases to linear while smaller biases to exponential.
    - method: str, optional. Split method, choose from ["cumulative", "addictive"].
      "cumulative" yields [low, low+mid, ..., gt]; "addictive" yields
      [gt, low, mid, ..., high] where the bands after the first are disjoint annuli.
    ## Returns
    - X_freqs: List[Tensor]. The splitted images of each freq range (n_bands + 1 tensors, each shaped like X).
    '''
    assert len(X.shape) == 3, 'X should be shape of [C, H, W]'
    assert method in ['cumulative', 'addictive'], 'choose method from ["cumulative", "addictive"]'

    def get_cp(scale_w:float) -> Tuple[List[int], List[int]]:
        # Blend a linear and an exponential cut-point schedule, then force the
        # last cut to exactly H//2 (resp. W//2) so the outermost band spans the
        # whole spectrum.
        nonlocal H, W
        cp_l_h = np.linspace(0, H // 2, n_bands + 1)
        cp_l_w = np.linspace(0, W // 2, n_bands + 1)
        # NOTE(review): `np.log(H) // 2 + 1` floors the log before halving;
        # possibly `np.log(H // 2 + 1)` was intended — kept as-is, the last
        # cut-point is overwritten below anyway.
        cp_m_h = np.exp(np.linspace(0, np.log(H) // 2 + 1, n_bands + 1)) - 1
        cp_m_w = np.exp(np.linspace(0, np.log(W) // 2 + 1, n_bands + 1)) - 1
        cp_h = cp_l_h * scale_w + cp_m_h * (1 - scale_w)
        cp_w = cp_l_w * scale_w + cp_m_w * (1 - scale_w)
        cp_h[-1] = cp_l_h[-1]
        cp_w[-1] = cp_l_w[-1]
        cp_h = np.clip(np.round(cp_h).astype(int), 0, H//2).tolist()
        cp_w = np.clip(np.round(cp_w).astype(int), 0, W//2).tolist()
        return cp_h, cp_w

    def get_masked(D:Tensor, h_out:int, w_out:int, h_in:int=None, w_in:int=None) -> Tensor:
        # Keep the centred box of half-extent (h_out, w_out); when (h_in, w_in)
        # is given (non-zero), zero the inner box so only an annulus remains.
        nonlocal H, W
        h, w = H//2, W//2     # central point (DC bin after fftshift)
        D_hat = torch.zeros_like(D)
        # BUGFIX: slice ends are exclusive, so the previous `min(H-1, h+h_out)`
        # could never select the last row/column of the spectrum; with the full
        # cut-point H//2 the mask must cover everything, hence min(H, ...).
        slicer_h = slice(max(0, h-h_out), min(H, h+h_out))
        slicer_w = slice(max(0, w-w_out), min(W, w+w_out))
        D_hat[..., slicer_h, slicer_w] = D[..., slicer_h, slicer_w]
        if h_in and w_in:
            slicer_h = slice(max(0, h-h_in), min(H, h+h_in))
            slicer_w = slice(max(0, w-w_in), min(W, w+w_in))
            D_hat[..., slicer_h, slicer_w] = 0.0
        return D_hat

    D = fftshift(fft2(X))
    H, W = X.shape[-2:]
    cp_h, cp_w = get_cp(scale_w)
    if method == 'cumulative':        # [low, low+mid, ..., gt]
        D_freqs = [get_masked(D, cp_h[i+1], cp_w[i+1]) for i in range(n_bands)] + [D]
    else:                             # 'addictive': [gt, low, mid, ..., high]
        D_freqs = [D] + [get_masked(D, cp_h[i+1], cp_w[i+1], cp_h[i], cp_w[i]) for i in range(n_bands)]
    # (Removed a dead `ifft2(ifftshift(D))` call whose result was discarded.)
    return [ifft2(ifftshift(D_freq)).real for D_freq in D_freqs]
    

def combine_freqs_torch(X_freqs:List[Tensor]) -> Tensor:
    '''
    ## Parameters
    - X_freqs: List[Tensor] with Tensor in shape [C, H, W]. Input sub-freq images.
    ## Returns
    - X: Tensor. The combined image (real part of the inverse FFT of the summed spectra).
    '''
    for X_freq in X_freqs:
        assert len(X_freq.shape) == 3, 'X should be shape of [C, H, W]'

    spectra = torch.stack([fft2(X_freq) for X_freq in X_freqs], dim=0)
    total = spectra.sum(dim=0)
    return ifft2(total).real

# ===================================================

def img2fft(X):
    """Forward 2D FFT of X with the zero-frequency bin shifted to the centre."""
    spectrum = fft2(X)
    return fftshift(spectrum)

def fft2img(X):
    """Invert a centred spectrum (as produced by img2fft) back to a real image."""
    spectrum = ifftshift(X)
    return ifft2(spectrum).real

def distance_from_center_to_coord(coords):
    """Euclidean norm over the last axis of a (..., 2) coordinate tensor."""
    dx = coords[..., 0]
    dy = coords[..., 1]
    return torch.sqrt(dx ** 2 + dy ** 2)
    
def get_mask(H, W, r, R):
    """Radial soft mask of shape (H, W): 1.0 within distance r of the centre,
    linear falloff to 0.0 at distance R, and 0.0 beyond R.

    NOTE(review): the `+1` offsets below put the centre at index
    (H//2 - 1, W//2 - 1), one pixel off the fftshift DC bin at (H//2, W//2) —
    confirm this is intended before changing.
    """
    mask = torch.ones((H, W))
    # BUGFIX: was `grid = grid = create_meshgrid(...)` (duplicated assignment).
    grid = create_meshgrid(H, W, normalized_coordinates=False)[0]
    i, j = grid.unbind(-1)
    coord = torch.stack([i-W//2+1, j-H//2+1], -1).abs()
    dis = distance_from_center_to_coord(coord)

    mask[dis > r] = (R - dis[dis > r]) / (R - r)
    mask[dis > R] = 0.0

    return mask

def padding_img(X, padding=5):
    """Zero-pad a [C, H, W] tensor by `padding` pixels on every spatial side.

    Returns a new tensor of shape [C, H + 2*padding, W + 2*padding] with X
    copied into the centre.
    """
    C, H, W = X.shape
    # BUGFIX: use new_zeros so the buffer matches X's device as well as its
    # dtype (torch.zeros always allocated on CPU, breaking CUDA inputs).
    # Also renamed the local: it shadowed this function's own name.
    out = X.new_zeros((C, H + 2 * padding, W + 2 * padding))
    out[:, padding: padding + H, padding: padding + W] = X
    return out

def soft_linear_split_freq(X:Tensor, d=0.1, D=0.2, padding=20, save_dir=None):
    '''
    Split X into a low-frequency base image and a residual using a soft radial mask.

    ## Parameters
    - X: Tensor [C, H, W]. Input image.
    - d: inner radius of the mask (weight 1 inside).
    - D: outer radius (weight falls linearly to 0 at D).
    - padding: reflect-padding width applied before the FFT (reduces boundary artefacts).
    - save_dir: optional str. When given, dumps a side-by-side [base | residual]
      debug image into this directory. BUGFIX: the original code unconditionally
      created a 'res/' directory and wrote a PNG with a *random* filename on
      every call — leftover debug I/O, now opt-in.
    ## Returns
    - [base_img, res_img]: low-frequency image and residual, each [C, H, W].
    '''
    h, w = X.shape[-2:]
    if padding > 0:
        X = torch.nn.functional.pad(X, (padding, padding, padding, padding), mode='reflect')

    H, W = X.shape[-2:]
    mask = get_mask(H, W, d, D)
    base_freq = img2fft(X) * mask

    base_img = fft2img(base_freq)
    res_img = X - base_img

    if padding > 0:
        # Crop back to the original spatial size.
        base_img = base_img[:, padding: padding + h, padding: padding + w]
        res_img = res_img[:, padding: padding + h, padding: padding + w]

    if save_dir is not None:
        os.makedirs(save_dir, exist_ok=True)
        idx = np.random.randint(0, 1000)
        torchvision.utils.save_image(torch.cat([base_img, res_img], -1),
                                     os.path.join(save_dir, f'{idx:03d}.png'))

    return [base_img, res_img]
    
def soft_split_freqs_torch(X:Tensor, scale:float=0.2) -> List[Tensor]:
    '''
    Soft low/high split: weight the centred spectrum by 1 / dist**scale,
    where dist is the distance from the (offset) spectrum centre and the
    zero-distance bin keeps weight 1, then return [low_img, X - low_img].
    '''
    H, W = X.shape[-2:]

    # Build the radial weighting mask inline (was a nested helper).
    grid = create_meshgrid(H, W, normalized_coordinates=False)[0]
    xs, ys = grid.unbind(-1)
    offsets = torch.stack([xs - W // 2 + 1, ys - H // 2 + 1], -1).abs()
    dist = torch.sqrt(offsets[..., 0] ** 2 + offsets[..., 1] ** 2)
    dist[dist == 0] = 1.0          # avoid dividing by zero at the centre bin
    weight = 1 / dist ** scale

    low_img = fft2img(img2fft(X) * weight)
    return [low_img, X - low_img]

def img2fft_numpy(X):
    ''' Forward 2D FFT with the zero-frequency bin shifted to the centre. X: (H, W, 1) '''
    spectrum = np.fft.fft2(X)
    return np.fft.fftshift(spectrum)

def fft2img_numpy(freq):
    '''Invert a centred spectrum back to an image.

    NOTE(review): returns the magnitude (np.abs), unlike the torch fft2img
    above which takes .real — these differ for signals with negative values;
    confirm which is intended.
    '''
    img = np.fft.ifft2(np.fft.ifftshift(freq))
    return np.abs(img)
     
def soft_split_numpy(X:Tensor, d=0.1, D=0.2):
    '''
    Hann-windowed soft low/high frequency split, computed channel-wise in numpy.

    NOTE(review): the base image comes from the *windowed* signal while the
    residual is taken against the unwindowed input — confirm this asymmetry is
    intended.

    ## Parameters
    - X: Tensor [C, H, W].
    - d / D: inner / outer radius of the soft radial mask (see get_mask).
    ## Returns
    - [base_img, res_img]: float tensors, each [C, H, W].
    '''
    arr = X.permute(1, 2, 0).numpy()
    H, W, C = arr.shape

    mask = get_mask(H, W, d, D).numpy()
    window = np.hanning(H)[:, None] * np.hanning(W)[None, :]

    base_channels = [
        fft2img_numpy(img2fft_numpy(arr[:, :, c] * window) * mask)
        for c in range(C)
    ]
    base = np.stack(base_channels, -1)     # (H, W, C)
    residual = arr - base

    return [
        torch.FloatTensor(base).permute(2, 0, 1),
        torch.FloatTensor(residual).permute(2, 0, 1),
    ]

def soft_combine_freqs_torch(imgs:List[Tensor]) -> Tensor:
    '''
    Recombine frequency-split images (e.g. the [low, high] pair from
    soft_split_freqs_torch) by summing their centred spectra and inverting.

    ## Parameters
    - imgs: List[Tensor], each [C, H, W]. Sub-frequency images. Generalized
      from exactly two bands to any non-empty list; behaviour for the original
      two-band input is unchanged.
    ## Returns
    - Tensor [C, H, W]. The combined image (real part of the inverse FFT).
      (Return annotation fixed: the original declared List[Tensor] but
      returned a single Tensor.)
    '''
    assert len(imgs) > 0, 'imgs should contain at least one tensor'

    total = torch.stack([fftshift(fft2(img)) for img in imgs], dim=0).sum(dim=0)
    return ifft2(ifftshift(total)).real