import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable


def data_consistency(k, k0, mask, noise_lvl=None):
    """Enforce k-space data consistency.

    k    - input in k-space
    k0   - initially sampled elements in k-space
    mask - corresponding nonzero location

    Where ``mask`` is zero the prediction ``k`` passes through unchanged;
    where it is nonzero the sampled data ``k0`` either replaces the
    prediction (noiseless case) or is blended with it, weighted by the
    noise level ``v`` (noisy case).
    """
    v = noise_lvl
    if v:  # noisy case: weighted average of prediction and samples
        sampled = (k + v * k0) / (1 + v)
    else:  # noiseless case: trust the samples exactly
        sampled = k0
    return (1 - mask) * k + mask * sampled


class DataConsistencyInKspace(nn.Module):
    """Data consistency operator.

    ``perform`` transforms the image-domain input ``x`` into k-space,
    re-inserts the originally sampled measurements via
    ``data_consistency``, and transforms the result back to the image
    domain. ``mask`` is the undersampling mask in the Fourier domain:
    it selects which k-space locations come from the measured data.

    Note: the FFT is applied to the last two spatial axes (nx, ny) of
    4-dim (n, c, nx, ny) input.
    """

    def __init__(self, noise_lvl=None, norm='ortho'):
        """
        noise_lvl - optional noise weighting ``v`` forwarded to
                    ``data_consistency`` (None means noiseless)
        norm      - 'ortho' for orthonormal FFT scaling; anything else
                    uses the default (unnormalized) scaling
        """
        super(DataConsistencyInKspace, self).__init__()
        self.normalized = norm == 'ortho'
        self.noise_lvl = noise_lvl

    def forward(self, *input, **kwargs):
        return self.perform(*input)

    def perform(self, x, k0, mask):
        """
        x    - input in image domain, of shape (n, 3, nx, ny)
        k0   - initially sampled data, of shape (n, 3, nx, ny); note it
               is FFT'd below, so it is expected in the image domain too
        mask - corresponding nonzero locations, shape (n, 3, nx, ny, 2);
               the trailing axis applies to the real/imag channels
        """
        # Map the stored boolean onto the modern torch.fft norm strings.
        norm = 'ortho' if self.normalized else 'backward'

        # Promote the real inputs to complex (zero imaginary part).
        # zeros_like keeps the buffer on x's device/dtype instead of the
        # old hard-coded .cuda(), so this also works on CPU.
        x_complex = torch.complex(x, torch.zeros_like(x))
        k0_complex = torch.complex(k0, torch.zeros_like(k0))

        # view_as_real restores the (..., 2) real/imag layout that the
        # (n, 3, nx, ny, 2) mask multiplies element-wise — the same
        # representation the legacy torch.fft(input, 2) API produced.
        k = torch.view_as_real(torch.fft.fft2(x_complex, norm=norm))
        k0_fft = torch.view_as_real(torch.fft.fft2(k0_complex, norm=norm))

        out = data_consistency(k, k0_fft, mask, self.noise_lvl)

        x_res = torch.fft.ifft2(torch.view_as_complex(out.contiguous()),
                                norm=norm)

        # Keep only the real part, shape (n, 3, nx, ny), matching the
        # original narrow(-1, 0, 1) + squeeze.
        return x_res.real
