from functools import partial

import torch
import torch.nn.functional as F


class MetricMethod:
    """FFT-based sliding-window metrics between a search patch and a template.

    The constructor selects one metric and exposes it as ``self.mtc_fn``:

    * ``"SSD"``      -> :meth:`_ssd_by_fft`
    * ``"Conv"``     -> :meth:`_conv_by_fft`
    * ``"Mask SSD"`` -> :meth:`ssd_fft_mask`

    Remaining entries of ``attrs`` are bound onto the chosen metric as
    keyword arguments via ``functools.partial``.
    """

    def __init__(self, attrs=None):
        """
        Param
        -----
        attrs: optional dict with a "name" key selecting the metric; all
               other entries are forwarded to the metric as keyword args.
               When None (or the name is unknown) plain SSD is used.
        """
        # Default metric when no configuration is given.
        self.mtc_fn = self._ssd_by_fft
        if attrs is None:
            return

        # Copy first: the original implementation popped "name" out of the
        # caller's dict, silently mutating the argument.
        attrs = dict(attrs)
        name = attrs.pop("name")
        dispatch = {
            "SSD": self._ssd_by_fft,
            "Conv": self._conv_by_fft,
            "Mask SSD": self.ssd_fft_mask,
        }
        if name in dispatch:
            self.mtc_fn = partial(dispatch[name], **attrs)

    def _embed(self, content, like, th, tw):
        """Zero tensor shaped/typed like ``like`` with ``content`` written
        into the top-left (th, tw) corner of every (batch, channel) plane.

        ``zeros_like`` keeps dtype and device consistent with the input
        (the original ``torch.zeros(size).to(device)`` always produced
        float32 regardless of the input dtype).
        """
        out = torch.zeros_like(like)
        out[:, :, 0:th, 0:tw] = content
        return out

    def _valid_xcorr(self, x, kernel, mh, mw):
        """Channel-summed "valid" cross-correlation of ``x`` with ``kernel``
        computed in the frequency domain.

        Both inputs are (b, c, H, W); the circular correlation is computed
        per channel, summed over channels, and cropped to the (mh, mw)
        region that corresponds to fully-overlapping window positions.

        Returns a real (b, mh, mw) tensor.
        """
        spec = torch.fft.fft2(x) * torch.conj(torch.fft.fft2(kernel))
        corr = torch.real(torch.fft.ifft2(torch.sum(spec, 1)))
        return corr[:, 0:mh, 0:mw]

    def _ssd_by_fft(
        self, search_patch, template_patch, *,
        omit_monomial=True, mean=True, ret_sim=True
    ):
        """Sliding-window sum of squared differences via FFT.

        SSD(u, v) = Sum I^2 - 2 * Sum I*T + Sum T^2 over each window; the
        first two terms are cross-correlations evaluated in the frequency
        domain, the last is constant in (u, v).

        Param
        -----
        search_patch:   (b, c, sh, sw) search image
        template_patch: (b, c, th, tw) template, th <= sh and tw <= sw
        omit_monomial:  omit the window-independent Sum(T^2) term
        mean:           normalize by the template area th * tw
        ret_sim:        return similarity (1 - ssd) instead of the distance

        Returns
        -------
        (b, sh-th+1, sw-tw+1) tensor of scores.
        """
        b, c, sh, sw = search_patch.size()
        _, _, th, tw = template_patch.size()
        mh, mw = sh - th + 1, sw - tw + 1

        # Sum(I^2) per window: correlate I^2 with a box of ones.
        box = self._embed(1, search_patch, th, tw)
        ssd_batch = self._valid_xcorr(search_patch ** 2, box, mh, mw)

        # Cross term Sum(I*T): correlate I with the zero-padded template.
        padded_t = self._embed(template_patch, search_patch, th, tw)
        ssd_batch = ssd_batch - 2 * self._valid_xcorr(search_patch, padded_t, mh, mw)

        if not omit_monomial:
            # Constant term Sum(T^2), broadcast over all window positions.
            opt = torch.sum(template_patch ** 2, dim=(1, 2, 3)).view(b, 1, 1)
            ssd_batch = ssd_batch + opt

        if mean:
            ssd_batch = ssd_batch / (th * tw)

        return 1 - ssd_batch if ret_sim else ssd_batch

    def _spatial_softmax(self, heatmap):
        """Softmax over the spatial (h*w) positions of each channel.

        The batch dimension does not take part in the normalization.
        """
        b, c, h, w = heatmap.size()
        flat = heatmap.reshape(b, c, -1).transpose(2, 1)  # (b, h*w, c)
        flat = F.softmax(flat, dim=1)                     # over h*w only
        return flat.transpose(2, 1).reshape(b, c, h, w)

    def _conv_by_fft(
        self, search_patch, template_patch, *,
        mean=True, softmax=False
    ):
        """Sliding-window correlation Sum(I*T) via FFT.

        Param
        -----
        search_patch:   (b, c, sh, sw) search image
        template_patch: (b, c, th, tw) template
        mean:           normalize by the template area th * tw
        softmax:        apply a spatial softmax over each score map

        Returns
        -------
        (b, sh-th+1, sw-tw+1) tensor of scores.
        """
        b, c, sh, sw = search_patch.size()
        _, _, th, tw = template_patch.size()
        mh, mw = sh - th + 1, sw - tw + 1

        padded_t = self._embed(template_patch, search_patch, th, tw)
        conv_f = self._valid_xcorr(search_patch, padded_t, mh, mw)

        if mean:
            conv_f = conv_f / (th * tw)

        if softmax:
            # Treat the batch axis as channels so the softmax runs per map.
            # squeeze(dim=0), not a bare squeeze(): a bare squeeze also
            # dropped the batch axis when b == 1, changing the output rank.
            conv_f = self._spatial_softmax(conv_f.unsqueeze(dim=0))
            conv_f = conv_f.squeeze(dim=0)

        return conv_f

    def ssd_fft_mask(self, search_patch, template_patch, mask, *,
        omit_monomial=True, mean=True, ret_sim=True
    ):
        """Mask-weighted SSD:  Sum m * (I - T)^2 over each window.

        Expands to Sum(m*I^2) - 2*Sum(I*(m*T)) + Sum(m*T^2); the first two
        terms are FFT cross-correlations, the last is constant in the
        window position.

        Param
        -----
        mask:  (b, c, th, tw) per-pixel weights aligned with the template
        (remaining parameters as in :meth:`_ssd_by_fft`)

        Returns
        -------
        (b, sh-th+1, sw-tw+1) tensor of scores.
        """
        b, c, sh, sw = search_patch.size()
        _, _, th, tw = template_patch.size()
        mh, mw = sh - th + 1, sw - tw + 1

        # Sum(m * I^2) per window: correlate I^2 with the padded mask.
        padded_mask = self._embed(mask, search_patch, th, tw)
        ssd_batch = self._valid_xcorr(search_patch ** 2, padded_mask, mh, mw)

        # Cross term Sum(I * (m*T)).
        padded_wt = self._embed(template_patch * mask, search_patch, th, tw)
        ssd_batch = ssd_batch - 2 * self._valid_xcorr(search_patch, padded_wt, mh, mw)

        if not omit_monomial:
            # Constant term Sum(m * T^2) (the original local was
            # misleadingly named "search_square").
            template_square = template_patch ** 2 * mask
            opt = torch.sum(template_square, dim=(1, 2, 3)).view(b, 1, 1)
            ssd_batch = ssd_batch + opt

        if mean:
            ssd_batch = ssd_batch / (th * tw)

        return 1 - ssd_batch if ret_sim else ssd_batch
