import torch
import torch.nn as nn
import torch.nn.functional as F

__all__ = ['PartAttentionModule']

from typing import Optional, Callable


class PartAttentionModule(nn.Module):
    """Fuse a learned per-channel feature attention with a mask-area
    ("size") attention over body parts.

    Given part features ``x`` of shape (B, C, N) and part masks of shape
    (B, N, H, W), produces one attention weight per part, shape (B, N).

    Args:
        num_parts: number of parts N.
        num_in_features: number of input feature channels C.
        dropout_rate: dropout probability applied to the feature weights.
        rank: rank of the feature-weight matrix. When ``lowrank`` is False
            it must be 1 or ``num_parts`` so the (C, rank) weights broadcast
            against x of shape (B, C, N).
        lowrank: if True, the (C, N) weights are factored as
            (C, rank) @ (rank, N) via a learned expander.
        fusion: if True, learn a 2-way gate mixing feature and size
            attention; otherwise return the feature attention alone.
        debug: if True, ``forward`` also returns intermediate tensors.
        device: device for parameters and sub-modules.

    Raises:
        ValueError: if ``lowrank`` is False and ``rank`` is neither 1 nor
            ``num_parts``.
    """

    def __init__(self, num_parts: int, num_in_features: int,
                 dropout_rate: float = 0.,
                 rank: int = 1, lowrank: bool = False, fusion: bool = True,
                 debug: bool = False,
                 device: torch.device = 'cpu') -> None:
        super().__init__()

        self.num_parts: int = num_parts
        self.in_channels: int = num_in_features
        self.out_channels: int = num_parts
        # Fixed: was annotated `int`, but dropout_rate is a probability.
        self.dropout_rate: float = dropout_rate
        self.device: torch.device = device
        self._debug: bool = debug
        self._lowrank: bool = lowrank
        self._fusion: bool = fusion
        # Without the low-rank expander, `weights` (C, rank) is broadcast
        # directly against x (B, C, N); that only works for rank == 1 or
        # rank == num_parts.
        if not lowrank and rank not in (1, num_parts):
            raise ValueError(
                f"when lowrank is False, rank must be 1 or num_parts "
                f"({num_parts}), but got rank={rank}")
        self.rank = rank

        # nn.Parameter already sets requires_grad; initialize to a uniform
        # constant so every channel contributes equally at the start.
        _weights = torch.full((num_in_features, rank), 1 / num_in_features,
                              device=device, dtype=torch.float32)
        self.weights = nn.Parameter(_weights)
        # NOTE(review): `refine_feature` is never used in forward(), and the
        # two stacked Linear layers have no nonlinearity between them —
        # kept as-is for state-dict compatibility, but confirm intent.
        self.refine_feature = nn.Sequential(
            nn.Linear(num_in_features, num_in_features // 4),
            nn.Linear(num_in_features // 4, num_in_features)
        ).to(device)
        if self._lowrank:
            # Expands the rank-r factor to one weight column per part.
            self.expander = nn.Parameter(
                torch.ones(rank, num_parts, device=device, dtype=torch.float32))
        self.dropout = nn.Dropout(p=dropout_rate).to(device)
        if self._fusion:
            # Gate heads: each maps the (B, N) attention to one scalar logit.
            self.fc1 = nn.Linear(num_parts, 1).to(device)
            self.fc2 = nn.Linear(num_parts, 1).to(device)

    def forward(self, x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        """Compute the fused part attention.

        Args:
            x: part features of shape (B, C, N), C == ``in_channels`` and
                N == ``num_parts``.
            mask: part masks of shape (B, N, H, W).

        Returns:
            Attention weights of shape (B, N). If ``debug`` was set,
            returns the tuple
            ``(fused_attn, feat_attn, size_attn, g_weight, weights)``.
        """
        # NOTE(review): asserts are stripped under `python -O`; kept (not
        # converted to raises) so callers catching AssertionError still work.
        assert x.dim() == 3, f"expected input tensor 'x' as 3D tensor of shape (B, C, N) " \
                             f"but got a {x.dim()}D tensor instead"
        assert x.size(2) == self.num_parts and x.size(1) == self.in_channels, \
            f"expected input tensor 'x' shape as (*, {self.in_channels}, {self.num_parts}), " \
            f"but got input tensor with shape of (*, {x.size(1)}, {x.size(2)}) instead"
        # Bug fix: these two messages previously reported x's dims/size
        # instead of mask's, making the errors misleading.
        assert mask.dim() == 4, f"expected mask tensor as 4D tensor of shape (B, N, H, W) " \
                                f"but got a {mask.dim()}D tensor instead"
        assert mask.size(1) == self.num_parts, \
            f"expected mask tensor shape as (*, {self.num_parts}, *, *), " \
            f"but got input tensor with shape of (*, {mask.size(1)}, *, *) instead"

        # Feature attention: weight each channel, sum over channels,
        # softmax across parts.
        if self._lowrank:
            weight = torch.matmul(self.weights, self.expander)  # (C, N)
        else:
            weight = self.weights  # (C, 1) or (C, N); broadcasts over parts
        weight = self.dropout(weight)
        feat_attn: torch.Tensor = F.softmax(torch.sum(x * weight, dim=1), dim=1)  # (B, N)

        # Size attention: L1-normalized mask area per part. The +1 keeps an
        # all-zero mask row from producing a zero-sum normalization.
        size_attn: torch.Tensor = torch.sum(mask, dim=[2, 3]) + 1  # (B, N)
        size_attn = F.normalize(size_attn, p=1)

        if self._fusion:
            # Learned 2-way gate mixing feature weights and area weights.
            g_weight: torch.Tensor = F.softmax(
                torch.cat([self.fc1(feat_attn), self.fc2(size_attn)], dim=1), dim=1)  # (B, 2)
            fused_attn: torch.Tensor = g_weight[:, :1] * feat_attn + g_weight[:, 1:] * size_attn
        else:
            # No fusion: fixed gate (1, 0), output is the feature attention.
            g_weight: torch.Tensor = torch.tensor([[1., 0.]], device=self.device).expand(size_attn.size(0), -1)
            fused_attn: torch.Tensor = feat_attn

        if self._debug:
            return fused_attn, feat_attn, size_attn, g_weight, self.weights
        return fused_attn
