from ..builder import MODELS
from .base_classifier import BaseClassifier
from ..modules.pointnet2 import square_distance, farthest_point_sample, uniform_point_sample, index_points

import torch
import torch.nn as nn
import torch.nn.functional as F

NORM_TYPE = 'bn'

class Permute(nn.Module):
    """Permute tensor dimensions inside an ``nn.Sequential`` pipeline.

    Accepts either a single tensor (permuted with ``dims``) or a sequence of
    tensors, where ``dims`` holds one permutation per tensor.
    """

    def __init__(self, dims):
        super().__init__()
        self.dims = dims

    def forward(self, inputs):
        # Single tensor: apply the one stored permutation directly.
        if isinstance(inputs, torch.Tensor):
            return inputs.permute(self.dims)
        # Sequence of tensors: pair each tensor with its own permutation.
        permuted = []
        for tensor, order in zip(inputs, self.dims):
            permuted.append(tensor.permute(order))
        return permuted

class ConcatXYZ(nn.Module):
    """Append xyz coordinates to the feature channels.

    Input and output are ``(xyz, data)`` pairs shaped ``[BS, N, C]``; the xyz
    tensor is concatenated onto the last (channel) dimension of ``data``,
    while xyz itself passes through unchanged.
    """

    def __init__(self):
        super().__init__()

    def forward(self, inputs):
        xyz, features = inputs  # both [BS, N, C]
        augmented = torch.cat([features, xyz], dim=-1)
        return xyz, augmented

class PointAttentionLocal(nn.Module):
    """Multi-head attention where each point attends only to its local neighborhood.

    Each point acts as a single query; its keys/values come from the
    ``local_size`` nearest points (the point itself plus its
    ``local_size - 1`` nearest neighbors).

    Args:
        dim: feature channel width (input and output).
        qkdim: total query/key projection width, split across heads.
        nhead: number of attention heads.
        local_size: neighborhood size, counting the point itself.
        relative: if True, values are neighbor features minus the center
            point's feature instead of the raw neighbor features.
        cat_xyz: if True, concatenate xyz onto the features before the
            q/k/v projections.
        dropout: dropout probability applied to the attention weights.
        res: if True, add a residual connection from the input features.
    """
    def __init__(self, dim, qkdim, nhead, local_size, relative, cat_xyz, dropout, res=True):
        super().__init__()
        self.dim = dim
        self.qkdim = qkdim
        self.nhead = nhead
        self.local_size = local_size
        self.relative = relative
        self.dropout = dropout
        self.cat_xyz = cat_xyz
        self.res = res

        head_dim = self.dim // self.nhead
        assert head_dim * self.nhead == self.dim, "embed_dim must be divisible by nheads"

        qkdim = self.qkdim // self.nhead
        assert qkdim * self.nhead == self.qkdim, "qk_dim must be divisible by nheads"
        if cat_xyz:
            # Projections consume features with xyz appended (+3 channels).
            self.q_proj = nn.Linear(dim+3, self.qkdim)
            self.k_proj = nn.Linear(dim+3, self.qkdim)
            self.v_proj = nn.Linear(dim+3, dim)
        else:
            self.q_proj = nn.Linear(dim, self.qkdim)
            self.k_proj = nn.Linear(dim, self.qkdim)
            self.v_proj = nn.Linear(dim, dim)
    
    def forward(self, args):
        """args: (xyz [BS, N, 3], data [BS, N, dim]) -> (xyz, attended data [BS, N, dim])."""
        xyz, inp_data = args
        data = inp_data
        if self.cat_xyz:
            data = torch.cat([data, xyz], dim=-1)
        BS, N, dim = data.shape
        local_size = self.local_size

        qkdim = self.qkdim // self.nhead    # per-head query/key width
        head_dim = self.dim // self.nhead   # per-head value width
        scaling = float(head_dim) ** -0.5   # standard 1/sqrt(d) attention scaling

        # Nearest-neighbor search: near-zero self-distances are masked to Inf
        # so topk returns the local_size-1 true neighbors, not the point itself.
        dist = square_distance(xyz, xyz) # [B, N, N]
        dist[dist<1e-8] = float("Inf")
        _, local_index = torch.topk(dist, local_size-1, dim=-1, largest=False, sorted=False) # [B, N, local_size - 1]
        # Prepend each point's own index so the neighborhood includes it.
        self_index = torch.arange(N, dtype=local_index.dtype, device=local_index.device)
        self_index = self_index.reshape([1, N, 1]).repeat(BS, 1, 1)
        local_index = torch.cat([self_index, local_index], dim=-1)
        local_data = index_points(data, local_index) # [B, N, local_size, C]
        local_data = local_data.reshape([BS*N, local_size, dim]).permute([1, 0, 2]) # [local_size, BS*N, dim]

        if self.relative:
            # Relative encoding: neighbor features minus the center feature.
            v = local_data - data.reshape([1, BS*N, dim])
        else:
            v = local_data

        q = self.q_proj(data) # [B, N, qk_dim*nhead]
        k = self.k_proj(local_data) # [local_size, B*N, qk_dim*nhead]
        v = self.v_proj(v) # [local_size, B*N, dim]
        
        q = q * scaling

        # Fold heads into the batch dimension; each point contributes exactly
        # one query against its local_size keys/values.
        q = q.reshape([1, BS * N * self.nhead, qkdim]).transpose(0, 1) # [BS*N*nheads, 1, qk_dim]
        k = k.reshape([-1, BS * N * self.nhead, qkdim]).transpose(0, 1) # [BS*N*nheads, local_size, qk_dim]
        v = v.reshape([-1, BS * N * self.nhead, head_dim]).transpose(0, 1) # [BS*N*nheads, local_size, head_dim]

        attn_weights = torch.bmm(q, k.transpose(1, 2))
        assert attn_weights.shape == (BS * N * self.nhead, 1, local_size)

        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_weights, v)
        assert attn_output.shape == (BS * N * self.nhead, 1, head_dim)
        # Merge heads back into the channel dimension.
        # NOTE(review): the unsqueeze(1) looks redundant — reshape flattens it anyway.
        attn_output = attn_output.unsqueeze(1).reshape([BS, N, self.dim])
        if self.res:
            attn_output = attn_output + inp_data
        return xyz, attn_output

class PointAttentionGlobal(nn.Module):
    """Multi-head self-attention over all points in the cloud.

    Standard scaled dot-product attention in which every point attends to
    every other point.

    Args:
        dim: feature channel width (input and output).
        qkdim: total query/key projection width, split across heads.
        nhead: number of attention heads.
        cat_xyz: if True, concatenate xyz onto the features before the
            q/k/v projections.
        dropout: dropout probability applied to the attention weights.
        res: if True, add a residual connection from the input features.
    """

    def __init__(self, dim, qkdim, nhead, cat_xyz, dropout, res=True):
        super().__init__()
        self.dim = dim
        self.qkdim = qkdim
        self.nhead = nhead
        self.dropout = dropout
        self.cat_xyz = cat_xyz
        self.res = res

        head_dim = self.dim // self.nhead
        assert head_dim * self.nhead == self.dim, "embed_dim must be divisible by nheads"
        head_qk = self.qkdim // self.nhead
        assert head_qk * self.nhead == self.qkdim, "qk_dim must be divisible by nheads"

        # xyz concatenation widens the projection input by 3 channels.
        in_dim = dim + 3 if cat_xyz else dim
        self.q_proj = nn.Linear(in_dim, self.qkdim)
        self.k_proj = nn.Linear(in_dim, self.qkdim)
        self.v_proj = nn.Linear(in_dim, dim)

    def _split_heads(self, t, BS, N, head_width):
        """[BS, N, nhead*head_width] -> [BS*nhead, N, head_width]."""
        t = t.reshape([BS, N, self.nhead, head_width]).permute([0, 2, 1, 3])
        return t.reshape([BS * self.nhead, N, head_width])

    def forward(self, args):
        """args: (xyz [BS, N, 3], data [BS, N, dim]) -> (xyz, attended data [BS, N, dim])."""
        xyz, inp_data = args
        feats = torch.cat([inp_data, xyz], dim=-1) if self.cat_xyz else inp_data
        BS, N, _ = feats.shape

        head_qk = self.qkdim // self.nhead   # per-head query/key width
        head_dim = self.dim // self.nhead    # per-head value width
        scaling = float(head_dim) ** -0.5    # standard 1/sqrt(d) attention scaling

        # Project, scale queries, then fold heads into the batch dimension.
        q = self._split_heads(self.q_proj(feats) * scaling, BS, N, head_qk)
        k = self._split_heads(self.k_proj(feats), BS, N, head_qk)
        v = self._split_heads(self.v_proj(feats), BS, N, head_dim)

        attn_weights = torch.bmm(q, k.transpose(1, 2))  # [BS*nhead, N, N]
        assert attn_weights.shape == (BS * self.nhead, N, N)

        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_weights, v)
        assert attn_output.shape == (BS * self.nhead, N, head_dim)

        # Undo the head split: [BS*nhead, N, head_dim] -> [BS, N, dim].
        attn_output = (
            attn_output.reshape([BS, self.nhead, N, head_dim])
            .permute([0, 2, 1, 3])
            .reshape([BS, N, self.dim])
        )
        if self.res:
            attn_output = attn_output + inp_data
        return xyz, attn_output

class PointLBR(nn.Module):
    """Linear -> (optional) norm -> (optional) ReLU over per-point features.

    Operates on ``(xyz, data)`` pairs with ``data`` shaped ``[BS, N, C]``.
    The linear layer drops its bias whenever a norm layer follows, since the
    norm's affine shift absorbs it.
    """

    def __init__(self, in_channels, out_channels, norm=NORM_TYPE, relu=True):
        super().__init__()
        # Any truthy norm string means a norm layer follows -> no bias needed.
        self.linear = nn.Linear(in_channels, out_channels, bias=not norm)
        if norm == 'bn':
            self.norm = nn.BatchNorm1d(out_channels)
        elif norm == 'gn':
            self.norm = nn.GroupNorm(8, out_channels)
        else:
            self.norm = None
        if relu:
            self.relu = nn.ReLU()

    def forward(self, args):
        """args: (xyz, data [BS, N, C_in]) -> (xyz, data [BS, N, C_out])."""
        xyz, feats = args
        out = self.linear(feats)
        if self.norm:
            # Norm layers expect channels-first [BS, C, N]; transpose around them.
            out = self.norm(out.transpose(1, 2)).transpose(1, 2)
        if hasattr(self, 'relu'):
            out = self.relu(out)
        return xyz, out

class PointDownsample(nn.Module):
    """Subsample the point cloud to ``n`` points (or ``ratio * N`` points).

    Args:
        n: absolute number of points to keep.
        ratio: fraction of points to keep. Exactly one of ``n`` / ``ratio``
            must be given.
        interpolation: sampling strategy, ``'fps'`` (farthest point sampling)
            or ``'uniform'``.

    Raises:
        ValueError: if ``interpolation`` is not ``'fps'`` or ``'uniform'``.
    """
    def __init__(self, n=None, ratio=None, interpolation='fps'):
        super().__init__()
        assert bool(n) ^ bool(ratio), f"only one of n and ratio can be True, but get n={n}, ratio={ratio}"
        if interpolation not in ('fps', 'uniform'):
            # BUG FIX: an unknown strategy previously left `idx` unbound and
            # crashed with a NameError inside forward(); fail fast here instead.
            raise ValueError(f"unknown interpolation: {interpolation!r}, expected 'fps' or 'uniform'")
        self.n = n
        self.ratio = ratio
        self.interpolation = interpolation

    def forward(self, args):
        """args: (xyz [BS, N, 3], data [BS, N, C]) -> subsampled (xyz, data)."""
        xyz, data = args
        BS, N, C = xyz.shape
        # Resolve the target point count from either the ratio or the fixed n.
        n = int(N * self.ratio) if self.ratio else self.n
        if self.interpolation == 'uniform':
            idx = uniform_point_sample(xyz, n)
        else:  # 'fps' — the only other value allowed by __init__.
            idx = farthest_point_sample(xyz, n)
        new_xyz = index_points(xyz, idx)
        new_data = index_points(data, idx)
        return new_xyz, new_data

class CheckNpoints(nn.Module):
    """Sanity-check layer: asserts the cloud has exactly ``npoints`` points.

    Passes the ``(xyz, data)`` pair through unchanged.
    """

    def __init__(self, npoints):
        super().__init__()
        self.npoints = npoints

    def forward(self, args):
        xyz, data = args
        _, N, _ = xyz.shape  # xyz must be [BS, N, C]
        assert N == self.npoints, f"CheckNpoints, expect {self.npoints} points, but {N}"
        return xyz, data


class PointMaxPooling(nn.Module):
    """Max-pool point features, either over a local neighborhood or globally.

    Args:
        local_size: number of nearest neighbors pooled together with each
            point; ignored (pass None/0) when ``is_global`` is True.
        is_global: if True, max-pool over all points, collapsing the cloud
            to a single point at the origin.
    """
    def __init__(self, local_size, is_global=False):
        super().__init__()
        assert not (local_size and is_global), "local_size and is_global cannot all be True"
        self.local_size = local_size
        self.is_global = is_global

    def forward(self, args):
        """args: (xyz [BS, N, 3], data [BS, N, C]) -> (xyz, pooled data)."""
        xyz, data = args
        device = xyz.device
        BS, N, C = xyz.shape
        if self.is_global:
            # Collapse all points into one feature vector; xyz becomes a
            # single dummy point at the origin.
            data = torch.max(data, 1)[0].unsqueeze(1)
            xyz = torch.zeros([BS, 1, C], device=xyz.device)
            return xyz, data
        else:
            # Mask (near-)zero self-distances so topk returns true neighbors.
            dist = square_distance(xyz, xyz) # [B, N, N]
            dist[dist<1e-8] = float("Inf")
            _, local_index = torch.topk(dist, self.local_size, dim=-1, largest=False, sorted=False) # [B, npoint, self.local_size]
            # Prepend each point's own index so it participates in the pool.
            # NOTE(review): local_size+1 features end up pooled here, whereas
            # PointAttentionLocal treats local_size as INCLUDING the point
            # itself — confirm this off-by-one difference is intended.
            self_index = torch.arange(N, device=device, dtype=local_index.dtype).reshape([1, N, 1]).repeat(BS, 1, 1)
            local_index = torch.cat([self_index, local_index], dim=-1)

            local_data = index_points(data, local_index) # [BS, N, self.local_size, C]
            local_data, _ = torch.max(local_data, dim=2)
            return xyz, local_data

class PointAttentionHybrid(nn.Module):
    """Blend of local and global point attention.

    Runs both attention variants on the same input and combines their outputs
    with learnable per-channel weights (both initialized to ones).
    """

    def __init__(self, dim, qkdim, nhead, local_size, relative, cat_xyz, dropout, res=True):
        super().__init__()
        self.attn_local = PointAttentionLocal(dim, qkdim, nhead, local_size, relative, cat_xyz, dropout, res)
        self.attn_global = PointAttentionGlobal(dim, qkdim, nhead, cat_xyz, dropout, res)
        # Per-channel mixing weights for the two attention branches.
        self.alpha_local = nn.Parameter(torch.ones(dim))
        self.alpha_global = nn.Parameter(torch.ones(dim))

    def forward(self, args):
        """args: (xyz, data) -> (xyz, weighted sum of the two attention outputs)."""
        xyz = args[0]
        _, local_out = self.attn_local(args)
        _, global_out = self.attn_global(args)
        combined = self.alpha_local * local_out + self.alpha_global * global_out
        return xyz, combined

@MODELS.register_module()
class AttNet_dev(BaseClassifier):
    """Point-cloud classifier built from attention-based set-abstraction stages.

    Input points are channels-first ``[B, 3, N]``; ``inference`` returns class
    logits of shape ``[B, num_class]``.

    Args:
        num_class: number of output classes.
        norm: normalization type for the classifier head, 'bn' or 'gn'.

    Raises:
        ValueError: if ``norm`` is neither 'bn' nor 'gn'.
    """

    def __init__(self, num_class, norm=NORM_TYPE):
        super().__init__()
        # Classification head: 1024 -> 512 -> 256 -> num_class with dropout.
        self.fc1 = nn.Linear(1024, 512)
        self.drop1 = nn.Dropout(0.4)
        self.fc2 = nn.Linear(512, 256)
        self.drop2 = nn.Dropout(0.5)
        self.fc3 = nn.Linear(256, num_class)

        if norm == 'bn':
            self.norm1 = nn.BatchNorm1d(512)
            self.norm2 = nn.BatchNorm1d(256)
        elif norm == 'gn':
            self.norm1 = nn.GroupNorm(8, 512)
            self.norm2 = nn.GroupNorm(8, 256)
        else:
            # BUG FIX: message was a plain string ('Unkown norm type: {norm}'),
            # so {norm} was never interpolated; also fixed the typo.
            raise ValueError(f'Unknown norm type: {norm}')

        # Stage 1: halve the points, lift features 3 -> 128 with local
        # attention, locally max-pool, then downsample to exactly 128 points.
        self.sa1 = nn.Sequential(
            Permute([(0, 2, 1), (0, 2, 1)]),
            PointDownsample(ratio=0.5),
            PointLBR(3, 8),
            PointAttentionLocal(dim=8, qkdim=8, nhead=2, local_size=16, relative=True, cat_xyz=False, dropout=0.0),
            PointLBR(8, 32),
            PointLBR(32, 128),
            PointMaxPooling(16),
            PointDownsample(ratio=0.25),
            CheckNpoints(128),
            Permute([(0, 2, 1), (0, 2, 1)]),
        )
        # Stage 2: hybrid (local + global) attention at 128 channels, lift to
        # 256, then pool and downsample to a quarter of the points.
        self.sa2 = nn.Sequential(
            Permute([(0, 2, 1), (0, 2, 1)]),
            PointAttentionHybrid(dim=128, qkdim=64, nhead=4, local_size=16, relative=True, cat_xyz=True, dropout=0.0),
            PointLBR(128, 128),
            PointLBR(128, 256),
            PointMaxPooling(16),
            PointDownsample(ratio=0.25),
            Permute([(0, 2, 1), (0, 2, 1)]),
        )
        # Stage 3: append xyz, lift to 1024 channels, and global-max-pool the
        # whole cloud down to a single feature vector.
        self.sa3 = nn.Sequential(
            Permute([(0, 2, 1), (0, 2, 1)]),
            ConcatXYZ(),
            PointLBR(256+3, 256),
            PointLBR(256, 512),
            PointLBR(512, 1024),
            PointMaxPooling(None, is_global=True),
            Permute([(0, 2, 1), (0, 2, 1)]),
        )

    def inference(self, points):
        """Compute class logits for ``points`` shaped ``[B, 3, N]``."""
        B, dim, N = points.shape
        # The raw coordinates serve as both xyz and initial features.
        l1_xyz, l1_points = self.sa1((points, points))
        l2_xyz, l2_points = self.sa2((l1_xyz, l1_points))
        l3_xyz, l3_points = self.sa3((l2_xyz, l2_points))
        x = l3_points.reshape(B, 1024)
        x = self.drop1(F.relu(self.norm1(self.fc1(x))))
        x = self.drop2(F.relu(self.norm2(self.fc2(x))))
        logits = self.fc3(x)
        return logits
