"""
Prepare to deprecate this file
"""
from ..builder import MODELS
from .base_classifier import BaseClassifier
from ..modules.pointnet2 import square_distance, farthest_point_sample, index_points

import torch
import torch.nn as nn
import torch.nn.functional as F

class PointAttention(nn.Module):
    """Multi-head scaled dot-product attention over each point's k-NN neighbourhood.

    For every point, its (local_size - 1) nearest neighbours plus the point
    itself form the key/value set, while the point's own feature is the single
    query — so attention is restricted to the local neighbourhood.

    Args:
        qdim, kdim, vdim: input feature dims of query / key / value.
        qkdim: total projected query/key dim (must be divisible by nhead).
        edim: total projected value / output dim (must be divisible by nhead).
        nhead: number of attention heads.
        local_size: neighbourhood size, counting the point itself.
        relative: if True, values are neighbour features minus the centre
            point's feature (relative offsets) rather than raw features.
        cat_xyz: if True, xyz coordinates are concatenated to the features
            before projection (projection input dims grow by 3).
        dropout: dropout probability applied to the attention weights.
    """
    def __init__(self, qdim, kdim, vdim, qkdim, edim, nhead, local_size, relative, cat_xyz, dropout):
        super().__init__()
        self.qdim = qdim
        self.kdim = kdim
        self.vdim = vdim
        self.qkdim = qkdim
        self.edim = edim
        self.nhead = nhead
        self.local_size = local_size
        self.relative = relative
        self.dropout = dropout
        self.cat_xyz = cat_xyz

        head_dim = self.edim // self.nhead
        assert head_dim * self.nhead == self.edim, "embed_dim must be divisible by nheads"

        qkdim = self.qkdim // self.nhead
        assert qkdim * self.nhead == self.qkdim, "qk_dim must be divisible by nheads"
        if cat_xyz:
            # +3 on the input side to absorb the concatenated xyz coordinates.
            self.q_proj = nn.Linear(qdim+3, self.qkdim)
            self.k_proj = nn.Linear(kdim+3, self.qkdim)
            self.v_proj = nn.Linear(vdim+3, edim)
        else:
            self.q_proj = nn.Linear(qdim, self.qkdim)
            self.k_proj = nn.Linear(kdim, self.qkdim)
            self.v_proj = nn.Linear(vdim, edim)
    
    def forward(self, args):
        """
        args: (xyz, data)
            xyz:  [BS, npoint, 3] point coordinates (used for the k-NN search)
            data: [BS, npoint, dim] point features
        Returns (xyz, attn_output) with attn_output of shape [BS, npoint, edim].
        """
        xyz, data = args
        if self.cat_xyz:
            data = torch.cat([data, xyz], dim=-1)
        BS, N, dim = data.shape
        local_size = self.local_size

        qkdim = self.qkdim // self.nhead    # per-head query/key dim
        head_dim = self.edim // self.nhead  # per-head value/output dim
        # NOTE(review): the scale uses head_dim (edim/nhead), not the per-head
        # q/k dim that the dot products are actually taken over — confirm this
        # is intended when qkdim != edim.
        scaling = float(head_dim) ** -0.5

        dist = square_distance(xyz, xyz) # [B, N, N]
        # Push (near-)zero self distances to Inf so topk returns true neighbours only.
        dist[dist<1e-8] = float("Inf")
        _, local_index = torch.topk(dist, local_size-1, dim=-1, largest=False, sorted=False) # [B, N, local_size - 1]
        # Re-insert each point's own index so the neighbourhood includes itself.
        self_index = torch.arange(N, dtype=local_index.dtype, device=local_index.device)
        self_index = self_index.reshape([1, N, 1]).repeat(BS, 1, 1)
        local_index = torch.cat([self_index, local_index], dim=-1)
        local_data = index_points(data, local_index) # [B, N, local_size, C]
        local_data = local_data.reshape([BS*N, local_size, dim]).permute([1, 0, 2]) # [local_size, BS*N, dim]

        if self.relative:
            # Values become neighbour-minus-centre offsets (broadcast over local_size).
            v = local_data - data.reshape([1, BS*N, dim])
        else:
            v = local_data

        q = self.q_proj(data) # [B, N, qk_dim*nhead]
        k = self.k_proj(local_data) # [local_size, B*N, qk_dim*nhead]
        v = self.v_proj(v) # [local_size, B*N, edim]
        
        q = q * scaling

        # Split heads: each point contributes one query over its local_size keys.
        q = q.reshape([1, BS * N * self.nhead, qkdim]).transpose(0, 1) # [BS*N*nheads, 1, qk_dim]
        k = k.reshape([-1, BS * N * self.nhead, qkdim]).transpose(0, 1) # [BS*N*nheads, local_size, qk_dim]
        v = v.reshape([-1, BS * N * self.nhead, head_dim]).transpose(0, 1) # [BS*N*nheads, local_size, head_dim]

        attn_weights = torch.bmm(q, k.transpose(1, 2))
        assert attn_weights.shape == (BS * N * self.nhead, 1, local_size)

        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.bmm(attn_weights, v)
        assert attn_output.shape == (BS * N * self.nhead, 1, head_dim)
        # NOTE(review): unsqueeze(1) is a no-op here — the reshape flattens all
        # elements regardless; heads are re-merged into edim.
        attn_output = attn_output.unsqueeze(1).reshape([BS, N, self.edim])
        return xyz, attn_output

class LSA(nn.Module):
    """Local Self-Attention: each point attends only to its k nearest
    neighbours (plus itself) via nn.MultiheadAttention.

    Args:
        dim: feature dimension of the input and output point features.
        local_size: number of nearest neighbours each point may attend to.
        nheads: number of attention heads (must divide dim).
        dropout: attention dropout probability.
    """
    def __init__(self, dim, local_size, nheads, dropout):
        super().__init__()
        self.dim = dim
        self.nheads = nheads
        self.dropout = dropout
        self.local_size = local_size
        self.attn = nn.MultiheadAttention(embed_dim=dim, num_heads=nheads, dropout=dropout)
    
    def forward(self, args):
        """
        args: (xyz, data)
            xyz:  [BS, npoint, 3] point coordinates (used for the k-NN search)
            data: [BS, npoint, dim] point features
        Returns (xyz, attn_output) with attn_output of shape [BS, npoint, dim].
        """
        xyz, data = args
        BS, npoint, dim = data.shape
        assert dim == self.dim, "shape inconsistency"
        dist = square_distance(xyz, xyz) # [B, npoint, npoint]
        # Push (near-)zero self distances to Inf so topk returns true neighbours only.
        dist[dist<1e-8] = float("Inf")
        _, local_index = torch.topk(dist, self.local_size, dim=-1, largest=False, sorted=False) # [B, npoint, self.local_size]
        # nn.MultiheadAttention binary attn_mask semantics: True = attention NOT
        # allowed.  The previous code scattered ones at the neighbour positions,
        # which *blocked* the k nearest neighbours and attended to everything
        # else — the inverse of local attention.  Build the mask the right way
        # round: block everything, then unblock the neighbours and the point
        # itself.  (Bool dtype also avoids the deprecated uint8 mask path.)
        attn_mask = torch.ones([BS, npoint, npoint], dtype=torch.bool, device=xyz.device)
        attn_mask.scatter_(-1, local_index, False)
        diag = torch.arange(npoint, device=xyz.device)
        attn_mask[:, diag, diag] = False  # each point may always attend to itself
        attn_mask = attn_mask.unsqueeze(1).repeat(1, self.nheads, 1, 1) # [BS, nheads, npoint, npoint]
        attn_mask = attn_mask.reshape(-1, npoint, npoint)
        data = data.permute([1, 0, 2]) # [npoint, bs, dim] — MultiheadAttention is seq-first
        attn_output, _ = self.attn(query=data, key=data, value=data, attn_mask=attn_mask) # [npoint, BS, dim]
        return xyz, attn_output.permute(1, 0, 2)

class PointLBR(nn.Module):
    """Pointwise Linear -> optional BatchNorm -> optional ReLU over point features."""

    def __init__(self, in_channels, out_channels, bn=True, relu=True):
        super().__init__()
        # BatchNorm supplies its own affine shift, so the linear bias is redundant with it.
        self.linear = nn.Linear(in_channels, out_channels, bias=not bn)
        if bn:
            self.bn = nn.BatchNorm1d(out_channels)
        if relu:
            self.relu = nn.ReLU()

    def forward(self, args):
        """
        args: (xyz, data) with data of shape [BS, npoint, dim].
        Returns (xyz, transformed data) with data of shape [BS, npoint, out_channels].
        """
        xyz, feats = args
        feats = self.linear(feats)
        if hasattr(self, 'bn'):
            # BatchNorm1d normalises over dim 1, so move channels there and back.
            feats = self.bn(feats.transpose(1, 2)).transpose(1, 2)
        if hasattr(self, 'relu'):
            feats = self.relu(feats)
        return xyz, feats

class PointMaxPooling(nn.Module):
    """Max-pool point features, either over k-NN neighbourhoods of FPS centres
    (downsampling the cloud by `stride`) or globally over the whole cloud."""

    def __init__(self, local_size, stride, is_global=False):
        super().__init__()
        assert not (local_size and is_global), "local_size and is_global cannot all be True"
        self.local_size = local_size
        self.stride = stride
        self.is_global = is_global

    def forward(self, args):
        xyz, feats = args
        batch, n_pts, n_coords = xyz.shape

        if self.is_global:
            # Collapse every point into one global descriptor anchored at the origin.
            pooled = feats.max(dim=1, keepdim=True)[0]
            origin = torch.zeros([batch, 1, n_coords], device=xyz.device)
            return origin, pooled

        n_kept = n_pts // self.stride
        centre_idx = farthest_point_sample(xyz, n_kept)  # [batch, n_kept]
        centres = index_points(xyz, centre_idx)

        pair_dist = square_distance(centres, xyz)  # [batch, n_kept, n_pts]
        # Push (near-)zero centre-to-self distances to Inf so topk picks neighbours only.
        pair_dist[pair_dist < 1e-8] = float("Inf")
        _, nbr_idx = torch.topk(pair_dist, self.local_size, dim=-1, largest=False, sorted=False)
        # Prepend each centre's own index so its feature joins the pooled group.
        nbr_idx = torch.cat([centre_idx.reshape([batch, n_kept, 1]), nbr_idx], dim=-1)

        grouped = index_points(feats, nbr_idx)  # [batch, n_kept, local_size+1, C]
        pooled, _ = torch.max(grouped, dim=2)
        return centres, pooled

class PointGlobalAvgPooling(nn.Module):
    """Average all per-point features into a single global feature vector."""

    def __init__(self):
        super().__init__()

    def forward(self, args):
        xyz, feats = args
        batch, n_pts, n_coords = xyz.shape
        # [BS, N, C] -> [BS, C, N] so the 1-d pooling averages over points.
        pooled = F.adaptive_avg_pool1d(feats.permute(0, 2, 1), 1)
        return xyz, pooled.squeeze(-1)



@MODELS.register_module()
class LSANet(BaseClassifier):
    """Local self-attention network for point-cloud classification.

    The backbone runs six stages: each stage lifts the channel count with a
    PointLBR, applies one or two (LSA, PointLBR) pairs, and every stage but
    the last halves the point count with PointMaxPooling(8, 2).
    """

    def __init__(self, num_class):
        super().__init__()
        self.num_class = num_class

        # (out_channels, number of LSA/PointLBR pairs) per backbone stage.
        stages = ((16, 1), (32, 2), (64, 2), (128, 2), (256, 2), (512, 1))
        layers = []
        in_ch = 3
        for stage_idx, (out_ch, n_attn) in enumerate(stages):
            is_first = stage_idx == 0
            # The raw-xyz stem skips BN/ReLU; every later channel lift uses both.
            layers.append(PointLBR(in_ch, out_ch, bn=not is_first, relu=not is_first))
            for _ in range(n_attn):
                layers.append(LSA(out_ch, 8, 4, 0.2))
                layers.append(PointLBR(out_ch, out_ch))
            if stage_idx < len(stages) - 1:
                layers.append(PointMaxPooling(8, 2))
            in_ch = out_ch
        self.backbone = nn.Sequential(*layers)

        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Linear(512, self.num_class)

    def inference(self, xyz):
        """
        xyz: [BS, dim, npoint] input cloud (coordinates double as initial features).
        Returns class logits of shape [BS, num_class].
        """
        points = xyz.permute([0, 2, 1])  # [BS, npoint, dim]
        _, feats = self.backbone((points, points))
        feats = feats.permute([0, 2, 1])  # [BS, dim, npoint]
        feats = self.avgpool(feats).squeeze(-1)  # [BS, dim]
        return self.fc(feats)