import torch
from torch import nn

import math
import numpy as np

from einops import rearrange, repeat, reduce

class SparseMultiHeadCrossAttention(nn.Module):
    """Multi-head cross attention over an explicit sparse set of (query, key) pairs.

    Instead of materializing a dense attention matrix, logits are computed only
    for the index pairs listed in ``linked_pair`` and the softmax is normalized
    per key index with scatter operations (``index_reduce_`` / ``index_add_``).

    Args:
        ch: total embedding width, split evenly across heads.
        num_heads: number of attention heads; must divide ``ch``.
        dropout: dropout probability applied to the attention weights.
    """

    def __init__(self, ch, num_heads, dropout=0.1):
        super().__init__()
        assert ch % num_heads == 0

        self.ch = ch
        self.num_heads = num_heads
        # Scaled-dot-product temperature sqrt(d_head). NOTE: previously this
        # was computed but never applied to the logits; it is now used below.
        self.scale = math.sqrt(ch / num_heads)

        self.linear_q = nn.Linear(ch, ch)
        self.linear_k = nn.Linear(ch, ch)
        self.linear_v = nn.Linear(ch, ch)
        self.linear_o = nn.Linear(ch, ch)

        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, linked_pair):
        """Compute sparse cross attention.

        Args:
            q: (N_q, ch) query-side features.
            k: (N_k, ch) key-side features.
            v: (N_q, ch) value features, indexed with the same column of
                ``linked_pair`` as ``q``.
            linked_pair: (M, 2) integer tensor; column 0 indexes into q/v,
                column 1 indexes into k. Output row j aggregates all pairs
                whose column-1 entry equals j.

        Returns:
            (N_k, ch) tensor of attended features.
        """
        N = k.shape[0]
        device = q.device
        d_head = self.ch // self.num_heads

        q = self.linear_q(q)
        k = self.linear_k(k)
        v = self.linear_v(v)

        # Gather the per-pair query/key rows and split into heads.
        q = q[linked_pair[:, 0]].reshape(-1, self.num_heads, d_head)
        k = k[linked_pair[:, 1]].reshape(-1, self.num_heads, d_head)

        # Scaled dot-product logit per (pair, head).
        e = torch.sum(q * k, dim=-1) / self.scale

        # Per-key running max for a numerically stable softmax. The zero init
        # participates in the amax (include_self defaults to True); subtracting
        # a per-key constant does not change the softmax result.
        max_e = torch.zeros((N, self.num_heads), device=device, dtype=e.dtype).index_reduce_(
            0, linked_pair[:, 1], e, "amax")
        e = e - max_e[linked_pair[:, 1]]

        # Softmax normalized over all pairs sharing the same key index.
        attn = torch.exp(e)
        sum_attn = torch.zeros((N, self.num_heads), device=device, dtype=attn.dtype).index_add_(
            0, linked_pair[:, 1], attn)
        attn = attn / sum_attn[linked_pair[:, 1]]

        attn = self.dropout(attn)

        # Weight the gathered values and scatter-sum them back per key index.
        v = v[linked_pair[:, 0]].reshape(-1, self.num_heads, d_head)
        v = (v * attn.unsqueeze(-1)).view(-1, self.ch)
        o = torch.zeros((N, self.ch), device=device, dtype=v.dtype).index_add_(
            0, linked_pair[:, 1], v)

        return self.linear_o(o)


class AgentAttention(nn.Module):
    """Agent-style attention for sparse voxel features.

    Voxel features attend into a small learned grid of "agent" tokens via
    sparse cross attention, the agent grid is mixed with pointwise + parallel
    depthwise convolutions, and the result is scattered back to the voxels
    with the link pairs reversed.

    Args:
        ch: channel width of voxel/agent features.
        num_heads: attention heads for both cross-attention stages.
        agent_size: side length of the square agent grid.
        kernel_size_list: depthwise conv kernel sizes; the BN outputs of the
            parallel branches are summed. Must be non-empty.
        dropout: attention dropout probability.
    """

    def __init__(
        self,
        ch,
        num_heads,
        agent_size,
        kernel_size_list=(13, 5),  # tuple: avoid the mutable-default pitfall
        dropout=0.1,
    ):
        super().__init__()
        if not kernel_size_list:
            raise ValueError("kernel_size_list must contain at least one kernel size")

        self.ch = ch
        self.agent_size = agent_size

        # Learned agent tokens: one (agent_size^2, ch) grid shared across the
        # batch. torch.empty (not the deprecated torch.Tensor constructor);
        # xavier init fills it immediately.
        self.I = nn.Parameter(torch.empty(1, agent_size * agent_size, ch))
        nn.init.xavier_uniform_(self.I)

        self.cross1 = SparseMultiHeadCrossAttention(ch, num_heads, dropout)
        self.cross2 = SparseMultiHeadCrossAttention(ch, num_heads, dropout)

        self.pw1 = nn.Conv2d(ch, ch, 1, 1, 0)
        self.bn1 = nn.BatchNorm2d(ch)
        self.act1 = nn.GELU()

        # Parallel depthwise branches. add_module (rather than append) keeps
        # the child names "conv{i}"/"bn{i}" so existing checkpoints still load.
        self.group_conv = nn.ModuleList()
        self.group_bn = nn.ModuleList()
        for i, kernel_size in enumerate(kernel_size_list):
            self.group_conv.add_module("conv{}".format(i), nn.Conv2d(
                ch, ch, kernel_size=kernel_size, stride=1,
                padding=kernel_size // 2, groups=ch))
            self.group_bn.add_module("bn{}".format(i), nn.BatchNorm2d(ch))

        self.act2 = nn.GELU()
        self.pw2 = nn.Conv2d(ch, ch, 1, 1, 0)
        self.bn2 = nn.BatchNorm2d(ch)

    def forward(self, voxel_features, voxel_coords, linked_pair, batch_size, H_indice):
        """Run the voxel -> agent -> voxel attention round trip.

        Args:
            voxel_features: (N_voxel, ch) sparse voxel features.
            voxel_coords: unused here; kept for interface compatibility.
            linked_pair: (M, 2) integer tensor linking voxels (column 0) to
                flattened agent slots (column 1) for the voxel->agent stage.
            batch_size: number of samples in the batch.
            H_indice: unused here; kept for interface compatibility.

        Returns:
            (N_voxel, ch) updated voxel features.
        """
        # Voxels -> agents: every sample uses the same learned agent grid.
        H = self.I.repeat(batch_size, 1, 1).reshape(-1, self.ch)
        feat = self.cross1(voxel_features, H, voxel_features, linked_pair)

        # Mix agent tokens on their (agent_size x agent_size) grid in NCHW.
        feat = feat.view(batch_size, self.agent_size, self.agent_size, self.ch)
        feat = feat.permute(0, 3, 1, 2)
        feat = self.act1(self.bn1(self.pw1(feat)))

        # Sum of the parallel depthwise conv branches.
        group_feat = None
        for conv, bn in zip(self.group_conv, self.group_bn):
            branch = bn(conv(feat))
            group_feat = branch if group_feat is None else group_feat + branch
        feat = self.act2(group_feat)

        feat = self.bn2(self.pw2(feat))
        # Back to the (batch * agent_size^2, ch) token layout. reshape (not
        # view): the permuted tensor is non-contiguous and view would raise.
        feat = feat.permute(0, 2, 3, 1).reshape(-1, self.ch)

        # Agents -> voxels: reuse the link pairs with the roles swapped.
        voxel_features = self.cross2(
            feat, voxel_features, feat, torch.flip(linked_pair, dims=[1]))

        return voxel_features
