'''
Author: devis.dong
Email: devis.dong@gmail.com
Date: 2021-12-09 21:19:45
LastEditTime: 2022-11-18 01:06:28
LastEditors: devis.dong
Description:
'''
from myutils import *
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F

class FC(nn.Sequential):
    """Stack of fully connected layers: Linear -> [Dropout] -> [BatchNorm1d] -> activation.

    Args:
        dims: Width of every layer; dims[0] is the input size, dims[-1] the
            output size (one Linear per consecutive pair).
        bn: Append BatchNorm1d after each layer when True.
        activation: Activation module appended after every layer; pass a falsy
            value (e.g. None) to disable.
        dropout: Dropout probability; only applied when strictly inside (0, 1),
            so dropout=0 disables it.
        preact: Unused.  # NOTE(review): accepted but never read — confirm intent.
        name: Prefix for submodule names.
    """

    def __init__(
            self,
            dims: List[int],
            bn: bool = True,
            activation=nn.ReLU(),
            dropout=0.4,
            preact: bool = False,
            name: str = ""
    ):
        super().__init__()

        self.layers = nn.Sequential()
        num_layers = len(dims)
        for i in range(num_layers - 1):
            self.layers.add_module(name + '_fc_%d' % i, nn.Linear(dims[i], dims[i+1]))

            if 0 < dropout < 1.0:
                # Fixed submodule-name typo ('droput' -> 'dropout'). Safe for
                # checkpoints: nn.Dropout has no parameters or buffers, so the
                # state_dict keys are unchanged.
                self.layers.add_module(name + '_dropout_%d' % i, nn.Dropout(dropout))

            if bn:
                self.layers.add_module(name + '_bn_%d' % i, nn.BatchNorm1d(dims[i+1]))

            if activation:
                # The same activation module instance is reused for every layer
                # (fine for stateless activations such as ReLU).
                self.layers.add_module(name + '_activation_%d' % i, activation)


class SharedMLP(nn.Module):
    """Pointwise MLP (1x1 Conv1d stack) applied independently to every point.

    dims[0] is the input channel count, dims[-1] the output channel count, and
    intermediate entries are hidden widths. BatchNorm follows every conv when
    enabled; the activation is skipped after the final conv.
    """

    def __init__(
            self,
            dims: List[int],
            bn: bool = True,
            activation=nn.ReLU(),
            preact: bool = False,
            name: str = "",
    ):
        super().__init__()

        self.layers = nn.Sequential()
        last = len(dims) - 2  # index of the final conv layer
        for idx, (c_in, c_out) in enumerate(zip(dims[:-1], dims[1:])):
            self.layers.add_module(name + '_conv1d_%d' % idx, nn.Conv1d(c_in, c_out, 1))

            if bn:
                self.layers.add_module(name + '_bn_%d' % idx, nn.BatchNorm1d(c_out))

            # No activation after the last layer.
            if idx != last and activation is not None:
                self.layers.add_module(name + '_activation_%d' % idx, activation)

    def forward(self, x: torch.Tensor):
        """Apply the shared MLP.

        Args:
            x: [B, N, C_in] (lower-rank inputs are unsqueezed to 3-D).

        Returns:
            torch.Tensor: [B, N, C_out]
        """
        while x.dim() < 3:
            x = x.unsqueeze(0)  # promote to batched form
        out = self.layers(x.transpose(1, 2))  # conv over channels: [B, C_out, N]
        return out.transpose(1, 2)            # back to [B, N, C_out]

class TNet(nn.Module):
    """PointNet transform network: predicts a k x k matrix as a residual on
    the identity and applies it to the input via batched matmul."""

    def __init__(self, k, activation=nn.ReLU()):
        super().__init__()
        # Per-point feature extractor, then (after a max-pool in forward) an
        # MLP head regressing the k*k transform entries.
        self.mlp = SharedMLP([k, 64, 128, 1024], activation=activation)
        self.fc = FC([1024, 512, 256], activation=activation, dropout=0)
        self.ln = nn.Linear(256, k*k)
        self.k = k

    def forward(self, x):
        """
        Args:
            x (torch.Tensor): [B, N, k]

        Returns:
            torch.Tensor: [B, N, k] — input transformed by the predicted matrix.
        """
        T = self.mlp(x)                        # [B, N, 1024]
        T = torch.max(T, 1, keepdim=False)[0]  # [B, 1024] global max over points
        T = self.fc(T)                         # [B, 256]
        T = self.ln(T)                         # [B, k*k]

        # Add the identity so the transform starts near identity. Build it
        # directly on T's device/dtype: the previous `if T.is_cuda: I.cuda()`
        # always targeted the *default* GPU (wrong on e.g. cuda:1) and did not
        # cover non-CUDA accelerators; broadcasting also avoids the old
        # per-batch `repeat` copy.
        I = torch.eye(self.k, device=T.device, dtype=T.dtype)
        T = T.view(-1, self.k, self.k) + I     # identity broadcasts over batch
        x = torch.bmm(x, T)
        return x


class PointNetEncoder(nn.Module):
    """PointNet global-feature encoder: optional learned input transform
    (TNet), a shared MLP over points, then max-pooling to one global vector.

    Args:
        dims: All channel sizes from input to output. The default is now an
            immutable tuple to avoid the shared mutable-default-argument
            pitfall (behaviorally identical to the old list default).
        with_input_trans: When True, learn and apply a TNet on the input.
        activation: Activation module forwarded to the sub-networks.
    """

    def __init__(self, dims=(3, 64, 64, 128, 1024), with_input_trans=True, activation=nn.ReLU()):
        super().__init__()
        self.with_input_trans = with_input_trans
        self.trans_input = None
        if with_input_trans:
            # The transform operates in the input feature space (dims[0]).
            self.trans_input = TNet(dims[0], activation=activation)
        self.sharedmlp = SharedMLP(list(dims), activation=activation)

    def forward(self, x: torch.Tensor):
        """
        Args:
            x (torch.Tensor): [B, N, C_in]

        Returns:
            torch.Tensor: [B, C_out] global feature (max over the N points).
        """
        if self.with_input_trans:
            x = self.trans_input(x)

        x = self.sharedmlp(x)                  # [B, N, C_out]

        x = torch.max(x, 1, keepdim=False)[0]  # max-pool over the point axis

        return x

# Custom activation function.
class MyActivation(torch.nn.Module):
    """Smooth activation: f(x) = x * tanh(softplus(x)) (Mish-style)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Gate x by tanh of softplus(x); f(0) = 0 and f approaches identity
        # for large positive x.
        return x * torch.tanh(F.softplus(x))
