import torch
from torch import nn
import math

class Star(nn.Module):
    """Star operation: element-wise product of two independent linear
    projections of the same input, i.e. ``Linear1(x) * Linear2(x)``.

    Args:
        input_dim: size of the last dimension of the input.
        output_dim: size of the last dimension of the output.
        precision: dtype for the linear layers' parameters.
        require_bias: whether each linear layer carries a bias term.
    """

    def __init__(self, input_dim, output_dim, precision=torch.float32, require_bias=True):
        super(Star, self).__init__()
        # Two parallel projections sharing the same input/output shape.
        # Attribute names are kept as Linear1/Linear2 so state_dict keys
        # stay compatible with existing checkpoints.
        self.Linear1 = nn.Linear(input_dim, output_dim, bias=require_bias, dtype=precision)
        self.Linear2 = nn.Linear(input_dim, output_dim, bias=require_bias, dtype=precision)

    def forward(self, x):
        """Return the Hadamard product of the two projections of ``x``."""
        return self.Linear1(x) * self.Linear2(x)

class NaiveFFN(nn.Module):
    """Standard two-layer feed-forward block:
    ``Linear -> activation -> dropout -> Linear``.

    Args:
        input_dim: size of the last dimension of the input.
        hidden_dim: inner width; defaults to ``4 * input_dim`` (transformer convention).
        output_dim: output width; defaults to ``input_dim``.
        dropout: dropout probability applied after the activation.
        precision: dtype for the linear layers' parameters.
        require_bias: whether the linear layers carry bias terms.
        activate: activation module; defaults to ``nn.ReLU()``.
    """

    def __init__(self, input_dim, hidden_dim=None, output_dim=None, dropout=0.1, precision=torch.float32, require_bias=True, activate=None):
        super(NaiveFFN, self).__init__()
        # Resolve defaults before building layers.
        hidden_dim = input_dim * 4 if hidden_dim is None else hidden_dim
        output_dim = input_dim if output_dim is None else output_dim

        self.linear1 = nn.Linear(input_dim, hidden_dim, bias=require_bias, dtype=precision)
        self.linear2 = nn.Linear(hidden_dim, output_dim, bias=require_bias, dtype=precision)
        self.dropout = nn.Dropout(dropout)
        self.activate = nn.ReLU() if activate is None else activate

        # Kept for introspection by callers.
        self.precision = precision
        self.output_dim = output_dim

    def forward(self, x):
        """Apply linear1 -> activation -> dropout -> linear2."""
        return self.linear2(self.dropout(self.activate(self.linear1(x))))
    
class StarFFN(NaiveFFN):
    """FFN whose two linear layers are replaced with Star layers
    (element-wise products of two linear projections).

    Reuses NaiveFFN's forward (linear1 -> activation -> dropout -> linear2);
    only the layer construction differs.

    Args:
        input_dim: size of the last dimension of the input.
        hidden_dim: inner width; defaults to ``ceil(sqrt(4 * input_dim))`` —
            smaller than NaiveFFN's ``4 * input_dim`` since each Star layer
            holds two weight matrices.
        output_dim: output width; defaults to ``input_dim``.
        dropout: dropout probability applied after the activation.
        precision: dtype for the Star layers' parameters.
        require_bias: whether the underlying linear layers carry biases.
        activate: activation module; defaults to ``nn.ReLU()``.
    """

    def __init__(self, input_dim, hidden_dim=None, output_dim=None, dropout=0.1, precision=torch.float32, require_bias=True, activate=None):
        # Parent resolves dropout/activation/precision bookkeeping; its
        # linear1/linear2 are overwritten with Star layers below.
        super(StarFFN, self).__init__(input_dim, hidden_dim, output_dim, dropout, precision, require_bias, activate)
        if hidden_dim is None:
            hidden_dim = math.ceil(math.sqrt(4 * input_dim))
        # BUG FIX: was `if output_dim is not None`, which clobbered a
        # caller-supplied output_dim with input_dim and left the default
        # (None) unresolved, crashing nn.Linear inside Star.
        if output_dim is None:
            output_dim = input_dim
        self.linear1 = Star(input_dim, hidden_dim, precision, require_bias)
        self.linear2 = Star(hidden_dim, output_dim, precision, require_bias)