import torch
import math
import torch.nn as nn
import torch.nn.functional as F

def gelu(x):
    """Exact GELU activation: x * Phi(x), with Phi the standard normal CDF.

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results):
        0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    # Phi(x) = 0.5 * (1 + erf(x / sqrt(2)))
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf


class Classifier(nn.Module):
    """Small CNN + MLP classifier for 28x28 single-channel images (e.g. MNIST).

    Two 3x3 conv layers (no padding) shrink the spatial size 28 -> 26 -> 24,
    giving an 8*24*24 feature vector that a 3-layer MLP maps to 10 classes.
    `forward` returns log-probabilities (log_softmax over dim 1).
    """

    def __init__(self):
        super().__init__()
        self.feature = nn.Sequential(
            nn.Conv2d(1, 4, 3),
            nn.BatchNorm2d(4, eps=1e-3),
            nn.GELU(),
            nn.Conv2d(4, 8, 3),
            nn.BatchNorm2d(8, eps=1e-3),
            nn.GELU(),
        )
        self.fc = nn.Sequential(
            nn.Linear(8 * 24 * 24, 1024),
            nn.BatchNorm1d(1024),
            nn.ReLU(True),
            nn.Dropout(p=0.2),
            nn.Linear(1024, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(True),
            nn.Dropout(p=0.2),
            # Final layer outputs raw logits. NOTE: the previous version applied
            # ReLU here, which clamps negative logits to 0 before log_softmax
            # and prevents the model from expressing low class scores — removed.
            nn.Linear(256, 10),
        )
        self.log_softmax = F.log_softmax

    def forward(self, x):
        """Map a batch of flattened or square images to class log-probabilities.

        Args:
            x: tensor reshapeable to (N, 28, 28) — e.g. (N, 784) or (N, 1, 28, 28).

        Returns:
            (N, 10) tensor of log-probabilities.
        """
        # Normalize input layout to (N, 1, 28, 28) for the conv stack.
        x = x.view(-1, 28, 28).unsqueeze(1)
        x = self.feature(x)
        # Flatten conv features: 8 channels * 24 * 24 spatial positions.
        x = x.view(-1, 8 * 24 * 24)
        x = self.fc(x)
        x = self.log_softmax(x, dim=1)

        return x

# class Net(nn.Module):
#     def __init__(self):
#         super(Net,self).__init__()
#         self.features = nn.Sequential(
#             nn.Conv2d(3, 3, 3,  padding=1)
#         )
        
#         self.conv1 = nn.Conv2d(3,6,5)
#         self.pool = nn.MaxPool2d(2,2)
#         self.conv2 = nn.Conv2d(6,16,5)
#         self.fc1 = nn.Linear(16 * 18 * 18,800)
#         self.fc2 = nn.Linear(800,120)
#         self.fc3 = nn.Linear(120,10)
#         self.conv3 = D3_Conv((3, 3, 3))
        
#         self.ca = ChannelAttention(16)
#         self.sq = SpatialAttention()
        
#     def forward(self,x):
#         x = self.conv3(x)
#         x = self.features(x)
#         x = self.sq(x) * x
#         x = self.pool(F.relu(self.conv1(x)))
#         x = self.pool(F.relu(self.conv2(x)))
#         x = x * self.ca(x)
#         x = x.view(-1,16 * 18 * 18)
#         x = F.relu(self.fc1(x))
#         x = F.relu(self.fc2(x))
#         x = self.fc3(x)
        
#         return x
# class Classifier(nn.Module):
#     def __init__(self):
#         super().__init__()
#         # 5 Hidden Layer Network
#         self.fc1 = nn.Linear(28*28, 512)
#         self.fc2 = nn.Linear(512, 256)
#         self.fc3 = nn.Linear(256, 128)
#         self.fc4 = nn.Linear(128, 64)
#         self.fc5 = nn.Linear(64, 10)
        
#         # Dropout module with 0.2 probability
#         self.dropout = nn.Dropout(p=0.2)
#         # Add softmax on output layer
#         self.log_softmax = F.log_softmax
        
#     def forward(self, x):
#         x = self.dropout(F.relu(self.fc1(x)))
#         x = self.dropout(F.relu(self.fc2(x)))
#         x = self.dropout(F.relu(self.fc3(x)))
#         x = self.dropout(F.relu(self.fc4(x)))
        
#         x = self.log_softmax(self.fc5(x), dim=1)
        
#         return x
# class Classifier(nn.Module):
#     def __init__(self):
#         super().__init__()
#         # 5 Hidden Layer Network
#         self.fc1 = nn.Linear(28*28, 512)
#         self.fc2 = nn.Linear(512, 256)
#         self.fc3 = nn.Linear(256, 128)
#         self.fc4 = nn.Linear(128, 64)
#         self.fc5 = nn.Linear(64, 10)
        
#         # Dropout module with 0.2 probability
#         self.dropout = nn.Dropout(p=0.2)
#         # Add softmax on output layer
#         self.log_softmax = F.log_softmax
        
#     def forward(self, x):
#         x = self.fc1(x)
#         x = self.fc2(x)
#         x = self.fc3(x)
#         x = self.fc4(x)
        
#         x = self.log_softmax(self.fc5(x), dim=1)
        
#         return x
