from SpatialTransformer import SpatialTransformer
import os 
import torch 
import torch.nn as nn 
import torch.nn.functional as F 

class BaseSVHNet(nn.Module):
    """
    Baseline CNN classifier for SVHN (no spatial transformer).

    Architecture: three 32-channel conv layers (ReLU), with 2x2 max-pooling
    after the first two convs, followed by two fully-connected layers.
    The flattened feature size of 32*4*4 assumes the input spatial size and
    kernel size combine to leave a 4x4 feature map after conv3 (e.g. 32x32
    input with kernel_size=5).

    Args:
        in_channles: number of input image channels (name kept as-is,
            including the typo, for backward compatibility with callers).
        kernel_size: kernel size used by all three conv layers.
        num_classes: number of output classes (default 10 for SVHN digits).
        use_dropout: if True, apply dropout (p=self.drop_prob) after fc1.
    """
    def __init__(self,in_channles,kernel_size,num_classes=10,use_dropout=False):
        super(BaseSVHNet,self).__init__()
        self._in_ch = in_channles
        self._ksize = kernel_size
        self.ncls = num_classes
        self.dropout = use_dropout
        self.drop_prob = 0.5
        self.stride = 1

        # BUG FIX: original passed stride=-self.stride (negative stride),
        # which nn.Conv2d rejects at construction time.
        self.conv1 = nn.Conv2d(self._in_ch,32,kernel_size=self._ksize,stride=self.stride,padding=1,bias=False)
        self.conv2 = nn.Conv2d(32,32,kernel_size=self._ksize,stride=1,padding=1,bias=False)
        self.conv3 = nn.Conv2d(32,32,kernel_size=self._ksize,stride=1,padding=1,bias=False)

        self.fc1 = nn.Linear(32*4*4,1024)
        self.fc2 = nn.Linear(1024,self.ncls)

    def forward(self, x):
        """Return class logits of shape (batch, num_classes)."""
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x,2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x,2)
        x = F.relu(self.conv3(x))

        # Flatten to the fixed feature size expected by fc1.
        x = x.view(-1,32*4*4)
        if self.dropout:
            # BUG FIX: functional dropout must be gated on self.training,
            # otherwise it stays active in eval mode; also honor drop_prob.
            x = F.dropout(self.fc1(x),p=self.drop_prob,training=self.training)
        else:
            x = self.fc1(x)
        x = self.fc2(x)
        return x


class STNSVHNet(nn.Module):
    """
    SVHN classifier with a SpatialTransformer front-end.

    The input is first warped by the spatial transformer module, then passed
    through three conv layers (32/64/128 channels, ReLU), with 2x2
    max-pooling after the first two convs, and two fully-connected layers.
    The flattened size 128*4*4 assumes the transformer output spatial size
    and kernel size leave a 4x4 map after conv3.

    Args:
        spatial_dim: spatial size given to the SpatialTransformer module.
        in_channels: number of input image channels.
        stn_kernel_size: kernel size used inside the spatial transformer.
        kernel_size: kernel size for the three conv layers here.
        num_classes: number of output classes (default 10).
        use_dropout: if True, apply dropout (p=self.drop_prob) after fc1.
    """
    def __init__(self,spatial_dim,in_channels,stn_kernel_size,kernel_size,num_classes=10,use_dropout=False):
        super(STNSVHNet,self).__init__()
        self._in_ch = in_channels
        self._ksize = kernel_size
        self._sksize = stn_kernel_size
        self.ncls = num_classes
        self.dropout = use_dropout
        self.drop_prob = 0.5
        self.stride = 1
        self.spatial_dim = spatial_dim

        self.stnmod = SpatialTransformer(self._in_ch,self.spatial_dim,self._sksize)
        self.conv1 = nn.Conv2d(self._in_ch, 32, kernel_size=self._ksize, stride=self.stride, padding=1, bias=False)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=self._ksize, stride=1, padding=1, bias=False)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=self._ksize, stride=1, padding=1, bias=False)

        # NOTE(review): 3092 looks like a possible typo for 3072 (= 3*1024),
        # but it is internally consistent (fc1 out == fc2 in), so it is kept.
        self.fc1 = nn.Linear(128*4*4, 3092)
        self.fc2 = nn.Linear(3092, self.ncls)

    def forward(self,x):
        """Return class logits of shape (batch, num_classes).

        The spatial transformer returns the warped input (rois) and the
        affine sampling grid; only the warped input feeds the classifier.
        """
        rois,affine_grid = self.stnmod(x)
        out = F.relu(self.conv1(rois))
        out = F.max_pool2d(out,2)
        out = F.relu(self.conv2(out))
        out = F.max_pool2d(out,2)
        out = F.relu(self.conv3(out))
        out = out.view(-1,128*4*4)
        if self.dropout:
            # BUG FIX: functional dropout must be gated on self.training,
            # otherwise it stays active in eval mode; also honor drop_prob.
            out = F.dropout(self.fc1(out),p=self.drop_prob,training=self.training)
        else:
            out = self.fc1(out)
        out = self.fc2(out)

        return out
        