import torch
import torch.nn as nn
import math
from torch.autograd import Variable

class TCN_Pair2(nn.Module):
    """Two-branch CNN that embeds a pair of inputs and emits 2-class logits.

    Each branch applies Conv2d(1 -> 1, kernel (5, 1)) -> ReLU ->
    AvgPool2d((3, 1), stride 1), so a (N, 1, 12, 202) input becomes
    (N, 1, 6, 202).  The two branch outputs are concatenated along the
    channel axis, flattened, and passed through two linear layers
    (no nonlinearity between them, as in the original design).
    """

    def __init__(self):
        super(TCN_Pair2, self).__init__()

        # Branch for the first input of the pair.
        self.cnn1 = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(5, 1))
        self.relu1 = nn.ReLU()
        self.pool1 = nn.AvgPool2d(kernel_size=(3, 1), stride=(1, 1))

        # Branch for the second input (its own, independent weights).
        self.cnn2 = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(5, 1))
        self.relu2 = nn.ReLU()
        self.pool2 = nn.AvgPool2d(kernel_size=(3, 1), stride=(1, 1))

        # Classifier head: 2 channels x 6 x 202 flattened features -> 2 logits.
        self.fc1 = nn.Linear(2 * 6 * 202, 500)
        self.fc2 = nn.Linear(500, 2)

        self.init_weight()

    def forward(self, x1, x2):
        batch = x1.size(0)

        # Run each input through its dedicated branch.
        left = self.pool1(self.relu1(self.cnn1(x1)))
        right = self.pool2(self.relu2(self.cnn2(x2)))

        # Merge on the channel dimension, flatten, then classify.
        merged = torch.cat([left, right], dim=1).view(batch, -1)
        return self.fc2(self.fc1(merged))

    def init_weight(self):
        # He-style normal init on conv weights only
        # (fan_out = kh * kw * out_channels); linear layers keep their default init.
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                fan_out = mod.kernel_size[0] * mod.kernel_size[1] * mod.out_channels
                mod.weight.data.normal_(0, math.sqrt(2. / fan_out))

class TCN_Class2(nn.Module):
    """MLP head: a flat 202*202 input -> 2000 hidden units -> 201 logits.

    There is no nonlinearity between the two linear layers (kept from the
    original design).
    """

    def __init__(self):
        super(TCN_Class2, self).__init__()
        self.fc1 = nn.Linear(202 * 202, 2000)
        self.fc2 = nn.Linear(2000, 201)
        # Apply the custom init on construction, matching TCN_Pair2's pattern
        # (the original defined init_weight but never called it).
        self.init_weight()

    def forward(self, x):
        # x: (batch, 202*202) flattened features -> (batch, 201) logits.
        x = self.fc1(x)
        x = self.fc2(x)
        return x

    def init_weight(self):
        """He-style normal init for Conv2d and Linear weights.

        Bug fix: the original read ``m.kernel_size`` for every matched
        module, but ``nn.Linear`` has no ``kernel_size`` attribute, so the
        method raised AttributeError whenever it was called.  Linear layers
        now use their fan-out (``out_features``) for the scale instead.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.Linear):
                n = m.out_features
                m.weight.data.normal_(0, math.sqrt(2. / n))


if __name__ == '__main__':

    # Smoke test: run the pair network on one dummy batch and print the
    # resulting output size (expected: torch.Size([512, 2])).
    model = TCN_Pair2()

    # torch.autograd.Variable has been a deprecated no-op since PyTorch 0.4;
    # plain tensors are fed to the model directly.
    x = torch.randn((512, 1, 12, 202))

    with torch.no_grad():  # inference only — skip autograd bookkeeping
        o = model(x, x)

    print(o.size())
