import torch
import torch.nn as nn
import torch.nn.functional as F



class ContrastiveLoss(torch.nn.Module):
    """Contrastive loss (Hadsell, Chopra & LeCun, 2006).

    Pulls together pairs flagged by ``label == 1`` and pushes apart pairs
    with ``label == 0`` until they are at least ``margin`` apart.
    NOTE(review): this label convention is the inverse of the Y used in the
    cited paper (there Y == 1 marks the *dissimilar* pair) — confirm it
    matches the dataset's labels.
    """

    def __init__(self, margin=1.0):
        super(ContrastiveLoss, self).__init__()
        # Minimum embedding distance demanded for a label == 0 pair.
        self.margin = margin

    def forward(self, output1, output2, label):
        # Per-pair Euclidean distance, kept as a column vector.
        dist = F.pairwise_distance(output1, output2, keepdim=True)
        # label == 1: penalize squared distance (pull the pair together).
        similar_term = label * dist.pow(2)
        # label == 0: penalize only when closer than the margin (push apart).
        hinge = torch.clamp(self.margin - dist, min=0.0)
        dissimilar_term = (1 - label) * hinge.pow(2)
        return torch.mean(similar_term + dissimilar_term)


class SiameseNetwork(nn.Module):
    """Twin-branch encoder: both inputs run through one shared CNN + FC head
    and the pair of embeddings is returned for an external distance/loss."""

    def __init__(self):
        super(SiameseNetwork, self).__init__()

        def _stage(in_ch, out_ch):
            # One encoder stage: reflect-pad -> 3x3 conv -> ReLU -> 2x2 pool
            # -> batchnorm -> spatial dropout. Pool halves H and W.
            return [
                nn.ReflectionPad2d(1),
                nn.Conv2d(in_ch, out_ch, kernel_size=3),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(2),
                nn.BatchNorm2d(out_ch),
                nn.Dropout2d(p=.2),
            ]

        # Three stages: 1 -> 64 -> 128 -> 256 channels, each halving H and W.
        self.cnn1 = nn.Sequential(
            *(_stage(1, 64) + _stage(64, 128) + _stage(128, 256)))

        # Embedding head. 25600 = 256 * 10 * 10, which corresponds to 80x80
        # single-channel inputs (presumably — confirm against the data loader).
        self.fc1 = nn.Sequential(
            nn.Linear(25600, 4096),
            nn.Sigmoid())

    def forward_once(self, x):
        """Encode one image batch into a (batch, 4096) embedding."""
        features = self.cnn1(x)
        flat = features.reshape(features.size(0), -1)
        return self.fc1(flat)

    def forward(self, input1, input2):
        """Return the embeddings of both input batches (shared weights)."""
        return self.forward_once(input1), self.forward_once(input2)


class Siamese(nn.Module):
    """Siamese similarity scorer: shared conv encoder, then a small MLP over
    the element-wise |embedding difference| producing a score in (0, 1)."""

    def __init__(self):
        super(Siamese, self).__init__()

        # Shared encoder. Each pooled stage: 3x3 conv (padding keeps size)
        # -> LeakyReLU -> 2x2 pool (halves H and W) -> batchnorm -> dropout.
        stages = [(1, 64), (64, 128), (128, 128)]
        layers = []
        for cin, cout in stages:
            layers += [
                nn.Conv2d(cin, cout, 3, padding=1),
                nn.LeakyReLU(),
                nn.MaxPool2d(2),
                nn.BatchNorm2d(cout),
                nn.Dropout2d(p=.2),
            ]
        # Final unpooled stage: 128 -> 256 channels.
        layers += [
            nn.Conv2d(128, 256, 3, padding=1),
            nn.LeakyReLU(),
        ]
        self.conv = nn.Sequential(*layers)

        # 20480 = 256 * (H/8) * (W/8); e.g. 64x80 inputs give 256*8*10
        # (presumably — confirm against the data loader).
        self.liner = nn.Sequential(
            nn.Linear(20480, 4096),
            nn.Sigmoid())

        # Score head applied to |emb1 - emb2|.
        self.out = nn.Sequential(
            nn.Linear(4096, 500),
            nn.LeakyReLU(),

            nn.Linear(500, 500),
            nn.LeakyReLU(),

            nn.Linear(500, 1),
            nn.Sigmoid()
        )

    def forward_one(self, x):
        """Embed one image batch into a (batch, 4096) vector."""
        feats = self.conv(x)
        return self.liner(feats.reshape(feats.size(0), -1))

    def forward(self, x1, x2):
        """Similarity score in (0, 1) for each pair across the two batches."""
        emb1 = self.forward_one(x1)
        emb2 = self.forward_one(x2)
        return self.out(torch.abs(emb1 - emb2))


class CRNN(nn.Module):
    """CRNN-based pair scorer: a shared CNN + stacked BiLSTM encoder turns
    each image into a flat sequence feature; the score head runs on the
    element-wise |difference| of the two features."""

    #                   32    1   37     256
    def __init__(self, imgH, nc, nclass, nh, n_rnn=2, leakyRelu=False):
        super(CRNN, self).__init__()
        assert imgH % 16 == 0, 'imgH has to be a multiple of 16'

        # Per-conv-layer hyperparameters: kernel, padding, stride, channels.
        ks = [3, 3, 3, 3, 3, 3, 2]
        ps = [1, 1, 1, 1, 1, 1, 0]
        ss = [1, 1, 1, 1, 1, 1, 1]
        nm = [64, 128, 256, 256, 512, 512, 512]

        cnn = nn.Sequential()

        def add_conv(i, with_bn):
            # conv -> (optional batchnorm) -> activation, names matching the
            # conventional CRNN layout (conv{i}/batchnorm{i}/relu{i}).
            in_ch = nc if i == 0 else nm[i - 1]
            cnn.add_module('conv{0}'.format(i),
                           nn.Conv2d(in_ch, nm[i], ks[i], ss[i], ps[i]))
            if with_bn:
                cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nm[i]))
            if leakyRelu:
                cnn.add_module('relu{0}'.format(i),
                               nn.LeakyReLU(0.2, inplace=True))
            else:
                cnn.add_module('relu{0}'.format(i), nn.ReLU(True))

        # conv index -> pooling index; the first two pools are square (2,2),
        # the last two pool height only (stride (2,1)) to keep width long.
        pool_after = {0: 0, 1: 1, 3: 2, 5: 3}
        for i in range(7):
            add_conv(i, with_bn=(i in (2, 4, 6)))
            if i in pool_after:
                j = pool_after[i]
                if j < 2:
                    cnn.add_module('pooling{0}'.format(j), nn.MaxPool2d(2, 2))
                else:
                    cnn.add_module('pooling{0}'.format(j),
                                   nn.MaxPool2d((2, 2), (2, 1), (0, 1)))

        self.cnn = cnn
        # Two stacked bidirectional LSTMs: 512 -> nh -> nclass per time step.
        self.rnn = nn.Sequential(
            BidirectionalLSTM(512, nh, nh),
            BidirectionalLSTM(nh, nh, nclass))

        # Score head over |feat1 - feat2|. 20992 is the flattened w * nclass
        # for the expected input size (presumably — confirm with the loader).
        self.out = nn.Sequential(
            nn.Linear(20992, 1024),
            nn.LeakyReLU(),
            nn.BatchNorm1d(1024),

            nn.Linear(1024, 100),
            nn.LeakyReLU(),

            nn.Linear(100, 1),
            nn.Sigmoid(),
        )

    def _encode(self, img):
        """Shared branch: CNN -> squeeze height -> BiLSTM stack -> flat feature."""
        conv = self.cnn(img)
        _, _, h, _ = conv.size()
        assert h == 1, "the height of conv must be 1"
        # [b, c, 1, w] -> [w, b, c]: sequence-first layout for the LSTMs.
        seq = conv.squeeze(2).permute(2, 0, 1)
        rnn_out = self.rnn(seq).permute(1, 0, 2).contiguous()  # [b, w, nclass]
        return rnn_out.view(rnn_out.size(0), -1)

    def forward(self, input1, input2):
        """Score in (0, 1) for each image pair across the two batches."""
        feat1 = self._encode(input1)
        feat2 = self._encode(input2)
        return self.out(torch.abs(feat1 - feat2))



class BidirectionalLSTM(nn.Module):
    """Bidirectional LSTM over a [T, b, nIn] sequence, with each time step
    linearly projected down to nOut features."""

    def __init__(self, nIn, nHidden, nOut):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
        # Both directions are concatenated, hence 2 * nHidden input features.
        self.embedding = nn.Linear(nHidden * 2, nOut)

    def forward(self, input):
        """Map [T, b, nIn] -> [T, b, nOut]."""
        recurrent, _ = self.rnn(input)
        seq_len, batch, feat = recurrent.size()
        # Fold time and batch together so one Linear call projects every step.
        projected = self.embedding(recurrent.view(seq_len * batch, feat))
        return projected.view(seq_len, batch, -1)


# for test
if __name__ == '__main__':
    # Manual smoke check: build the pair-scoring network and dump its layout
    # and parameter tensors to stdout.
    model = Siamese()
    print(model)
    print(list(model.parameters()))