import torch
import torch.nn as nn


def build_cnn(channel_num, kernel_size, stride_length, padding, padding_mode='zeros'):
    """Build a Conv2d -> BatchNorm2d -> ReLU stack as an ``nn.Sequential``.

    Args:
        channel_num: Channel sizes; consecutive pairs define one conv layer,
            so ``len(channel_num) - 1`` conv blocks are produced.
        kernel_size: Kernel size shared by every conv layer.
        stride_length: Stride shared by every conv layer.
        padding: Padding shared by every conv layer.
        padding_mode: Conv2d padding mode (default ``'zeros'``).

    Returns:
        nn.Sequential of the stacked conv blocks.
    """
    blocks = []
    # Walk consecutive (in, out) channel pairs; each pair yields one conv block.
    for in_ch, out_ch in zip(channel_num[:-1], channel_num[1:]):
        blocks.extend([
            nn.Conv2d(in_ch, out_ch, kernel_size, stride_length, padding,
                      padding_mode=padding_mode),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(),
        ])
    return nn.Sequential(*blocks)


def build_fcnn(fcnn_size):
    """Build a fully-connected head as an ``nn.Sequential``.

    Every hidden layer is ``Linear -> BatchNorm1d -> ReLU``; the final layer
    is ``Linear -> BatchNorm1d -> Tanh`` so outputs land in ``(-1, 1)``.

    Args:
        fcnn_size: Layer widths; consecutive pairs define one linear layer.
            Must contain at least two entries.

    Returns:
        nn.Sequential of the stacked linear blocks.
    """
    layers = []
    # Hidden layers: all consecutive pairs except the last one.
    for in_f, out_f in zip(fcnn_size[:-2], fcnn_size[1:-1]):
        layers += [nn.Linear(in_f, out_f), nn.BatchNorm1d(out_f), nn.ReLU()]
    # Output layer uses Tanh instead of ReLU.
    layers += [
        nn.Linear(fcnn_size[-2], fcnn_size[-1]),
        nn.BatchNorm1d(fcnn_size[-1]),
        nn.Tanh(),
    ]
    return nn.Sequential(*layers)


class CnnSim(nn.Module):
    """Two-branch CNN followed by a fully-connected head.

    Branch ``a`` uses 3x3 convolutions (padding 1, stride 1) so it preserves
    spatial size; branch ``b`` uses 1x1 convolutions with padding 1, so each
    of its 5 layers grows the spatial extent by 2. The flattened outputs of
    both branches are concatenated and fed to the FC head.

    NOTE(review): the hard-coded flatten sizes (128*5*5 and 64*15*15) imply a
    5x5 single-channel input — confirm against the caller.
    """

    def __init__(self):
        super().__init__()

        # Branch a: 3x3 convs, spatial-size preserving.
        self.channel_num_a = [1, 16, 32, 64, 128, 128]
        self.kernel_size_a = 3
        self.stride_length_a = 1
        self.padding_a = 1
        self.padding_mode_a = 'zeros'
        self.cnn_a = build_cnn(
            self.channel_num_a,
            self.kernel_size_a,
            self.stride_length_a,
            self.padding_a,
            self.padding_mode_a,
        )

        # Branch b: 1x1 convs with padding 1, each layer grows H and W by 2.
        self.channel_num_b = [1, 16, 32, 32, 64, 64]
        self.kernel_size_b = 1
        self.stride_length_b = 1
        self.padding_b = 1
        self.padding_mode_b = 'zeros'
        self.cnn_b = build_cnn(
            self.channel_num_b,
            self.kernel_size_b,
            self.stride_length_b,
            self.padding_b,
            self.padding_mode_b,
        )

        # FC head consumes both flattened branch outputs concatenated.
        self.fcnn_size = [128 * 5 * 5 + 64 * 15 * 15, 4096, 4096, 2048, 2048, 1208]
        self.fcnn = build_fcnn(self.fcnn_size)

    def forward(self, x):
        """Run both conv branches on ``x``, fuse, and return the FC output."""
        feat_a = self.cnn_a(x).view(-1, 128 * 5 * 5)
        feat_b = self.cnn_b(x).view(-1, 64 * 15 * 15)
        fused = torch.cat((feat_a, feat_b), dim=1)
        return self.fcnn(fused)
