import torch
import torch.nn as nn
import torch.nn.functional as F

def conv_2(in_c, out_c):
    """Return a 3x3 convolution with padding 1 (preserves spatial size at stride 1)."""
    return nn.Conv2d(
        in_channels=in_c,
        out_channels=out_c,
        kernel_size=3,
        padding=1,
    )

def models(in_channels):
    """Build the VGG-style backbone as a flat list of modules.

    Each integer entry in the config is a 3x3/pad-1 conv (followed by an
    in-place ReLU); "M" is a 2x2/stride-2 max-pool and "C" the same pool
    with ceil_mode=True (keeps the odd-sized feature maps from shrinking
    by an extra pixel).

    Args:
        in_channels: number of channels of the input image/tensor.

    Returns:
        list[nn.Module]: the layers in order (not wrapped in nn.Sequential,
        so the caller can split or re-wrap them).
    """
    cfg = [
        64, 64, "M",
        128, 128, "M",
        256, 256, 256, "C",
        512, 512, 512, "M",
        512, 512, 512, "C",
        1024, 1024, 1024, "M",
        512, 512, 512, "C",
        512, 512, 512,
    ]

    layers = []
    prev = in_channels
    for item in cfg:
        if item == "M":
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        elif item == "C":
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
        else:
            layers.append(nn.Conv2d(prev, item, kernel_size=3, padding=1))
            layers.append(nn.ReLU(inplace=True))
            prev = item
    return layers