import torch
import torchvision
import torch.nn.functional as F


def _make_transition_layer(in_channels: list, out_channels: list):
    layers_i = []
    for i in range(len(in_channels)):
        layers_j = []
        for j in range(len(out_channels[:-1])):
            if i == j:
                if in_channels[i] == out_channels[j]:
                    layers_j.append(None)
                else:
                    layers_j.append(torch.nn.Sequential(
                        torch.nn.Conv2d(in_channels[i], out_channels[j], (3, 3), stride=(1, 1), padding=(1, 1),
                                        bias=False), torch.nn.BatchNorm2d(out_channels[j]),
                        torch.nn.ReLU(inplace=True)))
            elif i < j:

                down_sample_layers = []
                for z in range(j - i - 1):
                    down_sample_layers.append(
                        torch.nn.Sequential(
                            torch.nn.Conv2d(in_channels[i], in_channels[i], kernel_size=(3, 3), stride=(2, 2),
                                            padding=(1, 1)),
                            torch.nn.BatchNorm2d(in_channels[i]),
                            torch.nn.ReLU()
                        ))

                down_sample_layers.append(torch.nn.Sequential(
                    torch.nn.Conv2d(in_channels[i], out_channels[j], kernel_size=(3, 3), stride=(2, 2),
                                    padding=(1, 1)),
                    torch.nn.BatchNorm2d(out_channels[j]),

                ))

                layers_j.append(torch.nn.Sequential(*down_sample_layers))
            else:
                layers_j.append(torch.nn.Sequential(
                    torch.nn.Conv2d(in_channels[i], out_channels[j], kernel_size=(1, 1), bias=False),
                    torch.nn.BatchNorm2d(out_channels[j]),
                    torch.nn.Upsample(scale_factor=2 ** (i - j), mode='nearest')
                ))
        layers_i.append(torch.nn.ModuleList(layers_j))

    layers_i.append(torch.nn.Sequential(
        torch.nn.Conv2d(in_channels[-1], out_channels[-1], kernel_size=(3, 3), stride=(2, 2), padding=(1, 1),
                        bias=False),
        torch.nn.BatchNorm2d(out_channels[-1]),
        torch.nn.ReLU(inplace=True)
    ))

    return torch.nn.ModuleList(layers_i)


def _make_layers(block, in_channels, out_channels, num_branches, num_modules, num_blocks=4):
    modules = []
    for m in range(num_modules):
        branches = []
        for b in range(num_branches):
            layers = [block(in_channels[b], out_channels[b])]
            for i in range(num_blocks - 1):
                layers.append(block(out_channels[b], out_channels[b]))
            branches.append(torch.nn.Sequential(*layers))
        modules.append(torch.nn.ModuleList(branches))
    return torch.nn.ModuleList(modules)


class HRNet(torch.nn.Module):
    """High-Resolution Network (HRNet) backbone.

    Maintains parallel branches at different resolutions (strides 4, 8, 16
    and 32 relative to the input) and fuses information between them after
    each stage. ``forward`` returns one feature map per branch, highest
    resolution first:
        [(N, 64, H/4, W/4), (N, 128, H/8, W/8),
         (N, 256, H/16, W/16), (N, 512, H/32, W/32)]
    """

    def __init__(self):
        super().__init__()
        # Stem: two stride-2 3x3 convs -> overall stride 4, 64 channels.
        self.conv1 = torch.nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        self.bn1 = torch.nn.BatchNorm2d(64)
        self.conv2 = torch.nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        self.bn2 = torch.nn.BatchNorm2d(64)
        self.relu = torch.nn.ReLU(inplace=True)
        # Stage 1: a single branch of BottleNeck blocks (64 -> 256 channels).
        self.stage1 = _make_layers(self.BottleNeck, in_channels=[64], out_channels=[64 * 4], num_branches=1,
                                   num_modules=1)  # bottleneck
        self.transition1 = _make_transition_layer(in_channels=[256],
                                                  out_channels=[64, 128])  # type: torch.nn.ModuleList

        self.stage2 = _make_layers(self.BasicBlock, in_channels=[64, 128], out_channels=[64, 128], num_branches=2,
                                   num_modules=1)
        self.transition2 = _make_transition_layer(in_channels=[64, 128], out_channels=[64, 128, 256])

        self.stage3 = _make_layers(self.BasicBlock, in_channels=[64, 128, 256], out_channels=[64, 128, 256],
                                   num_branches=3, num_modules=4)
        self.transition3 = _make_transition_layer(in_channels=[64, 128, 256], out_channels=[64, 128, 256, 512])

        self.stage4 = _make_layers(self.BasicBlock, in_channels=[64, 128, 256, 512], out_channels=[64, 128, 256, 512],
                                   num_branches=4, num_modules=3)

    def forward(self, x):
        # Stem. Bug fix: the original chained conv2 directly onto bn1's
        # output with no activation in between; HRNet applies ReLU after
        # each stem BatchNorm (matching the BN -> ReLU pattern used
        # everywhere else in this file).
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.bn2(self.conv2(x)))
        x_list = [x]

        stages = [self.stage1, self.stage2, self.stage3, self.stage4]
        transitions = [self.transition1, self.transition2, self.transition3, None]

        for s, t in zip(stages, transitions):
            # Run every module of the stage; branch i processes x_list[i].
            for m in s:  # type: torch.nn.ModuleList
                for i, b in enumerate(m):
                    x_list[i] = b(x_list[i])

            if t is None:  # final stage has no transition/fusion afterwards
                continue

            # Fuse: each output branch j sums contributions from every input
            # branch i; a None entry means identity (same resolution/width).
            new_x_list = [0] * len(x_list)

            for i, t1 in enumerate(t[:-1]):  # type t1:torch.nn.ModuleList
                for j, tt1 in enumerate(t1):

                    if tt1 is None:
                        new_x_list[j] += x_list[i]
                    else:
                        new_x_list[j] += tt1(x_list[i])
            # The trailing transition entry spawns the new, lower-resolution branch.
            new_x_list.append(t[-1](x_list[-1]))

            x_list = new_x_list
        return x_list

    class BasicBlock(torch.nn.Module):
        """Residual block with two 3x3 convs and an identity skip.

        The skip path has no projection, so callers must pass
        in_channels == out_channels.
        """

        def __init__(self, in_channels, out_channels):  # in_channels === out_channels
            super().__init__()
            self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=(3, 3), padding=(1, 1),
                                         bias=False)
            self.bn1 = torch.nn.BatchNorm2d(out_channels)
            self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1),
                                         bias=False)
            self.bn2 = torch.nn.BatchNorm2d(out_channels)

        def forward(self, x):
            residual = x
            out = F.relu(self.bn1(self.conv1(x)), inplace=True)
            # Bug fix: the original applied self.conv2 twice
            # (out = self.conv2(out); out = self.bn2(self.conv2(out))),
            # effectively computing bn2(conv2(conv2(out))).
            out = self.bn2(self.conv2(out))

            out += residual
            out = F.relu(out)

            return out

    class BottleNeck(torch.nn.Module):
        """Residual bottleneck: 1x1 -> 3x3 -> 1x1 convs, each with BN.

        A 1x1 projection on the skip path is created only when the channel
        count changes; otherwise the input is added back directly.
        """

        def __init__(self, in_channels, out_channels):
            super().__init__()
            self.conv1 = torch.nn.Conv2d(in_channels, in_channels, kernel_size=(1, 1), bias=False)
            self.bn1 = torch.nn.BatchNorm2d(in_channels)
            self.conv2 = torch.nn.Conv2d(in_channels, in_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1),
                                         bias=False)
            self.bn2 = torch.nn.BatchNorm2d(in_channels)
            self.conv3 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=(1, 1), bias=False)

            self.bn3 = torch.nn.BatchNorm2d(out_channels)

            self.relu = torch.nn.ReLU(inplace=True)

            # Projection for the skip path, only needed when widths differ.
            self.residual_layer = torch.nn.Sequential(
                torch.nn.Conv2d(in_channels, out_channels, stride=(1, 1), kernel_size=(1, 1),
                                bias=False),
                torch.nn.BatchNorm2d(out_channels)) if in_channels != out_channels else None

        def forward(self, x):
            # Explicit `is not None`: Sequential truthiness goes through
            # __len__, which would be fragile if the projection were empty.
            residual = self.residual_layer(x) if self.residual_layer is not None else x

            out = self.conv1(x)
            out = self.bn1(out)
            out = self.relu(out)

            out = self.conv2(out)
            out = self.bn2(out)
            out = self.relu(out)

            out = self.conv3(out)
            out = self.bn3(out)

            out += residual
            out = self.relu(out)

            return out
