import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary


"""
num_blocks, num_channels are for each branch
"""
cfg_hrnet8 = {
    'input_chn': 8,
    'stage1': {
        'num_branches': 1,
        'num_blocks': [1],
        'num_channels': [16],
        'block': 'BOTTLENECK',
        'expansion': 2,
    },
    'stage2': {
        'num_branches': 2,
        'num_blocks': [2, 2],
        'num_channels': [8, 11],
        'block': 'BASIC',
        'expansion': 1,
    },
    'stage3': {
        'num_branches': 3,
        'num_blocks': [2, 2, 2],
        'num_channels': [8, 11, 16],
        'block': 'BASIC',
        'expansion': 1,
    },
    'stage4': {
        'num_branches': 4,
        'num_blocks': [2, 2, 2, 2],
        'num_channels': [8, 11, 16, 22],
        'block': 'BASIC',
        'expansion': 1,
    },
}


def conv3x3(n_channel_in, n_channel_out, stride=1):
    """Return a bias-free 3x3 Conv2d with padding 1 (resolution-preserving at stride 1)."""
    return nn.Conv2d(
        n_channel_in,
        n_channel_out,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )


def conv1x1(n_channel_in, n_channel_out, stride=1):
    """Return a bias-free 1x1 Conv2d (pure channel projection, no padding)."""
    return nn.Conv2d(
        n_channel_in,
        n_channel_out,
        kernel_size=1,
        stride=stride,
        padding=0,
        bias=False,
    )


def conv7x1(n_channel_in, n_channel_out, stride=1):
    """Return a bias-free (7, 1) Conv2d with padding (3, 0).

    Intended for column-shaped inputs of width 1: the kernel slides only
    along the height (signal) axis.
    """
    return nn.Conv2d(
        n_channel_in,
        n_channel_out,
        kernel_size=(7, 1),
        stride=stride,
        padding=(3, 0),
        bias=False,
    )


class BasicBlock(nn.Module):
    """Two stacked conv7x1 layers: conv -> BN -> conv -> GELU(tanh).

    Unlike a ResNet basic block there is no residual shortcut, and the
    second convolution is deliberately not batch-normalized.
    """

    def __init__(self, n_channel_in, n_channel_out, stride=1):
        super().__init__()
        self.conv1 = conv7x1(n_channel_in, n_channel_out, stride)
        self.bn1 = nn.BatchNorm2d(n_channel_out)
        self.conv2 = conv7x1(n_channel_out, n_channel_out)
        self.act = nn.GELU('tanh')

    def forward(self, x):
        # single fused expression: conv1 -> bn1 -> conv2 -> activation
        return self.act(self.conv2(self.bn1(self.conv1(x))))


class Bottleneck(nn.Module):
    """Three conv7x1 layers in a bottleneck shape (narrow -> narrow -> wide).

    `expansion[1]` sets the width reduction factor; `expansion[0]` is
    accepted but unused.  There is no residual shortcut -- the original
    author left a ResNet-style identity path as an open question.
    """

    def __init__(self, n_channel_out, expansion=(4, 4)):
        super().__init__()
        width = int(n_channel_out / expansion[1])
        # reconstruct the (rounded) input channel count from the width
        n_channel_in = int(width * expansion[1])
        self.conv1 = conv7x1(n_channel_in, width)
        self.bn1 = nn.BatchNorm2d(width)
        self.conv2 = conv7x1(width, width)
        self.bn2 = nn.BatchNorm2d(width)
        self.conv3 = conv7x1(width, n_channel_out)
        self.act = nn.GELU('tanh')

    def forward(self, x):
        out = self.bn1(self.conv1(x))
        out = self.bn2(self.conv2(out))
        # final conv has no BN; only the last layer is activated
        return self.act(self.conv3(out))


class Transitioner(nn.Module):
    """Fuses the branch outputs of one stage into the inputs of the next.

    For target branch ``i`` and source branch ``j``:

    * ``i > j``  -- downsample ``j`` with one strided conv7x1 + BN per
      resolution step, followed by a single GELU;
    * ``i == j`` -- pass through (a 1x1 conv is inserted only when the
      previous stage expanded its channels, i.e. after the bottleneck stage);
    * ``i < j``  -- 1x1 conv to match channels here, bilinear upsampling
      happens in :meth:`forward`.

    The per-source results are summed to form branch ``i``'s input.
    """

    def __init__(self, cfg_stage_prev, cfg_stage_now):
        super().__init__()
        self.n_branch_prev = cfg_stage_prev['num_branches']
        self.n_branch_now = cfg_stage_now['num_branches']
        # fixed typo: was `expansino_prev` (attribute only used internally)
        self.expansion_prev = cfg_stage_prev['expansion']
        self.n_chn_prev = cfg_stage_prev['num_channels']
        self.n_chn_now = cfg_stage_now['num_channels']
        # actual channel counts leaving the previous stage include expansion
        self.n_chn_prev = [c * self.expansion_prev for c in self.n_chn_prev]
        # trans_layers[i][j] maps previous-stage branch j to new branch i
        self.trans_layers = []
        for i in range(self.n_branch_now):
            trans_layer = []
            for j in range(self.n_branch_prev):
                if i > j:  # target branch is coarser: downsample step by step
                    convs = []
                    for k in range(i-j):  # one strided conv per level skipped
                        # NOTE(review): the chain lines up only because each
                        # stage's channel list extends the previous one's
                        # prefix (n_chn_prev[j+k+1] == n_chn_now[j+k+1] when
                        # expansion is 1) -- confirm for new configs
                        convs.append(conv7x1(
                            self.n_chn_prev[j+k], self.n_chn_now[j+k+1], 4))
                        convs.append(nn.BatchNorm2d(self.n_chn_now[j+k+1]))
                    convs.append(nn.GELU('tanh'))
                    trans_layer.append(nn.Sequential(*convs))
                elif i == j:  # same resolution: copy
                    if self.expansion_prev > 1:  # channel count must change
                        trans_layer.append(
                            conv1x1(self.n_chn_prev[j], self.n_chn_now[i]))
                    else:
                        trans_layer.append(None)
                else:  # finer target: adjust channels only (upsample later)
                    trans_layer.append(nn.Sequential(
                        conv1x1(self.n_chn_prev[j], self.n_chn_now[i]),
                        nn.BatchNorm2d(self.n_chn_now[i]),
                        nn.GELU('tanh')
                    ))
            self.trans_layers.append(nn.ModuleList(trans_layer))
        self.trans_layers = nn.ModuleList(self.trans_layers)

    def forward(self, x):
        """Map a list of previous-stage tensors to a list for the new stage.

        Args:
            x: list of tensors, one per previous-stage branch.

        Returns:
            list of ``n_branch_now`` tensors, each the sum over all sources.
        """
        x_trans = []
        for i in range(self.n_branch_now):
            x_tran = None  # running sum over source branches
            for j in range(self.n_branch_prev):
                if i == j:  # identity (1x1 channel fix only after stage1)
                    result = x[j] if self.trans_layers[i][j] is None else \
                        self.trans_layers[i][j](x[j])
                elif i > j:  # strided convs reduce resolution
                    result = self.trans_layers[i][j](x[j])
                else:  # interpolate up to branch i's own resolution
                    width = x[i].shape[-1]
                    height = x[i].shape[-2]
                    result = F.interpolate(
                        self.trans_layers[i][j](x[j]),
                        size=[height, width],
                        mode='bilinear',
                        align_corners=True
                    )
                if x_tran is None:
                    x_tran = result
                else:
                    x_tran = x_tran + result
            x_trans.append(x_tran)
        return x_trans


class Model(nn.Module):
    """HRNet-style network for 1-D signals.

    A (batch, 3, length) input is lifted to (batch, 3, length, 1) so that
    2-D convolutions with (7, 1) kernels slide along the signal axis only.
    Four stages run 1..4 parallel branches at progressively coarser
    resolutions; the final branch outputs are upsampled to the finest
    resolution, concatenated, and projected to 3 channels with a softmax
    over the channel dimension.
    """

    def __init__(self, cfg=cfg_hrnet8):
        """Build all stages from ``cfg`` (see ``cfg_hrnet8`` for the layout)."""
        super().__init__()
        print("initializing HRNet")
        self.configs = cfg
        # keep module creation order stable: it fixes both the parameter-init
        # RNG stream and the state_dict key order for saved checkpoints
        self.inputconv = self._make_inputconv(3, cfg['input_chn'])
        self.stage1 = self._make_stage1(cfg['stage1'], cfg['input_chn'])
        self.transition1 = Transitioner(cfg['stage1'], cfg['stage2'])
        self.stage2 = self._make_stage(cfg['stage2'])
        self.transition2 = Transitioner(cfg['stage2'], cfg['stage3'])
        self.stage3 = self._make_stage(cfg['stage3'])
        self.transition3 = Transitioner(cfg['stage3'], cfg['stage4'])
        self.stage4 = self._make_stage(cfg['stage4'])  # was self.configs; same dict
        oc_inputchannel = sum(cfg['stage4']['num_channels'])
        self.outputconv = self._make_outputconv(oc_inputchannel, 3)

    def _make_stage1(self, stage_configs, num_inchannels):
        """Stage 1: 1x1 channel lift + BN, then Bottleneck blocks.

        NOTE(review): the config's 'block' key is never consulted -- stage 1
        always uses Bottleneck and stages 2-4 always use BasicBlock.
        """
        n_channel = stage_configs['num_channels'][0]
        exps = stage_configs['expansion']
        branch = []
        # project the stem output up to the (expanded) stage-1 width
        branch.append(conv1x1(num_inchannels, n_channel*exps))
        branch.append(nn.BatchNorm2d(n_channel*exps))
        for _ in range(stage_configs['num_blocks'][0]):
            branch.append(Bottleneck(n_channel*exps, (exps, exps)))
        return nn.Sequential(*branch)

    def _make_branch(self, num_blocks, num_channels):
        """One branch: ``num_blocks`` BasicBlocks at a fixed channel count."""
        branch = [BasicBlock(num_channels, num_channels)
                  for _ in range(num_blocks)]
        return nn.Sequential(*branch)

    def _make_stage(self, stage_configs):
        """A stage: one branch per resolution, returned as a ModuleList."""
        num_branches = stage_configs['num_branches']
        num_blocks = stage_configs['num_blocks']
        num_channels = stage_configs['num_channels']
        return nn.ModuleList([
            self._make_branch(num_blocks[i], num_channels[i])
            for i in range(num_branches)
        ])

    def _make_inputconv(self, in_channel, out_channel):
        """Input stem: conv7x1 -> BN -> conv7x1 -> GELU(tanh)."""
        return nn.Sequential(
            conv7x1(in_channel, out_channel),
            nn.BatchNorm2d(out_channel),
            conv7x1(out_channel, out_channel),
            nn.GELU('tanh'),
        )

    def _make_outputconv(self, in_channel, out_channel):
        """Output head: 1x1 projection -> BN -> 3x3 conv -> channel softmax."""
        return nn.Sequential(
            conv1x1(in_channel, out_channel),
            nn.BatchNorm2d(out_channel),
            nn.Conv2d(out_channel, out_channel,
                      kernel_size=3, padding=1, bias=False),
            nn.Softmax(1),
        )

    def forward(self, x):
        """Run a (batch, 3, length) tensor through the net; returns the same shape."""
        x = x.unsqueeze(3)  # -> (batch, 3, length, 1) pseudo-image
        outputs = [self.stage1(self.inputconv(x))]
        # stages 2-4: fuse the previous branches, then run each new branch
        for stage_name, transition, stage in (
                ('stage2', self.transition1, self.stage2),
                ('stage3', self.transition2, self.stage3),
                ('stage4', self.transition3, self.stage4)):
            fused = transition(outputs)
            outputs = [stage[i](fused[i])
                       for i in range(self.configs[stage_name]['num_branches'])]
        # upsample every coarse branch to branch 0's resolution and merge
        # (generalized from the previous hard-coded 4-branch version)
        target_size = (outputs[0].size(-2), outputs[0].size(-1))
        merged = [outputs[0]] + [
            F.interpolate(out, size=target_size,
                          mode='bilinear', align_corners=True)
            for out in outputs[1:]
        ]
        x = self.outputconv(torch.cat(merged, 1))
        return x.squeeze(dim=3)


class DistanceLoss(torch.nn.Module):
    """Mean normalized peak-position distance.

    For inputs of shape (batch, channels, length), compares the argmax
    position along dim 2 of ``x`` and ``y`` and averages the absolute
    offset divided by the sequence length.

    NOTE(review): ``wp``/``ws`` are stored but never used in ``forward`` --
    presumably intended as loss-term weights; confirm with the author.
    """

    def __init__(self, wp=0.5):
        super().__init__()
        self.wp = wp
        self.ws = 1 - wp

    def forward(self, x, y):
        # x, y: (batchsize, 2, len) -- compare peak positions per channel
        seq_len = x.shape[2]
        offset = torch.argmax(x, 2) - torch.argmax(y, 2)
        return torch.mean(torch.abs(offset) / seq_len)


if __name__ == '__main__':
    # smoke test: build the network and push one random batch through it
    model = Model(cfg_hrnet8).cpu()
    # model = Transitioner(cfg_hrnet8['stage2'], cfg_hrnet8['stage3'])
    # print(model)
    # torch.save(model,'./model/hrnet_test.pth')
    batch = torch.randn(128, 3, 4000)
    pred = model(batch)[:, :2]
    print(pred.shape)

    criterion = DistanceLoss(wp=0.5)
    print(criterion(batch[:, :2], pred))

    # summary(model, (3, 4000), device='cpu')
