# -*- coding: utf-8 -*-

import os

import torch
import torch.nn as nn
import torch.nn.functional as F
from yacs.config import CfgNode as CN

BN_MOMENTUM = 0.1

class BasicBlock(nn.Module):
    """Standard residual basic block: two 3x3 convs plus a skip connection."""
    expansion = 1

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, downsample=None):
        """
        Args:
            inplanes: number of input channels (may differ from planes).
            planes: number of channels produced by both convolutions.
            kernel_size: conv kernel size (padding=1 assumes the default 3).
            stride: stride of the FIRST conv only; the second conv always
                runs at stride 1 so the residual addition shapes match.
            downsample: optional module applied to the identity branch when
                the channel count or resolution changes.
        """
        super(BasicBlock, self).__init__()
        # First conv: may change channel count and (via stride) resolution.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size, stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        # Second conv: shape-preserving. BUGFIX: it previously reused
        # `stride`, so a stride-2 block downsampled twice and the residual
        # addition below failed with a shape mismatch.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        # Project the identity branch when shapes differ.
        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
    

class HRModule(nn.Module):
    """One high-resolution module: `num_branches` parallel branches (one per
    resolution), each a stack of residual blocks, followed by a full
    cross-resolution fusion step.
    """

    def __init__(self, num_branches, blocks, num_blocks, in_channels, out_channels):
        """
        Args:
            num_branches: number of parallel resolution branches.
            blocks: block class used by every branch (e.g. BasicBlock).
            num_blocks: per-branch block counts (list, indexed by branch).
            in_channels: per-branch input widths; updated in place by
                _make_one_branch to hold each branch's OUTPUT width.
            out_channels: per-branch target widths.
        """
        super(HRModule, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_branches = num_branches

        # BUGFIX: branches must be built BEFORE the fuse layers —
        # _make_one_branch rewrites self.in_channels to the branch output
        # widths, which is what the fuse layers have to consume.
        self.branches = self._make_branches(num_branches, blocks, num_blocks, out_channels)
        self.fuse_layers = self._make_fuse_layers(num_branches, blocks, num_blocks, out_channels)
        self.relu = nn.ReLU(False)

    def _make_branches(self, num_branches, blocks, num_blocks, out_channels):
        """Build one residual branch per resolution.

        Returns an nn.ModuleList. BUGFIX: a plain Python list was returned
        before, which hides every branch parameter from the optimizer,
        state_dict and .to(device).
        """
        branches = []
        for i in range(num_branches):
            branches.append(self._make_one_branch(i, blocks, num_blocks, out_channels))
        return nn.ModuleList(branches)

    def _make_one_branch(self, branch_index, block, num_blocks, num_channels):
        """Build branch `branch_index`: num_blocks[branch_index] blocks at a
        constant width of num_channels[branch_index] * block.expansion.
        """
        # If the incoming width differs from the branch width, project the
        # first block's residual with a 1x1 conv + BN.
        downsample = None
        if self.in_channels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.in_channels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(num_channels[branch_index] * block.expansion,
                               momentum=BN_MOMENTUM))
        # Only the first block can change the width; the rest are uniform.
        branch = []
        branch.append(block(
            self.in_channels[branch_index], num_channels[branch_index], downsample=downsample))
        # Record the branch's output width for later blocks and fuse layers.
        self.in_channels[branch_index] = num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            branch.append(block(self.in_channels[branch_index], num_channels[branch_index]))
        return nn.Sequential(*branch)

    def _make_fuse_layers(self, num_branches, block, num_blocks, num_channels):
        """Build fuse_layers[i][j], mapping branch j's output to branch i's
        resolution and width: None on the diagonal (identity, handled in
        forward), 1x1 conv + upsample for j > i, and a chain of stride-2
        3x3 convs for j < i.
        """
        if num_branches == 1:
            return None
        fuse_layers = []
        for i in range(num_branches):
            fuse_layer = []
            for j in range(num_branches):
                if i == j:
                    # Same branch: identity, added directly in forward.
                    fuse_layer.append(None)
                elif j > i:
                    # Lower resolution -> target: match width with a 1x1
                    # conv, then nearest-neighbour upsample by 2**(j-i).
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(self.in_channels[j], self.in_channels[i], 1, 1, 0, bias=False),
                        nn.BatchNorm2d(self.in_channels[i], momentum=BN_MOMENTUM),
                        nn.Upsample(scale_factor=2**(j-i), mode='nearest')))
                else:
                    # Higher resolution -> target: i-j stride-2 convs; only
                    # the last one changes the channel count.
                    conv3x3s = []
                    for k in range(i-j):
                        if k == i - j - 1:
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(self.in_channels[j], self.in_channels[i], 3, 2, 1, bias=False),
                                # BUGFIX: BN must normalize the conv OUTPUT
                                # width (in_channels[i]); it was built with
                                # in_channels[j] and crashed whenever the
                                # branch widths differed.
                                nn.BatchNorm2d(self.in_channels[i], momentum=BN_MOMENTUM)))
                        else:
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(self.in_channels[j], self.in_channels[j], 3, 2, 1, bias=False),
                                nn.BatchNorm2d(self.in_channels[j], momentum=BN_MOMENTUM)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)

    def forward(self, x):
        """x: list of per-branch tensors -> list of fused per-branch tensors."""
        if self.num_branches == 1:
            return [self.branches[0](x[0])]

        # Run every branch independently.
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])

        # Fuse: each output i is the sum over all branches j, resampled to
        # branch i's resolution/width, followed by a ReLU.
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))

        return x_fuse


class HRNet(nn.Module):
    """HRNet classification network for small (CIFAR-sized) inputs.

    A single stride-1 stem conv is followed by four stages; stages 2-4 run
    2/3/4 parallel resolution branches fused by HRModule.  The
    classification head merges the branches into a 2048-channel map, which
    is globally pooled and fed to a 10-way linear classifier.
    """

    def __init__(self, cfg, **kwargs):
        """`cfg` is accepted for interface compatibility; the stage
        configuration is currently hard-coded below."""
        super(HRNet, self).__init__()
        # Stem: one stride-1 conv. CIFAR images are only 32x32, so the
        # usual two stride-2 stem convs are omitted to keep resolution.
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)

        # Stage 1: a single branch of 4 basic blocks at 32 channels.
        block = BasicBlock
        self.layer1 = self._make_layer(block, 4, 32, 32)
        stage1_out_channel = 32

        # Stage 2: two branches (full and 1/2 resolution).
        num_channels = [32, 64]
        self.stage2_cfg = self._build_stage_cfg(2, num_channels)
        self.transition1 = self._make_transition_layer(
            [stage1_out_channel], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels)

        # Stage 3: three branches.
        num_channels = [32, 64, 128]
        self.stage3_cfg = self._build_stage_cfg(3, num_channels)
        self.transition2 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        # Stage 4: four branches.
        num_channels = [32, 64, 128, 256]
        self.stage4_cfg = self._build_stage_cfg(4, num_channels)
        self.transition3 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels)

        # Classification head: widen, merge, project to 2048 channels.
        self.incre_modules, self.downsamp_modules, \
            self.final_layer = self._make_head(pre_stage_channels)
        self.classifier = nn.Linear(2048, 10)  # 10 = CIFAR-10 classes

    @staticmethod
    def _build_stage_cfg(num_branches, num_channels):
        """Build the CfgNode for one stage (factored out of __init__ where
        the same six lines were repeated per stage)."""
        stage_cfg = CN()
        stage_cfg.NUM_MODULES = 1
        stage_cfg.NUM_BRANCHES = num_branches
        stage_cfg.NUM_BLOCKS = [4] * num_branches
        # Copy: the caller's list is mutated in place by HRModule.
        stage_cfg.NUM_CHANNELS = list(num_channels)
        stage_cfg.BLOCK = "BasicBlock"
        stage_cfg.FUSE_METHOD = 'SUM'
        return stage_cfg

    def _make_head(self, pre_stage_channels):
        """Build the classification head.

        Returns (incre_modules, downsamp_modules, final_layer):
          * incre_modules[i]: one block adapting branch i's output width,
          * downsamp_modules[i]: stride-2 conv merging resolution i into i+1,
          * final_layer: 1x1 conv projecting the merged map to 2048 channels.
        """
        head_block = BasicBlock
        head_channels = [32, 64, 128, 256]

        # One single-block layer per branch, adjusting channels after all stages.
        incre_modules = []
        for i, out_channel in enumerate(pre_stage_channels):
            incre_modules.append(self._make_layer(
                head_block, 1, head_channels[i], out_channel, stride=1))
        incre_modules = nn.ModuleList(incre_modules)

        # Stride-2 convs so consecutive resolutions can be added together.
        downsamp_modules = []
        for i in range(len(pre_stage_channels) - 1):
            in_channel = head_channels[i]
            out_channel = head_channels[i + 1]
            downsamp_modules.append(nn.Sequential(
                nn.Conv2d(in_channel,
                          out_channel,
                          kernel_size=3,
                          stride=2,
                          padding=1),
                nn.BatchNorm2d(out_channel, momentum=BN_MOMENTUM),
                nn.ReLU(inplace=True)))
        downsamp_modules = nn.ModuleList(downsamp_modules)

        # Final layer: output a feature map with 2048 channels.
        final_layer = nn.Sequential(
            nn.Conv2d(head_channels[3],
                      2048,
                      kernel_size=1,
                      stride=1,
                      padding=0),
            nn.BatchNorm2d(2048, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True))

        return incre_modules, downsamp_modules, final_layer

    def _make_layer(self, block, num_block, in_channel, out_channel, stride=1):
        """Stack `num_block` blocks into one layer.

        The first block adapts channels/stride (with a projected residual
        when needed); the remaining blocks are shape-preserving.
        """
        downsample = None
        if stride != 1 or in_channel != out_channel:
            # Project the identity so it matches the first block's output.
            # BUGFIX: the projection now uses `stride` (it was fixed at 1,
            # which would mismatch a strided first block).
            downsample = nn.Sequential(
                nn.Conv2d(in_channel, out_channel,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channel, momentum=BN_MOMENTUM))

        layers = [block(in_channel, out_channel, stride=stride, downsample=downsample)]
        # BUGFIX: blocks after the first consume the FIRST block's output
        # width (out_channel), not the layer's input width.
        for _ in range(1, num_block):
            layers.append(block(out_channel, out_channel))
        return nn.Sequential(*layers)

    def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
        """Adapt the previous stage's outputs to the current stage's branches.

        Returns a ModuleList with one entry per current branch: None when
        the previous feature map can be reused directly, a 3x3 conv when
        only the width changes, and a stride-2 conv for each newly created
        (lower-resolution) branch.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    # Same resolution, different width: fit channel number.
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  kernel_size=3,
                                  padding=1,
                                  stride=1),
                        nn.BatchNorm2d(num_channels_cur_layer[i], momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=True)))
                else:
                    # Use the previous feature map directly.
                    transition_layers.append(None)
            else:
                # New branch: downsample the lowest-resolution map by 2.
                transition_layers.append(nn.Sequential(
                    nn.Conv2d(num_channels_pre_layer[-1],
                              num_channels_cur_layer[i],
                              kernel_size=3,
                              padding=1,
                              stride=2),
                    nn.BatchNorm2d(num_channels_cur_layer[i], momentum=BN_MOMENTUM),
                    nn.ReLU(inplace=True)))
        return nn.ModuleList(transition_layers)

    def _make_stage(self, stage_cfg, num_inchannels):
        """Build one stage as a Sequential of HRModules.

        Returns (stage, out_channels).  NOTE: the block type is fixed to
        BasicBlock; stage_cfg['BLOCK'] and ['FUSE_METHOD'] are not
        consulted here.
        """
        num_modules = stage_cfg['NUM_MODULES']
        num_branches = stage_cfg['NUM_BRANCHES']
        num_blocks = stage_cfg['NUM_BLOCKS']
        num_channels = stage_cfg['NUM_CHANNELS']
        block = BasicBlock

        modules = []
        for _ in range(num_modules):
            modules.append(
                HRModule(num_branches,
                         block,
                         num_blocks,
                         num_inchannels,
                         num_channels))
        return nn.Sequential(*modules), num_channels

    def init_weights(self):
        """Initialize weights without a pretrained model: He init for
        convs, (1, 0) for BatchNorm affine parameters."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        # Stem (resolution preserved: CIFAR-10 images are only 32x32).
        x = self.conv1(x)
        x = self.bn1(x)
        # BUGFIX: the stem activation was missing after bn1.
        x = self.relu(x)
        # Stage 1.
        x = self.layer1(x)
        # Stage 2: split into two branches.
        x_list = []
        for i in range(self.stage2_cfg['NUM_BRANCHES']):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)
        # Stage 3: add a third branch from the lowest-resolution output.
        x_list = []
        for i in range(self.stage3_cfg['NUM_BRANCHES']):
            if self.transition2[i] is not None:
                x_list.append(self.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)
        # Stage 4: add a fourth branch.
        x_list = []
        for i in range(self.stage4_cfg['NUM_BRANCHES']):
            if self.transition3[i] is not None:
                x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage4(x_list)

        # Classification head.
        # BUGFIX: `y` was previously used uninitialized (NameError on every
        # call). Merge the branches high-to-low: widen each branch output
        # and add the downsampled running sum.
        y = self.incre_modules[0](y_list[0])
        for i in range(len(self.downsamp_modules)):
            y = self.incre_modules[i + 1](y_list[i + 1]) + self.downsamp_modules[i](y)
        y = self.final_layer(y)

        if torch._C._get_tracing_state():
            # Shape-agnostic global average pool for tracing/ONNX export.
            y = y.flatten(start_dim=2).mean(dim=2)
        else:
            # Global average pool to a (N, 2048) vector.
            y = F.avg_pool2d(y, kernel_size=y.size()
                                 [2:]).view(y.size(0), -1)

        y = self.classifier(y)

        return y


def cls_net(config, **kwargs):
    """Factory: build an HRNet classifier from `config` and initialize its weights."""
    net = HRNet(config, **kwargs)
    net.init_weights()
    return net