import torch 
import torch.nn as nn 
from torch.quantization import fuse_modules 
import torch.nn.functional as F 

class ConvBNReLU(nn.Sequential):
    """Conv2d -> normalization -> ReLU block.

    The three children are ordered so they can be fused for quantized
    inference (0: conv, 1: norm, 2: ReLU).  Using ReLU as the activation
    bounds the value range, which helps quantization.
    """

    def __init__(self, n_in, n_out,
                 kernel_size=5, stride=1,
                 groups=1, norm_layer=nn.BatchNorm2d):
        # "same" padding for odd kernels: (K - 1) / 2 zeros on each side
        padding = (kernel_size - 1) // 2
        super(ConvBNReLU, self).__init__(
            # bias is omitted because the following norm layer absorbs it
            nn.Conv2d(n_in, n_out, kernel_size,
                      stride, padding, groups=groups,
                      bias=False),
            # bug fix: honor the norm_layer argument instead of
            # hard-coding nn.BatchNorm2d
            norm_layer(n_out),
            nn.ReLU(inplace=True)
        )

class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block.

    A depthwise-separable convolution with a channel-expansion stage in
    the middle; a residual shortcut is used when input and output
    shapes match.
    """

    def __init__(self, n_in, n_out,
                 stride, expand_ratio, norm_layer=nn.BatchNorm2d):
        super().__init__()
        self.stride = stride
        # expand the hidden features to limit information loss
        hidden_dim = int(round(n_in * expand_ratio))
        # residual shortcut only when shapes match (stride 1, equal channels)
        self.use_res = self.stride == 1 and n_in == n_out
        layers = []
        if expand_ratio != 1:
            # pointwise conv: expand the channel count
            layers.append(
                ConvBNReLU(n_in, hidden_dim, kernel_size=1,
                           norm_layer=norm_layer))
        layers.extend([
            # depthwise conv (groups == channel count) extracts features
            ConvBNReLU(
                hidden_dim, hidden_dim,
                stride=stride, groups=hidden_dim, norm_layer=norm_layer),
            # pointwise projection, deliberately without an activation
            nn.Conv2d(hidden_dim, n_out, 1, 1, 0, bias=False),
            # bug fix: honor the norm_layer argument instead of
            # hard-coding nn.BatchNorm2d
            norm_layer(n_out),
        ])
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        if self.use_res:
            return x + self.conv(x)
        else:
            return self.conv(x)


class QInvertedResidual(InvertedResidual):
    """Quantization-ready variant of :class:`InvertedResidual`.

    The residual addition goes through ``nn.quantized.FloatFunctional``
    so its activation statistics can be observed and the op quantized;
    a plain tensor ``+`` cannot be quantized.
    """

    def __init__(self, *args, **kwargs):
        super(QInvertedResidual, self).__init__(*args, **kwargs)
        # quantization-aware functional wrapper for the skip connection
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x):
        if self.use_res:
            # bug fix: use the quantized add (the float "+" bypassed
            # FloatFunctional and broke quantized inference)
            return self.skip_add.add(x, self.conv(x))
        else:
            return self.conv(x)

    def fuse_model(self):
        """Fuse the trailing Conv2d + BatchNorm2d pair inside ``self.conv``."""
        # stop at len - 1 so idx + 1 can never run past the end
        for idx in range(len(self.conv) - 1):
            if type(self.conv[idx]) == nn.Conv2d:
                fuse_modules(
                    self.conv,
                    [str(idx), str(idx + 1)], inplace=True)

class ConvTBNReLU(nn.Sequential):
    """Upsampling block: nearest-neighbor upsample + inverted residual.

    NOTE(review): kernel_size, padding, output_padding, bias, dilation,
    groups and norm_layer are accepted only for signature compatibility
    with a transposed-conv block and are ignored by the implementation.
    """

    def __init__(self, n_in, n_out,
                 kernel_size=5, stride=1, padding=1, output_padding=1, bias=True, dilation=1,
                 groups=1, norm_layer=nn.BatchNorm2d):
        # bug fix: tuple(stride) raised TypeError for the (default) int
        # stride; only coerce sequences, pass scalars through unchanged
        scale = tuple(stride) if isinstance(stride, (list, tuple)) else stride
        super(ConvTBNReLU, self).__init__(
            nn.UpsamplingNearest2d(scale_factor=scale),
            QInvertedResidual(n_in, n_out, 1, 2),
        )

class OutLayer(nn.Module):
    """Two stacked inverted-residual blocks; the second halves the
    requested output channel count (produces nout // 2 channels).
    """

    def __init__(self, nin, nout):
        super().__init__()
        self.layers = nn.Sequential(
            QInvertedResidual(nin, nin, 1, 1),
            QInvertedResidual(nin, nout // 2, 1, 1),
        )

    def forward(self, x):
        return self.layers(x)
class YoloLayer1(nn.Module):
    """Detection head that also emits an upsampled branch feature map
    for the next (finer-resolution) scale.

    Returns ``(detection, branch)`` when ``branch=True``, otherwise
    just ``detection``.
    """

    def __init__(self, nin, nout, branch=True):
        super().__init__()
        self.layers = nn.Sequential(
            OutLayer(nin, nout),
        )
        # 255 = 3 anchors * (4 box + 1 objectness + 80 classes)
        self.out = nn.Sequential(
            ConvBNReLU(nout//2, nout, 3, 1),
            nn.Conv2d(nout, 255, kernel_size=1),
        )
        if branch:
            self.brt = nn.Sequential(
                ConvBNReLU(nout//2, nout//4, 1, 1),
                nn.UpsamplingNearest2d(scale_factor=2),
            )
        self.branch = branch

    def forward(self, x):
        h = self.layers(x)
        y1 = self.out(h)
        # bug fix: the original accessed self.brt unconditionally, which
        # raised AttributeError when constructed with branch=False
        if self.branch:
            return y1, self.brt(h)
        return y1
class YoloLayer2(nn.Module):
    """Detection head without a branch output.

    When ``branch=True`` the upsampling branch modules are still built
    (mirroring YoloLayer1), but ``forward`` never uses them.
    """

    def __init__(self, nin, nout, branch=True):
        super().__init__()
        self.layers = nn.Sequential(
            OutLayer(nin, nout),
        )
        # 255-channel raw prediction map
        self.out = nn.Sequential(
            ConvBNReLU(nout // 2, nout, 3, 1),
            nn.Conv2d(nout, 255, kernel_size=1),
        )
        if branch:
            self.brt = nn.Sequential(
                ConvBNReLU(nout // 2, nout // 4, 1, 1),
                nn.UpsamplingNearest2d(scale_factor=2),
            )
        self.branch = branch

    def forward(self, x):
        hidden = self.layers(x)
        return self.out(hidden)
def _make_grid(nx=20, ny=20):
    yv, xv = torch.meshgrid(torch.arange(ny), torch.arange(nx))
    return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
def mkoutput(x, stride=8):
    """Reshape a raw head map (B, 255, H, W) to (B, 3, H, W, 85).

    The 255 channels are split into 3 anchors x 85 values per anchor.
    ``stride`` is unused; it is kept for signature compatibility.
    """
    batch = x.shape[0]
    height, width = x.shape[2], x.shape[3]
    split = x.reshape(batch, 3, 85, height, width)
    return split.permute(0, 1, 3, 4, 2).contiguous()
class YoloModel(nn.Module):
    """Quantization-friendly YOLOv3-style detector on a MobileNetV2-like
    backbone.

    Three heads predict at strides 8, 16 and 32.  In training mode the
    raw head outputs of shape (B, 3, H, W, 85) are returned; in eval
    mode boxes are decoded against the anchor priors and returned as
    (B, anchors*H*W, 85).
    """

    # anchor priors (w, h) in input pixels, one triple per stride
    ANCHORS = (
        [[10, 13], [16, 30], [33, 23]],
        [[30, 61], [62, 45], [59, 119]],
        [[116, 90], [156, 198], [373, 326]],
    )
    STRIDES = (8, 16, 32)

    def __init__(self):
        super().__init__()
        self.base0 = nn.Sequential(
            ConvBNReLU(3, 16, 3, 1),
            QInvertedResidual(16, 32, 2, 2),
            QInvertedResidual(32, 32, 1, 2),
            QInvertedResidual(32, 48, 2, 2),
            QInvertedResidual(48, 48, 1, 2),
            QInvertedResidual(48, 64, 2, 2),
            QInvertedResidual(64, 64, 1, 2),
        )
        self.base1 = nn.Sequential(
            QInvertedResidual(64, 72, 2, 2),
            QInvertedResidual(72, 72, 1, 2),
        )
        self.base2 = nn.Sequential(
            QInvertedResidual(72, 128, 2, 2),
            QInvertedResidual(128, 128, 1, 2),
        )

        # heads for strides 32, 16, 8; the input channel counts of
        # yolo1/yolo0 include the concatenated upsampled branch
        self.yolo2 = YoloLayer1(128, 128, True)
        self.yolo1 = YoloLayer1(104, 128, True)
        self.yolo0 = YoloLayer2(96, 128, False)

    def _decode(self, y, anchors, stride):
        """Decode one raw head output (B, 3, H, W, 85) into absolute
        boxes, flattened to (B, 3*H*W, 85).  Modifies ``y`` in place."""
        B, _, ny, nx, _ = y.shape
        # grid size is taken from the feature map itself instead of
        # hard-coding 52/26/13, so non-416 inputs decode correctly
        grid = self._make_grid(nx, ny).to(y.device)
        anch = torch.tensor(anchors, dtype=torch.float32,
                            device=y.device).view(1, -1, 1, 1, 2)
        y[..., 0:2] = (y[..., 0:2].sigmoid() + grid) * stride  # xy
        y[..., 2:4] = torch.exp(y[..., 2:4]) * anch            # wh
        y[..., 4:] = y[..., 4:].sigmoid()                      # obj + cls
        return y.view(B, -1, 85)

    def forward(self, x):
        h0 = self.base0(x)
        h1 = self.base1(h0)
        h2 = self.base2(h1)

        # coarse-to-fine: each head feeds an upsampled branch to the next
        y2, cat1 = self.yolo2(h2)
        h1 = torch.cat([h1, cat1], dim=1)
        y1, cat0 = self.yolo1(h1)
        h0 = torch.cat([h0, cat0], dim=1)
        y0 = self.yolo0(h0)

        y0 = mkoutput(y0)
        y1 = mkoutput(y1)
        y2 = mkoutput(y2)

        if not self.training:  # inference: decode boxes
            y0 = self._decode(y0, self.ANCHORS[0], self.STRIDES[0])
            y1 = self._decode(y1, self.ANCHORS[1], self.STRIDES[1])
            y2 = self._decode(y2, self.ANCHORS[2], self.STRIDES[2])

        return y0, y1, y2

    @staticmethod
    def _make_grid(nx=20, ny=20):
        """Return a (1, 1, ny, nx, 2) float grid of (x, y) coordinates."""
        yv, xv = torch.meshgrid(torch.arange(ny), torch.arange(nx))
        return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
if __name__ == "__main__":
    import os

    # smoke test: run a forward pass and save an initial checkpoint
    model = YoloModel()
    x = torch.randn([10, 3, 416, 416])
    y1, y2, y3 = model(x)
    # bug fix: torch.save fails if the target directory does not exist
    os.makedirs("ckpt", exist_ok=True)
    torch.save(model.state_dict(), "ckpt/mb.pt")
    print(y1.shape, y2.shape, y3.shape)