import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from util.common import dist2bbox, make_anchors
# import metrics

def autopad(k, p=None, d=1):  # kernel, padding, dilation
    """Return padding that produces 'same'-shaped conv outputs.

    k may be an int or a sequence of ints; d is the dilation factor.
    An explicitly supplied p is returned unchanged.
    """
    if d > 1:
        # Effective kernel size once dilation is applied.
        if isinstance(k, int):
            k = d * (k - 1) + 1
        else:
            k = [d * (x - 1) + 1 for x in k]
    if p is not None:
        return p
    # Half the (effective) kernel size gives 'same' padding.
    return k // 2 if isinstance(k, int) else [x // 2 for x in k]

class Conv(nn.Sequential):
    """Conv2d -> BatchNorm2d -> SiLU block with 'same' padding by default.

    The convolution is bias-free because the following BatchNorm2d supplies
    an affine shift of its own.
    """

    def __init__(self, in_ch, out_ch, k=1, s=1, p=None):
        super(Conv, self).__init__()
        self.add_module("conv2d", nn.Conv2d(in_ch, out_ch, kernel_size=(k, k), stride=s, padding=autopad(k, p), bias=False))
        self.add_module("batchnorm2d", nn.BatchNorm2d(out_ch))
        self.add_module("silu", nn.SiLU())
    # No forward override: nn.Sequential already chains the submodules in
    # registration order; the original override merely delegated to
    # super().forward and has been removed.

class Bottleneck(nn.Module):
    """Two 3x3 Conv blocks, optionally wrapped in a residual connection.

    When if_shotcut is True the output channel count is forced to in_ch so
    the residual add is shape-compatible.
    """

    def __init__(self, in_ch, out_ch, if_shotcut, e=0.5):
        super(Bottleneck, self).__init__()
        self.if_shotcut = if_shotcut
        if if_shotcut:
            out_ch = in_ch  # residual add requires matching channel counts
        hidden = int(e * out_ch)
        self.Conv1 = Conv(in_ch, hidden, k=3, s=1)
        self.Conv2 = Conv(hidden, out_ch, k=3, s=1)

    def forward(self, x):
        out = self.Conv2(self.Conv1(x))
        return out + x if self.if_shotcut else out

class c2f(nn.Module):
    """CSP-style block: split features in half, run a chain of bottlenecks
    on one half, then fuse all intermediate results with a 1x1 Conv."""

    def __init__(self, in_ch, out_ch, n_bottleneck=1, if_shotcut=False):
        super(c2f, self).__init__()
        self.in_ch = in_ch
        self.out_ch = out_ch  # out_ch should equal in_ch
        half = int(0.5 * out_ch)
        self.Conv1 = Conv(in_ch, out_ch, k=1, s=1)
        # Fusion conv sees the two split halves plus one half per bottleneck.
        self.Conv2 = Conv(int(out_ch * n_bottleneck * 0.5 + out_ch), out_ch, k=1, s=1)
        self.shotcut = if_shotcut
        self.bottleModules = nn.ModuleList(
            Bottleneck(half, half, if_shotcut, 1.0) for _ in range(n_bottleneck)
        )

    def forward(self, x):
        half = int(0.5 * self.out_ch)
        feats = list(self.Conv1(x).split((half, half), 1))
        # Each bottleneck feeds on the previous one's output.
        for bottleneck in self.bottleModules:
            feats.append(bottleneck(feats[-1]))
        return self.Conv2(torch.cat(feats, 1))

class SPPF(nn.Module):
    """Spatial Pyramid Pooling - Fast: three cascaded same-size max pools
    concatenated with the input projection, then fused by a 1x1 Conv."""

    def __init__(self, in_ch, out_ch, k=5):
        super(SPPF, self).__init__()
        hidden = in_ch // 2
        self.Conv1 = Conv(in_ch, hidden, k=1, s=1)
        self.Conv2 = Conv(hidden * 4, out_ch, k=1, s=1)
        # stride 1 + k//2 padding keeps the spatial size unchanged
        self.mp = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)

    def forward(self, x):
        p0 = self.Conv1(x)
        p1 = self.mp(p0)
        p2 = self.mp(p1)
        p3 = self.mp(p2)
        return self.Conv2(torch.cat((p0, p1, p2, p3), 1))

class Backbone(nn.Module):
    """YOLOv8-style backbone: stem + four (strided Conv, c2f) stages, with
    an SPPF refining the deepest map.

    Returns the stride-8, stride-16 and SPPF-refined stride-32 feature maps
    for the neck.
    """

    def __init__(self, last_channel, model_scale=1.0, width_scal=0.5, phi='s'):
        super(Backbone, self).__init__()
        # Scale a base channel count and round up to a multiple of n.
        # (make_divi already returns an int multiple of n, so the original's
        # extra math.ceil wrapper was redundant and has been dropped.)
        make_divi = lambda x, scale, n: math.ceil(x * scale / n) * n
        divi_64 = make_divi(64, width_scal, 8)
        divi_128 = make_divi(128, width_scal, 8)
        divi_256 = make_divi(256, width_scal, 8)
        divi_512 = make_divi(512, width_scal, 8)

        self.Conv_1 = Conv(3, divi_64, k=3, s=2)
        self.Conv_2 = Conv(divi_64, divi_128, k=3, s=2)
        # Per-stage bottleneck repeat counts, scaled by model depth.
        c2f_cfg = [round(x * model_scale) for x in (3, 6, 6, 3)]
        self.c2f_1 = nn.Sequential(c2f(divi_128, divi_128, c2f_cfg[0], True))

        self.Conv_3 = Conv(divi_128, divi_256, k=3, s=2, p=1)
        self.c2f_2 = nn.Sequential(c2f(divi_256, divi_256, c2f_cfg[1], True))

        self.Conv_4 = Conv(divi_256, divi_512, k=3, s=2, p=1)
        self.c2f_3 = nn.Sequential(c2f(divi_512, divi_512, c2f_cfg[2], True))

        self.Conv_5 = Conv(divi_512, last_channel, k=3, s=2, p=1)
        self.c2f_4 = nn.Sequential(c2f(last_channel, last_channel, c2f_cfg[3], True))
        self.sppf = SPPF(last_channel, last_channel)

    def forward(self, x):
        # Shape notes assume a 640x640 input (so 160/80/40/20 spatial sizes)
        # -- TODO confirm against the training pipeline.
        y = self.Conv_1(x)          # stride 2
        y = self.Conv_2(y)          # stride 4
        y = self.c2f_1(y)
        y = self.Conv_3(y)          # stride 8
        y = self.c2f_2(y)
        p3 = y                      # no clone needed: y is never mutated in place
        y = self.Conv_4(y)          # stride 16
        y = self.c2f_3(y)
        p4 = y
        y = self.Conv_5(y)          # stride 32
        y = self.c2f_4(y)
        return p3, p4, self.sppf(y)

class Upsample(nn.Module):
    """Doubles the spatial resolution (H, W) of an NCHW tensor by
    nearest-neighbor replication."""

    def __init__(self):
        super(Upsample, self).__init__()

    def forward(self, x):
        # Duplicating every row and then every column is exactly 2x
        # nearest-neighbor interpolation: output index i reads input i // 2.
        return x.repeat_interleave(2, dim=-2).repeat_interleave(2, dim=-1)

class Neck(nn.Module):
    """PAN-FPN neck: a top-down upsample/concat path followed by a
    bottom-up downsample/concat path, each fused with c2f blocks.

    in_ch is the (P3, P4, P5) channel tuple from the backbone;
    out_channels is the channel count of the deepest output map.
    """

    def __init__(self, in_ch, out_channels, model_scale=1.0, width_scal=0.5, phi='s'):
        super(Neck, self).__init__()
        c2f_cfg = [round(x * model_scale) for x in (3, 3, 3, 3)]
        # Scale a base channel count and round up to a multiple of n.
        # (Already an int multiple, so no extra math.ceil wrapper is needed;
        # the unused divi_768/divi_1024 locals were removed.)
        make_divi = lambda x, scale, n: math.ceil(x * scale / n) * n
        divi_256 = make_divi(256, width_scal, 8)
        divi_512 = make_divi(512, width_scal, 8)

        self.upsample = Upsample()
        # Two c2f blocks in the FPN (top-down) path and two in the PAN
        # (bottom-up) path -- four in total.
        self.c2f_1 = nn.Sequential(c2f(in_ch[2] + in_ch[1], divi_512, c2f_cfg[0]))
        self.c2f_2 = nn.Sequential(c2f(divi_512 + in_ch[0], divi_256, c2f_cfg[1]))

        self.Conv1 = Conv(divi_256, divi_256, k=3, s=2)
        self.c2f_3 = nn.Sequential(c2f(divi_512 + divi_256, divi_512, c2f_cfg[2]))

        self.Conv2 = Conv(divi_512, divi_512, k=3, s=2)
        self.c2f_4 = nn.Sequential(c2f(divi_512 + in_ch[2], out_channels, c2f_cfg[3]))

    def forward(self, x1, x2, x3):
        """x1/x2/x3 are the stride-8/16/32 backbone features; returns three
        fused maps (stride 8, 16, 32) for the detection head."""
        # Top-down (FPN) path.
        t = self.upsample(x3)
        t = self.c2f_1(torch.cat((t, x2), dim=1))
        fpn_mid = t  # no clone needed: t is never mutated in place
        t = self.upsample(t)
        p3 = self.c2f_2(torch.cat((t, x1), dim=1))
        # Bottom-up (PAN) path.
        t = self.Conv1(p3)
        p4 = self.c2f_3(torch.cat((t, fpn_mid), dim=1))
        t = self.Conv2(p4)
        p5 = self.c2f_4(torch.cat((t, x3), dim=1))
        return p3, p4, p5

class DFL(nn.Module):
    # Integral module of Distribution Focal Loss (DFL) proposed in Generalized Focal Loss https://ieeexplore.ieee.org/document/9792391
    """Decodes a per-side distribution over c1 bins into its expectation.

    A frozen 1x1 conv holds the bin indices 0..c1-1 as weights, so
    convolving a softmax-ed distribution computes its expected value.
    """

    def __init__(self, c1=16):
        super().__init__()
        self.conv = nn.Conv2d(c1, 1, 1, bias=False).requires_grad_(False)
        # Fixed weights = bin indices. Plain in-place tensor copy; the
        # original's nn.Parameter wrapper around the source tensor was
        # pointless (only the values are copied) and has been removed.
        self.conv.weight.data.copy_(torch.arange(c1, dtype=torch.float).view(1, c1, 1, 1))
        self.c1 = c1

    def forward(self, x):
        # x: (batch, 4*c1, anchors) -> expectation per box side (batch, 4, anchors)
        b, c, a = x.shape  # batch, channels, anchors
        return self.conv(x.view(b, 4, self.c1, a).transpose(2, 1).softmax(1)).view(b, 4, a)


class DetectHead(nn.Module):
    """Decoupled YOLOv8-style detection head with per-level box and class branches.

    Training mode returns the raw per-level prediction maps; eval mode
    decodes DFL box distributions to xyxy image coordinates and appends
    sigmoid class scores.
    """
    # NOTE(review): class-level defaults act as a lazy cache; forward()
    # overwrites them with instance attributes on the first eval call.
    shape = None
    anchors = torch.empty(0)  # init
    strides = torch.empty(0)  # init

    def __init__(self, nc, in_ch=(), stride=(), reg_max=16):
        """nc: number of classes; in_ch: input channels per feature level;
        stride: per-level downsample factors; reg_max: DFL bins per box side."""
        super(DetectHead, self).__init__()
        self.nc = nc
        self.nl = len(in_ch)                      # number of detection layers
        self.no = nc + reg_max * 4                # outputs per anchor: 4 sides x reg_max bins + class logits
        self.reg_max = reg_max
        self.stride = stride

        # Branch widths derived from the first level's channel count.
        c2, c3 = max((16, in_ch[0] // 4, reg_max * 4)), max(in_ch[0], nc)
        self.loc_Conv = nn.ModuleList(
            nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in in_ch)

        self.cls_Conv = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in in_ch)

        self.dfl = DFL(reg_max)
        self.export = True  # when True, eval forward returns only the decoded tensor
    def forward(self, x):
        """x: list of self.nl feature maps (BCHW). NOTE: the list is mutated
        in place with the concatenated box+class predictions per level."""
        # batch = x.shape[0]

        shape = x[0].shape  # BCHW
        for i in range(self.nl):
            x[i] = torch.cat((self.loc_Conv[i](x[i]), self.cls_Conv[i](x[i])), 1)
        if self.training:
            return x
        elif self.shape != shape:
            # Recompute anchor centers and per-anchor strides only when the
            # input resolution changes (caches on the instance).
            self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
            self.shape = shape
        box, cls = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2).split((self.reg_max * 4, self.nc), 1)
        # Raw outputs are distances to the left-top and right-bottom corners;
        # at eval time convert them to xyxy boxes in image units.
        dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=False, dim=1) * self.strides
        y = torch.cat((dbox, cls.sigmoid()), 1)
        return y if self.export else (y, x)

class ClassifyHead(nn.Module):
    # YOLOv8 classification head, i.e. x(b,c1,20,20) to x(b,c2)
    def __init__(self, c1, c2, k=1, s=1, p=None):  # ch_in, ch_out, kernel, stride, padding, groups
        super().__init__()
        hidden = 1280  # efficientnet_b0 size
        self.conv = Conv(c1, hidden, k, s, autopad(k, p))
        self.pool = nn.AdaptiveAvgPool2d(1)  # to x(b,hidden,1,1)
        self.drop = nn.Dropout(p=0.0, inplace=True)
        self.linear = nn.Linear(hidden, c2)  # to x(b,c2)

    def forward(self, x):
        # A list of feature maps is fused along the channel axis first.
        if isinstance(x, list):
            x = torch.cat(x, 1)
        feat = self.pool(self.conv(x))      # global average pool to (b, hidden, 1, 1)
        feat = self.drop(feat.flatten(1))   # dropout is a no-op at p=0.0
        # Sigmoid (not softmax): per-class independent probabilities.
        return self.linear(feat).sigmoid()

class yolov8_detect(nn.Module):
    """Full detector: Backbone -> Neck (PAN-FPN) -> DetectHead.

    phi selects the depth/width scaling; only 'n', 's', 'm' and 'l' are
    supported ('x' appears in the scale tables but has no channel entry).
    """

    def __init__(self, nc, phi='s'):
        super(yolov8_detect, self).__init__()
        # Scale a base channel count and round up to a multiple of n.
        # (Returns an int multiple already; the original's extra math.ceil
        # wrapper was redundant.)
        make_divi = lambda x, scale, n: math.ceil(x * scale / n) * n
        depth_dict = {'n': 0.33, 's': 0.33, 'm': 0.67, 'l': 1.00, 'x': 1.00, }
        width_dict = {'n': 0.25, 's': 0.50, 'm': 0.75, 'l': 1.00, 'x': 1.25, }
        model_scale = depth_dict[phi]
        width_scal = width_dict[phi]

        divi_256 = make_divi(256, width_scal, 8)
        divi_512 = make_divi(512, width_scal, 8)
        divi_768 = make_divi(768, width_scal, 8)
        divi_1024 = make_divi(1024, width_scal, 8)

        # (P3, P4, P5) channel tuple per scale. The original kept two
        # byte-identical tables for neck and head; one shared table is used
        # for both, preserving the exact same values.
        feat_chs = {'n': (divi_256, divi_512, divi_1024), 's': (divi_256, divi_512, divi_1024),
                    'm': (divi_256, divi_512, divi_768), 'l': (divi_256, divi_512, divi_512)}
        head_in_ch = feat_chs[phi]
        neck_in_ch = feat_chs[phi]
        neck_out_channel = head_in_ch[-1]
        backbone_last_channel = neck_in_ch[-1]

        self.nc = nc
        self.reg_max = 16
        self.no = 4 * self.reg_max + self.nc  # outputs per anchor
        self.model_scale = model_scale
        self.strides = (8, 16, 32)
        self.backbone = Backbone(backbone_last_channel, model_scale, width_scal, phi)
        self.neck = Neck(neck_in_ch, neck_out_channel, model_scale, width_scal, phi)
        self.head = DetectHead(nc, head_in_ch, self.strides, self.reg_max)

    def forward(self, x):
        f1, f2, f3 = self.backbone(x)       # stride 8 / 16 / 32 features
        y1, y2, y3 = self.neck(f1, f2, f3)  # fused PAN-FPN maps
        return self.head([y1, y2, y3])


class yolov8_classify(nn.Module):
    """Backbone + classification head producing per-class sigmoid scores."""

    def __init__(self, nc, model_scale=1.0):
        super(yolov8_classify, self).__init__()
        self.nc = nc
        self.model_scale = model_scale
        # BUG FIX: the original called Backbone(model_scale), passing the
        # depth scale (a float) as Backbone's first positional parameter
        # `last_channel`, which breaks layer construction. Pass the channel
        # count explicitly and forward the depth scale to its keyword, and
        # keep the head's input width tied to the same value (the original
        # hard-coded 512 in ClassifyHead independently).
        backbone_last_channel = 512
        self.backbone = Backbone(backbone_last_channel, model_scale=model_scale)
        self.classifyHead = ClassifyHead(backbone_last_channel, nc)

    def forward(self, x):
        # Only the deepest (SPPF-refined) feature map feeds the classifier.
        _, _, f3 = self.backbone(x)
        return self.classifyHead(f3)

