import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from util.common import dist2bbox, make_anchors
from modules.transformer import Transformer
# import metrics

def autopad(k, p=None, d=1):  # kernel, padding, dilation
    """Return the padding that yields 'same'-shaped conv outputs.

    k may be an int or a sequence of ints; d > 1 first inflates the
    kernel to its effective (dilated) size. An explicit p wins.
    """
    if d > 1:
        # effective kernel size under dilation
        if isinstance(k, int):
            k = d * (k - 1) + 1
        else:
            k = [d * (x - 1) + 1 for x in k]
    if p is not None:
        return p
    # auto 'same' padding: half the (effective) kernel size
    return k // 2 if isinstance(k, int) else [x // 2 for x in k]

class Conv(nn.Sequential):
    """Standard conv block: Conv2d -> BatchNorm2d -> SiLU.

    Padding defaults to 'same' via autopad. The conv carries no bias
    because the following BatchNorm's affine shift makes it redundant.
    """
    def __init__(self, in_ch, out_ch, k=1, s=1, p=None):
        super(Conv, self).__init__()
        self.add_module("conv2d", nn.Conv2d(in_ch, out_ch, kernel_size=(k, k), stride=s, padding=autopad(k, p), bias=False))
        self.add_module("batchnorm2d", nn.BatchNorm2d(out_ch))
        self.add_module("silu", nn.SiLU())
    # NOTE: the original defined forward(self, x) that only delegated to
    # super().forward(x); nn.Sequential already does exactly that, so the
    # redundant override has been removed (behavior unchanged).

class Bottleneck(nn.Module):
    """Two 3x3 Conv blocks with an optional residual connection.

    When if_shotcut is True the output width is forced to in_ch so the
    residual add is shape-compatible; e scales the hidden width.
    """
    def __init__(self, in_ch, out_ch, if_shotcut, e=0.5):
        super(Bottleneck, self).__init__()
        self.if_shotcut = if_shotcut
        if if_shotcut:
            # residual add requires matching channel counts
            out_ch = in_ch
        hidden = int(e * out_ch)
        self.Conv1 = Conv(in_ch, hidden, k=3, s=1)
        self.Conv2 = Conv(hidden, out_ch, k=3, s=1)

    def forward(self, x):
        out = self.Conv2(self.Conv1(x))
        return out + x if self.if_shotcut else out

class c2f(nn.Module):
    """YOLOv8 C2f block: split features, run a bottleneck chain on one half,
    and fuse all intermediate outputs with a 1x1 conv.

    out_ch is expected to equal in_ch when the shortcut is enabled.
    """
    def __init__(self, in_ch, out_ch, n_bottleneck=1, if_shotcut=False):
        super(c2f, self).__init__()
        self.in_ch = in_ch
        self.out_ch = out_ch
        self.Conv1 = Conv(in_ch, out_ch, k=1, s=1)
        # fuses the two split halves plus one half-width map per bottleneck
        self.Conv2 = Conv(int(out_ch * n_bottleneck * 0.5 + out_ch), out_ch, k=1, s=1)
        self.shotcut = if_shotcut

        half = int(0.5 * out_ch)
        self.bottleModules = nn.ModuleList(
            Bottleneck(half, half, if_shotcut, 1.0) for _ in range(n_bottleneck)
        )

    def forward(self, x):
        half = int(0.5 * self.out_ch)
        # split the 1x1-projected features into two halves along channels
        parts = list(self.Conv1(x).split((half, half), 1))
        # each bottleneck consumes the most recent map; keep every output
        for blk in self.bottleModules:
            parts.append(blk(parts[-1]))
        return self.Conv2(torch.cat(parts, 1))

class SPPF(nn.Module):
    """Spatial Pyramid Pooling - Fast.

    Three chained stride-1 max-pools (equivalent to pooling with growing
    receptive fields) concatenated with the unpooled features.
    """
    def __init__(self, in_ch, out_ch, k=5):
        super(SPPF, self).__init__()
        hidden = in_ch // 2
        self.Conv1 = Conv(in_ch, hidden, k=1, s=1)
        self.Conv2 = Conv(hidden * 4, out_ch, k=1, s=1)
        # stride 1 + half-kernel padding keeps the spatial size fixed
        self.mp = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)

    def forward(self, x):
        p0 = self.Conv1(x)
        p1 = self.mp(p0)
        p2 = self.mp(p1)
        p3 = self.mp(p2)
        return self.Conv2(torch.cat((p0, p1, p2, p3), 1))

class Backbone(nn.Module):
    """YOLOv8-style CSP backbone: stem convs, four c2f stages, SPPF.

    model_scale scales the per-stage c2f repeat counts; width_scal scales
    channel widths (rounded up to a multiple of 8). forward returns the
    stride-8, stride-16 and stride-32 (SPPF) feature maps for the neck.
    """
    def __init__(self, model_scale=1.0, width_scal=0.5):
        super(Backbone, self).__init__()
        # Round scaled channel counts up to the nearest multiple of n.
        # (make_divi already returns an int; the original redundantly wrapped
        # each result in a second math.ceil, which has been removed.)
        make_divi = lambda x, scale, n: math.ceil(x * scale / n) * n
        divi_64 = make_divi(64, width_scal, 8)
        divi_128 = make_divi(128, width_scal, 8)
        divi_256 = make_divi(256, width_scal, 8)
        divi_512 = make_divi(512, width_scal, 8)
        divi_1024 = make_divi(1024, width_scal, 8)
        self.Conv_1 = Conv(3, divi_64, k=3, s=2)
        self.Conv_2 = Conv(divi_64, divi_128, k=3, s=2)
        # per-stage bottleneck counts, scaled by the depth multiplier
        c2f_cfg = [math.ceil(x * model_scale) for x in (3, 6, 6, 3)]
        self.c2f_1 = nn.Sequential(c2f(divi_128, divi_128, c2f_cfg[0], True))
        self.Conv_3 = Conv(divi_128, divi_256, k=3, s=2, p=1)
        self.c2f_2 = nn.Sequential(c2f(divi_256, divi_256, c2f_cfg[1], True))
        self.Conv_4 = Conv(divi_256, divi_512, k=3, s=2, p=1)
        self.c2f_3 = nn.Sequential(c2f(divi_512, divi_512, c2f_cfg[2], True))
        self.Conv_5 = Conv(divi_512, divi_1024, k=3, s=2, p=1)
        self.c2f_4 = nn.Sequential(c2f(divi_1024, divi_1024, c2f_cfg[3], True))
        self.sppf = SPPF(divi_1024, divi_1024)

    def forward(self, x):
        # Spatial sizes in the comments assume a 640x640 input -- TODO confirm.
        y = self.Conv_1(x)                      # [b, divi_64,   320, 320]
        y = self.Conv_2(y)                      # [b, divi_128,  160, 160]
        y = self.c2f_1(y)
        y = self.Conv_3(y)                      # [b, divi_256,   80,  80]
        y = self.c2f_2(y)
        c2f_2 = y.clone()                       # stride-8 feature for the neck
        y = self.Conv_4(y)                      # [b, divi_512,   40,  40]
        y = self.c2f_3(y)
        c2f_3 = y.clone()                       # stride-16 feature for the neck
        y = self.Conv_5(y)                      # [b, divi_1024,  20,  20]
        y = self.c2f_4(y)
        return c2f_2, c2f_3, self.sppf(y)

class Upsample(nn.Module):
    """Double the spatial resolution with nearest-neighbour interpolation."""
    def __init__(self):
        super().__init__()

    def forward(self, x):
        # each input pixel is replicated into a 2x2 block
        return F.interpolate(x, scale_factor=2.0, mode='nearest')

class Neck(nn.Module):
    """PAN-FPN neck: top-down fusion followed by a bottom-up path.

    Consumes the backbone's stride-8 (x1), stride-16 (x2) and stride-32 (x3)
    features and returns three fused maps at the same strides for the head.
    """
    def __init__(self, model_scale=1.0, width_scal=0.5):
        super(Neck, self).__init__()
        # per-stage c2f repeat counts, scaled by the depth multiplier
        depths = [math.ceil(d * model_scale) for d in (3, 3, 3, 3)]
        make_divi = lambda x, scale, n: math.ceil(x * scale / n) * n
        divi_256 = math.ceil(make_divi(256, width_scal, 8))
        divi_512 = math.ceil(make_divi(512, width_scal, 8))
        divi_1024 = math.ceil(make_divi(1024, width_scal, 8))
        self.upsample = Upsample()
        self.c2f_1 = nn.Sequential(c2f(divi_1024 + divi_512, divi_512, depths[0]))
        self.c2f_2 = nn.Sequential(c2f(divi_512 + divi_256, divi_256, depths[1]))
        self.Conv1 = Conv(divi_256, divi_256, k=3, s=2)
        self.c2f_3 = nn.Sequential(c2f(divi_512 + divi_256, divi_512, depths[2]))
        self.Conv2 = Conv(divi_512, divi_512, k=3, s=2)
        self.c2f_4 = nn.Sequential(c2f(divi_512 + divi_1024, divi_1024, depths[3]))

    def forward(self, x1, x2, x3):
        # Top-down path: upsample deeper features, fuse with shallower ones.
        p4 = self.c2f_1(torch.cat((self.upsample(x3), x2), dim=1))
        p3 = self.c2f_2(torch.cat((self.upsample(p4), x1), dim=1))
        # Bottom-up path: downsample again and fuse with the top-down results.
        n4 = self.c2f_3(torch.cat((self.Conv1(p3), p4), dim=1))
        n5 = self.c2f_4(torch.cat((self.Conv2(n4), x3), dim=1))
        return p3, n4, n5

class DFL(nn.Module):
    """Integral module of Distribution Focal Loss (DFL).

    Proposed in Generalized Focal Loss,
    https://ieeexplore.ieee.org/document/9792391. Converts per-side
    distribution logits over c1 bins into their expected value via a
    fixed (frozen) 1x1 conv whose weights are 0..c1-1.
    """
    def __init__(self, c1=16):
        super().__init__()
        conv = nn.Conv2d(c1, 1, 1, bias=False).requires_grad_(False)
        # frozen weights: bin indices, so the conv computes the expectation
        conv.weight.data.copy_(torch.arange(c1, dtype=torch.float).view(1, c1, 1, 1))
        self.conv = conv
        self.c1 = c1

    def forward(self, x):
        # x: (batch, 4 * c1, anchors) distribution logits
        b, _, a = x.shape
        dist = x.view(b, 4, self.c1, a).transpose(2, 1).softmax(1)
        return self.conv(dist).view(b, 4, a)



class DetectHead(nn.Module):
    """Decoupled YOLOv8 detection head with Transformer-refined class logits.

    Takes a list of per-level feature maps; each level gets a box tower
    (4 * reg_max DFL logits) and a class tower (nc logits) whose output is
    refined by a per-level Transformer over the flattened spatial positions.
    Training returns the raw per-level maps; eval decodes boxes to xyxy.
    """
    # Anchor cache shared until the first eval forward, which shadows these
    # class attributes with instance attributes for the seen input shape.
    shape = None
    anchors = torch.empty(0)  # init
    strides = torch.empty(0)  # init

    def __init__(self, nc, in_ch=(), stride=(), reg_max=16):
        # nc: number of classes; in_ch: per-level input channel counts;
        # stride: per-level downsample factors; reg_max: DFL bins per box side.
        super(DetectHead, self).__init__()
        self.nc = nc
        self.nl = len(in_ch)                      # number of detection layers
        self.no = nc + reg_max * 4                # outputs per anchor position
        self.reg_max = reg_max
        self.stride = stride

        # Hidden widths of the box (c2) and class (c3) towers.
        c2, c3 = max((16, in_ch[0] // 4, reg_max * 4)), max(in_ch[0], nc)
        self.loc_Conv = nn.ModuleList(
            nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in in_ch)
        self.cls_Conv = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in in_ch)
        # NOTE(review): hard-coded per-level feature sizes; 80/40/20 look like
        # a 640x640 input with strides 8/16/32 -- other input sizes would give
        # the Transformers a mismatched sequence length. TODO confirm.
        shapes = [80, 40, 20]
        self.tfheads = nn.ModuleList(
            Transformer(x * x, depth=1, heads=8, dim_head=32, mlp_dim=512) for x in shapes
        )
        self.dfl = DFL(reg_max)
        # When True, eval forward returns only the decoded tensor y.
        self.export = True
    def forward(self, x):
        shape = x[0].shape  # BCHW
        for i in range(self.nl):
            loc_resault = self.loc_Conv[i](x[i])
            cls_resault = self.cls_Conv[i](x[i])
            # Flatten H*W, refine class logits with this level's Transformer,
            # then restore the spatial layout.
            tmp_shape = cls_resault.shape
            _cls_resault = cls_resault.reshape((tmp_shape[0], tmp_shape[1], -1))
            _cls_resault = self.tfheads[i](_cls_resault)
            cls_resault = _cls_resault.reshape(tmp_shape)

            # Per level: (batch, 4*reg_max + nc, H, W); overwrites the input list.
            x[i] = torch.cat((loc_resault, cls_resault), 1)
        if self.training:
            return x
        elif self.shape != shape:
            # Input shape changed: rebuild the anchor grid and stride map.
            self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
            self.shape = shape
        box, cls = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2).split((self.reg_max * 4, self.nc), 1)
        # Raw box outputs are distances to left-top / right-bottom; at eval
        # time decode them to xyxy coordinates in input-image scale.
        dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=False, dim=1) * self.strides
        y = torch.cat((dbox, cls.sigmoid()), 1)
        return y if self.export else (y, x)

class ClassifyHead(nn.Module):
    """YOLOv8 classification head: x(b, c1, H, W) -> x(b, c2) probabilities.

    Conv to a wide embedding (1280, efficientnet_b0 size), global average
    pool, dropout (p=0 by default, i.e. a no-op), linear, then sigmoid --
    so outputs are independent per-class probabilities, not a softmax.
    """
    def __init__(self, c1, c2, k=1, s=1, p=None):  # ch_in, ch_out, kernel, stride, padding
        super().__init__()
        c_ = 1280  # efficientnet_b0 size
        # FIX: the original passed autopad(k, p) here, but Conv applies
        # autopad to its padding argument itself -- the double application
        # returned the same value, so passing p directly is equivalent and
        # removes the redundancy.
        self.conv = Conv(c1, c_, k, s, p)
        self.pool = nn.AdaptiveAvgPool2d(1)  # to x(b, c_, 1, 1)
        self.drop = nn.Dropout(p=0.0, inplace=True)
        self.linear = nn.Linear(c_, c2)  # to x(b, c2)

    def forward(self, x):
        # A list of feature maps is fused by channel concatenation.
        if isinstance(x, list):
            x = torch.cat(x, 1)
        x = self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
        return x.sigmoid()

class yolov8_detect(nn.Module):
    """Full YOLOv8 detector: Backbone -> Neck -> DetectHead.

    model_scale scales network depth, width_scal scales channel widths;
    both must be applied consistently across the sub-modules.
    """
    def __init__(self, nc, model_scale=1.0, width_scal=0.5):
        super(yolov8_detect, self).__init__()
        # Round scaled channel counts up to the nearest multiple of 8,
        # matching the widths produced inside Backbone/Neck.
        make_divi = lambda x, scale, n: math.ceil(x * scale / n) * n
        divi_256 = make_divi(256, width_scal, 8)
        divi_512 = make_divi(512, width_scal, 8)
        divi_1024 = make_divi(1024, width_scal, 8)
        self.nc = nc
        self.reg_max = 16
        self.no = 4 * self.reg_max + self.nc  # outputs per anchor position
        self.model_scale = model_scale
        self.backbone = Backbone(model_scale, width_scal)
        # BUG FIX: Neck was previously built as Neck(model_scale), silently
        # using its default width_scal=0.5 -- a channel mismatch with the
        # backbone for any other width_scal. Pass width_scal through.
        self.neck = Neck(model_scale, width_scal)
        self.head = DetectHead(nc, (divi_256, divi_512, divi_1024), (8, 16, 32), self.reg_max)

    def forward(self, x):
        f1, f2, f3 = self.backbone(x)
        y1, y2, y3 = self.neck(f1, f2, f3)
        return self.head([y1, y2, y3])

class yolov8_classify(nn.Module):
    """YOLOv8 classifier: Backbone's SPPF output -> ClassifyHead.

    Generalized (backward-compatibly) with a width_scal parameter: the
    original hard-coded the head's input width to 512, which is only valid
    for the Backbone default width_scal=0.5.
    """
    def __init__(self, nc, model_scale=1.0, width_scal=0.5):
        super(yolov8_classify, self).__init__()
        self.nc = nc
        self.model_scale = model_scale
        self.backbone = Backbone(model_scale, width_scal)
        # SPPF output width = 1024 * width_scal rounded up to a multiple of 8
        # (512 for the default width_scal=0.5, matching the old constant).
        head_ch = math.ceil(1024 * width_scal / 8) * 8
        self.classifyHead = ClassifyHead(head_ch, nc)

    def forward(self, x):
        # Only the deepest (SPPF) feature map feeds the classifier.
        _, _, f3 = self.backbone(x)
        return self.classifyHead(f3)

