import torch
import torch.nn as nn
from .Cbam import Cbam
from .SENet import SENet
from .ECA import ECANet
from .Cbam_ECA import CbamECA
from .darknet53 import darknet53

# Attention modules selectable via YoloBody(attn_use=i); index i-1 in this list
# (1 -> SENet, 2 -> Cbam, 3 -> ECANet, 4 -> CbamECA).
attention_blocks = [SENet, Cbam, ECANet, CbamECA]


class BasicConv(nn.Module):
    """Conv2d (no bias) -> BatchNorm2d -> LeakyReLU(0.1).

    Padding of kernel_size // 2 keeps the spatial size unchanged when
    stride == 1 (for odd kernel sizes).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            kernel_size // 2,  # "same" padding for odd kernels
            bias=False,  # bias is redundant before BatchNorm
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.activation = nn.LeakyReLU(0.1)

    def forward(self, x):
        """Apply convolution, batch normalization, then leaky ReLU."""
        return self.activation(self.bn(self.conv(x)))


class Upsample(nn.Module):
    """1x1 BasicConv to change channel count, then 2x nearest-neighbor upsample."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.upsample = nn.Sequential(
            BasicConv(in_channels, out_channels, 1),
            nn.Upsample(scale_factor=2, mode='nearest'),
        )

    def forward(self, x):
        """Return x mapped to out_channels with doubled spatial resolution."""
        return self.upsample(x)


def yolo_head(filters_list, in_filters):
    """Build a detection-head stem: a single 3x3 BasicConv.

    NOTE(review): only filters_list[0] is consumed; any additional
    entries are ignored — confirm whether a trailing 1x1 conv to
    filters_list[1] was intended.
    """
    return nn.Sequential(BasicConv(in_filters, filters_list[0], 3))


class YoloBody(nn.Module):
    """Fuse the three darknet53 feature maps into one single-channel sigmoid map.

    The three backbone outputs (256, 512, 1024 channels, at decreasing
    resolutions) are optionally passed through an attention block, brought to
    a common 256-channel resolution, summed, and reduced to one channel.

    Args:
        attn_use: 0 disables attention; 1-4 select SENet, Cbam, ECANet,
            CbamECA respectively. Any other value also disables attention.
    """

    def __init__(self, attn_use=0):
        super().__init__()
        self.backbone = darknet53()
        # 1x1 convs that reduce the fused 256-channel map to a single channel.
        self.conv_for_P1 = BasicConv(256, 1, 1)
        # NOTE(review): conv_for_P2 is never used in forward() — kept so
        # existing checkpoints/state_dicts still load; confirm intent.
        self.conv_for_P2 = BasicConv(256, 1, 1)

        # Bring feat3 (1024 ch, 1/4 res) and feat2 (512 ch, 1/2 res) up to
        # feat1's resolution with 256 channels each.
        self.upsample3 = Upsample(1024, 256)
        self.upsample2 = Upsample(512, 256)
        self.upsample3_2 = Upsample(256, 256)
        self.attn_use = attn_use

        if 0 < self.attn_use < 5:
            block = attention_blocks[self.attn_use - 1]
            self.feat1_attention = block(256)
            self.feat2_attention = block(512)
            self.feat3_attention = block(1024)
        else:
            self.feat1_attention = None
            self.feat2_attention = None
            self.feat3_attention = None

    def forward(self, x):
        """Return a sigmoid-activated single-channel map at feat1's resolution."""
        feat1, feat2, feat3 = self.backbone(x)
        # BUG FIX: guard on the modules themselves rather than `if self.attn_use:`.
        # Previously any truthy attn_use outside 1-4 passed the check while the
        # attention modules were None, crashing with "NoneType is not callable".
        if self.feat1_attention is not None:
            feat1 = self.feat1_attention(feat1)
            feat2 = self.feat2_attention(feat2)
            feat3 = self.feat3_attention(feat3)

        up1 = feat1
        up2 = self.upsample2(feat2)
        up3 = self.upsample3_2(self.upsample3(feat3))  # 4x total upsampling
        up = up1 + up2 + up3

        out = self.conv_for_P1(up)
        # torch.sigmoid avoids allocating a fresh nn.Sigmoid module per call.
        return torch.sigmoid(out)
