# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Convolution modules."""

import math

import numpy as np
import torch
import torch.nn as nn

# Public API of this module.
__all__ = ("MobileViTBackbone",)

import torch
import torch.nn as nn
import timm

# 红外网络
class LrNet(nn.Module):
    def __init__(self, step=1, *args, **kwargs):
        super().__init__(*args, **kwargs)

        mobilevit = timm.create_model('mobilevit_s', pretrained=False)

        # 第一层
        self.layer1_1 = list(mobilevit.children())[0]
        self.layer1_2 = list(mobilevit.children())[1][0:3]

        self.layer1_1.conv = nn.Conv2d(
            in_channels=1,  # 修改输入通道为 1
            out_channels=self.layer1_1.conv.out_channels,  # 保持输出通道不变
            kernel_size=self.layer1_1.conv.kernel_size,
            stride=self.layer1_1.conv.stride,
            padding=self.layer1_1.conv.padding,
            bias=self.layer1_1.conv.bias is not None,
        )

        # 第二层
        self.layer2_1 = list(mobilevit.children())[1][3]

        # 第三层
        self.layer3_1 = list(mobilevit.children())[1][4:]
        self.layer3_2 = list(mobilevit.children())[2]

        # 以640 * 640 输入为例，输出结果为80*80
        if step == 1:
            self.layer = nn.Sequential(
                self.layer1_1,
                *self.layer1_2,
            )

        # 输出为 40*40
        elif step == 2:
            self.layer = self.layer2_1

        # 输出为 20*20
        elif step == 3:
            self.layer = nn.Sequential(
                *self.layer3_1, self.layer3_2
            )

    def forward(self, x):
        # print(f"LrNet 输入形状为：{x.shape}")
        return self.layer(x)

# 可见光网络
# Visible-light-stream network: one stage of an unmodified timm MobileViT-S
# backbone (the stem keeps its original 3-channel RGB input).
class VisibleNet(nn.Module):
    """A selected stage of a MobileViT-S backbone for the visible-light branch.

    Args:
        step: Which backbone stage this module exposes.
            1 -> stem + first three stages (640x640 input -> 80x80 feature map)
            2 -> fourth stage (-> 40x40)
            3 -> remaining stages + final conv head (-> 20x20)

    Raises:
        ValueError: If ``step`` is not 1, 2 or 3.
    """

    def __init__(self, step=1, *args, **kwargs):
        super().__init__(*args, **kwargs)

        mobilevit = timm.create_model('mobilevit_s', pretrained=False)
        # Hoisted: the original rebuilt this list for every attribute lookup.
        children = list(mobilevit.children())

        # Stage 1: stem conv + first three backbone stages.
        self.layer1_1 = children[0]
        self.layer1_2 = children[1][0:3]

        # Stage 2: fourth backbone stage.
        self.layer2_1 = children[1][3]

        # Stage 3: remaining backbone stages plus the final conv head.
        self.layer3_1 = children[1][4:]
        self.layer3_2 = children[2]

        # With a 640x640 input the output feature map is 80x80.
        if step == 1:
            self.layer = nn.Sequential(self.layer1_1, *self.layer1_2)
        # Output is 40x40.
        elif step == 2:
            self.layer = self.layer2_1
        # Output is 20x20.
        elif step == 3:
            self.layer = nn.Sequential(*self.layer3_1, self.layer3_2)
        else:
            # Previously an invalid step silently left ``self.layer`` unset,
            # deferring the failure to the first forward() call as an
            # opaque AttributeError. Fail fast instead.
            raise ValueError(f"step must be 1, 2 or 3, got {step!r}")

    def forward(self, x):
        """Run the selected backbone stage on ``x`` and return its features."""
        return self.layer(x)


class MobileViTBackbone(nn.Module):
    """Dual-stream MobileViT backbone for stacked visible + infrared input.

    The input tensor's channels are split by the ``layer1_r:layer2_r`` ratio
    into a visible-light part (first channels) and an infrared part
    (remaining channels). Each part is run through its own MobileViT stage
    and the two feature maps are concatenated along the channel dimension.

    Args:
        step: Backbone stage to expose (forwarded to both sub-networks).
        layer1_r: Relative channel share of the visible-light stream.
        layer2_r: Relative channel share of the infrared stream.
    """

    def __init__(self, step=1, layer1_r=1, layer2_r=1):
        super(MobileViTBackbone, self).__init__()

        self.layer1_rate = layer1_r  # channel share of the visible stream
        self.layer2_rate = layer2_r  # channel share of the infrared stream

        # One sub-network per modality, both built at the same stage.
        self.visibleNet = VisibleNet(step=step)
        self.lrNet = LrNet(step=step)

    def forward(self, x):
        """Split ``x`` by channel ratio, run both streams, concatenate outputs.

        Args:
            x: Input tensor of shape (batch, channels, height, width); the
               first ``channels * layer1_r // (layer1_r + layer2_r)`` channels
               are treated as visible light, the rest as infrared.

        Returns:
            Channel-wise concatenation of the two streams' feature maps.
        """
        total_channels = x.shape[1]

        ratio_visible = self.layer1_rate
        ratio_sum = self.layer1_rate + self.layer2_rate

        # Number of input channels belonging to the visible-light stream
        # (integer floor division; infrared gets the remainder).
        n_visible = total_channels * ratio_visible // ratio_sum

        # Split the stacked input into its two modalities.
        visible_x = x[:, :n_visible, :, :]
        lr_x = x[:, n_visible:, :, :]

        visible_x_out = self.visibleNet(visible_x)
        lr_x_out = self.lrNet(lr_x)

        # Fix: removed leftover debug print() calls that ran (and computed
        # .mean() on both feature maps) on every forward pass.
        return torch.concat((visible_x_out, lr_x_out), dim=1)


if __name__ == '__main__':
    # Quick sanity check: build the raw MobileViT-S model and print a
    # layer-by-layer summary for a 640x640 RGB input.
    from torchinfo import summary

    model = timm.create_model('mobilevit_s', pretrained=False)
    summary(model, input_size=(1, 3, 640, 640))