import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
import torch.nn.functional as F


# Convolutional block: Conv2d followed by ReLU.
class Conv_block(nn.Module):
    """Convolution followed by an in-place ReLU activation.

    Args:
        in_channels: number of input feature channels.
        out_channels: number of output feature channels.
        kernel_size: convolution kernel size.
        stride: convolution stride.
        padding: zero-padding added to both sides of the input.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super().__init__()
        # Bias is omitted on the convolution; ReLU runs in place to save memory.
        layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size,
                      stride=stride, padding=padding, bias=False),
            nn.ReLU(inplace=True),
        ]
        self.conv_block = nn.Sequential(*layers)

    def forward(self, x):
        """Apply conv + ReLU to ``x`` and return the activated feature map."""
        out = self.conv_block(x)
        return out

    # (Residual block is defined below.)


class ResidualBlock(nn.Module):
    """Two-convolution residual block with an identity (or projected) skip.

    The skip path routes the input straight through when shapes match;
    otherwise a 1x1 strided convolution projects it so the element-wise
    addition is valid.

    Args:
        in_channels: channels of the block input.
        out_channels: channels of the block output.
        kernel_size: kernel size of both main-path convolutions.
        stride: stride of the first convolution (and of the projection).
        padding: padding of both main-path convolutions.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size,
                               stride=stride, padding=padding, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size,
                               stride=1, padding=padding, bias=False)

        # Project the identity with a 1x1 conv whenever channel count or
        # stride would otherwise make the skip addition shape-mismatch.
        needs_projection = (in_channels != out_channels) or (stride != 1)
        if needs_projection:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=stride, bias=False)
            )
        else:
            self.downsample = None

    def forward(self, x):
        """Return ``relu(conv2(relu(conv1(x))) + skip(x))``."""
        skip = x if self.downsample is None else self.downsample(x)
        out = self.conv2(self.relu(self.conv1(x)))
        out = self.relu(out + skip)
        return out

    # Modified network structure for five-class classification (defined below).


class MultiStreamRibConvNet(nn.Module):
    def __init__(self, in_channels, num_classes):
        super(MultiStreamRibConvNet, self).__init__()

        # 分支1
        self.conv1_1 = Conv_block(in_channels, 32, kernel_size=3, stride=1, padding=1)
        self.conv2_1 = Conv_block(in_channels, 32, kernel_size=7, stride=1, padding=3)
        self.res1_2 = ResidualBlock(32, 64, kernel_size=3, stride=1, padding=1)
        self.res1_3 = ResidualBlock(64, 128, kernel_size=3, stride=1, padding=1)
        self.res1_4 = ResidualBlock(128, 256, kernel_size=3, stride=1, padding=1)

        # 分支2
        self.res2_2 = ResidualBlock(32, 64, kernel_size=3, stride=1, padding=1)
        self.res2_3 = ResidualBlock(64, 128, kernel_size=3, stride=1, padding=1)
        self.res2_4 = ResidualBlock(128, 256, kernel_size=3, stride=1, padding=1)

        # 融合分支
        self.resr_1 = ResidualBlock(32, 64, kernel_size=3, stride=1, padding=1)
        self.resr_2 = ResidualBlock(64, 128, kernel_size=3, stride=1, padding=1)
        self.resr_3 = ResidualBlock(128, 256, kernel_size=3, stride=1, padding=1)
        self.resr_4 = ResidualBlock(256, 512, kernel_size=3, stride=1, padding=1)

        self.pool = nn.MaxPool2d(2, 2)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))

        # Dropout层
        self.dropout = nn.Dropout(0.5)

        # 全连接层
        self.fc1 = nn.Linear(512, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, num_classes)

        self.fc4 = nn.Linear(42, 128)
        self.fc5 = nn.Linear(128, 256)
        self.fc6 = nn.Linear(256, 512)

    def forward(self, x1, x2, X3):
        # 分支1
        f1_1 = self.conv1_1(x1)
        f1_1 = self.pool(f1_1)
        f1_2 = self.res1_2(f1_1)
        f1_2 = self.pool(f1_2)
        f1_3 = self.res1_3(f1_2)
        f1_3 = self.pool(f1_3)
        f1_4 = self.res1_4(f1_3)
        f1_4 = self.pool(f1_4)

        # 分支2
        f2_1 = self.conv2_1(x2)
        f2_1 = self.pool(f2_1)
        f2_2 = self.res2_2(f2_1)
        f2_2 = self.pool(f2_2)
        f2_3 = self.res2_3(f2_2)
        f2_3 = self.pool(f2_3)
        f2_4 = self.res2_4(f2_3)
        f2_4 = self.pool(f2_4)

        # 分支3 输入（batch_size,42）
        fd_1 = self.dropout(self.fc4(X3))
        fd_2 = self.dropout(self.fc5(fd_1))
        fd_3 = self.fc6(fd_2)

        # 融合分支
        fr_1 = self.resr_1(f1_1 + f2_1)
        fr_1 = self.pool(fr_1)
        fr_2 = self.resr_2(fr_1 + f1_2 + f2_2)
        fr_2 = self.pool(fr_2)
        fr_3 = self.resr_3(fr_2 + f1_3 + f2_3)
        fr_3 = self.pool(fr_3)
        fr_4 = self.resr_4(fr_3 + f1_4 + f2_4)
        fr_4 = self.pool(fr_4)

        # 全局池化和全连接层
        fr = self.avg_pool(fr_4)
        fr = fr.view(fr.size(0), -1)  # 展平
        fr = fr + fd_3
        fr = self.dropout(self.fc1(fr))
        fr = self.fc2(fr)
        pre = self.fc3(fr)  # 输出五分类
        return pre
