
import torch
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import os
import numpy as np
import pandas as pd
import random
from torchvision import datasets, transforms
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
import torch.nn.functional as F


# 定义卷积块
class Conv_block(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super().__init__()
        self.conv_block = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=False),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        return self.conv_block(x)



# 修改后的网络结构：五分类


class MultiStreamRibConvNet(nn.Module):
    """Multi-stream CNN classifier with two image branches, a fusion branch,
    and a tabular-feature branch.

    Inputs to ``forward``:
        x1, x2: image tensors of shape (batch, in_channels, H, W). H and W
            must be divisible by 32 — five 2x2 max-pools are applied along
            each path before the global average pool.
        X3: tabular feature tensor of shape (batch, 42).
            (Parameter name kept as ``X3`` for backward compatibility with
            existing keyword callers.)

    Returns:
        Raw logits of shape (batch, num_classes); no softmax is applied.

    Fix vs. original: the fully-connected stacks (fc4->fc5->fc6 and
    fc1->fc2->fc3) had no activation between consecutive Linear layers, so
    each stack collapsed to a single affine map. ReLU activations are now
    inserted between the hidden linear layers — the file imports
    ``torch.nn.functional as F`` but never used it, which suggests this was
    the original intent. No parameters are added or renamed, so previously
    saved state_dicts still load.
    """

    def __init__(self, in_channels, num_classes):
        super().__init__()

        # Branch 1 (3x3 front conv) and branch 2 (7x7 front conv).
        # conv1_* process x1; conv2_* process x2.
        self.conv1_1 = Conv_block(in_channels, 32, kernel_size=3, stride=1, padding=1)
        self.conv2_1 = Conv_block(in_channels, 32, kernel_size=7, stride=1, padding=3)
        self.conv1_2 = Conv_block(32, 64, kernel_size=3, stride=1, padding=1)
        self.conv2_2 = Conv_block(32, 64, kernel_size=3, stride=1, padding=1)
        self.conv1_3 = Conv_block(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv2_3 = Conv_block(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv1_4 = Conv_block(128, 256, kernel_size=3, stride=1, padding=1)
        self.conv2_4 = Conv_block(128, 256, kernel_size=3, stride=1, padding=1)

        # Fusion branch: consumes element-wise sums of same-resolution
        # feature maps from branches 1 and 2 at each stage.
        self.convr_1 = Conv_block(32, 64, kernel_size=3, stride=1, padding=1)
        self.convr_2 = Conv_block(64, 128, kernel_size=3, stride=1, padding=1)
        self.convr_3 = Conv_block(128, 256, kernel_size=3, stride=1, padding=1)
        self.convr_4 = Conv_block(256, 512, kernel_size=3, stride=1, padding=1)

        self.pool = nn.MaxPool2d(2, 2)                 # shared 2x2 downsampling
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))   # global spatial average

        # Shared dropout (p=0.5), applied in both FC stacks.
        self.dropout = nn.Dropout(0.5)

        # Classification head: fused 512-d vector -> num_classes logits.
        self.fc1 = nn.Linear(512, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, num_classes)

        # Tabular branch: 42 input features -> 512-d vector for fusion.
        self.fc4 = nn.Linear(42, 128)
        self.fc5 = nn.Linear(128, 256)
        self.fc6 = nn.Linear(256, 512)

    def forward(self, x1, x2, X3):
        # Branch 1: four conv+pool stages over x1 (H/16 x W/16, 256 ch).
        f1_1 = self.pool(self.conv1_1(x1))
        f1_2 = self.pool(self.conv1_2(f1_1))
        f1_3 = self.pool(self.conv1_3(f1_2))
        f1_4 = self.pool(self.conv1_4(f1_3))

        # Branch 2: same shape progression over x2, 7x7 first conv.
        f2_1 = self.pool(self.conv2_1(x2))
        f2_2 = self.pool(self.conv2_2(f2_1))
        f2_3 = self.pool(self.conv2_3(f2_2))
        f2_4 = self.pool(self.conv2_4(f2_3))

        # Branch 3: tabular features (batch, 42) -> (batch, 512).
        # ReLU added between hidden layers (see class docstring).
        fd_1 = self.dropout(F.relu(self.fc4(X3)))
        fd_2 = self.dropout(F.relu(self.fc5(fd_1)))
        fd_3 = self.fc6(fd_2)  # final projection left linear; fused additively below

        # Fusion branch: each stage consumes the previous fused map plus the
        # matching-resolution maps from branches 1 and 2.
        fr_1 = self.pool(self.convr_1(f1_1 + f2_1))
        fr_2 = self.pool(self.convr_2(fr_1 + f1_2 + f2_2))
        fr_3 = self.pool(self.convr_3(fr_2 + f1_3 + f2_3))
        fr_4 = self.pool(self.convr_4(fr_3 + f1_4 + f2_4))

        # Global average pool -> (batch, 512), then fuse with tabular vector.
        fr = self.avg_pool(fr_4)
        fr = fr.view(fr.size(0), -1)
        fr = fr + fd_3

        # Classification head with hidden ReLUs (fix); fc3 emits raw logits.
        fr = self.dropout(F.relu(self.fc1(fr)))
        fr = F.relu(self.fc2(fr))
        pre = self.fc3(fr)
        return pre
