import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import random
import numpy as np
import cv2


class ConvBlock(nn.Module):
    """Basic 3D convolution unit: Conv3d -> BatchNorm3d -> ReLU.

    Args:
        in_channels: number of input feature channels.
        out_channels: number of output feature channels.
        kernel_size, stride, padding: forwarded directly to ``nn.Conv3d``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(ConvBlock, self).__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride, padding)
        self.bn = nn.BatchNorm3d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # conv -> normalize -> activate, spelled out step by step
        out = self.conv(x)
        out = self.bn(out)
        out = self.relu(out)
        return out


class DeepFusionNet(nn.Module):
    """Two-branch 3D fusion network producing one feature vector per sample.

    Takes two volumes ``b`` and ``h`` of shape (N, in_channels, D, H, W),
    cross-fuses them through 1x1 ConvBlocks, then runs a deep stage of
    residual-style 3x3 blocks and returns a (N, feature_dim) embedding.

    NOTE(review): the additions ``b + fused`` and ``b + h + fused_deep``
    broadcast the raw inputs against multi-channel feature maps; this only
    type-checks when ``in_channels == 1`` (the default) — confirm before
    using more input channels.
    """

    def __init__(self, in_channels=1, base_channels=32, feature_dim=128):
        super(DeepFusionNet, self).__init__()

        # --- shallow cross-fusion stage: 1x1 conv + bn blocks ---
        self.initial_fuse = ConvBlock(in_channels, 1, kernel_size=1, stride=1, padding=0)
        self.b_conv1 = ConvBlock(in_channels, base_channels, kernel_size=1, stride=1, padding=0)
        self.h_conv1 = ConvBlock(in_channels, base_channels, kernel_size=1, stride=1, padding=0)

        self.c1_conv = ConvBlock(base_channels, base_channels, kernel_size=1, stride=1, padding=0)

        self.b3_conv = ConvBlock(base_channels, base_channels, kernel_size=1, stride=1, padding=0)
        self.b4_conv = ConvBlock(base_channels, base_channels, kernel_size=1, stride=1, padding=0)

        self.c3_conv = ConvBlock(base_channels, base_channels, kernel_size=1, stride=1, padding=0)

        # --- deep feature extraction stage ---
        self.conv7 = ConvBlock(base_channels, base_channels, kernel_size=7, stride=2, padding=3)
        self.pool = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)

        self.res1a = ConvBlock(base_channels, base_channels, kernel_size=3, stride=1, padding=1)
        self.res1b = ConvBlock(base_channels, base_channels, kernel_size=3, stride=1, padding=1)

        self.res2a = ConvBlock(base_channels, base_channels, kernel_size=3, stride=1, padding=1)
        self.res2b = ConvBlock(base_channels, base_channels, kernel_size=3, stride=1, padding=1)

        self.down1 = ConvBlock(base_channels, base_channels, kernel_size=3, stride=2, padding=1)
        self.down2 = ConvBlock(base_channels, base_channels, kernel_size=3, stride=2, padding=1)

        self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
        self.feature_dim = feature_dim
        self.final_linear = nn.Linear(base_channels, feature_dim)

    def forward(self, b, h):
        # Shallow fusion: squeeze the summed inputs to one channel, then
        # re-inject that fused map into each branch.
        fused = self.initial_fuse(b + h)

        branch_b = self.b_conv1(b + fused)
        branch_h = self.h_conv1(h + fused)

        mixed = self.c1_conv(branch_b + branch_h)

        refined_b = self.b3_conv(branch_b + mixed)
        refined_h = self.b4_conv(branch_h + mixed)

        fused_deep = self.c3_conv(refined_b + refined_h)

        # Skip connection back to the raw inputs before the deep stage.
        stem = self.conv7(b + h + fused_deep)
        # (an attention module such as CBAM could be inserted here)
        stem = self.pool(stem)

        # First residual-style stage.
        res = self.res1b(self.res1a(stem))
        # (an attention module such as CBAM could be inserted here)
        skip1 = stem + res

        # Second residual-style stage.
        res = self.res2b(self.res2a(skip1))
        # (an attention module such as CBAM could be inserted here)
        skip2 = skip1 + res

        out = F.relu(skip2)
        out = self.down1(out)
        # (an attention module such as CBAM could be inserted here)
        out = self.down2(out)

        # Global pooling to a vector, then project to the embedding size.
        out = self.avgpool(out).flatten(1)
        return self.final_linear(out)


class ProjectionHead(nn.Module):
    """Contrastive-learning projection head: Linear -> BN -> ReLU -> Linear.

    Maps a (N, input_dim) feature vector (or any (N, C, ...) tensor, which
    is flattened first) to a (N, projection_dim) embedding.

    Bug fixed: the original ``forward`` chained ``self.proj``
    (input_dim -> projection_dim) into ``self.net`` (which expects
    input_dim).  That crashed whenever ``projection_dim != input_dim`` and
    silently stacked two projection heads when the dims happened to match.
    ``forward`` now applies the single MLP ``self.net``; ``self.proj`` is
    kept (unused) only so existing checkpoints still load.
    """

    def __init__(self, input_dim=128, projection_dim=128):
        super(ProjectionHead, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, input_dim),
            nn.BatchNorm1d(input_dim),
            nn.ReLU(inplace=True),
            nn.Linear(input_dim, projection_dim)
        )
        # NOTE(review): dead duplicate head — not used in forward; retained
        # solely for state_dict compatibility with older checkpoints.
        self.proj = nn.Sequential(
            nn.Flatten(),  # collapses to [B, C]
            nn.Linear(input_dim, input_dim),
            nn.ReLU(inplace=True),
            nn.Linear(input_dim, projection_dim)
        )

    def forward(self, x):
        # Flatten any trailing dims so feature maps are accepted too.
        x = torch.flatten(x, start_dim=1)  # [B, input_dim]
        return self.net(x)  # [B, projection_dim]
    
# Classification head
class ClassificationHead(nn.Module):
    """Two-layer MLP classifier: Linear -> ReLU -> Linear.

    Emits raw logits of shape (N, num_classes); no softmax is applied, so
    pair the output with a loss that expects unnormalized scores
    (e.g. ``nn.CrossEntropyLoss``).
    """

    def __init__(self, input_dim=128, hidden_dim=512, num_classes=3):
        super(ClassificationHead, self).__init__()
        self.classifier = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, num_classes)  # note: logits only, no softmax
        )

    def forward(self, x):
        logits = self.classifier(x)
        return logits



# if __name__ == '__main__':
#     # # Simulate a pair of inputs with batch size 2, 3 channels, and 64x64 spatial dimensions
#     # data_B = torch.randn(2, 3, 64, 64)
#     # data_h = torch.randn(2, 3, 64, 64)

#     # # Initialize model
#     # model = DualInputNet(in_channels=3)

#     # # Forward pass
#     # output = model(data_B, data_h)
#     # print("Output shape:", output.shape)

#     device = 'cuda' if torch.cuda.is_available() else 'cpu'
#     print(f'device: {device}')
#     model = DualInputNet(in_channels=3).to(device)


#     batch_size = 10
#     parent = r'F:\ADNI\ADNI_PNG_3Ddata\download_data\NIFTI_data\NIFTI5\train'
#     parent_folder = os.listdir(parent)
#     print(parent_folder)

#     datasets_lst = []
#     for i in range(3):
#         datapath = os.path.join(parent, parent_folder[i])   # 到train/AD
#         datasets_i = os.listdir(datapath)
#         datasets_lst.append(datasets_i)
#     # for i in range(len(datasets_lst)):
#     #     for j in range(5):
#     #         print(datasets_lst[i][j], end=',')
#     #     print('\n')
#     random_list_AD = [i for i in range(1110)]
#     random_list_CN = [i for i in range(1110)]
#     random_list_MCI = [i for i in range(1110)]
#     random_list = [random_list_AD, random_list_CN, random_list_MCI]
    
#     batch_data = [[], [], []]

#     for i in range(len(datasets_lst)):
#         for j in range(batch_size):
#             random_num = random.randint(0,len(random_list[i]))
#             if random_num in random_list[i]:
#                 file_path = os.path.join(parent, parent_folder[i], datasets_lst[i][random_num])
#                 # print(random_num, str(file_path), end='\n')
#                 img = cv2.imread(file_path)
#                 tensor_img = torch.from_numpy(np.transpose(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)))    # 110 111两行换成之前代码里读取nii.gz，调用函数即可
#                 batch_data[i].append(tensor_img)
#                 random_list[i].remove(random_num)
#     print(batch_data[0][0])


