import torch
from einops import rearrange
from torch import nn
import torch.nn.functional as F
from Net.PatchFeatureExtractor_240930 import BaseNet
from Net.Patch_Blocking_240930 import Patches_CropSelected
from Net.attention_20241022 import VisionTransformerBlock
import Net


class PSN_10_position(nn.Module):
    """Patch-based hemisphere network for 3D volume classification.

    The input volume is split into left/right halves along the first spatial
    axis, each half is tiled into cubic patches, per-patch features are
    extracted by a small CNN, refined by per-hemisphere and global transformer
    blocks, and finally classified by three heads (left, right, global).

    Args:
        class_num: number of output classes.
        input_shape: [H, W, D] spatial shape of the full input volume.
        patch_size: edge length of the cubic patches used for tiling.
    """

    def __init__(self, class_num, input_shape, patch_size):
        super().__init__()
        self.input_shape = input_shape

        # Derive the two hemisphere shapes from the full volume: split along
        # the first spatial axis; the right half absorbs the remainder when
        # that axis length is odd.
        input1_shape = input_shape[:]
        input2_shape = input_shape[:]
        input1_shape[0] = input1_shape[0] // 2
        input2_shape[0] = input_shape[0] - input1_shape[0]

        # Patch tiling modules: given a volume shape and patch size, they emit
        # the selected patches (edges trimmed, core kept).
        self.input1 = Patches_CropSelected(input_shape=input1_shape, patch_size=patch_size)
        self.input2 = Patches_CropSelected(input_shape=input2_shape, patch_size=patch_size)

        # Per-patch CNN feature extractors (channel widths of the four conv stages).
        self.patchFeatureExtractorLeft = BaseNet([32, 64, 128, 128])
        self.patchFeatureExtractorRight = BaseNet([32, 64, 128, 128])

        # Number of patches per hemisphere, as plain Python ints so they can be
        # used directly in nn.Linear sizes and integer arithmetic below.
        self.LeftPatchNumber = int(torch.prod(self.input1.patches_shape))
        self.RightPatchNumber = int(torch.prod(self.input2.patches_shape))

        # Use the class imported at the top of the file directly (same class as
        # Net.attention_20241022.VisionTransformerBlock; consistent style).
        self.attentionLeft = VisionTransformerBlock(self.LeftPatchNumber, 128, 8)
        self.attentionRight = VisionTransformerBlock(self.RightPatchNumber, 128, 8)
        self.attentionGlobal = VisionTransformerBlock(
            self.LeftPatchNumber + self.RightPatchNumber, 128, 8)

        # Classification heads: input length = patch count * 128 feature dims.
        self.classifierLeft = nn.Sequential(
            nn.Linear(self.LeftPatchNumber * 128, 32),
            nn.ReLU(True),
            nn.Linear(32, class_num),
            nn.Softmax(dim=1),
        )
        self.classifierRight = nn.Sequential(
            # BUG FIX: originally sized with LeftPatchNumber; the right
            # hemisphere can have a different patch count when the split axis
            # is odd, so it must use RightPatchNumber.
            nn.Linear(self.RightPatchNumber * 128, 32),
            nn.ReLU(True),
            nn.Linear(32, class_num),
            nn.Softmax(dim=1),
        )
        self.classifier = nn.Sequential(
            # BUG FIX: originally summed LeftPatchNumber twice; the global
            # branch concatenates left AND right token streams, so both counts
            # contribute to the flattened length.
            nn.Linear((self.LeftPatchNumber + self.RightPatchNumber) * 128, 32),
            nn.ReLU(True),
            nn.Linear(32, class_num),
            nn.Softmax(dim=1)
        )

    def forward(self, x):
        """Run the three-branch classification.

        Args:
            x: input volume; indexed as [batch, channel, H, W, D] with the
               hemisphere split applied along dim 2.

        Returns:
            Tuple of
            - left_features_flat:  per-patch left features, [b*num_left, 128]
              (auxiliary output, used for a cosine loss by the caller);
            - right_features_flat: per-patch right features, [b*num_right, 128];
            - x1: left-head class probabilities, [b, class_num];
            - x2: right-head class probabilities, [b, class_num];
            - result: global-head class probabilities, [b, class_num].
        """
        b = x.shape[0]  # batch size

        # Split into hemispheres along the first spatial axis (dim 2); the
        # right half gets the extra slice when the axis length is odd,
        # matching the shape split done in __init__.
        x1 = x[:, :, :(x.shape[2] // 2), :, :]  # left hemisphere
        x2 = x[:, :, (x.shape[2] // 2):, :, :]  # right hemisphere

        # Tile each hemisphere into patches.
        # x1.shape / x2.shape :: [b, num, patch_h, patch_w, patch_d]
        x1 = self.input1(x1)
        x2 = self.input2(x2)

        # Fold the patch axis into the batch axis so the extractor processes
        # all patches in one pass instead of a Python loop.
        x1 = rearrange(x1, 'b num (c h) w d -> (b num) c h w d', c=1)
        x2 = rearrange(x2, 'b num (c h) w d -> (b num) c h w d', c=1)

        # Per-patch feature extraction.
        # After this, x1.shape :: [b*num, c, h, w, d]
        # (e.g. with patch_size==25: [b*num, 128, 6, 6, 6])
        x1, x1_score = self.patchFeatureExtractorLeft(x1)
        x2, x2_score = self.patchFeatureExtractorRight(x2)

        # Global-average-pool each patch to a single 128-dim vector.
        # After this, x1.shape :: [b*num, 128, 1, 1, 1]
        x1 = F.adaptive_avg_pool3d(x1, (1, 1, 1))
        x2 = F.adaptive_avg_pool3d(x2, (1, 1, 1))

        # Auxiliary flat features, returned so the caller can compute a
        # cosine similarity loss between hemispheres.
        left_features_flat = x1.view(-1, 128)
        right_features_flat = x2.view(-1, 128)

        # Restore the batch/patch split: x.shape :: [b, num, 128]
        x1 = rearrange(x1, '(b num) c h w d -> b num (c h w d)', b=b)
        x2 = rearrange(x2, '(b num) c h w d -> b num (c h w d)', b=b)

        # Transformer encoders per hemisphere, then globally over the
        # concatenated token streams. (A dedicated ViT-style class token was
        # tried previously but did not train well.)
        x1 = self.attentionLeft(x1)
        x2 = self.attentionRight(x2)
        x = torch.cat([x1, x2], dim=1)
        x = self.attentionGlobal(x)

        # Flatten token streams and classify.
        x = rearrange(x, 'b num l -> b (num l)', b=b)
        x1 = rearrange(x1, 'b num l -> b (num l)', b=b)
        x2 = rearrange(x2, 'b num l -> b (num l)', b=b)

        x1 = self.classifierLeft(x1)
        x2 = self.classifierRight(x2)
        result = self.classifier(x)

        return left_features_flat, right_features_flat, x1, x2, result


if __name__ == '__main__':
    # Quick GPU smoke test: a batch of 12 single-channel 105x125x105 volumes
    # through a 2-class model, printing the global classifier output.
    dummy_volume = torch.randn(12, 1, 105, 125, 105).cuda()
    net = PSN_10_position(2, input_shape=[105, 125, 105], patch_size=25).cuda()
    feats_left, feats_right, probs_left, probs_right, probs_global = net(dummy_volume)
    print(probs_global)