import torch.nn as nn 
import torch 
from einops import rearrange
from monai.networks.nets.attentionunet import AttentionUnet
import torch.nn as nn 
import torch 
from einops import rearrange
import copy 
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
def infer_25d(image3d, net_25d):
    """Slice-wise 2.5D inference over a 3D volume.

    Each depth slice is fed to ``net_25d`` together with its previous and
    next neighbours (zero-padded at the volume boundaries), stacked along
    the channel dimension in the consistent order [prev, cur, next].

    NOTE(review): the original built the LAST window as [cur, prev, 0],
    inconsistent with every other slice; normalized here to [prev, cur, 0].

    Args:
        image3d: input volume of shape (batch, channel, depth, h, w).
        net_25d: 2D network taking (batch, 3 * channel, h, w) inputs.

    Returns:
        Per-slice predictions re-stacked along depth as
        (batch, out_ch, depth, h, w), or ``None`` for a zero-depth volume.
    """
    batch, channel, d, h, w = image3d.shape
    preds = []
    for index in range(d):
        cur = image3d[:, :, index]
        # Zero-pad the neighbourhood at the first/last slice so every
        # window keeps the same [prev, cur, next] channel layout.
        prev = image3d[:, :, index - 1] if index > 0 else torch.zeros_like(cur)
        nxt = image3d[:, :, index + 1] if index < d - 1 else torch.zeros_like(cur)
        pred_25d = net_25d(torch.cat([prev, cur, nxt], dim=1))
        preds.append(pred_25d.unsqueeze(2))
    return torch.cat(preds, dim=2) if preds else None


class AttUNet25D_32(nn.Module):
    """2.5D segmentation wrapper around a 2D AttentionUnet (32-channel base).

    The 3D volume is decomposed into one 3-slice window per depth position;
    each window is flattened into channels and run through a 2D attention
    U-Net, and the per-slice predictions are re-stacked into a volume.

    NOTE(review): ``in_ch`` must equal 3 * (volume channels), since three
    slices are merged into the channel axis before the 2D net — confirm
    against callers.
    """

    def __init__(self, in_ch, out_ch) -> None:
        super().__init__()
        # 2D attention U-Net with a 32-channel first stage.
        self.model = AttentionUnet(2, in_ch, out_ch, channels=[32, 64, 128, 256], strides=[2, 2, 2, 2])

    def forward(self, x):
        """x: (b, c, d, w, h) -> (b, out_ch, d, w, h). Requires d >= 2."""
        b, c, d, w, h = x.shape
        # One 3-slice window per depth position, in depth order. Boundary
        # windows replicate the edge slice: [s0, s1, s0] for the first and
        # [s_{d-2}, s_{d-1}, s_{d-1}] for the last (original padding scheme).
        windows = [torch.cat((x[:, :, 0:2], x[:, :, 0:1]), dim=2)]
        windows.extend(x[:, :, i:i + 3] for i in range(d - 2))
        if d >= 2:  # BUGFIX: original appended the tail window only when d >= 3
            windows.append(torch.cat((x[:, :, d - 2:d], x[:, :, d - 1:d]), dim=2))
        # (d*b, c, 3, w, h): dim 0 is depth-major because the windows are
        # concatenated per depth position, each carrying the full batch.
        stacked = torch.cat(windows, dim=0)
        # Merge (c, k) into the channel axis: (d*b, c*3, w, h).
        out2d = self.model(stacked.reshape(d * b, c * 3, w, h))
        # BUGFIX: dim 0 is (depth, batch)-ordered, not (batch, depth); the
        # old rearrange "(b d) c w h -> b c d w h" scrambled results for b > 1.
        return out2d.reshape(d, b, -1, w, h).permute(1, 2, 0, 3, 4)
        



class AttUNet25D(nn.Module):
    """2.5D segmentation wrapper around a 2D AttentionUnet (64-channel base).

    The 3D volume is decomposed into one 3-slice window per depth position;
    each window is flattened into channels and run through a 2D attention
    U-Net, and the per-slice predictions are re-stacked into a volume.

    NOTE(review): ``in_ch`` must equal 3 * (volume channels), since three
    slices are merged into the channel axis before the 2D net — confirm
    against callers.
    """

    def __init__(self, in_ch, out_ch) -> None:
        super().__init__()
        # 2D attention U-Net with a 64-channel first stage.
        self.model = AttentionUnet(2, in_ch, out_ch, channels=[64, 128, 256, 512], strides=[2, 2, 2, 2])

    def forward(self, x):
        """x: (b, c, d, w, h) -> (b, out_ch, d, w, h). Requires d >= 2."""
        b, c, d, w, h = x.shape
        # One 3-slice window per depth position, in depth order. Boundary
        # windows replicate the edge slice: [s0, s1, s0] for the first and
        # [s_{d-2}, s_{d-1}, s_{d-1}] for the last (original padding scheme).
        windows = [torch.cat((x[:, :, 0:2], x[:, :, 0:1]), dim=2)]
        windows.extend(x[:, :, i:i + 3] for i in range(d - 2))
        if d >= 2:  # BUGFIX: original appended the tail window only when d >= 3
            windows.append(torch.cat((x[:, :, d - 2:d], x[:, :, d - 1:d]), dim=2))
        # (d*b, c, 3, w, h): dim 0 is depth-major because the windows are
        # concatenated per depth position, each carrying the full batch.
        stacked = torch.cat(windows, dim=0)
        # Merge (c, k) into the channel axis: (d*b, c*3, w, h).
        out2d = self.model(stacked.reshape(d * b, c * 3, w, h))
        # BUGFIX: dim 0 is (depth, batch)-ordered, not (batch, depth); the
        # old rearrange "(b d) c w h -> b c d w h" scrambled results for b > 1.
        return out2d.reshape(d, b, -1, w, h).permute(1, 2, 0, 3, 4)
        


