# import torch
# import torch.nn as nn
# import torch.nn.functional as F
# NOTE: the PyTorch -> MindSpore conversion appears complete
import mindspore.nn as nn
import mindspore
import mindspore.ops as ops
from mindspore.common.initializer import One,Zero, HeUniform, XavierUniform, Uniform,initializer
import time
import sys
sys.path.append("..")
def to_fp16(x):
    """Recursively cast float32 Tensors to float16.

    Tuples/lists are processed element-wise (always returned as a tuple);
    non-float32 Tensors and any other values pass through unchanged.
    """
    from mindspore import dtype
    from mindspore.ops import Cast
    if isinstance(x, (tuple, list)):
        # Recurse into containers.
        return tuple(to_fp16(item) for item in x)
    if isinstance(x, mindspore.Tensor):
        # Only float32 Tensors are converted; others stay as-is.
        if x.dtype == dtype.float32:
            return Cast()(x, dtype.float16)
        return x
    # Anything else (ints, None, numpy arrays, ...) is returned untouched.
    return x
# from utils import local_pcd  # this local_pcd is identical to the one in utils
def local_pcd(depth, intr):
    """Back-project a depth map into a per-pixel 3D point cloud (camera frame).

    Args:
        depth: (H, W) numpy depth map.
        intr: (3, 3) camera intrinsic matrix.

    Returns:
        (H, W, 3) float32 array of 3D points.
    """
    import numpy as np  # BUGFIX: numpy was never imported at module level
    nx = depth.shape[1]  # w
    ny = depth.shape[0]  # h
    # Homogeneous pixel coordinates [x, y, 1] for every pixel, row-major.
    x, y = np.meshgrid(np.arange(nx), np.arange(ny), indexing='xy')
    x = x.reshape(nx * ny)
    y = y.reshape(nx * ny)
    p2d = np.array([x, y, np.ones_like(y)])
    # Rays in camera coordinates, then scaled by the per-pixel depth.
    p3d = np.matmul(np.linalg.inv(intr), p2d)
    depth = depth.reshape(1, nx * ny)
    p3d *= depth
    p3d = np.transpose(p3d, (1, 0))
    p3d = p3d.reshape(ny, nx, 3).astype(np.float32)
    return p3d

def init_bn(module):
    """Reset a BatchNorm cell to identity: gamma = 1, beta = 0.

    MindSpore batch norm exposes gamma/beta (instead of weight/bias) and
    parameters are updated via set_data with an initializer.
    """
    gamma = module.gamma
    if gamma is not None:
        gamma.set_data(initializer(One(), gamma.shape, gamma.dtype))
    beta = module.beta
    if beta is not None:
        beta.set_data(initializer(Zero(), beta.shape, beta.dtype))
    return

def init_uniform(module, init_method):
    """Initialize module.weight with He ("kaiming") or Xavier uniform.

    Unknown init_method values are silently ignored; the module is returned
    either way for chaining.
    """
    weight = module.weight
    if weight is not None:
        initializers = {"kaiming": HeUniform, "xavier": XavierUniform}
        chosen = initializers.get(init_method)
        if chosen is not None:
            weight.set_data(initializer(chosen(), weight.shape, weight.dtype))
    return module
class Swish(nn.Cell):
    """Swish activation: x * sigmoid(x)."""

    def __init__(self):
        super(Swish, self).__init__()
        # BUGFIX: nn.Sigmoid is a Cell class; the original called
        # nn.Sigmoid(x) inside construct, which builds a new Sigmoid with x
        # passed to its constructor instead of applying the activation.
        self.sigmoid = nn.Sigmoid()

    def construct(self, x):
        return x * self.sigmoid(x)

class Conv2d(nn.Cell):
    """Applies a 2D convolution, optionally followed by normalization and ReLU.

    Attributes:
        conv (nn.Cell): convolution module
        bn (nn.Cell or None): normalization module (BatchNorm2d / InstanceNorm2d)
        ifrelu (bool): whether ReLU is applied after (optional) normalization
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 relu=True, bn=True, bn_momentum=0.1, norm_type='BN', init_method="xavier", **kwargs):
        super(Conv2d, self).__init__()

        # Bias is redundant when a normalization layer follows the convolution.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, has_bias=(not bn), pad_mode='pad', **kwargs)
        self.kernel_size = kernel_size
        self.stride = stride
        self.relu = nn.ReLU()
        if norm_type == 'IN':
            self.bn = nn.InstanceNorm2d(out_channels, momentum=bn_momentum) if bn else None
        elif norm_type == 'BN':
            self.bn = nn.BatchNorm2d(out_channels, momentum=bn_momentum) if bn else None
        else:
            # BUGFIX: an unrecognised norm_type previously left self.bn
            # undefined, raising AttributeError in construct(); fall back to
            # no normalization.
            self.bn = None
        self.ifrelu = relu

        # assert init_method in ["kaiming", "xavier"]
        # self.init_weights(init_method)

    def construct(self, x):
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.ifrelu:
            x = self.relu(x)
        return x

    def init_weights(self, init_method):
        """Initialize conv weight (He/Xavier uniform) and reset BN parameters."""
        init_uniform(self.conv, init_method)
        if self.bn is not None:
            init_bn(self.bn)


class Deconv2d(nn.Cell):
    """Applies a 2D transposed convolution (optionally with batch normalization
    and ReLU activation) over an input signal.

    Attributes:
        conv (nn.Cell): transposed-convolution module
        bn (nn.Cell or None): batch-normalization module
        ifrelu (bool): whether ReLU is applied
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 relu=True, bn=True, bn_momentum=0.1, init_method="xavier", **kwargs):
        super(Deconv2d, self).__init__()
        self.out_channels = out_channels
        assert stride in [1, 2]
        self.stride = stride
        self.conv = nn.Conv2dTranspose(in_channels, out_channels, kernel_size, stride=stride,
                                       has_bias=(not bn), **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, momentum=bn_momentum) if bn else None
        self.ifrelu = relu
        self.relu = nn.ReLU()
        # assert init_method in ["kaiming", "xavier"]
        # self.init_weights(init_method)

    def construct(self, x):
        y = self.conv(x)
        if self.stride == 2:
            # Crop to exactly 2x the input spatial size (transposed conv can
            # overshoot). BUGFIX: the original used the PyTorch-only
            # x.size()/.contiguous() API; MindSpore Tensors use .shape.
            h, w = x.shape[2], x.shape[3]
            y = y[:, :, :2 * h, :2 * w]
        if self.bn is not None:
            y = self.bn(y)
        if self.ifrelu:
            # BUGFIX: previously, when bn was disabled, ReLU was applied to
            # the *input* x instead of the deconvolution output.
            y = self.relu(y)
        return y

    def init_weights(self, init_method):
        """Initialize conv weight (He/Xavier uniform) and reset BN parameters."""
        init_uniform(self.conv, init_method)
        if self.bn is not None:
            init_bn(self.bn)

class Conv3d(nn.Cell):
    """3D convolution with optional batch normalization and ReLU activation.

    Attributes:
        conv (nn.Cell): convolution module
        bn (nn.Cell or None): batch-normalization module
        ifrelu (bool): whether ReLU is applied
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 relu=True, bn=True, bn_momentum=0.1, init_method="xavier", **kwargs):
        super(Conv3d, self).__init__()
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        # Bias is redundant when batch normalization follows.
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride,
                              has_bias=(not bn), pad_mode='pad', **kwargs)
        self.bn = nn.BatchNorm3d(out_channels, momentum=bn_momentum) if bn else None
        self.ifrelu = relu
        self.relu = nn.ReLU()

    def construct(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.ifrelu:
            out = self.relu(out)
        return out

    def init_weights(self, init_method):
        """Initialize conv weight (He/Xavier uniform) and reset BN parameters."""
        init_uniform(self.conv, init_method)
        if self.bn is not None:
            init_bn(self.bn)

class Deconv3d(nn.Cell):
    """3D transposed convolution with optional batch normalization and ReLU.

    Attributes:
        conv (nn.Cell): transposed-convolution module
        bn (nn.Cell or None): batch-normalization module
        ifrelu (bool): whether ReLU is applied
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 relu=True, bn=True, bn_momentum=0.1, init_method="xavier", **kwargs):
        super(Deconv3d, self).__init__()
        self.out_channels = out_channels
        self.stride = stride
        self.conv = nn.Conv3dTranspose(in_channels, out_channels, kernel_size, stride=stride,
                                       has_bias=(not bn), pad_mode='pad', **kwargs)
        self.bn = nn.BatchNorm3d(out_channels, momentum=bn_momentum) if bn else None
        self.ifrelu = relu
        self.relu = nn.ReLU()

    def construct(self, x):
        y = self.conv(x)
        if self.bn is not None:
            y = self.bn(y)
        if self.ifrelu:
            # BUGFIX: previously, when bn was disabled, ReLU was applied to
            # the *input* x instead of the deconvolution output y.
            y = self.relu(y)
        return y

    def init_weights(self, init_method):
        """Initialize conv weight (He/Xavier uniform) and reset BN parameters."""
        init_uniform(self.conv, init_method)
        if self.bn is not None:
            init_bn(self.bn)

class ConvBnReLU(nn.Cell):
    """Conv2d -> BatchNorm2d -> ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1):
        super(ConvBnReLU, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=pad, pad_mode='pad')
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU()

    def construct(self, x):
        # Debug print statements removed: they ran on every forward pass,
        # flooding logs and slowing training.
        return self.relu(self.bn(self.conv(x)))


class ConvBn(nn.Cell):
    """Conv2d -> BatchNorm2d (no activation; bias disabled, BN provides the shift)."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1):
        super(ConvBn, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=pad, pad_mode='pad', has_bias=False)
        self.bn = nn.BatchNorm2d(out_channels)

    def construct(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return out


class ConvBnReLU3D(nn.Cell):
    """Conv3d -> BatchNorm3d -> ReLU (bias disabled, BN provides the shift)."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1):
        super(ConvBnReLU3D, self).__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=pad, pad_mode='pad', has_bias=False)
        self.bn = nn.BatchNorm3d(out_channels)
        self.relu = nn.ReLU()

    def construct(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return self.relu(out)


class ConvBn3D(nn.Cell):
    """Conv3d -> BatchNorm3d (no activation; bias disabled, BN provides the shift)."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, pad=1):
        super(ConvBn3D, self).__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=pad, pad_mode='pad', has_bias=False)
        self.bn = nn.BatchNorm3d(out_channels)

    def construct(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return out


class BasicBlock(nn.Cell):
    """Residual block: two 3x3 convolutions with an additive skip connection.

    When the main path changes resolution/channels, pass a `downsample` cell
    to project the identity branch accordingly.
    """

    def __init__(self, in_channels, out_channels, stride, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = ConvBnReLU(in_channels, out_channels, kernel_size=3, stride=stride, pad=1)
        self.conv2 = ConvBn(out_channels, out_channels, kernel_size=3, stride=1, pad=1)
        self.downsample = downsample
        self.stride = stride

    def construct(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.conv2(self.conv1(x))
        out += identity
        return out


class Hourglass3d(nn.Cell):
    """3D hourglass (encoder-decoder) with skip connections via 1x1x1 convs."""

    def __init__(self, channels):
        super(Hourglass3d, self).__init__()

        # Encoder: two stride-2 stages doubling the channel width each time.
        self.conv1a = ConvBnReLU3D(channels, channels * 2, kernel_size=3, stride=2, pad=1)
        self.conv1b = ConvBnReLU3D(channels * 2, channels * 2, kernel_size=3, stride=1, pad=1)
        self.conv2a = ConvBnReLU3D(channels * 2, channels * 4, kernel_size=3, stride=2, pad=1)
        self.conv2b = ConvBnReLU3D(channels * 4, channels * 4, kernel_size=3, stride=1, pad=1)

        # Decoder: transposed convs restoring resolution (BN, no activation here;
        # ReLU is applied after adding the skip branch).
        self.dconv2 = nn.SequentialCell(
            nn.Conv3dTranspose(channels * 4, channels * 2, kernel_size=3, padding=1, output_padding=1, stride=2,
                               has_bias=False),
            nn.BatchNorm3d(channels * 2))
        self.dconv1 = nn.SequentialCell(
            nn.Conv3dTranspose(channels * 2, channels, kernel_size=3, padding=1, output_padding=1, stride=2,
                               has_bias=False),
            nn.BatchNorm3d(channels))

        # 1x1x1 projections for the skip connections.
        self.redir1 = ConvBn3D(channels, channels, kernel_size=1, stride=1, pad=0)
        self.redir2 = ConvBn3D(channels * 2, channels * 2, kernel_size=1, stride=1, pad=0)
        self.relu = nn.ReLU()

    def construct(self, x):
        down1 = self.conv1b(self.conv1a(x))        # 1/2 resolution
        down2 = self.conv2b(self.conv2a(down1))    # 1/4 resolution
        up2 = self.relu(self.dconv2(down2) + self.redir2(down1))
        up1 = self.relu(self.dconv1(up2) + self.redir1(x))
        return up1

# from mindspore import ms_function
# @ms_function
def homo_warping_new(src_fea, src_proj, ref_proj, depth_values):
    """Warp source-view features into the reference view by plane-sweep homography.

    Args:
        src_fea: [B, C, H, W] source-view feature map.
        src_proj: [B, 4, 4] source projection matrix.
        ref_proj: [B, 4, 4] reference projection matrix.
        depth_values: [B, Ndepth] or [B, Ndepth, H, W] depth hypotheses.

    Returns:
        grid_sample output over a [B, Ndepth*H, W, 2] grid, i.e. a tensor of
        shape [B, C, Ndepth*H, W] (callers reshape to [B, C, Ndepth, H, W]).
    """
    batch, channels = src_fea.shape[0], src_fea.shape[1]
    num_depth = depth_values.shape[1]
    height, width = src_fea.shape[2], src_fea.shape[3]
    # Matrix inverse / matmul require float32 inputs.
    ref_proj = ops.cast(ref_proj, mindspore.float32)
    src_proj = ops.cast(src_proj, mindspore.float32)
    # Relative projection: reference pixel -> source pixel.
    proj = ops.matmul(src_proj, ops.inverse(ref_proj))
    rot = proj[:, :3, :3]  # [B,3,3]
    trans = proj[:, :3, 3:4]  # [B,3,1]
    # Reference-view pixel grid as flattened homogeneous coordinates.
    # (Redundant recomputation of batch/num_depth removed.)
    y, x = ops.meshgrid(mindspore.numpy.arange(0, height, dtype=mindspore.float32),
                        mindspore.numpy.arange(0, width, dtype=mindspore.float32))
    y, x = y.reshape(-1), x.reshape(-1)
    xyz = ops.stack((x, y, ops.ones_like(x)))  # [3, H*W]
    xyz = ops.expand_dims(xyz, 0)  # [1, 3, H*W]
    xyz = ops.tile(xyz, (batch, 1, 1))  # [B, 3, H*W]
    # Project every pixel at every depth hypothesis into the source view.
    rot_xyz = ops.matmul(rot, xyz)  # [B, 3, H*W]
    rot_depth_xyz = ops.expand_dims(rot_xyz, 2)  # [B, 3, 1, H*W]
    rot_depth_xyz = rot_depth_xyz.broadcast_to((batch, 3, num_depth, rot_xyz.shape[-1]))  # [B, 3, Ndepth, H*W]
    rot_depth_xyz = rot_depth_xyz * depth_values.view(batch, 1, num_depth, -1)
    proj_xyz = rot_depth_xyz + trans.view(batch, 3, 1, 1)  # [B, 3, Ndepth, H*W]
    # Guard against division by zero in the z component.
    proj_xyz_slice = proj_xyz[:, 2:3, :, :]
    proj_xyz_slice = ops.select(ops.equal(proj_xyz_slice, 0),
                                proj_xyz_slice + 1e-8,
                                proj_xyz_slice)
    proj_xyz = ops.concat([proj_xyz[:, :2, :, :], proj_xyz_slice], axis=1)
    # Perspective divide, then normalize to grid_sample's [-1, 1] range.
    proj_xy = proj_xyz[:, :2, :, :] / proj_xyz[:, 2:3, :, :]  # [B, 2, Ndepth, H*W]
    proj_x_normalized = proj_xy[:, 0, :, :] / ((width - 1) / 2) - 1
    proj_y_normalized = proj_xy[:, 1, :, :] / ((height - 1) / 2) - 1
    proj_xy = ops.stack((proj_x_normalized, proj_y_normalized), axis=3)  # [B, Ndepth, H*W, 2]
    grid = proj_xy.view(batch, num_depth * height, width, 2)
    # Bilinear sampling of the source features at the projected coordinates.
    warped_src_fea = ops.grid_sample(src_fea,
                                     grid,
                                     mode='bilinear',
                                     padding_mode='zeros',
                                     align_corners=True)
    return warped_src_fea

class P_1to8_FeatureNet_Fast(nn.Cell):
    """FPN-style 2D feature extractor with three output stages.

    Encoder downsamples 3x (stride-2 convs); the decoder fuses upsampled
    features with lateral 1x1 convs and emits "stage1" (coarsest, 1/8
    resolution), "stage2" (1/4) and "stage3" (1/2).

    Args:
        base_channels: stored but otherwise unused in this class.
        in_channel: encoder channel widths per scale.
        out_channel: output-head channel widths.
        stage_channel: if True each stage head gets its own width from
            out_channel; otherwise stages share out_channel[1].
    """

    # NOTE(review): the list defaults below are shared across instances but
    # appear to be read-only here.
    def __init__(self, base_channels=8, in_channel=[8,16,32,64], out_channel=[32,16,8], stage_channel=True):
        super(P_1to8_FeatureNet_Fast, self).__init__()

        self.base_channels = base_channels

        # Encoder: full resolution.
        self.conv0 = nn.SequentialCell(
            Conv2d(3, in_channel[0], 3, 1, padding=1),
            Conv2d(in_channel[0], in_channel[0], 3, 1, padding=1),
        )

        # Encoder: 1/2 resolution (stride-2 5x5 conv then two 3x3 convs).
        self.conv1 = nn.SequentialCell(
            Conv2d(in_channel[0], in_channel[1], 5, stride=2, padding=2),
            Conv2d(in_channel[1], in_channel[1], 3, 1, padding=1),
            Conv2d(in_channel[1], in_channel[1], 3, 1, padding=1),
        )

        # Encoder: 1/4 resolution.
        self.conv2 = nn.SequentialCell(
            Conv2d(in_channel[1], in_channel[2], 5, stride=2, padding=2),
            Conv2d(in_channel[2], in_channel[2], 3, 1, padding=1),
            Conv2d(in_channel[2], in_channel[2], 3, 1, padding=1),
        )
        # Encoder: 1/8 resolution.
        self.conv3 = nn.SequentialCell(
            Conv2d(in_channel[2], in_channel[3], 5, stride=2, padding=2),
            Conv2d(in_channel[3], in_channel[3], 3, 1, padding=1),
            Conv2d(in_channel[3], in_channel[3], 3, 1, padding=1),
        )
        # Stage-1 head operates on the coarsest features.
        if stage_channel:
            self.out1 = nn.Conv2d(in_channel[3], out_channel[0], 1, has_bias=False)
        else:
            self.out1 = nn.Conv2d(in_channel[3], out_channel[1], 1, has_bias=False)
        self.out_channels = [in_channel[3]]
        final_chs = in_channel[3]

        # Lateral 1x1 convs projecting encoder features to the FPN width.
        self.inner1 = nn.Conv2d(in_channel[2], final_chs, 1, has_bias=True)
        self.inner2 = nn.Conv2d(in_channel[1], final_chs, 1, has_bias=True)
        if stage_channel:
            self.out2 = nn.Conv2d(final_chs, out_channel[1], 3, padding=1, pad_mode='pad', has_bias=False)
            self.out3 = nn.Conv2d(final_chs, out_channel[2], 3, padding=1, pad_mode='pad', has_bias=False)
        else:
            self.out2 = nn.Conv2d(final_chs, out_channel[1], 3, padding=1, pad_mode='pad', has_bias=False)
            self.out3 = nn.Conv2d(final_chs, out_channel[1], 3, padding=1, pad_mode='pad', has_bias=False)
        self.out_channels.append(in_channel[1])
        self.out_channels.append(in_channel[0])

    def construct(self, x):
        """Return dict with "stage1"/"stage2"/"stage3" feature maps (coarse to fine)."""
        conv0 = self.conv0(x)
        conv1 = self.conv1(conv0)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)

        intra_feat = conv3
        outputs = {}
        out = self.out1(intra_feat)
        outputs["stage1"] = out
        # _, _, h, w = intra_feat.shape
        # new_size = (int(h * 2), int(w * 2))
        # intra_feat = ops.interpolate(intra_feat, size=new_size, mode="nearest") + self.inner1(conv2)
        # # intra_feat = ops.interpolate(intra_feat, scale_factor=2.0, mode="bilinear") + self.inner1(conv2)
        # Upsample to the lateral branch's exact size, then fuse by addition.
        conv2_out = self.inner1(conv2)
        intra_feat = ops.interpolate(intra_feat, size=conv2_out.shape[2:], mode="nearest")
        intra_feat = intra_feat + conv2_out
        out = self.out2(intra_feat)
        outputs["stage2"] = out

        # _, _, h, w = intra_feat.shape
        # new_size = (int(h * 2), int(w * 2))
        # intra_feat = ops.interpolate(intra_feat, size=new_size,mode="nearest") + self.inner2(conv1)
        conv1_out = self.inner2(conv1)
        intra_feat = ops.interpolate(intra_feat, size=conv1_out.shape[2:], mode="nearest")
        intra_feat = intra_feat + conv1_out
        out = self.out3(intra_feat)
        outputs["stage3"] = out
        return outputs

class PixelwiseNet(nn.Cell):
    """Predicts a per-pixel view weight in (0, 1) from a grouped cost volume."""

    def __init__(self, G):
        super(PixelwiseNet, self).__init__()
        # 1x1x1 convolutions: per-voxel MLP over the G correlation groups.
        self.conv0 = ConvBnReLU3D(G, 16, 1, 1, 0)
        self.conv1 = ConvBnReLU3D(16, 8, 1, 1, 0)
        self.conv2 = nn.Conv3d(8, 1, kernel_size=1, stride=1, pad_mode='pad', padding=0)
        self.output = nn.Sigmoid()

    def construct(self, x1):
        """x1: [B, G, Ndepth, H, W] -> weight map [B, 1, H, W]."""
        score = self.conv2(self.conv1(self.conv0(x1))).squeeze(1)  # [B, Ndepth, H, W]
        score = self.output(score)
        # Take the best response along the depth axis; ops.max returns
        # (values, indices), so [0] picks the values -> [B, H, W].
        best = ops.max(score, axis=1)[0]
        return ops.unsqueeze(best, 1)

class CostRegNet_2_sample_FPN3D_Fast(nn.Cell):
    """Lightweight 3D cost-volume regularization U-Net (two convs per stage).

    Example intermediate shapes (B=4, base_channels=8):
        x      [4, 1, 48, 64, 80]
        conv1  [4, 8, 48, 64, 80]
        conv3  [4, 16, 24, 32, 40]
        middle [4, 32, 12, 16, 20]
    """

    def __init__(self, in_channels, base_channels):
        super(CostRegNet_2_sample_FPN3D_Fast, self).__init__()

        self.conv0 = Conv3d(in_channels, base_channels, padding=1)
        self.conv1 = Conv3d(base_channels, base_channels, padding=1)

        self.conv2 = Conv3d(base_channels, base_channels * 2, stride=2, padding=1)
        self.conv3 = Conv3d(base_channels * 2, base_channels * 2, padding=1)

        self.conv4 = Conv3d(base_channels * 2, base_channels * 4, stride=2, padding=1)
        self.conv5 = Conv3d(base_channels * 4, base_channels * 4, padding=1)

        # Decoder: upsample and fuse with encoder skips.
        self.conv6 = Deconv3d(base_channels * 4, base_channels * 2, stride=2, padding=1, output_padding=1)
        self.conv7 = Deconv3d(base_channels * 2, base_channels * 1, stride=2, padding=1, output_padding=1)

        # Final 1-channel (pre-softmax) volume.
        self.prob = nn.Conv3d(base_channels, 1, 3, stride=1, padding=1, pad_mode='pad', has_bias=False)

    def construct(self, x):
        """Returns (regularized 1-channel volume, decoder features)."""
        conv1 = self.conv1(self.conv0(x))
        conv3 = self.conv3(self.conv2(conv1))
        x = self.conv5(self.conv4(conv3))
        # PERF FIX: conv6 was previously evaluated twice (once into an unused
        # temporary); compute it once.
        x = conv3 + self.conv6(x)
        pro = conv1 + self.conv7(x)
        x = self.prob(pro)
        return x, pro


class CostRegNet_2_sample_FPN3D(nn.Cell):
    """3D cost-volume regularization U-Net with two downsampling stages."""

    def __init__(self, in_channels, base_channels):
        super(CostRegNet_2_sample_FPN3D, self).__init__()

        # Encoder stage 0: full resolution, four convs.
        self.conv0 = Conv3d(in_channels, base_channels, padding=1)
        self.conv0_1 = Conv3d(base_channels, base_channels, padding=1)
        self.conv0_2 = Conv3d(base_channels, base_channels, padding=1)
        self.conv0_3 = Conv3d(base_channels, base_channels, padding=1)

        # Encoder stage 1: 1/2 resolution.
        self.conv1 = Conv3d(base_channels, base_channels * 2, stride=2, padding=1)
        self.conv1_1 = Conv3d(base_channels * 2, base_channels * 2, padding=1)
        self.conv1_2 = Conv3d(base_channels * 2, base_channels * 2, padding=1)
        self.conv1_3 = Conv3d(base_channels * 2, base_channels * 2, padding=1)

        # Encoder stage 2: 1/4 resolution.
        self.conv2 = Conv3d(base_channels * 2, base_channels * 4, stride=2, padding=1)
        self.conv2_1 = Conv3d(base_channels * 4, base_channels * 4, padding=1)
        self.conv2_2 = Conv3d(base_channels * 4, base_channels * 4, padding=1)
        self.conv2_3 = Conv3d(base_channels * 4, base_channels * 4, padding=1)

        # Decoder: two transposed convs with additive skip connections.
        self.conv3 = Deconv3d(base_channels * 4, base_channels * 2, stride=2, padding=1, output_padding=1)
        self.conv4 = Deconv3d(base_channels * 2, base_channels * 1, stride=2, padding=1, output_padding=1)

        # Final 1-channel (pre-softmax) volume.
        self.prob = nn.Conv3d(base_channels, 1, 3, stride=1, pad_mode='pad',padding=1, has_bias=False)

    def construct(self, x):
        """Returns (regularized 1-channel volume, decoder features)."""
        skip0 = self.conv0_3(self.conv0_2(self.conv0_1(self.conv0(x))))
        skip1 = self.conv1_3(self.conv1_2(self.conv1_1(self.conv1(skip0))))
        bottom = self.conv2_3(self.conv2_2(self.conv2_1(self.conv2(skip1))))
        up1 = skip1 + self.conv3(bottom)
        pro = skip0 + self.conv4(up1)
        return self.prob(pro), pro

class cost_up_small(nn.Cell):
    """Fuses a cost volume with an IGEV cost volume and upsamples back."""

    def __init__(self, in_channels, base_channels, IGEV_cost_channel=1):
        super(cost_up_small, self).__init__()
        self.IGEV_cost_channel = IGEV_cost_channel
        # Halve the spatial (H, W) resolution; the depth axis is untouched.
        self.conv0 = Conv3d(in_channels, base_channels, stride=(1,2,2), padding=1)
        self.conv_cost = Conv3d(self.IGEV_cost_channel, base_channels, padding=1)
        self.conv1 = Conv3d(base_channels * 2, base_channels, padding=1)
        # Restore the original spatial resolution.
        self.conv2 = Deconv3d(base_channels, self.IGEV_cost_channel, stride=(1,2,2), padding=1, output_padding=(0,1,1))

    def construct(self, x, IGEV_cost):
        """Returns (upsampled fused cost, fused bottleneck features)."""
        down = self.conv0(x)
        igev_feat = self.conv_cost(IGEV_cost)
        fused = self.conv1(ops.cat([down, igev_feat], axis=1))
        up = self.conv2(fused)
        return up, fused

def depth_regression(p, depth_values):
    """Soft-argmax depth: expectation of depth_values under the probability volume p."""
    if depth_values.dim() <= 2:
        # (B, D) -> (B, D, 1, 1) so it broadcasts against p's spatial dims.
        depth_values = depth_values.view(*depth_values.shape, 1, 1)
    return ops.sum(p * depth_values, 1)

def mvs_loss(inputs, depth_gt_ms, mask_ms, dloss, depth_values=(425, 935), loss_rate=0.9):
    """Multi-stage smooth-L1 depth loss.

    Args:
        inputs: list of per-stage depth estimates.
        depth_gt_ms: dict keyed "stage{k}" with ground-truth depth maps.
        mask_ms: dict keyed "stage{k}" with validity masks (>0.5 == valid).
        dloss: sequence mapping each input index to its stage id.
        depth_values: unused here; default changed from a mutable list to an
            immutable tuple (mutable-default pitfall) — interface-compatible.
        loss_rate: geometric weight applied to earlier stages.

    Returns:
        (total_loss, dict of per-stage losses keyed "l{i}").
    """
    total_loss = mindspore.tensor(0.0, dtype=mindspore.float32)
    loss_dict = {}
    loss_len = len(inputs)
    stage_id = dloss
    # (Removed the no-op `loss_rate = loss_rate` assignment.)
    for i, stage_inputs in enumerate(inputs):
        depth_est = stage_inputs
        depth_gt = depth_gt_ms["stage{}".format(stage_id[i])]
        mask = mask_ms["stage{}".format(stage_id[i])]
        mask = mask > 0.5  # boolean valid-pixel mask
        depth_loss = ops.smooth_l1_loss(depth_est[mask], depth_gt[mask], reduction='mean')
        loss_dict["l{}".format(i)] = depth_loss
        if i == 0:
            total_loss += 1.0 * depth_loss
        else:
            total_loss += (loss_rate ** (loss_len - i - 1)) * depth_loss
    return total_loss, loss_dict

def get_cur_depth_range_samples(cur_depth, ndepth, depth_inteval_pixel, shape, max_depth=192.0, min_depth=0.0):
    """Build per-pixel depth hypotheses centred on the current depth estimate.

    Args:
        cur_depth: (B, H, W) current depth estimate.
        ndepth: number of hypotheses per pixel.
        depth_inteval_pixel: spacing between neighbouring hypotheses.
        shape: expected (B, H, W) of cur_depth (sanity-checked).
        max_depth / min_depth: unused; kept for interface compatibility.

    Returns:
        (B, ndepth, H, W) tensor of strictly positive depth samples.
    """
    half_span = ndepth // 2 * depth_inteval_pixel
    depth_min = (cur_depth - half_span).clamp(min=1e-4)  # (B, H, W)
    depth_max = (cur_depth + half_span).clamp(min=1e-4, max=1e4)
    assert cur_depth.shape == tuple(shape), "cur_depth:{}, input shape:{}".format(cur_depth.shape, shape)
    step = (depth_max - depth_min) / (ndepth - 1)  # (B, H, W)
    samples = ops.unsqueeze(depth_min, 1) + (ops.arange(0, ndepth).reshape(1, -1, 1, 1) * step.unsqueeze(1))
    return samples.clamp(min=1e-5)

def get_depth_range_samples(cur_depth, ndepth, depth_inteval_pixel, dtype, shape,
                           max_depth=192.0, min_depth=0.0):
    """Build a (B, D, H, W) depth-hypothesis volume.

    Args:
        cur_depth: (B, D) depth range per batch, or (B, H, W) per-pixel depth.
        ndepth: number of depth hypotheses D.
        depth_inteval_pixel: hypothesis spacing (per-pixel case only).
        dtype: kept for interface compatibility (arange uses cur_depth.dtype).
        shape: target (B, H, W).
        max_depth / min_depth: forwarded to the per-pixel case.

    Returns:
        (B, D, H, W) tensor of depth samples.

    (Dead commented-out PyTorch implementation removed.)
    """
    if len(cur_depth.shape) == 2:
        # (B, D): interpolate linearly between first and last depth per batch.
        cur_depth_min = cur_depth[:, 0]  # (B,)
        cur_depth_max = cur_depth[:, -1]
        new_interval = (cur_depth_max - cur_depth_min) / (ndepth - 1)  # (B,)

        depth_range = ops.arange(0, ndepth, dtype=cur_depth.dtype).reshape(1, -1)  # (1, D)
        depth_range_samples = ops.expand_dims(cur_depth_min, 1) + depth_range * ops.expand_dims(new_interval, 1)  # (B, D)

        # Broadcast the per-batch samples to every pixel: (B, D, H, W).
        depth_range_samples = ops.expand_dims(ops.expand_dims(depth_range_samples, -1), -1)
        depth_range_samples = ops.tile(depth_range_samples, (1, 1, shape[1], shape[2]))
    else:
        # (B, H, W): per-pixel refinement around the current depth estimate.
        depth_range_samples = get_cur_depth_range_samples(
            cur_depth, ndepth, depth_inteval_pixel, shape, max_depth, min_depth)

    return depth_range_samples


# NOTE: the original (PyTorch) version of this test code never ran successfully
if __name__ == "__main__":
    # MindSpore smoke test for homography warping.
    # How to run: python -m models.module
    import sys
    sys.path.append("../")
    # from ..datasets import find_dataset_def
    from datasets.dtu_yao import MVSDataset
    import mindspore.dataset as ds
    import numpy as np
    import cv2
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    from mindspore import Tensor, context
    import mindspore.ops as ops

    # Configure execution backend (graph mode on CPU).
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

    # Build the DTU training dataset.
    DTU_TRAINING = "/media/outbreak/68E1-B517/Dataset/DTU_ZIP/dtu_training/mvs_training/dtu"
    num_depth = 48
    dataset = MVSDataset(
        datapath=DTU_TRAINING,
        listfile="lists/dtu/train.txt",
        mode="train",
        nviews=5,
        ndepths=num_depth,
        interval_scale=1.06 * 192 / num_depth,
        dispmaxfirst='last',
        max_h=1200,
        max_w=1600
    )

    # Wrap it in a MindSpore data loader.
    minds_dataset = ds.GeneratorDataset(
        dataset,
        column_names=["imgs", "proj_matrices", "depth", "depth_values", "mask", "filename"],
        shuffle=True
    )
    # Fetch a single batch.
    batched_dataset = minds_dataset.batch(batch_size=4)
    item = next(batched_dataset.create_dict_iterator())
    imgs = item['imgs']
    proj_matrices = item['proj_matrices']
    depth_values = item['depth_values']
    # Preprocess (B, N, 3, H, W).
    imgs = imgs[:, :, :, ::4, ::4]  # 4x spatial downsampling
    proj_matrices = proj_matrices.asnumpy()
    proj_matrices[:, :, 1, :2, :] = proj_matrices[:, :, 1, :2, :]  # no-op: intrinsics kept unchanged
    proj_matrices = Tensor(proj_matrices)

    # Split the reference view from the source views.
    imgs = ops.unstack(imgs, axis=1)
    proj_matrices = ops.unstack(proj_matrices, axis=1)
    ref_img, src_imgs = imgs[0], imgs[1:]
    ref_proj, src_proj = proj_matrices[0], proj_matrices[1:][0]  # only use the first source view

    # Compose intrinsics @ extrinsics into full 3x4 projection matrices.
    src_proj_new = src_proj[:, 0].copy()
    src_proj_new[:, :3, :4] = ops.matmul(src_proj[:, 1, :3, :3], src_proj[:, 0, :3, :4])
    ref_proj_new = ref_proj[:, 0].copy()
    ref_proj_new[:, :3, :4] = ops.matmul(ref_proj[:, 1, :3, :3], ref_proj[:, 0, :3, :4])

    # Run homography warping (MindSpore implementation above).
    warped_imgs = homo_warping_new(src_imgs[0], src_proj_new, ref_proj_new, depth_values)

    # Save visualization results (reference, source, and blended warps).
    ref_img_np = ref_img.transpose(0, 2, 3, 1).asnumpy()[0][:, :, ::-1] * 255
    cv2.imwrite('../tmp/ref.png', ref_img_np)
    cv2.imwrite('../tmp/src.png', src_imgs[0].transpose(0, 2, 3, 1).asnumpy()[0][:, :, ::-1] * 255)

    for i in range(warped_imgs.shape[2]):
        warped_img = warped_imgs[:, :, i, :, :].transpose(0, 2, 3, 1)
        img_np = warped_img.asnumpy()[0][:, :, ::-1] * 255

        alpha = 0.5
        beta = 1 - alpha
        gamma = 0
        img_add = cv2.addWeighted(ref_img_np, alpha, img_np, beta, gamma)
        cv2.imwrite('../tmp/tmp{}.png'.format(i), np.hstack([ref_img_np, img_np, img_add]))

# if __name__ == "__main__":
#     # some testing code, just IGNORE it
#     import sys
#     sys.path.append("../")
#     from datasets import find_dataset_def
#     from torch.utils.data import DataLoader
#     import numpy as np
#     import cv2
#     import matplotlib as mpl
#     mpl.use('Agg')
#     import matplotlib.pyplot as plt

#     # MVSDataset = find_dataset_def("colmap")
#     # dataset = MVSDataset("../data/results/ford/num10_1/", 3, 'test',
#     #                      128, interval_scale=1.06, max_h=1250, max_w=1024)

#     MVSDataset = find_dataset_def("dtu_yao")
#     num_depth = 48
#     dataset = MVSDataset("../data/DTU/mvs_training/dtu/", '../lists/dtu/train.txt', 'train',
#                          3, num_depth, interval_scale=1.06 * 192 / num_depth)

#     dataloader = DataLoader(dataset, batch_size=1)
#     item = next(iter(dataloader))

#     imgs = item["imgs"][:, :, :, ::4, ::4]  #(B, N, 3, H, W)
#     # imgs = item["imgs"][:, :, :, :, :]
#     proj_matrices = item["proj_matrices"]   #(B, N, 2, 4, 4) dim=N: N view; dim=2: index 0 for extr, 1 for intric
#     proj_matrices[:, :, 1, :2, :] = proj_matrices[:, :, 1, :2, :]
#     # proj_matrices[:, :, 1, :2, :] = proj_matrices[:, :, 1, :2, :] * 4
#     depth_values = item["depth_values"]     #(B, D)

#     imgs = torch.unbind(imgs, 1)
#     proj_matrices = torch.unbind(proj_matrices, 1)
#     ref_img, src_imgs = imgs[0], imgs[1:]
#     ref_proj, src_proj = proj_matrices[0], proj_matrices[1:][0]  #only vis first view

#     src_proj_new = src_proj[:, 0].clone()
#     src_proj_new[:, :3, :4] = torch.matmul(src_proj[:, 1, :3, :3], src_proj[:, 0, :3, :4])
#     ref_proj_new = ref_proj[:, 0].clone()
#     ref_proj_new[:, :3, :4] = torch.matmul(ref_proj[:, 1, :3, :3], ref_proj[:, 0, :3, :4])

#     warped_imgs = homo_warping_new(src_imgs[0], src_proj_new, ref_proj_new, depth_values)

#     ref_img_np = ref_img.permute([0, 2, 3, 1])[0].detach().cpu().numpy()[:, :, ::-1] * 255
#     cv2.imwrite('../tmp/ref.png', ref_img_np)
#     cv2.imwrite('../tmp/src.png', src_imgs[0].permute([0, 2, 3, 1])[0].detach().cpu().numpy()[:, :, ::-1] * 255)

#     for i in range(warped_imgs.shape[2]):
#         warped_img = warped_imgs[:, :, i, :, :].permute([0, 2, 3, 1]).contiguous()
#         img_np = warped_img[0].detach().cpu().numpy()
#         img_np = img_np[:, :, ::-1] * 255

#         alpha = 0.5
#         beta = 1 - alpha
#         gamma = 0
#         img_add = cv2.addWeighted(ref_img_np, alpha, img_np, beta, gamma)
#         cv2.imwrite('../tmp/tmp{}.png'.format(i), np.hstack([ref_img_np, img_np, img_add])) #* ratio + img_np*(1-ratio)]))
