import mindspore
from PIL import Image
from mindspore import nn, ops,Tensor
from functools import partial
from mindspore import dtype as mstype
from mindspore import context
from mindspore.common.initializer import One, Normal
from models.ChangeFormer2.ChangeFormerBaseNetworks import *
import time
from mindvision.classification.models.blocks.drop_path import DropPath
from mindspore.common import initializer as init
import numpy as np
import cv2
from mindspore.train.serialization import load_checkpoint, load_param_into_net

class OverlapPatchEmbed(nn.Cell):
    """Overlapping patch embedding: strided convolution + LayerNorm.

    Projects an image (or feature map) into a flattened sequence of
    embedded patches; neighbouring patches overlap because the conv
    kernel is larger than its stride.
    """

    def __init__(self, img_size=256, patch_size=7, stride=4, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = (img_size, img_size)
        patch_size = (patch_size, patch_size)

        self.img_size = img_size
        self.patch_size = patch_size
        # Nominal patch grid, derived from patch size (not stride).
        self.H = img_size[0] // patch_size[0]
        self.W = img_size[1] // patch_size[1]
        self.num_patches = self.H * self.W

        # 'pad' mode with padding = kernel//2 mirrors the PyTorch reference.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
                              pad_mode='pad', padding=patch_size[0] // 2, has_bias=True)
        self.norm = nn.LayerNorm((embed_dim,))

        self.transpose = ops.Transpose()
        self.reshape = ops.Reshape()

    def _init_weights(self, m):
        # Placeholder: weight init is handled externally (see ChangeFormerV6).
        pass

    def construct(self, x):
        """Return (tokens, H, W); tokens has shape [B, H*W, embed_dim]."""
        feat = self.proj(x)
        b, c, h, w = feat.shape
        tokens = self.reshape(feat, (b, c, -1))     # [B, C, H*W]
        tokens = self.transpose(tokens, (0, 2, 1))  # [B, H*W, C]
        tokens = self.norm(tokens)

        return tokens, h, w

# TODO: weight initialization not implemented here (done externally)
class Attention(nn.Cell):
    """Spatial-reduction multi-head self-attention (SegFormer-style).

    When ``sr_ratio > 1`` the key/value tokens are spatially downsampled by
    a strided convolution before attention, reducing the quadratic cost of
    full self-attention on large feature maps.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
        super().__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.q = nn.Dense(dim, dim, has_bias=qkv_bias)
        self.kv = nn.Dense(dim, dim * 2, has_bias=qkv_bias)
        self.attn_drop = nn.Dropout(keep_prob=(1.-attn_drop))
        # NOTE(review): MindSpore nn.Dropout already rescales by 1/keep_prob
        # during training, and this extra factor is applied unconditionally
        # (including in eval mode) — confirm the double rescale is intentional.
        self.attn_drop_scale = 1./(1.-attn_drop)

        self.proj = nn.Dense(dim, dim, has_bias=True)
        self.proj_drop = nn.Dropout(keep_prob=(1.-proj_drop))
        self.proj_drop_scale = 1./(1. - proj_drop)

        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            # Strided conv that shrinks the K/V spatial resolution by sr_ratio.
            self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio, has_bias=True)
            self.norm = nn.LayerNorm((dim,))

        self.transpose = ops.Transpose()
        self.reshape = ops.Reshape()
        self.q_matmul_k = ops.BatchMatMul(transpose_b=True)
        self.attn_matmul_v = ops.BatchMatMul()
        self.mul = ops.Mul()

        self.active = ops.Softmax()

    def _init_weights(self, m):
        # Placeholder: weight init is handled externally (see ChangeFormerV6).
        pass

    def construct(self, x, H, W):
        """Apply self-attention to tokens x of shape [B, N, C]; H * W == N."""
        B, N, C = x.shape
        # [B, N, C] -> [B, num_heads, N, head_dim]
        q = self.reshape(self.q(x), (B, N, self.num_heads, C // self.num_heads))
        q = self.transpose(q, (0, 2, 1, 3))

        if self.sr_ratio > 1:
            # Downsample tokens spatially before computing K/V.
            x_ = self.transpose(x, (0, 2, 1))
            x_ = self.reshape(x_, (B, C, H, W))

            x_ = self.reshape(self.sr(x_), (B, C, -1))
            x_ = self.transpose(x_, (0, 2, 1))
            x_ = self.norm(x_)

            # [2, B, num_heads, N', head_dim]
            kv = self.reshape(self.kv(x_), (B, -1, 2, self.num_heads, C // self.num_heads))
            kv = self.transpose(kv, (2, 0, 3, 1, 4))

        else:
            kv = self.reshape(self.kv(x), (B, -1, 2, self.num_heads, C // self.num_heads))
            kv = self.transpose(kv, (2, 0, 3, 1, 4))

        k, v = kv[0], kv[1]

        # Scaled dot-product attention.
        attn = self.mul(self.q_matmul_k(q, k), self.scale)
        attn = self.active(attn)
        attn = self.attn_drop(attn)
        attn = self.mul(attn, self.attn_drop_scale)

        x = self.attn_matmul_v(attn, v)
        # Fix: ops.Transpose requires a valid non-negative permutation;
        # the previous (0, 2, 1, -1) is invalid — the intent (PyTorch
        # transpose(1, 2)) is (0, 2, 1, 3).
        x = self.transpose(x, (0, 2, 1, 3))
        x = self.reshape(x, (B, N, C))

        x = self.proj(x)
        x = self.proj_drop(x)  # [1,4096,64]
        x = self.mul(x, self.proj_drop_scale)

        return x

# TODO: initialization not implemented here (done externally)
class DWConv(nn.Cell):
    """Depthwise 3x3 convolution applied to a flattened token sequence.

    Reshapes [B, N, C] tokens back to a [B, C, H, W] map, convolves each
    channel independently (group=dim), then flattens back to tokens.
    """

    def __init__(self, dim=768):
        super(DWConv, self).__init__()
        self.dwconv = nn.Conv2d(in_channels=dim, out_channels=dim,
                                kernel_size=3, stride=1, padding=1, pad_mode='pad',
                                has_bias=True, group=dim)
        self.transpose = ops.Transpose()
        self.reshape = ops.Reshape()

    def construct(self, x, H, W):
        batch, _, channels = x.shape
        # tokens -> feature map
        feat = self.transpose(x, (0, 2, 1))
        feat = self.reshape(feat, (batch, channels, H, W))
        feat = self.dwconv(feat)

        # feature map -> tokens
        batch, channels = feat.shape[0], feat.shape[1]
        out = self.reshape(feat, (batch, channels, -1))
        out = self.transpose(out, (0, 2, 1))

        return out


# TODO: initialization not implemented here (done externally)
class Mlp(nn.Cell):
    """Feed-forward block with depthwise-conv token mixing.

    Pipeline: fc1 -> DWConv(H, W) -> activation -> dropout -> fc2 -> dropout.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Dense(in_features, hidden_features)
        self.dwconv = DWConv(hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Dense(hidden_features, out_features)
        self.drop = nn.Dropout(keep_prob=(1.-drop))
        # NOTE(review): applied unconditionally after the (already inverted)
        # dropout — confirm the extra rescale is intentional.
        self.drop_scale = 1./(1. - drop)
        self.mul = ops.Mul()

    def _init_weights(self, m):
        # Placeholder: weight init is handled externally (see ChangeFormerV6).
        pass

    def construct(self, x, H, W):
        hidden = self.fc1(x)
        hidden = self.act(self.dwconv(hidden, H, W))
        hidden = self.mul(self.drop(hidden), self.drop_scale)

        out = self.fc2(hidden)
        out = self.mul(self.drop(out), self.drop_scale)

        return out

# TODO: initialization not implemented here (done externally)
class Block(nn.Cell):
    """Pre-norm transformer block.

    Two residual sub-layers: LayerNorm -> Attention, then
    LayerNorm -> Mlp, each wrapped in stochastic depth (DropPath).
    """

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
        super().__init__()
        self.norm1 = norm_layer((dim,))
        self.attn = Attention(
            dim,
            num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)

        # Stochastic depth; plain identity when the rate is zero.
        self.drop_path = DropPath(drop_path) if drop_path > 0. else ops.Identity()

        self.norm2 = norm_layer((dim,))
        hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=hidden_dim, act_layer=act_layer, drop=drop)

    def _init_weights(self, m):
        # Placeholder: weight init is handled externally (see ChangeFormerV6).
        pass

    def construct(self, x, H, W):
        x = x + self.drop_path(self.attn(self.norm1(x), H, W))
        x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
        return x



# TODO: initialization not implemented here (done externally)
# Transformer Decoder
class MLP(nn.Cell):
    """Linear embedding head for the decoder.

    Flattens a [B, C, H, W] feature map into [B, H*W, C] tokens and
    projects each token from input_dim to embed_dim.
    """

    def __init__(self, input_dim=2048, embed_dim=768):
        super().__init__()
        self.proj = nn.Dense(input_dim, embed_dim)
        self.reshape = ops.Reshape()
        self.transpose = ops.Transpose()

    def construct(self, x):
        batch, channels = x.shape[0], x.shape[1]
        tokens = self.reshape(x, (batch, channels, -1))   # [B, C, H*W]
        tokens = self.transpose(tokens, (0, 2, 1))        # [B, H*W, C]
        return self.proj(tokens)

# TODO: weight init not implemented here; Conv/BN settings should be checked against the PyTorch reference
#Difference module
def conv_diff(in_channels, out_channels):
    return nn.SequentialCell(
        nn.Conv2d(in_channels, out_channels, kernel_size=3, pad_mode='pad', padding=1, has_bias=True),
        nn.ReLU(),
        nn.BatchNorm2d(out_channels),
        nn.Conv2d(out_channels, out_channels, kernel_size=3, pad_mode='pad', padding=1, has_bias=True),
        nn.ReLU()
    )

# Intermediate prediction module
def make_prediction(in_channels, out_channels):
    """Build a small head that maps features to per-class prediction maps."""
    layers = [
        nn.Conv2d(in_channels, out_channels, kernel_size=3, pad_mode='pad', padding=1, has_bias=True),
        nn.ReLU(),
        nn.BatchNorm2d(out_channels),
        nn.Conv2d(out_channels, out_channels, kernel_size=3, pad_mode='pad', padding=1, has_bias=True),
    ]
    return nn.SequentialCell(layers)



class EncoderTransformer_v3(nn.Cell):
    """Hierarchical (SegFormer-style) transformer encoder with four stages.

    Each stage is an overlapping patch embedding followed by a stack of
    attention blocks; the four stage outputs are feature maps at x1/4,
    x1/8, x1/16 and x1/32 of the input resolution.
    """

    def __init__(self, img_size=256, patch_size=3, in_chans=3, num_classes=2, embed_dims=[32, 64, 128, 256],
                 num_heads=[2, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=True, qk_scale=None, drop_rate=0.,
                 attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
                 depths=[3, 3, 6, 18], sr_ratios=[8, 4, 2, 1]):
        super().__init__()
        self.num_classes = num_classes
        self.depths = depths
        self.embed_dims = embed_dims

        # patch embedding definitions (stage 1 always uses a 7x7/stride-4 stem)
        self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_chans=in_chans,
                                              embed_dim=embed_dims[0])
        self.patch_embed2 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=patch_size, stride=2,
                                              in_chans=embed_dims[0],
                                              embed_dim=embed_dims[1])
        self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=patch_size, stride=2,
                                              in_chans=embed_dims[1],
                                              embed_dim=embed_dims[2])
        self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=patch_size, stride=2,
                                              in_chans=embed_dims[2],
                                              embed_dim=embed_dims[3])

        # Stochastic-depth schedule: drop-path rates increase linearly from 0
        # to drop_path_rate across all blocks of all stages.
        linspace = ops.LinSpace()
        start = mindspore.Tensor(0, mstype.float32)
        drop_path_rate = mindspore.Tensor(drop_path_rate, mstype.float32)
        dpr = linspace(start, drop_path_rate, sum(depths)).asnumpy()

        cur = 0

        # Stage-1 (x1/4 scale)
        self.block1 = nn.CellList([Block(
            dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
            drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
            sr_ratio=sr_ratios[0])
            for i in range(depths[0])])
        self.norm1 = norm_layer((embed_dims[0],))

        # Stage-2 (x1/8 scale)
        cur += depths[0]
        self.block2 = nn.CellList([Block(
            dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
            drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
            sr_ratio=sr_ratios[1])
            for i in range(depths[1])])
        self.norm2 = norm_layer((embed_dims[1],))

        # Stage-3 (x1/16 scale)
        cur += depths[1]
        self.block3 = nn.CellList([Block(
            dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
            drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
            sr_ratio=sr_ratios[2])
            for i in range(depths[2])])
        self.norm3 = norm_layer((embed_dims[2],))

        # Stage-4 (x1/32 scale)
        cur += depths[2]
        self.block4 = nn.CellList([Block(
            dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
            drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
            sr_ratio=sr_ratios[3])
            for i in range(depths[3])])
        self.norm4 = norm_layer((embed_dims[3],))

        self.reshape = ops.Reshape()
        self.transpose = ops.Transpose()

        # self.apply(self._init_weights)

    def _init_weights(self, m):
        # Placeholder: weight init is handled externally (see ChangeFormerV6).
        pass

    def reset_drop_path(self, drop_path_rate):
        """Reassign per-block drop-path rates using a fresh linear schedule.

        NOTE(review): assumes each block's ``drop_path`` is a DropPath cell
        exposing a mutable ``drop_prob`` attribute; blocks created with
        drop_path == 0 hold an ops.Identity instead, on which the assignment
        has no effect — confirm this is the intended behavior.
        """
        # dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
        linspace = ops.LinSpace()
        start = mindspore.Tensor(0, mstype.float32)
        drop_path_rate = mindspore.Tensor(drop_path_rate, mstype.float32)
        dpr = linspace(start, drop_path_rate, sum(self.depths)).asnumpy()
        cur = 0
        for i in range(self.depths[0]):
            self.block1[i].drop_path.drop_prob = dpr[cur + i]

        cur += self.depths[0]
        for i in range(self.depths[1]):
            self.block2[i].drop_path.drop_prob = dpr[cur + i]

        cur += self.depths[1]
        for i in range(self.depths[2]):
            self.block3[i].drop_path.drop_prob = dpr[cur + i]

        cur += self.depths[2]
        for i in range(self.depths[3]):
            self.block4[i].drop_path.drop_prob = dpr[cur + i]

    def forward_features(self, x):
        """Run all four stages; return a list of four [B, C_i, H_i, W_i] maps."""
        B = x.shape[0]  # x: [1,3,256,256]
        outs = []

        # stage 1: tokens -> blocks -> norm -> back to NCHW feature map

        x1, H1, W1 = self.patch_embed1(x)  # [1,4096,64]  64, 64
        for i, blk in enumerate(self.block1):
            x1 = blk(x1, H1, W1)

        x1 = self.norm1(x1)  # [1,4096,64]
        x1 = self.reshape(x1, (B, H1, W1, -1))
        x1 = self.transpose(x1, (0, 3, 1, 2))

        outs.append(x1)

        # stage 2
        x1, H1, W1 = self.patch_embed2(x1)  # [1, 1024, 128] 32 32
        for i, blk in enumerate(self.block2):
            x1 = blk(x1, H1, W1)
        x1 = self.norm2(x1)
        x1 = self.reshape(x1, (B, H1, W1, -1))
        x1 = self.transpose(x1, (0, 3, 1, 2))

        outs.append(x1)

        # stage 3
        x1, H1, W1 = self.patch_embed3(x1)  # [1, 256, 320]  16, 16(16x16=256)
        for i, blk in enumerate(self.block3):
            x1 = blk(x1, H1, W1)
        x1 = self.norm3(x1)  # [1,256,320]
        x1 = self.reshape(x1, (B, H1, W1, -1))
        x1 = self.transpose(x1, (0, 3, 1, 2))
        outs.append(x1)

        # stage 4
        x1, H1, W1 = self.patch_embed4(x1)  # [1,64,512]  8,8
        for i, blk in enumerate(self.block4):
            x1 = blk(x1, H1, W1)
        x1 = self.norm4(x1)
        x1 = self.reshape(x1, (B, H1, W1, -1))
        x1 = self.transpose(x1, (0, 3, 1, 2))
        outs.append(x1)
        return outs

    def construct(self, x):
        """Encode an image; returns the list of four multi-scale feature maps."""
        x = self.forward_features(x)  # [1,3,256,256]
        return x


class DecoderTransformer_v3(nn.Cell):
    """
    Transformer decoder for change detection.

    Embeds the four encoder scales of both input images with per-scale MLPs,
    computes convolutional difference features scale by scale (coarse to
    fine), fuses all scales, and upsamples to the final change map.
    Intermediate per-scale predictions are returned for deep supervision.
    """
    def __init__(self, input_transform='multiple_select', in_index=[0, 1, 2, 3], align_corners=True,
                 in_channels=[32, 64, 128, 256], embedding_dim=64, output_nc=2,
                 decoder_softmax=False, feature_strides=[2, 4, 8, 16]):
        super(DecoderTransformer_v3, self).__init__()
        # sanity checks
        assert len(feature_strides) == len(in_channels)
        assert min(feature_strides) == feature_strides[0]

        # settings
        self.feature_strides = feature_strides
        self.input_transform = input_transform
        self.in_index = in_index
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.embedding_dim = embedding_dim
        self.output_nc = output_nc
        c1_in_channels, c2_in_channels, c3_in_channels, c4_in_channels = self.in_channels

        # MLP decoder heads: one linear embedding per encoder scale
        self.linear_c4 = MLP(input_dim=c4_in_channels, embed_dim=self.embedding_dim)
        self.linear_c3 = MLP(input_dim=c3_in_channels, embed_dim=self.embedding_dim)
        self.linear_c2 = MLP(input_dim=c2_in_channels, embed_dim=self.embedding_dim)
        self.linear_c1 = MLP(input_dim=c1_in_channels, embed_dim=self.embedding_dim)

        # convolutional difference modules (concatenated pair -> embedding_dim)
        self.diff_c4 = conv_diff(in_channels=2 * self.embedding_dim, out_channels=self.embedding_dim)
        self.diff_c3 = conv_diff(in_channels=2 * self.embedding_dim, out_channels=self.embedding_dim)
        self.diff_c2 = conv_diff(in_channels=2 * self.embedding_dim, out_channels=self.embedding_dim)
        self.diff_c1 = conv_diff(in_channels=2 * self.embedding_dim, out_channels=self.embedding_dim)

        # intermediate prediction heads (deep supervision)
        self.make_pred_c4 = make_prediction(in_channels=self.embedding_dim, out_channels=self.output_nc)
        self.make_pred_c3 = make_prediction(in_channels=self.embedding_dim, out_channels=self.output_nc)
        self.make_pred_c2 = make_prediction(in_channels=self.embedding_dim, out_channels=self.output_nc)
        self.make_pred_c1 = make_prediction(in_channels=self.embedding_dim, out_channels=self.output_nc)

        # Final linear fusion layer over the concatenation of all 4 scales
        self.linear_fuse = nn.SequentialCell(
            nn.Conv2d(in_channels=self.embedding_dim * len(in_channels), out_channels=self.embedding_dim,
                      kernel_size=1, pad_mode='valid', padding=0, has_bias=True),
            nn.BatchNorm2d(self.embedding_dim)
        )

        # Final prediction head: two x2 upsamplings with residual refinement
        self.convd2x = UpsampleConvLayer(self.embedding_dim, self.embedding_dim, kernel_size=4, stride=2)
        self.dense_2x = nn.SequentialCell([ResidualBlock(self.embedding_dim)])
        self.convd1x = UpsampleConvLayer(self.embedding_dim, self.embedding_dim, kernel_size=4, stride=2)
        self.dense_1x = nn.SequentialCell([ResidualBlock(self.embedding_dim)])
        self.change_probability = ConvLayer(self.embedding_dim, self.output_nc, kernel_size=3, stride=1, padding=1)

        # Final activation (applied to every output when enabled)
        self.output_softmax = decoder_softmax
        self.active = nn.Sigmoid()

        self.transpose = ops.Transpose()
        self.reshape = ops.Reshape()
        self.concat = ops.Concat(axis=1)

    def _transform_inputs(self, inputs):
        """Transform inputs for decoder.
        Args:
            inputs (list[Tensor]): List of multi-level img features.
        Returns:
            Tensor: The transformed inputs
        """

        if self.input_transform == 'resize_concat':
            # index selection written as an explicit loop to support graph mode
            inputs_temp = inputs
            inputs = []
            for i in self.in_index:
                inputs.append(inputs_temp[i])

            # upsample every level to the spatial size of the first one
            upsampled_inputs = []
            for x in inputs:
                # Fix: pass the (H, W) tuple, not the scalar H, as the target size.
                x = resize(input=x, size=inputs[0].shape[2:], mode='bilinear', align_corners=self.align_corners)
                upsampled_inputs.append(x)

            # Fix: ops.Concat takes its axis at construction time and a single
            # sequence of tensors at call time; the previous `dim=1` keyword
            # argument was invalid.
            cat_fun = ops.Concat(axis=1)
            inputs = cat_fun(upsampled_inputs)
        elif self.input_transform == 'multiple_select':
            # index selection written as an explicit loop to support graph mode
            inputs_temp = inputs
            inputs = []
            for i in self.in_index:
                inputs.append(inputs_temp[i])

        else:
            inputs = inputs[self.in_index]

        return inputs

    def construct(self, inputs1, inputs2):
        """Decode two encoder feature pyramids into change predictions.

        Returns a list: [p_c4, p_c3, p_c2, p_c1, final_prediction].
        """
        # Transforming encoder features (select layers)
        x_1 = self._transform_inputs(inputs1)  # len=4, 1/2, 1/4, 1/8, 1/16
        x_2 = self._transform_inputs(inputs2)  # len=4, 1/2, 1/4, 1/8, 1/16

        # img1 and img2 features
        c1_1, c2_1, c3_1, c4_1 = x_1  # [1,64,64,64] [1,128,32,32] [1,320,16,16] [1,512,8,8]
        c1_2, c2_2, c3_2, c4_2 = x_2

        ############## MLP decoder on C1-C4 ###########
        n, _, h, w = c4_1.shape  # 1,512,8,8

        outputs = []

        # Stage 4: x1/32 scale — embed both images, diff, predict, upsample
        _c4_1 = self.transpose(self.linear_c4(c4_1), (0, 2, 1))
        _c4_1 = self.reshape(_c4_1, (n, -1, c4_1.shape[2], c4_1.shape[3]))

        _c4_2 = self.transpose(self.linear_c4(c4_2), (0, 2, 1))
        _c4_2 = self.reshape(_c4_2, (n, -1, c4_2.shape[2], c4_2.shape[3]))

        _c4 = self.diff_c4(self.concat((_c4_1, _c4_2)))  # [1,256,8,8]
        p_c4 = self.make_pred_c4(_c4)  # [1,2,8,8]
        outputs.append(p_c4)
        _c4_up = resize(_c4, size=c1_2.shape[2:], mode='bilinear', align_corners=False)  # [1,256,64,64]

        # Stage 3: x1/16 scale — diff plus the upsampled coarser diff
        _c3_1 = self.transpose(self.linear_c3(c3_1), (0, 2, 1))
        _c3_1 = self.reshape(_c3_1, (n, -1, c3_1.shape[2], c3_1.shape[3]))

        _c3_2 = self.transpose(self.linear_c3(c3_2), (0, 2, 1))
        _c3_2 = self.reshape(_c3_2, (n, -1, c3_2.shape[2], c3_2.shape[3]))

        # NOTE(review): both size and scale_factor are passed to the project
        # `resize` helper; the PyTorch reference uses scale_factor=2 only —
        # confirm which argument the helper honors.
        _c3 = self.diff_c3(self.concat((_c3_1, _c3_2))) + resize(input=_c4, size=_c4.shape[2:], scale_factor=2, mode="bilinear")
        p_c3 = self.make_pred_c3(_c3)  # [1,2,16,16]
        outputs.append(p_c3)
        _c3_up = resize(_c3, size=c1_2.shape[2:], mode='bilinear', align_corners=False)  # [1,256,64,64]

        # Stage 2: x1/8 scale
        _c2_1 = self.transpose(self.linear_c2(c2_1), (0, 2, 1))
        _c2_1 = self.reshape(_c2_1, (n, -1, c2_1.shape[2], c2_1.shape[3]))

        _c2_2 = self.transpose(self.linear_c2(c2_2), (0, 2, 1))
        _c2_2 = self.reshape(_c2_2, (n, -1, c2_2.shape[2], c2_2.shape[3]))

        _c2 = self.diff_c2(self.concat((_c2_1, _c2_2))) + resize(input=_c3, size=_c3.shape[2:], scale_factor=2, mode="bilinear")
        p_c2 = self.make_pred_c2(_c2)  # [1,2,32,32]
        outputs.append(p_c2)
        _c2_up = resize(_c2, size=c1_2.shape[2:], mode='bilinear', align_corners=False)  # [1,256,64,64]

        # Stage 1: x1/4 scale
        _c1_1 = self.transpose(self.linear_c1(c1_1), (0, 2, 1))
        _c1_1 = self.reshape(_c1_1, (n, -1, c1_1.shape[2], c1_1.shape[3]))

        _c1_2 = self.transpose(self.linear_c1(c1_2), (0, 2, 1))
        _c1_2 = self.reshape(_c1_2, (n, -1, c1_2.shape[2], c1_2.shape[3]))

        _c1 = self.diff_c1(self.concat((_c1_1, _c1_2))) + resize(input=_c2, size=_c2.shape[2:], scale_factor=2, mode="bilinear")
        p_c1 = self.make_pred_c1(_c1)  # [1,2,64,64]
        outputs.append(p_c1)

        # Linear fusion of difference features from all scales
        _c = self.linear_fuse(self.concat((_c4_up, _c3_up, _c2_up, _c1)))

        # Upsampling x2 (x1/2 scale)
        x = self.convd2x(_c)  # [1,256,128,128]
        # Residual block
        x = self.dense_2x(x)  # [1,256,128,128]
        # Upsampling x2 (x1 scale)
        x = self.convd1x(x)  # [1,256,256,256]
        # Residual block
        x = self.dense_1x(x)

        # Final prediction
        cp = self.change_probability(x)  # [1,2,256,256]

        outputs.append(cp)

        if self.output_softmax:
            # written as an explicit loop to support graph mode
            temp = outputs
            outputs = []
            for pred in temp:
                outputs.append(self.active(pred))

        return outputs


class ChangeFormerV6(nn.Cell):
    """ChangeFormer V6: Siamese transformer encoder + transformer decoder.

    Encodes two co-registered images with a shared EncoderTransformer_v3 and
    decodes their multi-scale differences into change-prediction maps.
    """

    def __init__(self, input_nc=3, output_nc=2, decoder_softmax=False, embed_dim=256):
        super(ChangeFormerV6, self).__init__()
        # Transformer encoder configuration
        self.embed_dims = [64, 128, 320, 512]
        self.depths = [3, 3, 4, 3]
        self.embedding_dim = embed_dim
        self.drop_rate = 0.1
        self.attn_drop = 0.1
        self.drop_path_rate = 0.1

        # Shared (Siamese) transformer encoder
        self.Tenc_x2 = EncoderTransformer_v3(img_size=256, patch_size=7, in_chans=input_nc, num_classes=output_nc,
                                             embed_dims=self.embed_dims,
                                             num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=True,
                                             qk_scale=None, drop_rate=self.drop_rate,
                                             attn_drop_rate=self.attn_drop, drop_path_rate=self.drop_path_rate,
                                             norm_layer=partial(nn.LayerNorm, epsilon=1e-6),
                                             depths=self.depths, sr_ratios=[8, 4, 2, 1])

        # Transformer decoder
        self.TDec_x2 = DecoderTransformer_v3(input_transform='multiple_select', in_index=[0, 1, 2, 3],
                                             align_corners=False,
                                             in_channels=self.embed_dims, embedding_dim=self.embedding_dim,
                                             output_nc=output_nc,
                                             decoder_softmax=decoder_softmax, feature_strides=[2, 4, 8, 16])

        # Retained for the legacy stacked-input (x_AB) entry point.
        self.stack = ops.Stack()
        self.custom_init_weights()
        print("model initilize successfully...")

    def custom_init_weights(self):
        """Initialize Conv2d/Dense weights ~ N(0, 0.02) with zero bias, and
        BatchNorm2d gamma ~ N(1, 0.02) with zero beta, across all sub-cells."""
        for _, cell in self.cells_and_names():
            if isinstance(cell, (nn.Conv2d, nn.Dense)):
                cell.weight.set_data(init.initializer(
                    Normal(mean=0, sigma=0.02), cell.weight.shape, cell.weight.dtype
                ))
                if cell.bias is not None:
                    cell.bias.set_data(init.initializer(
                        'zeros', cell.bias.shape, cell.bias.dtype))
            elif isinstance(cell, nn.BatchNorm2d):
                cell.gamma.set_data(init.initializer(
                    Normal(mean=1.0, sigma=0.02), cell.gamma.shape, cell.gamma.dtype
                ))
                cell.beta.set_data(init.initializer('zeros', cell.beta.shape, cell.beta.dtype))

    def construct(self, x1, x2):
        """Encode both images with the shared encoder and decode the change maps.

        Returns the decoder's output list; the last element is the full-resolution
        prediction.
        """
        fx1 = self.Tenc_x2(x1)
        fx2 = self.Tenc_x2(x2)

        cp = self.TDec_x2(fx1, fx2)

        return cp

if __name__ == '__main__':
    # Smoke test: run change detection on one LEVIR-CD image pair and save the mask.
    mindspore.context.set_context(mode=mindspore.PYNATIVE_MODE, device_target="CPU")
    imgA = r'D:\git_test\Levir_sample\LEVIR-train_val_test\val\A\val_7_1.png'
    imgB = r'D:\git_test\Levir_sample\LEVIR-train_val_test\val\B\val_7_1.png'

    img_A = Image.open(imgA).convert('RGB')
    img_B = Image.open(imgB).convert('RGB')

    # HWC -> CHW, float32
    imgA = np.array(img_A, np.float32).transpose(2, 0, 1)
    imgB = np.array(img_B, np.float32).transpose(2, 0, 1)

    # Add a batch dimension of 1.
    imgA = [imgA]
    imgB = [imgB]

    model = ChangeFormerV6()

    # Load weights converted from the PyTorch checkpoint.
    param_dict_path = r'D:\pythonDemo\ChangeFormer_Server\ckpt_test\pt_2_ckpt\torch_cf_result.ckpt'
    param_dict = load_checkpoint(param_dict_path)
    load_param_into_net(model, param_dict)

    imgA = mindspore.Tensor(imgA, dtype=mindspore.float32)
    imgB = mindspore.Tensor(imgB, dtype=mindspore.float32)

    # Normalize [0, 255] -> [-1, 1].
    mean = 127.5
    std = 127.5

    imgA = (imgA - mean)/std
    imgB = (imgB - mean)/std

    out = model(imgA, imgB)[-1]  # final full-resolution prediction, [1,2,256,256]
    argmax = ops.Argmax(axis=1)
    pred = argmax(out)
    # Map class index {0,1} -> pixel value {0,255}.
    pred = 255*pred

    pred = pred.asnumpy()

    # (1,H,W) -> (H,W,1) for cv2.imwrite
    # NOTE(review): argmax output is integer-typed; cv2.imwrite may expect uint8 — confirm.
    pred = pred.transpose(1, 2, 0)
    out_path = r'D:\pythonDemo\ChangeFormer_Server\ckpt_test\val_7_1.png'
    cv2.imwrite(out_path, pred)


    print(out)

