import mindspore.nn as nn
import mindspore.common.dtype as mstype
import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

from src.resnet import resnet50c
from src.common import *
from src.attention import Attention


def get_feature(features, stride):
    """Return the feature map for *stride* from a dict keyed by stride strings.

    Any stride other than 32/16/8/4 falls back to the stride-2 feature map.
    """
    if stride == 4:
        return features["4"]
    if stride == 8:
        return features["8"]
    if stride == 16:
        return features["16"]
    if stride == 32:
        return features["32"]
    return features["2"]


class SimpleRender(nn.Cell):
    """Refinement head that sharpens coarse logits with fine-grained features.

    Upsamples the coarse class logits to ``out_stride`` resolution,
    concatenates them with the stride-4 and stride-2 backbone features,
    and runs ``num_fc`` 1x1 conv layers followed by a final predictor.

    Bug fix: the previous defaults (``in_stride=(8,)``, ``out_stride=4``)
    could never pass the ``set(in_stride) == {2, 4}`` check below (and a
    stride-2 input with ``out_stride=4`` would yield a zero upsample
    ratio), so the class was unusable with defaults.  The defaults now
    match the only supported configuration, which is also what the
    ``Segmentation`` caller passes.
    """

    def __init__(self, channels, num_classes, coarse_pred_stride=8, in_stride=(4, 2), out_stride=2, fc_dim=256,
                 num_fc=3, cls_agnostic_mask=False, coarse_pred_each_layer=True, dropout_ratio=0.):
        super().__init__()
        self.in_stride = in_stride
        self.out_stride = out_stride
        self.coarse_pred_each_layer = coarse_pred_each_layer

        # construct() hard-codes features["4"] and features["2"], so only
        # this stride combination is supported.
        assert set(self.in_stride) == {2, 4}

        in_channels = sum(channels[stride] for stride in self.in_stride) + num_classes

        # Both resizers always exist (guaranteed by the assert above), so the
        # old `if 4 in self.in_stride else None` guards were dead code.
        self.resize_4 = UpSample(channels[4], 4 // out_stride)
        self.resize_2 = UpSample(channels[2], 2 // out_stride)

        # Bring the coarse prediction up to the working resolution.
        self.resize_coarse = UpSample(num_classes, coarse_pred_stride // out_stride)
        self.fc_layers = nn.CellList()
        for _ in range(num_fc):
            self.fc_layers.append(ConvBNReLU(in_channels, fc_dim, 1, 1, 0))
            in_channels = fc_dim
            # If coarse logits are re-concatenated after every layer, the
            # next layer sees fc_dim + num_classes input channels.
            if self.coarse_pred_each_layer:
                in_channels += num_classes

        num_mask_classes = 1 if cls_agnostic_mask else num_classes
        # NOTE(review): nn.Dropout's first argument changed meaning between
        # MindSpore versions (keep_prob in 1.x vs drop probability p in 2.x)
        # — confirm dropout_ratio matches the installed API.
        self.dropout = nn.Dropout(dropout_ratio) if dropout_ratio > 0 else None
        self.predictor = Conv2d(in_channels, num_mask_classes, kernel_size=1, stride=1, padding=0, bias=True)
        self.cat = P.Concat(axis=1)

    def construct(self, features: dict, coarse_logits):
        """Refine *coarse_logits* with features["4"] and features["2"].

        Returns logits at ``out_stride`` resolution.
        """
        coarse_logits = self.resize_coarse(coarse_logits)
        x = self.cat((coarse_logits, self.resize_4(features["4"]), self.resize_2(features["2"])))
        for layer in self.fc_layers:
            x = layer(x)
            if self.coarse_pred_each_layer:
                x = self.cat((x, coarse_logits))
        if self.dropout is not None:
            x = self.dropout(x)
        return self.predictor(x)


class UNet(nn.Cell):
    """Top-down decoder that fuses multi-scale features U-Net style.

    Starting from the stride-32 map, every stage upsamples the previous
    output 2x, concatenates it with the backbone feature at the same
    stride, and refines the result, continuing until ``out_stride``.
    ``out_channels`` may override the per-stride channel counts.
    """

    def __init__(self, channels: dict, out_channels: dict = None, in_stride=32, out_stride=4):
        super(UNet, self).__init__()
        assert in_stride == 32
        self._channels = channels.copy()
        if out_channels is not None:
            self._channels.update(out_channels)
        self.in_stride = in_stride
        self.out_stride = out_stride
        self.layers = nn.CellList()
        self.resize_ops = nn.CellList()
        self.layer_map = {}
        # Build one stage per stride, coarsest first (32, 16, 8, ...).
        stride = in_stride
        while stride >= out_stride:
            self.layer_map[stride] = len(self.layers)
            if stride == self.in_stride:
                # Topmost stage: no coarser feature to merge with.
                stage = ConvBNReLU(channels[stride], self._channels[stride], 3, 1, 1)
            else:
                # Input is the upsampled coarser output + the lateral feature.
                stage = nn.SequentialCell(
                    ConvBNReLU(self._channels[stride * 2] + channels[stride], self._channels[stride], 3, 1, 1),
                    ConvBNReLU(self._channels[stride], self._channels[stride], 3, 1, 1)
                )
            self.layers.append(stage)
            self.resize_ops.append(UpSample(self._channels[stride]))
            stride //= 2
        self.cat = P.Concat(axis=1)

    def construct(self, features: dict):
        """Refine *features* in place from stride 32 down to ``out_stride``."""
        features["32"] = self.layers[0](features["32"])
        if self.out_stride <= 16:
            merged = self.cat((self.resize_ops[0](features["32"]), features["16"]))
            features["16"] = self.layers[1](merged)
        if self.out_stride <= 8:
            merged = self.cat((self.resize_ops[1](features["16"]), features["8"]))
            features["8"] = self.layers[2](merged)
        if self.out_stride <= 4:
            merged = self.cat((self.resize_ops[2](features["8"]), features["4"]))
            features["4"] = self.layers[3](merged)
        return features

    @property
    def channels(self):
        """Per-stride output channel counts (with any overrides applied)."""
        return self._channels


class Segmentation(nn.Cell):
    """Semantic segmentation network.

    ResNet-50c backbone (dilated, output stride 8) + DNL attention neck +
    1x1 classifier, plus an optional ``SimpleRender`` refinement head that
    sharpens the coarse logits using stride-4/stride-2 features.

    Args:
        num_classes: number of output classes.
        out_stride: stride of the feature map fed to the classifier.
        dropout: dropout argument for the classifier and refiner.
        refiner_stride: stride at which the refiner attaches; <= 0 disables it.
        refiner_cfg: optional ``SimpleRender`` overrides; the caller's dict
            is no longer mutated (it is copied before defaults are filled in).
    """

    def __init__(self, num_classes=14, out_stride=8, dropout=0.1, refiner_stride=8, refiner_cfg=None, **kwargs):
        super().__init__()
        self.backbone = resnet50c(replace_stride_with_dilation=[False, True, True])
        att = Attention(self.backbone.channels,
                        down_stride=8,
                        method="dnl",
                        mid_channels=512,
                        out_channels=256,
                        num_conv_pre=1,
                        num_conv_post=1,
                        attention_cfg={'reduce_ratio': 4},
                        add_fluff=True,
                        fluff_reduction_ratio=16)
        self.attachments = nn.SequentialCell([att, ])
        self.out_stride = out_stride
        self.num_classes = num_classes
        channels = att.channels
        # NOTE(review): nn.Dropout's first argument changed meaning between
        # MindSpore versions (keep_prob in 1.x vs drop probability p in 2.x)
        # — confirm `dropout` matches the installed API.
        self.classifier = nn.SequentialCell(
            nn.Dropout(dropout if dropout > 0 else 0),
            Conv2d(channels[out_stride], num_classes, 1, 1, 0, bias=True),
        )
        self.resize = UpSample(num_classes, out_stride)

        self.refiner_stride = refiner_stride
        if refiner_stride > 0:
            # Bug fix: work on a copy so setdefault() below does not mutate
            # the caller's config dict.
            refiner_cfg = {} if refiner_cfg is None else dict(refiner_cfg)
            refiner_cfg.setdefault('coarse_pred_stride', refiner_stride)
            refiner_cfg.setdefault('in_stride', [4, 2])
            refiner_cfg.setdefault('out_stride', 2)
            refiner_cfg.setdefault('fc_dim', 128)
            refiner_cfg.setdefault('dropout_ratio', dropout)
            self.refiner = SimpleRender(channels, self.num_classes, **refiner_cfg)
            self.refiner_resize = UpSample(num_classes, 2)

    def construct(self, x):
        """Return a list of full-resolution logit maps: coarse, then (optionally) refined."""
        features = self.backbone(x)
        features = self.attachments(features)
        features["1"] = x
        outputs = []
        o = self.classifier(get_feature(features, self.out_stride))
        outputs.append(self.resize(o))
        # The refiner only runs when it consumes logits at the same stride
        # the classifier produced them at.
        if self.refiner_stride == self.out_stride:
            o = self.refiner(features, o)
            outputs.append(self.refiner_resize(o))
        return outputs


class BuildTrainNetwork(nn.Cell):
    """Wrap a multi-output network and a criterion into one loss-producing cell."""

    def __init__(self, network, criterion):
        super(BuildTrainNetwork, self).__init__()
        self.network = network
        self.criterion = criterion
        self.cast = P.Cast()
        self.add = P.TensorAdd()

    def construct(self, input_data, label):
        """Run the network and sum the criterion over every output head."""
        predictions = self.network(input_data)
        total_loss = None
        for pred in predictions:
            # Losses are accumulated in float32 even if the net runs in fp16.
            pred = self.cast(pred, mstype.float32)
            head_loss = self.criterion(pred, label)
            total_loss = head_loss if total_loss is None else self.add(total_loss, head_loss)
        return total_loss


def _test():
    """Smoke-test the Segmentation forward pass and the training wrapper on random data."""
    import mindspore.context as context
    context.set_context(device_target="GPU")
    ## Test Segmentation
    image_size = 256
    x = Tensor(np.random.rand(2, 3, image_size, image_size).astype(np.float32))
    # Bug fix: Segmentation's first parameter is num_classes, not an
    # image-size tuple — the old call passed (H, W) as num_classes and
    # 13 as out_stride.
    net = Segmentation(num_classes=13)
    y = net(x)
    print([v.shape for v in y])
    net.compile_and_run(x)

    from src.loss import SoftmaxCrossEntropyLoss
    y = Tensor(np.random.randint(0, 13, (2, image_size, image_size)).astype(np.int32))
    net.to_float(mstype.float16)

    net = BuildTrainNetwork(net, SoftmaxCrossEntropyLoss(13))
    print(net(x, y))
    net.compile_and_run(x, y)


def _test2():
    """Cross-check the MindSpore Segmentation net against its PyTorch counterpart.

    Loads a sample image, runs both implementations, converts the torch
    weights into a MindSpore param dict (expanding depthwise-style 1-channel
    resize kernels to diagonal full kernels), and prints per-output max
    absolute differences before and after a checkpoint round-trip.
    """
    import cv2
    import torch
    import mindspore.context as context
    from mindspore import Parameter
    from detection.attachment.attention import Attention as Attention_torch
    from extension.backbones.resnet import resnet50c as R50c_torch
    from detection.models.SegmentEncoderDecoder2 import SegmentEncoderDecoder2
    from src.conver_torch_model_to_mindspore import convert
    from mindspore.train.serialization import load_param_into_net, save_checkpoint, load_checkpoint
    context.set_context(device_target="GPU")

    img = cv2.imread('/home/wan/data/seg_naic/examples/images/000135.tif')
    img = (img / 255. - 0.5) / 0.5  # normalize to [-1, 1]
    # BGR -> RGB, HWC -> NCHW.
    x = np.ascontiguousarray(np.transpose(img[:, :, ::-1], (2, 0, 1)))[np.newaxis, :, :, :]
    x = x.astype(np.float32)
    print(x.shape)
    net = Segmentation(dropout=0.1)
    y = net(Tensor(x))
    print([v.shape for v in y])
    net.compile_and_run(Tensor(x))
    net.set_train(False)

    # Build the reference torch model with the identical configuration.
    backbone_torch = R50c_torch(replace_stride_with_dilation=[False, True, True])
    attachments_torch = torch.nn.Sequential(
        Attention_torch(
            channels=backbone_torch.channels,
            down_stride=8,
            method="dnl",
            mid_channels=512,
            out_channels=256,
            num_conv_pre=1,
            num_conv_post=1,
            attention_cfg={'reduce_ratio': 4},
            add_fluff=True,
            fluff_reduction_ratio=16
        )
    )
    net2 = SegmentEncoderDecoder2(backbone_torch, attachments_torch, 14, dropout=0.1)
    pth = torch.load('../../results/best.pth', map_location='cpu')
    net2.load_state_dict(pth)
    net2.eval()
    y2 = net2(torch.from_numpy(x))

    param_dict = convert(net2.state_dict())
    for key, value in param_dict.items():
        # Expand (N, 1, H, W) resize kernels into diagonal (N, N, H, W) ones.
        if key.endswith('.resize.weight') and value.shape[1] == 1:
            print(key)
            kernel = value.asnumpy()
            n_out, _, kh, kw = kernel.shape
            diagonal = np.zeros_like(kernel, shape=(n_out, n_out, kh, kw))
            for i in range(n_out):
                diagonal[i, i, :, :] = kernel[i, 0, :, :]
            param_dict[key] = Parameter(Tensor(diagonal), name=key)
    load_param_into_net(net, param_dict)
    net.set_train(False)
    y = net(Tensor(x))
    print([v.shape for v in y2])

    print(x.reshape(-1)[:10])
    print([v.view(-1)[:10] for v in y2])
    for k, ref in enumerate(y2):
        print(k, np.abs(y[k].asnumpy() - ref.cpu().detach().numpy()).max())
    save_checkpoint(net, "../checkpoints/model.ckpt")

    # Round-trip through a checkpoint and verify the outputs still match.
    param_dict = load_checkpoint("../checkpoints/model.ckpt")
    load_param_into_net(net, param_dict)
    net.set_train(False)
    y = net(Tensor(x))
    for k, ref in enumerate(y2):
        print(k, np.abs(y[k].asnumpy() - ref.cpu().detach().numpy()).max())


# Default entry point: run the torch-vs-mindspore conversion parity check.
if __name__ == '__main__':
    _test2()
