# 自定义网络
import time

import mindspore
import numpy as np
from PIL import ImageSequence
from mindspore import context, Model

import downloadUNetData

# Hyper-parameter configuration consumed by train_net() / test_net().
# NOTE(review): num_channels=22 looks suspicious — test_net feeds the model a
# (1, 1, 572, 572) tensor, which implies 1 input channel; confirm against data.
cfg = {'name': 'Unet', 'lr': 0.0001, 'epochs': downloadUNetData.epochs, 'distribute_epochs': 1600, 'batchsize': 16,
       'cross_valid_ind': 1, 'num_classes': 2, 'num_channels': 22,
       'resume': False,
       # Bug fix: the keys below are read by train_net() (CheckpointConfig,
       # nn.Adam, FixedLossScaleManager) but were missing, so training raised
       # KeyError. Values follow the standard UNet-medical configuration.
       'keep_checkpoint_max': 10,
       'weight_decay': 0.0005,
       'loss_scale': 1024.0,
       'FixedLossScaleManager': 1024.0}

import mindspore.nn as nn
import mindspore.ops.operations as F
from mindspore.communication.management import init, get_group_size
from mindspore.common.initializer import TruncatedNormal
from mindspore.nn import CentralCrop


class DoubleConv(nn.Cell):
    """Two unpadded 3x3 convolutions, each followed by ReLU (classic U-Net block)."""

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        # Default the hidden width to the output width when not supplied.
        mid_channels = mid_channels or out_channels
        layers = [
            nn.Conv2d(in_channels, mid_channels, kernel_size=3, has_bias=True,
                      weight_init=TruncatedNormal(0.06), pad_mode='valid'),
            nn.ReLU(),
            nn.Conv2d(mid_channels, out_channels, kernel_size=3, has_bias=True,
                      weight_init=TruncatedNormal(0.06), pad_mode='valid'),
            nn.ReLU(),
        ]
        self.double_conv = nn.SequentialCell(layers)

    def construct(self, x):
        """Apply conv-ReLU-conv-ReLU; each 'valid' conv shrinks H and W by 2."""
        return self.double_conv(x)


class Down(nn.Cell):
    """Encoder stage: 2x2 max-pool halving H/W, followed by a DoubleConv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        pool = nn.MaxPool2d(kernel_size=2, stride=2)
        conv = DoubleConv(in_channels, out_channels)
        self.maxpool_conv = nn.SequentialCell([pool, conv])

    def construct(self, x):
        """Downsample, then extract features."""
        return self.maxpool_conv(x)


class Up1(nn.Cell):
    """First decoder stage: upsample, crop the skip tensor, concat, double conv.

    The crop fraction 56/64 matches the fixed 572x572-input U-Net geometry:
    the upsampled feature map is 56x56 while the encoder skip tensor is 64x64.
    Cleanup: removed the unused self.print_fn debug operator.
    """

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        self.concat = F.Concat(axis=1)  # concatenate along the channel axis
        self.factor = 56.0 / 64.0
        self.center_crop = CentralCrop(central_fraction=self.factor)
        self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        self.up = nn.Conv2dTranspose(in_channels, in_channels // 2, kernel_size=2, stride=2)
        self.relu = nn.ReLU()

    def construct(self, x1, x2):
        """x1: decoder feature map; x2: matching encoder skip tensor."""
        x1 = self.up(x1)
        x1 = self.relu(x1)
        x2 = self.center_crop(x2)  # crop skip tensor to x1's spatial size
        x = self.concat((x1, x2))
        return self.conv(x)


class Up2(nn.Cell):
    """Second decoder stage: upsample, crop the skip tensor, concat, double conv.

    Bug fix: the original construct() returned the raw concatenation and never
    applied self.conv, so the output kept in_channels channels and broke the
    shapes expected by the following Up3 stage. It now mirrors Up1/Up3/Up4.
    """

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        self.concat = F.Concat(axis=1)  # concatenate along the channel axis
        # 104/136: upsampled map is 104x104, encoder skip tensor is 136x136.
        self.factor = 104.0 / 136.0
        self.center_crop = CentralCrop(central_fraction=self.factor)
        self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        self.up = nn.Conv2dTranspose(in_channels, in_channels // 2, kernel_size=2, stride=2)
        self.relu = nn.ReLU()

    def construct(self, x1, x2):
        """x1: decoder feature map; x2: matching encoder skip tensor."""
        x1 = self.up(x1)
        x1 = self.relu(x1)
        x2 = self.center_crop(x2)
        x = self.concat((x1, x2))
        return self.conv(x)  # was: return x (conv skipped — bug)


class Up3(nn.Cell):
    """Third decoder stage: upsample, crop the skip tensor, concat, double conv.

    Cleanup: removed the unused self.print_fn debug operator.
    """

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        self.concat = F.Concat(axis=1)  # concatenate along the channel axis
        # 200/280: upsampled map is 200x200, encoder skip tensor is 280x280.
        self.factor = 200 / 280
        self.center_crop = CentralCrop(central_fraction=self.factor)
        self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        self.up = nn.Conv2dTranspose(in_channels, in_channels // 2, kernel_size=2, stride=2)
        self.relu = nn.ReLU()

    def construct(self, x1, x2):
        """x1: decoder feature map; x2: matching encoder skip tensor."""
        x1 = self.up(x1)
        x1 = self.relu(x1)
        x2 = self.center_crop(x2)
        x = self.concat((x1, x2))
        return self.conv(x)


class Up4(nn.Cell):
    """Fourth decoder stage: upsample, crop the skip tensor, concat, double conv."""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        self.concat = F.Concat(axis=1)  # join along the channel axis
        # 392/568: upsampled map is 392x392, encoder skip tensor is 568x568.
        self.factor = 392 / 568
        self.center_crop = CentralCrop(central_fraction=self.factor)
        self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        self.up = nn.Conv2dTranspose(in_channels, in_channels // 2, kernel_size=2, stride=2)
        self.relu = nn.ReLU()

    def construct(self, x1, x2):
        """x1: decoder features; x2: matching encoder skip tensor."""
        upsampled = self.relu(self.up(x1))
        cropped = self.center_crop(x2)
        return self.conv(self.concat((upsampled, cropped)))


class OutConv(nn.Cell):
    """Final 1x1 convolution mapping feature channels to per-pixel class logits."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1,
                              has_bias=True, weight_init=TruncatedNormal(0.06))

    def construct(self, x):
        """Return the class-logit map."""
        return self.conv(x)


from scipy.special import softmax


class UNet(nn.Cell):
    """Classic valid-padding U-Net: four encoder stages, bottleneck, four decoder
    stages with skip connections, and a 1x1 output head."""

    def __init__(self, n_channels, n_classes):
        super(UNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        # Encoder path: feature channels double at every stage.
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 1024)
        # Decoder path: each stage fuses the matching encoder skip tensor.
        self.up1 = Up1(1024, 512)
        self.up2 = Up2(512, 256)
        self.up3 = Up3(256, 128)
        self.up4 = Up4(128, 64)
        self.outc = OutConv(64, n_classes)

    def construct(self, x):
        """Return per-pixel class logits for the input batch."""
        skip1 = self.inc(x)
        skip2 = self.down1(skip1)
        skip3 = self.down2(skip2)
        skip4 = self.down3(skip3)
        bottom = self.down4(skip4)
        out = self.up1(bottom, skip4)
        out = self.up2(out, skip3)
        out = self.up3(out, skip2)
        out = self.up4(out, skip1)
        return self.outc(out)


from mindspore.nn.loss.loss import LossBase


class CrossEntropyWithLogits(LossBase):
    """Mean softmax cross-entropy over per-pixel logits.

    Transposes NCHW logits to NHWC, flattens logits and one-hot labels to
    (-1, 2) and averages the per-pixel losses.
    """

    def __init__(self):
        super(CrossEntropyWithLogits, self).__init__()
        self.transpose_fn = F.Transpose()
        self.reshape_fn = F.Reshape()
        self.soft_max_cross_entroy_loss = nn.SoftmaxCrossEntropyWithLogits()
        self.cast = F.Cast()
        # Bug fix: construct() calls self.reduce_mean, which was never
        # defined, raising AttributeError on the first loss evaluation.
        self.reduce_mean = F.ReduceMean()

    def construct(self, logits, labels):
        # NCHW -> NHWC so the class axis is last before flattening to (-1, 2).
        logits = self.transpose_fn(logits, (0, 2, 3, 1))
        logits = self.cast(logits, mindspore.float32)
        loss = self.reduce_mean(self.soft_max_cross_entroy_loss(self.reshape_fn(logits, (-1, 2)),
                                                                self.reshape_fn(labels, (-1, 2))))
        return self.get_loss(loss)


from mindspore.train.callback import Callback
import time

from mindspore.common.tensor import Tensor

# Default interval (in steps) between printed loss/FPS lines.
per_print_times = 100


class StepLossTimeMonitor(Callback):
    """Training callback that times each step and periodically prints loss and FPS.

    Bug fixes vs. the original:
    * step_begin/step_end were nested local functions inside __init__, so they
      were never registered as callback hooks and the monitor silently did
      nothing; they are now proper methods.
    * an invalid per_print_times returned a ValueError instead of raising it.
    """

    def __init__(self, batch_size, per_print_times=per_print_times):
        super(StepLossTimeMonitor, self).__init__()
        if not isinstance(per_print_times, int) or per_print_times < 0:
            raise ValueError('print_step must be int and >=0')

        self._per_print_times = per_print_times
        self.batch_size = batch_size

    def step_begin(self, run_context):
        # Record wall-clock start so step_end can compute throughput.
        self.step_time = time.time()

    def step_end(self, run_context):
        step_seconds = time.time() - self.step_time
        step_fps = self.batch_size * 1.0 / step_seconds

        cb_params = run_context.original_args()
        loss = cb_params.net_outputs

        # Training cells may return (loss, ...) tuples; keep the scalar loss.
        if isinstance(loss, (tuple, list)):
            if isinstance(loss[0], Tensor) and isinstance(loss[0].asnumpy(), np.ndarray):
                loss = loss[0]

        if isinstance(loss, Tensor) and isinstance(loss.asnumpy(), np.ndarray):
            loss = np.mean(loss.asnumpy())

        # Step index within the current epoch (1-based).
        cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1

        # Abort on NaN/Inf loss rather than continuing a diverged run.
        if isinstance(loss, float) and (np.isnan(loss) or np.isinf(loss)):
            raise ValueError('epoch: {} step:{}.Innvalid loss,terminating training.'.format(
                cb_params.cur_step_num, cur_step_in_epoch
            ))

        if self._per_print_times != 0 and cb_params.cur_step_num % self._per_print_times == 0:
            print('step:%s,loss is %s,fps is %s' % (cur_step_in_epoch, loss, step_fps), flush=True)


from mindspore.train.callback import CheckpointConfig, ModelCheckpoint
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.context import ParallelMode
import mindspore.context
import loadData


def train_net(data_dir, cross_valid_ind=1, epochs=10, batch_size=16, lr=0.0001, run_distribute=False, cfg=None):
    """Build the UNet, wire up loss/optimizer/callbacks and run training.

    data_dir: dataset root handed to loadData.create_dataset.
    Reads cfg keys: num_channels, num_classes, resume (and resume_ckpt),
    keep_checkpoint_max, weight_decay, loss_scale, FixedLossScaleManager.
    """
    # Optional data-parallel setup (one process per device).
    if run_distribute:
        init()
        device_count = get_group_size()
        context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
                                          device_num=device_count, gradients_mean=False)

    net = UNet(n_channels=cfg['num_channels'], n_classes=cfg['num_classes'])

    # Optionally warm-start from an earlier checkpoint.
    if cfg['resume']:
        load_param_into_net(net, load_checkpoint(cfg['resume_ckpt']))

    train_dataset, _ = loadData.create_dataset(data_dir, epochs, batch_size, True, cross_valid_ind, run_distribute)
    steps_per_epoch = train_dataset.get_dataset_size()
    print('dataset length is:', steps_per_epoch)

    # Save a checkpoint once per dataset pass, keeping at most keep_checkpoint_max.
    ckpt_config = CheckpointConfig(save_checkpoint_steps=steps_per_epoch,
                                   keep_checkpoint_max=cfg['keep_checkpoint_max'])
    ckpoint_cb = ModelCheckpoint(prefix='unet_medical_adam', directory='./UNETcheckpoints/', config=ckpt_config)

    criterion = CrossEntropyWithLogits()
    optimizer = nn.Adam(params=net.trainable_params(), learning_rate=lr, weight_decay=cfg['weight_decay'],
                        loss_scale=cfg['loss_scale'])
    loss_scale_manager = mindspore.train.loss_scale_manager.FixedLossScaleManager(cfg['FixedLossScaleManager'], False)
    model = Model(net, loss_fn=criterion, loss_scale_manager=loss_scale_manager, optimizer=optimizer, amp_level='O3')

    print('==============starting training============')
    # NOTE(review): the epoch count here is hard-coded to 2 while the dataset
    # is built with `epochs` repeats — confirm this is intentional.
    model.train(2, train_dataset, callbacks=[StepLossTimeMonitor(batch_size=batch_size), ckpoint_cb])
    print('----------------------end training-------------------')


# Script entry: choose the epoch budget for this run mode and start training.
# data_url = './dataset/case7BrainMRI/case7/BrainMRI'
data_url = './dataset/case7BrainMRI/case7/BrainMRI/train'
run_distribute = False
if run_distribute:
    epoch_size = cfg['distribute_epochs']
else:
    epoch_size = cfg['epochs']
train_net(data_dir=data_url, cross_valid_ind=cfg['cross_valid_ind'], epochs=epoch_size,
          batch_size=cfg['batchsize'], lr=cfg['lr'], run_distribute=run_distribute, cfg=cfg)


class dice_coeff(nn.Metric):
    """Mean Dice coefficient metric over NCHW one-hot predictions and labels.

    Bug fix: the original transposed only y to NHWC and then applied
    softmax(y_pred, axis=3) to an NCHW tensor, i.e. softmax over image width
    instead of the class channel. y_pred is now transposed like y first.
    """

    def __init__(self):
        super(dice_coeff, self).__init__()
        self.clear()

    def clear(self):
        """Reset the running Dice sum and the sample counter."""
        self._dice_coeff_sum = 0
        self._samples_num = 0

    def update(self, *inputs):
        """Accumulate the Dice coefficient for one (y_pred, y) batch."""
        if len(inputs) != 2:
            raise ValueError('Mean dice coefficient need input(y_pred,y),but got{}'.format(len(inputs)))

        y_pred = self._convert_data(inputs[0])
        y = self._convert_data(inputs[1])
        self._samples_num += y.shape[0]
        # NCHW -> NHWC for both tensors so the class axis is last.
        y_pred = y_pred.transpose(0, 2, 3, 1)
        y = y.transpose(0, 2, 3, 1)
        y_pred = softmax(y_pred, axis=3)
        inter = np.dot(y_pred.flatten(), y.flatten())
        union = np.dot(y_pred.flatten(), y_pred.flatten()) + np.dot(y.flatten(), y.flatten())
        # 2*|A∩B| / (|A|+|B|); epsilon guards against an all-empty union.
        single_dice_coeff = 2 * float(inter) / float(union + 1e-6)

        print('single dice coeff is:', single_dice_coeff)
        self._dice_coeff_sum += single_dice_coeff

    def eval(self):
        """Return the mean Dice coefficient over all updated samples."""
        if self._samples_num == 0:
            raise RuntimeError('total sample num must not be 0 ')
        return self._dice_coeff_sum / float(self._samples_num)


import PIL.Image as Image
from matplotlib import pyplot as plt


def test_net(data_dir, ckpt_path, cross_valid_ind=1, cfg=None):
    """Evaluate a trained UNet checkpoint and visualize one test slice.

    Computes the cross-validation Dice score on the validation split, then runs
    the model on frame 9 of the multi-page test TIFF and plots the MRI slice,
    the prediction and the label side by side.

    data_dir: dataset root for loadData.create_dataset.
    ckpt_path: checkpoint file produced by train_net.
    """
    # Rebuild the network and restore trained weights.
    net = UNet(n_channels=cfg['num_channels'], n_classes=cfg['num_classes'])
    parm_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, parm_dict)

    criterion = CrossEntropyWithLogits()
    _, valid_dataset = loadData.create_dataset(data_dir, 1, 1, False, cross_valid_ind, False)
    model = Model(net, loss_fn=criterion, metrics={'dice_coeff': dice_coeff()})

    print('==============starting evaluting======================')
    dice_score = model.eval(valid_dataset, dataset_sink_mode=False)
    print('cross valid dice coeff is ', dice_score)

    # Load every frame of the multi-page test image/label TIFFs.
    testimage = np.array([np.array(p) for p in
                          ImageSequence.Iterator(Image.open('./dataset/case7BrainMRI/case7/BrainMRI/test/test.tif'))])
    testlable = np.array([np.array(p) for p in ImageSequence.Iterator(
        Image.open('./dataset/case7BrainMRI/case7/BrainMRI/test/testlabel.tif'))])

    # Preprocess frame 9: resize to 388x388, mirror-pad 92 px on each side
    # (388 + 2*92 = 572) and scale pixel values to [-1, 1].
    testdata = testimage[9]
    image = Image.fromarray(testdata)
    image = image.resize((388, 388))
    testdata = np.asarray(image)

    testdata = np.pad(testdata, ((92, 92), (92, 92)), 'symmetric')
    testdata = testdata / 127.5 - 1
    testdata = testdata.astype(np.float32)
    # NOTE(review): shaped as a single channel here while cfg sets
    # num_channels=22 — confirm the config matches the data.
    testdata = testdata.reshape(1, 1, 572, 572)
    output = model.predict(Tensor(testdata))
    # Per-pixel predicted class = argmax over the channel (class) axis.
    pred = np.argmax(output.asnumpy(), axis=1)
    pred = pred.reshape(388, 388)
    plt.figure()
    plt.subplot(1, 3, 1)
    plt.title("MRI")
    # Bug fix: 'grey' is not a registered matplotlib colormap name; use
    # 'gray', consistent with the other two subplots.
    plt.imshow(testimage[9], cmap='gray')
    plt.subplot(1, 3, 2)
    plt.title('Predict')
    plt.imshow(pred, cmap='gray')
    plt.subplot(1, 3, 3)
    plt.title('label')
    plt.imshow(testlable[9], cmap='gray')
    plt.show()
