# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import division

import argparse
import os

import cv2
import mindspore.nn as nn
import numpy as np
from mindspore import Tensor
from mindspore import context
from mindspore.ops import operations as P
from mindspore.train.serialization import load_checkpoint, load_param_into_net

from src.network import Segmentation


def parse_args():
    """Build and parse the evaluation CLI arguments.

    Returns the parsed ``argparse.Namespace``. Unknown arguments are ignored
    (``parse_known_args``) so launcher-injected flags do not abort the script.
    """
    parser = argparse.ArgumentParser('mindspore NAIC eval')

    # val data
    parser.add_argument('--data_root', type=str, default='', help='root path of val data')
    parser.add_argument('--data_lst', type=str, default='', help='list of val data')
    parser.add_argument('--data_dir', type=str, default='', help='directory of val data')
    parser.add_argument('--batch_size', type=int, default=10, help='batch size')
    # Fix: `type=list` would split a CLI string into single characters
    # (e.g. "0.5,0.5,0.5" -> ['0', '.', '5', ...]).  `nargs='+'` with
    # `type=float` parses "--image_mean 0.5 0.5 0.5" into [0.5, 0.5, 0.5];
    # the defaults are unchanged.
    parser.add_argument('--image_mean', type=float, nargs='+', default=[0.5, 0.5, 0.5], help='image mean')
    parser.add_argument('--image_std', type=float, nargs='+', default=[0.5, 0.5, 0.5], help='image std')
    parser.add_argument('--scales', type=float, action='append', help='scales of evaluation')
    parser.add_argument('--flip', action='store_true', help='perform left-right flip')
    parser.add_argument('--ignore_label', type=int, default=255, help='ignore label')
    parser.add_argument('--num_classes', type=int, default=17, help='number of classes')

    # model
    parser.add_argument('--ckpt_path', type=str, default='', help='model to evaluate')

    parser.add_argument('--device', type=str, default='GPU', choices=['GPU', 'Ascend'])
    args, _ = parser.parse_known_args()
    return args


def cal_hist(a, b, n):
    """Return the n x n confusion matrix of labels `a` (truth) vs `b` (prediction).

    Entries of `a` outside [0, n) are ignored (treated as void labels).
    """
    valid = (a >= 0) & (a < n)
    flat = a[valid].astype(np.int32) * n + b[valid]
    return np.bincount(flat, minlength=n * n).reshape(n, n)


class BuildEvalNetwork(nn.Cell):
    """Inference wrapper: NCHW logits from `network` -> NHWC softmax probabilities."""

    def __init__(self, network):
        super(BuildEvalNetwork, self).__init__()
        self.network = network
        self.permute = P.Transpose()
        self.softmax = P.Softmax(axis=-1)

    def construct(self, input_data):
        # The backbone returns a tuple; only the first output is evaluated.
        logits = self.network(input_data)[0]
        nhwc = self.permute(logits, (0, 2, 3, 1))
        probs = self.softmax(nhwc)
        return probs


class Solver:
    """Batched sliding-window inference over a large square image.

    CHW tiles are queued with ``add_inputs`` and pushed through ``model`` in
    batches.  Per-tile NHWC probability maps are accumulated into ``solution``
    while ``cnt`` counts how many predictions cover each pixel, so ``end``
    returns the per-pixel average over overlapping tiles (and over the flipped
    pass when ``flip`` is set).
    """

    def __init__(self, model, size=256, batch_size=100, num_classes=14, flip=0):
        self.model = model          # callable: NCHW batch Tensor -> NHWC probabilities
        self.batch_size = batch_size
        self.size = size
        self.num_classes = num_classes
        self.images = []            # queued CHW tiles
        self.offsets = []           # (x, y) top-left offset of each queued tile
        # Fix: match reset() -- float32 accumulator, and cnt with a trailing
        # singleton axis so it broadcasts over the class dimension in run()/end().
        # (The original allocated a 2-D cnt here, which run() cannot index.)
        self.solution = np.zeros((size, size, num_classes), dtype=np.float32)
        self.cnt = np.zeros((size, size, 1), dtype=np.int32)
        self.flip = flip

    def reset(self, size):
        """Drop any queued tiles and re-allocate buffers for side length `size`."""
        self.size = size
        self.images = []
        self.offsets = []
        self.solution = np.zeros((size, size, self.num_classes), dtype=np.float32)
        self.cnt = np.zeros((size, size, 1), dtype=np.int32)

    def add_inputs(self, images: np.ndarray, offset):
        """Queue one CHW tile and its (x, y) offset; flush when the batch fills."""
        self.images.append(images)
        self.offsets.append(offset)
        if len(self.images) == self.batch_size:
            self.run()

    def end(self):
        """Flush remaining tiles and return the averaged HWC probability map."""
        if len(self.images) > 0:
            self.run()
        # Every pixel must be covered by at least one tile before dividing.
        assert (self.cnt == 0).sum() == 0
        return self.solution / self.cnt.astype(np.float32)

    def run(self):
        """Run the model on the queued batch and accumulate into the buffers."""
        inputs = np.stack(self.images)
        H, W = inputs.shape[-2:]
        self.images.clear()
        cnt = 0
        solutions = self.model(Tensor(inputs)).asnumpy()
        cnt += 1
        if self.flip:
            # Fix: a left-right flip is axis 3 (W) of the NCHW input; the model
            # output is NHWC, where W is axis 2 -- un-flip it there.  The
            # original flipped the input on axis 2 (H, a vertical flip) while
            # un-flipping the output horizontally, which mixed mismatched maps.
            solutions += self.model(Tensor(inputs[:, :, :, ::-1])).asnumpy()[:, :, ::-1, :]
            cnt += 1
        for k, (x, y) in enumerate(self.offsets):
            # Fix: accumulate (+=) rather than overwrite, so overlapping tiles
            # are averaged by end(); plain assignment kept only the last tile
            # but still divided by the overlap count, under-weighting overlaps.
            self.solution[x:x + H, y:y + W, :] += solutions[k]
            self.cnt[x:x + H, y:y + W, :] += cnt
        self.offsets.clear()
        return


class PreProcess:
    """Normalize a BGR HWC uint8 image into a CHW RGB float32 array."""

    def __init__(self, args):
        # Mean/std are given in [0, 1]; rescale them to the [0, 255] pixel range.
        mean = np.asarray(args.image_mean, dtype=np.float32)
        std = np.asarray(args.image_std, dtype=np.float32)
        self.image_mean = mean * 255.
        self.image_std = 1. / (std * 255.)

    def __call__(self, img):
        normalized = (img.astype(np.float32) - self.image_mean) * self.image_std
        rgb = normalized[:, :, ::-1]        # BGR -> RGB
        chw = np.transpose(rgb, (2, 0, 1))  # HWC -> CHW
        return np.ascontiguousarray(chw)


def predict(solver: Solver, input_path: str, output_dir=None, scales=None):
    """Multi-scale tiled inference on one square image.

    For each scale the image is resized so a 256x256 window with stride 128
    tiles it exactly, the tiles are averaged by `solver`, and the per-scale
    maps are summed back at the original resolution.  Returns the remapped
    label image; optionally writes it to `output_dir` as "<stem>.png".
    """
    img = cv2.imread(input_path)
    print('Image shape:', img.shape, end='', flush=True)
    H, W, _ = img.shape
    assert H == W
    stride, tile = 128, 256
    results = 0
    for scale in (scales if scales is not None else [1.]):
        # Number of windows per side so that (num-1)*stride + tile >= H*scale.
        num = max((int(H * scale) - tile - 1 + stride) // stride + 1, 1)
        side = (num - 1) * stride + tile
        resized = cv2.resize(img, (side, side), interpolation=cv2.INTER_CUBIC)
        tensor = solver.model.pre_process(resized)
        solver.reset(side)

        starts = [k * stride for k in range(num)]
        for x in starts:
            for y in starts:
                solver.add_inputs(tensor[:, x:x + tile, y:y + tile], (x, y))
        merged = solver.end()
        # Nearest-neighbour keeps the probability map crisp when snapping back.
        results = results + cv2.resize(merged, (H, W), interpolation=cv2.INTER_NEAREST)
    labels = np.argmax(results, axis=-1)
    labels = solver.model.label_remap[labels]

    if output_dir is not None:
        os.makedirs(output_dir, exist_ok=True)
        name = os.path.split(os.path.splitext(input_path)[0])[-1] + ".png"
        cv2.imwrite(os.path.join(output_dir, name), labels)
    return labels


def net_eval():
    """Evaluate a Segmentation checkpoint over a list of images.

    Reads ``args.data_lst`` (lines of "<image_path> <mask_path>", relative to
    ``args.data_root``), runs multi-scale tiled inference on each image, and
    prints the normalized confusion matrix, per-class IoU, mean IoU and
    frequency-weighted IoU.
    """
    np.set_printoptions(precision=3, linewidth=150, suppress=True)
    args = parse_args()
    if args.device == 'GPU':
        context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=False)
    else:
        # NOTE(review): assumes the DEVICE_ID env var is set on Ascend;
        # int(os.getenv(...)) raises TypeError if it is missing -- confirm the
        # launch scripts always export it.
        context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False,
                            device_id=int(os.getenv('DEVICE_ID')))
    # data list
    with open(args.data_lst) as f:
        img_lst = f.readlines()
    print(f'There are {len(img_lst)} images to test', flush=True)
    # network & load model
    # The network predicts num_classes - 3 classes; presumably 3 dataset labels
    # are unused/merged (see label_remap below) -- TODO confirm against src.network.
    network = Segmentation(args.num_classes - 3)
    param_dict = load_checkpoint(args.ckpt_path)
    load_param_into_net(network, param_dict)
    eval_net = BuildEvalNetwork(network)
    eval_net.set_train(False)

    # Attach helpers directly onto the cell so predict()/Solver can reach them
    # through solver.model.
    eval_net.pre_process = PreProcess(args)

    # Map network output ids to dataset label ids: ids 0-2 -> labels 1-3, and
    # ids >= 3 -> id + 4 (dataset labels 4-6 are never produced).
    label_remap = np.arange(args.num_classes) + 1
    label_remap[label_remap > 3] += 3
    eval_net.label_remap = label_remap
    eval_net.num_classes = args.num_classes - 3

    solver = Solver(eval_net, batch_size=args.batch_size, num_classes=args.num_classes - 3, flip=args.flip)

    print('complete network initialization, begin to test')
    # evaluate: accumulate a confusion matrix over all images
    hist = np.zeros((args.num_classes, args.num_classes))
    for i, line in enumerate(img_lst):
        img_path, msk_path = line.strip().split(' ')
        img_path = os.path.join(args.data_root, img_path)
        msk_path = os.path.join(args.data_root, msk_path)

        msk_ = cv2.imread(msk_path, cv2.IMREAD_GRAYSCALE) - 1  # [1-17] --> [0-16]
        res_ = predict(solver, img_path, scales=args.scales) - 1
        hist += cal_hist(msk_.flatten(), res_.flatten(), args.num_classes)
        print(f'processed {i + 1} / {len(img_lst)} images     ', end='\r', flush=True)
    print()

    print(hist / hist.sum())
    freq = hist.sum(1) / hist.sum()  # ground-truth class frequencies
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))  # per-class IoU
    print('per-class IoU', iu)
    print('mean IoU', np.nanmean(iu))
    print('FWIoU', np.nansum(freq * iu))


# Script entry point: run evaluation only when executed directly, not on import.
if __name__ == '__main__':
    net_eval()
