
import argparse
import os
import sys
import importlib
import mindspore
import mindspore.dataset as ds
import mindspore.ops as ops

from mindspore import context,load_checkpoint,load_param_into_net
from mindspore.communication.management import init
from mindspore.common import set_seed
from mindspore.context import ParallelMode
from tqdm import tqdm
import numpy as np


# Fix the global RNG seed so runs are reproducible.
set_seed(1)

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Great-grandparent of this file is the project root; make both it and the
# bundled "mind3d" package importable.
_PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(BASE_DIR)))
print(_PROJECT_ROOT)
sys.path.append(_PROJECT_ROOT)
ROOT_DIR = os.path.join(_PROJECT_ROOT, "mind3d")
sys.path.append(ROOT_DIR)

from mind3d.dataset.S3DIS import S3DISDataset
from mind3d.utils.load_yaml import load_yaml
from visualization.visualization import show_pointcloud_seg
from mind3d.models.dgcnn import DGCNN_seg
from mind3d.utils.pointnet2_util import log_string
from mind3d.dataset.scannet import ScannetDatasetWholeScene_infer
from mind3d.models.pointnet2 import Pointnet2segModelMSG,Pointnet2segModelSSG



def dgcnn_seg_infer(opt):
    """Run DGCNN semantic-segmentation inference on S3DIS.

    Iterates over the six S3DIS test areas, loads the per-area pretrained
    checkpoint, runs the model over every room of that area and visualizes
    the predicted per-point labels.

    Args:
        opt (dict): parsed YAML options; reads ``datasets.val`` ("area",
            "resize"), ``train.k`` and ``val.pretrained_ckpt``.
    """
    # BUGFIX: the original nested two identical `for test_area in range(1, 7)`
    # loops, running every area six times; a single loop is intended.
    # BUGFIX: the original also scanned room_filelist.txt comparing a single
    # character (str) against an int — never True — and discarded the result;
    # that dead scan has been removed.
    for test_area in range(1, 7):
        area_opt = opt["datasets"]['val'].get("area")
        if area_opt != "all" and test_area != area_opt:
            continue

        test_dataset_generator = S3DISDataset(split="eval",
                                              num_points=opt["datasets"]['val'].get("resize"),
                                              test_area=test_area)

        test_ds = ds.GeneratorDataset(test_dataset_generator, ["data", "label"], shuffle=True)
        test_ds = test_ds.batch(batch_size=1)

        # Create the model and restore the checkpoint trained with this
        # area held out.
        model = DGCNN_seg(opt, opt['train'].get("k"))
        param_dict = load_checkpoint(os.path.join(opt["val"]["pretrained_ckpt"],
                                                  "model_%s.ckpt" % test_area))
        load_param_into_net(model, param_dict)
        print("successfully load pretrain model_%s.ckpt" % test_area)
        model.set_train(False)
        model.set_grad(False)

        for _, data in tqdm(enumerate(test_ds.create_dict_iterator(), 0)):
            points, _target = data["data"], data["label"]

            seg_pred = model(points)
            seg_pred = seg_pred.transpose(0, 2, 1)  # -> (B, N, num_classes)
            argmax = ops.ArgMaxWithValue(axis=2)
            index, _pred = argmax(seg_pred)  # per-point predicted class index
            pred_np = index.asnumpy()
            points = points.squeeze(0).asnumpy()
            show_pointcloud_seg(points, pred_np, save=None)


def infer(model, test_ds, test_steps_per_epoch, batch_size, num_classes):
    """Run whole-scene inference and return coordinates with predictions.

    Args:
        model: segmentation network mapping (B, N, C) coords to class scores.
        test_ds: MindSpore dataset yielding "data", "label" and "weights".
        test_steps_per_epoch (int): number of scenes (for the progress bar).
        batch_size (int): maximum number of chunks fed to the model at once.
        num_classes (int): unused; kept for interface compatibility.

    Returns:
        tuple: ``(coords, preds)`` of the LAST iterated scene —
            coords (np.ndarray): flattened (CK*N, C) input coordinates,
            preds (np.ndarray): flattened (CK*N,) predicted class indices.
    """
    print("infer...")

    coords = preds = None
    for _, data in tqdm(enumerate(test_ds.create_dict_iterator(), 0),
                        total=test_steps_per_epoch, smoothing=0.9):
        # "label" and "weights" are part of the dataset schema but are not
        # needed for raw inference.
        coords = data["data"]
        pred = []
        n, _, chan = coords.shape
        # Feed the scene's n chunks through the model at most batch_size at a
        # time: ceil(n / batch_size) iterations, last slice may be short.
        # (The original had a separate, duplicated code path for
        # n <= batch_size; this loop covers that case in one iteration.)
        for i in range((n - 1) // batch_size + 1):
            end = min((i + 1) * batch_size, n)
            coord = coords[i * batch_size:end, :, :]
            coord = coord.astype("float32")
            output = model(coord)
            pred.append(output)

        stacked = ops.Concat(0)(pred)
        stacked = ops.ExpandDims()(stacked, 0)  # (1, CK, N, num_classes)
        chunk_preds = stacked.argmax(3)

        coords = coords.view(-1, chan).asnumpy()            # (CK*N, C)
        preds = chunk_preds.squeeze(0).view(-1).asnumpy()   # (CK*N,)

    # NOTE(review): only the last scene's result survives the loop, matching
    # the original behavior; with multiple scenes the earlier ones are
    # silently discarded — confirm this is intended.
    return coords, preds


def pointnet2_seg_infer(opt):
    """PointNet++ semantic-segmentation inference on ScanNet whole scenes.

    Configures the MindSpore execution context (Ascend/GPU, optionally
    distributed), builds the whole-scene inference dataset, restores the
    pretrained checkpoint, runs :func:`infer` and visualizes the result.

    Args:
        opt (dict): parsed YAML options; reads ``device_target``,
            ``run_distribute``, ``datasets.val`` and ``val.pretrained_ckpt``.

    Raises:
        ValueError: if ``device_target`` is neither "Ascend" nor "GPU".
    """
    # Device selection comes from the launcher's environment.
    device_id = int(os.getenv('DEVICE_ID', '0'))
    device_num = int(os.getenv('RANK_SIZE', '1'))

    if opt['device_target'] not in ("Ascend", "GPU"):
        raise ValueError("Unsupported platform {}".format(opt['device_target']))

    if opt['device_target'] == "Ascend":
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="Ascend",
                            device_id=device_id)
        # PointNet++ graphs nest deeply; raise the default call-depth limit.
        context.set_context(max_call_depth=20480)
    else:
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="GPU",
                            max_call_depth=20480)

    # Distributed setup (mirrors the training script).
    if opt['run_distribute']:
        if opt['device_target'] == "Ascend":
            if device_num > 1:
                init()
                context.set_auto_parallel_context(
                    parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
        else:
            if device_num > 1:
                mindspore.dataset.config.set_enable_shared_mem(False)
                context.set_auto_parallel_context(
                    parallel_mode=context.ParallelMode.DATA_PARALLEL,
                    gradients_mean=True,
                    device_num=device_num)
                mindspore.common.set_seed(1234)
                init()
            else:
                context.set_context(device_id=device_id)

    batch_size = opt['datasets']['val'].get('batch')

    # Data pipeline.
    print("preparing data...")
    testdata = ScannetDatasetWholeScene_infer(
        path=opt['datasets']['val'].get('data_path'),
        phase='infer',
        is_weighting=not opt['datasets']['val'].get('use_no_weighting'),
        use_color=opt['datasets']['val'].get('use_color'),
        use_normal=opt['datasets']['val'].get('use_normal'))
    test_ds = ds.GeneratorDataset(testdata, ["data", "label", "weights"],
                                  num_parallel_workers=1, shuffle=True)
    test_steps_per_epoch = test_ds.get_dataset_size()

    print("initializing...")

    # Model loading.
    model = Pointnet2segModelSSG(
        num_classes=opt['datasets']['val'].get('num_classes'),
        use_color=opt['datasets']['val'].get('use_color'),
        use_normal=opt['datasets']['val'].get('use_normal'))
    # BUGFIX: set_train() defaults to True, which left the network in
    # training mode during inference; evaluation mode is required here
    # (consistent with dgcnn_seg_infer).
    model.set_train(False)

    # Restore pretrained weights.
    param_dict = load_checkpoint(opt['val']['pretrained_ckpt'])
    load_param_into_net(model, param_dict)

    # Inference + visualization.
    xyz, predicted = infer(model, test_ds, test_steps_per_epoch, batch_size,
                           opt['datasets']['val'].get('num_classes'))
    show_pointcloud_seg(xyz, seg=predicted, save=None)
    print('infer completed...')

if __name__ == "__main__":
    # Inference is driven entirely by a single YAML options file.
    parser = argparse.ArgumentParser(description="Infer.")
    parser.add_argument("-opt", type=str, default="/home/cxh/音乐/MindSpore-DGCNN/dgcnn_s3dis_seg.yaml",
                        help='Path to option YAML file.')
    args = parser.parse_known_args()[0]
    opt = load_yaml(args.opt)
    network_name = opt["model_name"]
    if network_name == "DGCNN_seg":
        dgcnn_seg_infer(opt)
    elif network_name == "Pointnet2_seg":
        pointnet2_seg_infer(opt)
    else:
        # Fail loudly instead of silently doing nothing on an unknown name.
        raise ValueError("Unsupported model_name: {}".format(network_name))
