import os, sys, argparse
sys.path.append(os.path.abspath(os.path.join(os.getcwd())))
path = os.path.abspath(os.path.join(os.getcwd()))
import mindspore.nn as nn
from mindspore.common import set_seed
import numpy as np
import mindspore.ops as ops
from mindspore import context, load_checkpoint, load_param_into_net
from mindspore.context import ParallelMode
from mindspore.communication.management import init
from mind3d.dataset.ShapeNet import ShapeNetpartDataset
from mind3d.models.PointTransformer import PointTransformerSeg
from mind3d.models.pointnet import PointNet_seg
import mindspore.dataset as ds
import mindspore
import logging
import mindspore
from mind3d.utils import provider


def to_categorical(y, num_class):
    """Convert integer part-class labels to a one-hot float32 tensor.

    Args:
        y: integer label tensor/array (any shape); used to index identity rows.
        num_class (int): total number of classes (one-hot width).

    Returns:
        Tensor of shape ``(*y.shape, num_class)`` with 1.0 at each label index.
    """
    eye = ops.Eye()(num_class, num_class, mindspore.float32)
    # Indexing the identity matrix by the labels yields the one-hot rows.
    # The result is already a Tensor, so the original's extra
    # ``mindspore.Tensor(new_y)`` re-wrap was redundant and is dropped.
    return eye[mindspore.Tensor(y)]

class ModifyNetwork(nn.Cell):
    """Thin wrapper cell that simply forwards its input to the wrapped model."""

    def __init__(self, models):
        super(ModifyNetwork, self).__init__()
        # Underlying network to delegate to in construct().
        self.models = models

    def construct(self, data):
        # Pure pass-through: no pre/post-processing is applied.
        return self.models(data)

def Eval(args_opt):
    """Evaluate a ShapeNet part-segmentation model (PointTransformer or PointNet).

    Runs the configured network over the evaluation split and reports overall
    point accuracy, class-average accuracy, per-category mIoU and instance-average
    mIoU, logging results to ``args_opt.EvalSeg.logging_path``.

    Args:
        args_opt: attribute-dict config with ``TrainSeg``/``EvalSeg`` sections
            plus ``model``, ``device_target`` and ``run_distribute`` fields.

    Raises:
        ValueError: if ``device_target`` is not "Ascend"/"GPU", or ``model`` is
            not a supported network name.
    """
    set_seed(1)

    # BUGFIX: the original datefmt used %M (minutes) in the date part;
    # %m is the month directive.
    logging.basicConfig(filename=args_opt.EvalSeg.logging_path, filemode="w",
                        format="[%(asctime)s][line:%(lineno)d][%(levelname)s] %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S",
                        level=logging.INFO)

    # Device selection (env vars keep the original single-card defaults).
    device_id = int(os.getenv('DEVICE_ID', '1'))
    device_num = int(os.getenv('RANK_SIZE', '1'))

    if args_opt.device_target not in ("Ascend", "GPU"):
        raise ValueError("Unsupported platform {}".format(args_opt.device_target))

    if args_opt.device_target == "Ascend":
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="Ascend",
                            save_graphs=False,
                            device_id=device_id)
        context.set_context(max_call_depth=2048)
    else:
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="GPU",
                            save_graphs=False,
                            device_id=device_id,
                            max_call_depth=2048)

    # Optional distributed setup.
    if args_opt.run_distribute:
        if args_opt.device_target == "Ascend":
            if device_num > 1:
                init()
                context.set_auto_parallel_context(
                    parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
        else:
            if device_num > 1:
                mindspore.dataset.config.set_enable_shared_mem(False)
                context.set_auto_parallel_context(
                    parallel_mode=context.ParallelMode.DATA_PARALLEL,
                    gradients_mean=True,
                    device_num=device_num)
                mindspore.common.set_seed(1234)
                init()
            else:
                context.set_context(device_id=device_id)

    # Part-label ranges for the 16 ShapeNet categories (50 part labels total).
    seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43],
               'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46], 'Mug': [36, 37],
               'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27], 'Table': [47, 48, 49],
               'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40], 'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}
    # Reverse map: part label -> category name, e.g. {0: 'Airplane', ..., 49: 'Table'}.
    # (Built once; the original initialized it twice.)
    seg_label_to_cat = {}
    for cat in seg_classes.keys():
        for label in seg_classes[cat]:
            seg_label_to_cat[label] = cat

    test_metrics = {}
    total_correct = 0
    total_seen = 0
    total_seen_class = [0 for _ in range(50)]
    total_correct_class = [0 for _ in range(50)]
    shape_ious = {cat: [] for cat in seg_classes.keys()}

    # Data pipeline.
    dataset = ShapeNetpartDataset(root_path=args_opt.TrainSeg.data_path,
                                  num_points=args_opt.TrainSeg.num_points,
                                  split=args_opt.EvalSeg.split, normal_channel=True)
    dataset_val = ds.GeneratorDataset(dataset, ["data", "cls", "label"], shuffle=False)
    dataset_val = dataset_val.batch(batch_size=args_opt.EvalSeg.batch_size, drop_remainder=True)

    # Create model; fail fast on an unknown name instead of a later NameError.
    if args_opt.model == "pointtransformer":
        network = ModifyNetwork(models=PointTransformerSeg())
    elif args_opt.model == "PointNet_seg":
        network = PointNet_seg()
    else:
        raise ValueError("Unsupported model {}".format(args_opt.model))
    network.set_train(False)
    network.set_grad(False)

    # load_checkpoint() only reads the file here; the parameters are copied
    # into the network once by load_param_into_net() (the original loaded twice).
    param_dict = load_checkpoint(args_opt.EvalSeg.ckpt_file)
    load_param_into_net(network, param_dict)

    logging.info("------------------------------Eval-----------------------------")
    for _, data in enumerate(dataset_val.create_dict_iterator()):
        points, cls, target = data['data'], data["cls"], data['label']
        if args_opt.model == "pointtransformer":
            # Append a 16-dim one-hot object-category code to every point.
            concat = ops.Concat(axis=2)
            onehot = ops.OneHot()
            B, N, C = points.shape
            point = concat((mindspore.Tensor(points), mindspore.numpy.tile(
                onehot(mindspore.Tensor(cls, mindspore.int32), 16,
                       mindspore.Tensor(1.0, mindspore.float32),
                       mindspore.Tensor(0.0, mindspore.float32)),
                (1, N, 1))))
            pred = network(point)
        else:
            B, N, C = points.shape
            pred = network(points, to_categorical(cls, 16))

        # Restrict the per-point argmax to the parts of each sample's category.
        cur_pred_val_logits = pred.asnumpy()
        cur_pred_val = np.zeros((B, N)).astype(np.int32)
        target_np = target.asnumpy()
        for i in range(B):
            cat = seg_label_to_cat[target_np[i, 0]]
            logits = cur_pred_val_logits[i, :, :]
            cur_pred_val[i, :] = np.argmax(logits[:, seg_classes[cat]], 1) + seg_classes[cat][0]

        correct = np.sum(cur_pred_val == target_np)
        total_correct += correct
        total_seen += (B * N)
        for l in range(50):
            total_seen_class[l] += np.sum(target_np == l)
            total_correct_class[l] += (np.sum((cur_pred_val == l) & (target_np == l)))

        # Per-sample IoU averaged over the parts of the sample's category; a part
        # absent from both prediction and ground truth counts as IoU 1.0.
        for i in range(B):
            segp = cur_pred_val[i, :]
            segl = target_np[i, :]
            cat = seg_label_to_cat[segl[0]]
            part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
            for l in seg_classes[cat]:
                if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):
                    part_ious[l - seg_classes[cat][0]] = 1.0
                else:
                    part_ious[l - seg_classes[cat][0]] = np.sum((segl == l) & (segp == l)) / float(
                        np.sum((segl == l) | (segp == l)))
            shape_ious[cat].append(np.mean(part_ious))

    # Aggregate: flatten per-shape IoUs, then reduce per category.
    all_shape_ious = []
    for cat in shape_ious.keys():
        for iou in shape_ious[cat]:
            all_shape_ious.append(iou)
        shape_ious[cat] = np.mean(shape_ious[cat])
    mean_shape_ious = np.mean(list(shape_ious.values()))
    test_metrics['accuracy'] = total_correct / float(total_seen)
    test_metrics['class_avg_accuracy'] = np.mean(
        np.array(total_correct_class) / np.array(total_seen_class, dtype=float))
    for cat in sorted(shape_ious.keys()):
        logging.info('  Eval mIoU of %s %f' % (cat + ' ' * (14 - len(cat)), shape_ious[cat]))
    # mean_shape_ious is already a scalar; the original's extra np.mean was a no-op.
    test_metrics['class_avg_iou'] = mean_shape_ious
    test_metrics['inctance_avg_iou'] = np.mean(all_shape_ious)
    logging.info('  Eval Accuracy: %.3f  Inctance avg mIOU: %.3f' % (
        test_metrics['accuracy'], test_metrics['inctance_avg_iou']))
    print('  Eval Accuracy: %.3f  Inctance avg mIOU: %.3f' % (
        test_metrics['accuracy'], test_metrics['inctance_avg_iou']))

def main(args):
    """Read the YAML config named by ``args.opt`` and launch evaluation."""
    import yaml
    from mind3d.utils.PointTransformerUtils import AttrDict, create_attr_dict

    with open(args.opt, 'r') as f:
        config = AttrDict(yaml.safe_load(f.read()))
    create_attr_dict(config)
    Eval(args_opt=config)



if __name__ == '__main__':
    # CLI entry point: only the config-file path is accepted; unknown
    # arguments are tolerated (parse_known_args).
    parser = argparse.ArgumentParser(description='Shapenet segmentation train.')
    parser.add_argument('-opt',
                        default="./configs/pointtransformer/pointtransformerseg.yaml",
                        help='pointtransformer or pointnet')
    cli_args, _ = parser.parse_known_args()
    main(cli_args)
