import os, sys, argparse
# Make the current working directory importable so the local `mind3d` package
# resolves when this script is run directly from the repo root.
# NOTE(review): os.path.join with a single argument is a no-op, and `path` is
# never used in this file — presumably kept for external consumers; confirm
# before removing.
sys.path.append(os.path.abspath(os.path.join(os.getcwd())))
path = os.path.abspath(os.path.join(os.getcwd()))
from mindspore import context, nn, ops, load_checkpoint, load_param_into_net, load_checkpoint,save_checkpoint, DynamicLossScaleManager
from mindspore.train.model import Model
import numpy as np
from tqdm import tqdm
from mindspore.train.callback import TimeMonitor
import mindspore.dataset as ds
import logging,  mindspore
from mindspore.context import ParallelMode
from mindspore.communication.management import init

from mind3d.dataset.ShapeNet import ShapeNetpartDataset
from mind3d.models.PointTransformer import PointTransformerSeg
from mind3d.utils.PointTransformerUtils import AttrDict,create_attr_dict, CustumWithLoss, pointnetCustomWithLossCell
from mind3d.utils.provider import pointtransformerloss, pointnetloss, pointnetpre
from mind3d.models.pointnet import PointNet_seg
from mind3d.models.losses.NLLLoss import NLLLoss


def to_categorical(y, num_class):
    """Convert integer class labels into one-hot float32 vectors.

    Builds a (num_class, num_class) identity matrix and gathers the rows
    indexed by ``y``, producing a tensor of shape ``y.shape + (num_class,)``.

    Args:
        y: integer label(s); anything mindspore.Tensor accepts (Tensor/ndarray/int).
        num_class: number of classes, i.e. the one-hot vector length.

    Returns:
        mindspore.Tensor of one-hot rows (float32).
    """
    eye = ops.Eye()(num_class, num_class, mindspore.float32)
    # Indexing a Tensor already yields a Tensor — the original wrapped the
    # result in mindspore.Tensor(...) a second time, which was redundant.
    return eye[mindspore.Tensor(y)]

def train(args_opt):
    """Train and evaluate a ShapeNet-Part segmentation model.

    Supports two backbones selected by ``args_opt.model``:
    ``"pointtransformer"`` (PointTransformerSeg) and ``"PointNet_seg"``
    (PointNet_seg). Each epoch trains on the "train" split, then evaluates
    overall accuracy plus class-average and instance-average mIoU on the
    "val" split, checkpointing the model whenever the instance-average mIoU
    reaches a new best.

    Args:
        args_opt: AttrDict built from the YAML config. Reads ``model``,
            ``device_target``, ``run_distribute`` and the ``TrainSeg`` /
            ``EvalSeg`` sub-dicts (paths, batch sizes, lr schedule, etc.).

    Raises:
        ValueError: if ``device_target``, ``TrainSeg.lr_decay_mode`` or
            ``model`` is not a supported value.
    """
    # NOTE(review): datefmt used "%M" (minutes) where the month "%m" was intended.
    logging.basicConfig(filename=args_opt.TrainSeg.logging_path, filemode="w",
                        format="[%(asctime)s][line:%(lineno)d][%(levelname)s] %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S",
                        level=logging.INFO)

    # Part-label id ranges for each ShapeNet-Part object category.
    # NOTE(review): restored the standard category name 'Bag' (was 'ahhhBag');
    # the key is only used internally for IoU bookkeeping and log output.
    seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43],
                   'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46], 'Mug': [36, 37],
                   'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27], 'Table': [47, 48, 49],
                   'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40], 'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}

    # Device selection; the defaults allow a single-card run without env vars.
    device_id = int(os.getenv('DEVICE_ID', '1'))
    device_num = int(os.getenv('RANK_SIZE', '1'))

    if args_opt.device_target not in ("Ascend", "GPU"):
        raise ValueError("Unsupported platform {}".format(args_opt.device_target))

    if args_opt.device_target == "Ascend":
        context.set_context(mode=context.PYNATIVE_MODE,
                            device_target="Ascend",
                            save_graphs=False,
                            device_id=device_id)
        context.set_context(max_call_depth=2048)
    else:
        context.set_context(mode=context.GRAPH_MODE,
                            device_target="GPU",
                            save_graphs=False,
                            device_id=device_id,
                            max_call_depth=2048)

    # Distributed (data-parallel) setup when RANK_SIZE > 1.
    if args_opt.run_distribute:
        if args_opt.device_target == "Ascend":
            if device_num > 1:
                init()
                context.set_auto_parallel_context(
                    parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
        else:
            if device_num > 1:
                mindspore.dataset.config.set_enable_shared_mem(False)
                context.set_auto_parallel_context(
                    parallel_mode=context.ParallelMode.DATA_PARALLEL,
                    gradients_mean=True,
                    device_num=device_num)
                mindspore.common.set_seed(1234)
                init()
            else:
                context.set_context(device_id=device_id)

    # Data pipeline: each sample is (points, object-class id, per-point label).
    train_dataset = ShapeNetpartDataset(root_path=args_opt.TrainSeg.data_path,
                                        num_points=args_opt.TrainSeg.num_points,
                                        split="train", normal_channel=True)
    train_ds = ds.GeneratorDataset(train_dataset, ["output", "cls", "label"],
                                   num_parallel_workers=args_opt.TrainSeg.num_work, shuffle=True)
    train_ds = train_ds.batch(batch_size=args_opt.TrainSeg.batch_size)

    test_dataset = ShapeNetpartDataset(root_path=args_opt.TrainSeg.data_path,
                                       num_points=args_opt.TrainSeg.num_points,
                                       split="val", normal_channel=True)
    test_ds = ds.GeneratorDataset(test_dataset, ["output", "cls", "label"],
                                  num_parallel_workers=args_opt.TrainSeg.num_work, shuffle=False)
    # drop_remainder keeps eval batch shapes static (B below is constant).
    test_ds = test_ds.batch(batch_size=args_opt.EvalSeg.batch_size, drop_remainder=True)

    step_size = train_ds.get_dataset_size()
    test_step = test_ds.get_dataset_size()

    # Per-step learning-rate schedule consumed by the optimizer.
    if args_opt.TrainSeg.lr_decay_mode == "cosine_decay_lr":
        lr = nn.cosine_decay_lr(min_lr=args_opt.TrainSeg.min_lr,
                                max_lr=args_opt.TrainSeg.max_lr,
                                total_step=args_opt.TrainSeg.epoch_size * step_size,
                                step_per_epoch=step_size,
                                decay_epoch=args_opt.TrainSeg.decay_epoch)
    elif args_opt.TrainSeg.lr_decay_mode == "exp_decay_lr":
        lr = nn.exponential_decay_lr(learning_rate=args_opt.TrainSeg.max_lr,
                                     decay_rate=args_opt.TrainSeg.decay_rate,
                                     total_step=args_opt.TrainSeg.epoch_size * step_size,
                                     step_per_epoch=step_size,
                                     decay_epoch=args_opt.TrainSeg.decay_epoch)
    else:
        # Original fell through and later crashed with NameError on `lr`.
        raise ValueError("Unsupported lr_decay_mode {}".format(args_opt.TrainSeg.lr_decay_mode))

    # Backbone + loss selection.
    if args_opt.model == "pointtransformer":
        network = PointTransformerSeg()
        network_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    elif args_opt.model == "PointNet_seg":
        network = PointNet_seg()
        network_loss = NLLLoss(reduction="mean")
    else:
        raise ValueError("Unsupported model {}".format(args_opt.model))
    network_opt = nn.Adam(network.trainable_params(), lr, args_opt.TrainSeg.momentum,
                          weight_decay=args_opt.TrainSeg.weight_decay)

    if args_opt.TrainSeg.pretrain:
        # NOTE(review): was logging.INFO(...) — logging.INFO is an int level
        # constant, so calling it raised TypeError; logging.info() is the call.
        logging.info("-------PreTrain-----------")
        param_dict = load_checkpoint(args_opt.EvalSeg.ckpt_file)
        load_param_into_net(network, param_dict)

    # Wrap the network with its loss and a single-step training cell.
    if args_opt.model == "pointtransformer":
        network_with_loss = CustumWithLoss(network, network_loss, 50)
    else:
        network_with_loss = pointnetCustomWithLossCell(network, network_loss)
    train_net = nn.TrainOneStepCell(network_with_loss, network_opt, sens=1024)

    best_iacc = 0.0  # accuracy at the best-mIoU epoch (was uninitialized)
    best_class_avg_iou = 0.0
    best_inctance_avg_iou = 0.0
    for epoch in range(args_opt.TrainSeg.epoch_size):
        loss_list = []
        train_net.set_train(True)
        logging.info("  -------Train Model-----------")
        for batch_id, points in tqdm(enumerate(train_ds.create_dict_iterator()), total=step_size, smoothing=0.9):
            point_set = points["output"]
            cls = points["cls"]
            target = points["label"]
            if args_opt.model == "pointtransformer":
                loss = pointtransformerloss(point_set, target, cls, train_net)
            else:
                loss = pointnetloss(train_net, point_set, cls, target, 16)
            loss_list.append(loss)
        print(" Epoch: %d, Train_loss: %f" % (epoch + 1, np.mean(loss_list)))
        logging.info(" Epoch: %d, Train_loss: %f" % (epoch + 1, np.mean(loss_list)))

        logging.info("  -------Test Model-----------")
        test_metrics = {}
        total_correct = 0
        total_seen = 0
        total_seen_class = [0 for _ in range(50)]
        total_correct_class = [0 for _ in range(50)]
        shape_ious = {cat: [] for cat in seg_classes.keys()}
        # Inverse map: part-label id -> category name, e.g. {0:'Airplane', ... 49:'Table'}.
        seg_label_to_cat = {}
        for cat in seg_classes.keys():
            for label in seg_classes[cat]:
                seg_label_to_cat[label] = cat

        train_net.set_train(False)
        train_net.set_grad(False)
        network.set_train(False)
        network.set_grad(False)

        for _, data in tqdm(enumerate(test_ds.create_dict_iterator()), total=test_step, smoothing=0.9):
            points, cls, target = data['output'], data["cls"], data['label']
            if args_opt.model == "pointtransformer":
                # Append a one-hot object-class vector to every point's features.
                cat = ops.Concat(axis=2)
                B, N, C = points.shape
                point = cat((mindspore.Tensor(points), mindspore.numpy.tile(to_categorical(cls, 16), (1, N, 1))))
                pred = network(point)
            else:
                B, N, C = points.shape
                pred = network(points, to_categorical(cls, 16))
            cur_pred_val_logits = pred.asnumpy()
            cur_pred_val = np.zeros((B, N)).astype(np.int32)
            target_np = target.asnumpy()

            # Restrict argmax to the part labels of each sample's own category.
            for i in range(B):
                cat = seg_label_to_cat[target_np[i, 0]]
                logits = cur_pred_val_logits[i, :, :]
                cur_pred_val[i, :] = np.argmax(logits[:, seg_classes[cat]], 1) + seg_classes[cat][0]
            correct = np.sum(cur_pred_val == target_np)
            total_correct += correct
            total_seen += (B * N)
            for l in range(50):
                total_seen_class[l] += np.sum(target_np == l)
                total_correct_class[l] += (np.sum((cur_pred_val == l) & (target_np == l)))

            # Per-shape IoU, averaged over the parts of that shape's category.
            for i in range(B):
                segp = cur_pred_val[i, :]
                segl = target_np[i, :]
                cat = seg_label_to_cat[segl[0]]
                part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
                for l in seg_classes[cat]:
                    if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):
                        # Part absent from both prediction and ground truth: IoU 1.
                        part_ious[l - seg_classes[cat][0]] = 1.0
                    else:
                        part_ious[l - seg_classes[cat][0]] = np.sum((segl == l) & (segp == l)) / float(np.sum((segl == l) | (segp == l)))
                shape_ious[cat].append((np.mean(part_ious)))

        all_shape_ious = []
        for cat in shape_ious.keys():
            for iou in shape_ious[cat]:
                all_shape_ious.append(iou)
            shape_ious[cat] = np.mean(shape_ious[cat])
        mean_shape_ious = np.mean(list(shape_ious.values()))
        test_metrics['accuracy'] = total_correct / float(total_seen)
        test_metrics['class_avg_accuracy'] = np.mean(
            np.array(total_correct_class) / np.array(total_seen_class))
        for cat in sorted(shape_ious.keys()):
            logging.info('eval mIoU of %s %f' % (cat + ' ' * (14 - len(cat)), shape_ious[cat]))
        test_metrics['class_avg_iou'] = mean_shape_ious
        test_metrics['inctance_avg_iou'] = np.mean(all_shape_ious)

        logging.info('  Epoch %d test Accuracy: %f     Inctance avg mIOU: %f' % (
            epoch + 1, test_metrics['accuracy'], test_metrics['inctance_avg_iou']))

        # Checkpoint and record bests on instance-average mIoU (the original
        # had two separate blocks testing the identical condition; merged).
        if test_metrics['inctance_avg_iou'] >= best_inctance_avg_iou:
            logging.info("  -------Save Model-----------")
            savepath = str(args_opt.TrainSeg.ckpt_save_dir) + '/best_seg_model.ckpt'
            save_checkpoint(network, savepath)  # save the best model so far
            best_iacc = test_metrics['accuracy']
            best_class_avg_iou = test_metrics['class_avg_iou']
            best_inctance_avg_iou = test_metrics['inctance_avg_iou']
        logging.info('  Best accuracy is: %.3f' % best_iacc)
        logging.info('  Best inctance avg mIOU is: %.3f' % best_inctance_avg_iou)
        print('  Best inctance avg mIOU is: %.3f' % best_inctance_avg_iou)

def main(args):
    """Load the YAML config named by ``args.opt`` and launch training.

    Args:
        args: argparse namespace with an ``opt`` attribute holding the path
            to a YAML configuration file.
    """
    # yaml is only needed here, so keep the import local to config parsing.
    # (AttrDict / create_attr_dict are already imported at module level.)
    import yaml

    with open(args.opt, 'r') as f:
        args_ops = AttrDict(yaml.safe_load(f.read()))
    create_attr_dict(args_ops)
    train(args_opt=args_ops)



if __name__ == '__main__':
    # CLI entry point: a single -opt flag selects the training YAML config.
    arg_parser = argparse.ArgumentParser(description='Shapenet segmentation train.')
    arg_parser.add_argument('-opt',
                            default="./configs/pointtransformer/pointtransformerseg.yaml",
                            help='pointtransformer or pointnet')
    cli_args, _unknown = arg_parser.parse_known_args()
    main(cli_args)
