import os, gc, objgraph, time, datetime
import random, pickle
import argparse
import sys
import logging
import psutil
import mindspore.nn as nn
import mindspore as ms
from mindspore.communication import get_rank, get_group_size, init
from mindspore import context, ops, load_checkpoint, load_param_into_net
from mindspore import save_checkpoint, Model
from mind3d.utils.sim_builder import build_dataset
from mind3d.utils.sim_batch_utils import train_collate, eval_collate
from mind3d.models.build_sim_model import build_model, get_config
from mindspore import dataset as de
from pathlib import Path
from mindspore.common import set_seed
from mindspore.parallel._utils import (_get_device_num, _get_gradients_mean,
                                       _get_parallel_mode)

# import tensorboard
BASE_DIR = os.path.dirname(os.path.abspath(__file__))  
sys.path.append(os.path.dirname(os.path.dirname(BASE_DIR)))

def log_string(filename, verbosity=1, name=None):
    """Build a logger that writes both to *filename* and to the console.

    Args:
        filename: log file path; opened in "w" mode (truncates any old log).
        verbosity: 0 = DEBUG, 1 = INFO (default), 2 = WARNING.
        name: logger name for ``logging.getLogger``; ``None`` uses the root.

    Returns:
        The configured ``logging.Logger`` instance.
    """
    level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
    formatter = logging.Formatter(
        "[%(asctime)s][%(filename)s][line:%(lineno)d][%(levelname)s] %(message)s"
    )
    logger = logging.getLogger(name)
    logger.setLevel(level_dict[verbosity])

    # Same formatter on both sinks: file first, then stderr stream.
    for handler in (logging.FileHandler(filename, "w"), logging.StreamHandler()):
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger

def parse_second_losses(losses):
    """Fold the per-head loss terms under ``losses["loss"]`` into one total."""
    total = 0
    for term in losses["loss"]:
        # Accumulating onto an int 0 mirrors the semantics of built-in sum().
        total = total + term
    return total


def get_cpu_memory(pid=None):
    """Return host-memory statistics for a process, in GiB.

    Args:
        pid: process id to inspect. Defaults to the *current* process at
            call time. (The previous default ``pid=os.getpid()`` was
            evaluated once at import, so a forked dataloader worker would
            silently report its parent's memory.)

    Returns:
        list[float]: ``[resident_set_size_gib, system_free_memory_gib]``.
    """
    if pid is None:
        pid = os.getpid()
    gib = 1024 * 1024 * 1024
    proc = psutil.Process(pid)
    res = proc.memory_info().rss / gib
    free = psutil.virtual_memory().free / gib
    return [res, free]


class MyWithLossCell(nn.Cell):
    """Wraps a detection model so ``construct`` returns a scalar training loss.

    The wrapped model is expected to expose ``__call__(example) -> pred`` and
    ``get_loss(example, pred) -> dict`` whose ``"loss"`` entry is a list of
    loss terms (summed by ``parse_second_losses``).
    """

    def __init__(self, model):
        super(MyWithLossCell, self).__init__(auto_prefix=False)
        self._model = model

    def construct(self, example):
        # Forward pass, then fold the per-head losses into one scalar.
        # (The original also built a datetime string here every step and
        # discarded it — dead per-iteration work, removed.)
        pred = self._model(example)
        losses = self._model.get_loss(example, pred)
        loss = parse_second_losses(losses)
        return loss

class TrainOneStepCell(nn.Cell):
    """One training step: forward, backward, gradient clipping, update.

    Args:
        network: a with-loss cell (e.g. ``MyWithLossCell``) whose
            ``construct(*inputs)`` returns the scalar training loss.
        optimizer: optimizer that updates ``optimizer.parameters``.

    NOTE(review): the original differentiated the raw forward output (pred)
    rather than the loss, called a non-existent ``get_loss`` on the loss
    cell, and clipped gradients with ``ops.clip_by_value(grads, 35)`` —
    which passes 35 as ``clip_value_min`` and so forces every gradient
    element to be >= 35. All three are fixed to the canonical MindSpore
    pattern (differentiate the loss cell, clip the global norm at 35).
    """

    def __init__(self, network, optimizer):
        super(TrainOneStepCell, self).__init__(auto_prefix=False)
        self.network = network
        # Needed in PYNATIVE mode: build the backward graph for the forward net.
        self.network.set_grad()
        self.optimizer = optimizer
        self.weights = self.optimizer.parameters
        # Gradients w.r.t. the parameter list (not the inputs).
        self.grad = ops.GradOperation(get_by_list=True)
        # Max global gradient norm (the original's intended clip value).
        self.clip_norm = 35.0

        # Data-parallel support: average gradients across devices before update.
        self.grad_reducer = ops.identity
        self.parallel_mode = _get_parallel_mode()
        self.reducer_flag = self.parallel_mode in (
            ms.ParallelMode.DATA_PARALLEL, ms.ParallelMode.HYBRID_PARALLEL)
        if self.reducer_flag:
            self.mean = _get_gradients_mean()
            self.degree = _get_device_num()
            self.grad_reducer = nn.DistributedGradReducer(self.weights, self.mean, self.degree)

    def construct(self, *inputs):
        # ``network`` is a with-loss cell, so its forward output IS the loss,
        # and the gradient of that same callable is the gradient of the loss.
        loss = self.network(*inputs)
        grads = self.grad(self.network, self.weights)(*inputs)
        grads = ops.clip_by_global_norm(grads, clip_norm=self.clip_norm)
        grads = self.grad_reducer(grads)  # all-reduce / identity
        self.optimizer(grads)
        return loss

# pip install torch==1.13.0 -f https://download.pytorch.org/whl/cu111/torch_stable.html
def train(args):
    """Train the SimTrack model described by ``args.config``.

    Args:
        args: parsed CLI namespace with ``config``, ``work_dir``,
            ``device_target``, ``device_num``, ``checkpoint`` and
            ``start_epoch`` attributes (see the argparse setup).

    Side effects: configures the MindSpore context, writes a training log,
    a ``record.log`` loss trace, and per-epoch model/optimizer checkpoints
    into ``args.work_dir``.
    """
    set_seed(0)

    # ---- device / parallel context --------------------------------------
    if args.device_num == 1:
        device_id = int(os.getenv('DEVICE_ID', '7'))
        ms.set_context(mode=ms.PYNATIVE_MODE, device_target=args.device_target,
                       device_id=device_id)
    else:
        ms.set_context(mode=ms.PYNATIVE_MODE, device_target=args.device_target,
                       pynative_synchronize=True)
        init("nccl")
        ms.set_auto_parallel_context(
            parallel_mode=ms.ParallelMode.DATA_PARALLEL, gradients_mean=True,
            comm_fusion={"allreduce": {"mode": "auto", "config": None}})

    cfg = get_config(Path(args.config))

    output_dir = Path(args.work_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    logger = log_string(os.path.join(output_dir, "simtrack_train.log"))

    logger.info("preparing data...")

    train_dataset = build_dataset(cfg['data']['train'])

    batch_size = cfg['data']['samples_per_gpu']
    if args.device_num == 1:
        train_ds = de.GeneratorDataset(train_dataset,
                                       column_names=cfg['train_column_names'],
                                       shuffle=False, num_parallel_workers=1)
    else:
        rank_id = get_rank()
        rank_size = get_group_size()
        print(f"using {rank_size} GPUs, id is {rank_id}")
        train_ds = de.GeneratorDataset(train_dataset,
                                       column_names=cfg['train_column_names'],
                                       shuffle=True, num_shards=rank_size,
                                       shard_id=rank_id, num_parallel_workers=2,
                                       python_multiprocessing=False)
    train_ds = train_ds.batch(batch_size=batch_size,
                              input_columns=cfg['train_column_names'],
                              drop_remainder=True, per_batch_map=train_collate)

    steps_per_epoch = int(len(train_dataset) / batch_size / args.device_num)

    logger.info("preparing model...")

    model = build_model(model_cfg=cfg['model'])
    model.CLASSES = train_dataset.CLASSES
    ckpt = args.checkpoint
    # Resume from epoch_{start_epoch-1}.ckpt; start_epoch of -1 or 1 means
    # training from scratch.
    if ckpt and (args.start_epoch != -1) and (args.start_epoch != 1):
        ms_checkpoint = load_checkpoint(ckpt + "/epoch_{}.ckpt".format(args.start_epoch - 1))
        load_param_into_net(model, ms_checkpoint)

    # Remaining epochs out of a fixed 20-epoch schedule.
    if args.start_epoch == -1:
        cfg['total_epochs'] = 20
    else:
        cfg['total_epochs'] = 20 - args.start_epoch + 1
    milestone = [steps_per_epoch * (i + 1) for i in range(cfg['total_epochs'])]

    # One-cycle-style LR table for the full 20 epochs, scaled by 0.4; when
    # resuming, the already-trained epochs' entries are skipped below.
    learning_rates = [x * 0.4 for x in [
        0.00040, 0.00054, 0.00094, 0.00152, 0.00221, 0.00290, 0.00348,
        0.00387, 0.00400, 0.00393, 0.00373, 0.00299, 0.00251, 0.00199,
        0.00147, 0.00099, 0.00058, 0.00040, 0.00026, 0.00007]]

    lr = nn.piecewise_constant_lr(milestone, learning_rates[20 - cfg['total_epochs']:])
    opt = nn.AdamWeightDecay(model.trainable_params(), learning_rate=lr,
                             beta2=0.99, weight_decay=0.01)
    loss_net = MyWithLossCell(model)
    train_net = nn.TrainOneStepCell(loss_net, opt)

    for epoch in range(cfg['total_epochs']):
        print('Epoch (%d/%s)' % (epoch, cfg['total_epochs']))
        train_net.set_train(True)
        idx = 0
        iterator = train_ds.create_dict_iterator()

        for data_batch in iterator:
            # Drop the leading axis added by the per_batch_map, and cast
            # index-like tensors to int32 as the network expects.
            data_batch['points'] = data_batch['points'][0, :, :]
            data_batch['voxels'] = data_batch['voxels'][0, :, :, :]
            data_batch['num_points'] = data_batch['num_points'][0, :]
            data_batch['coordinates'] = data_batch['coordinates'][0, :, :]
            data_batch['num_voxels'] = data_batch['num_voxels'].squeeze(axis=1).astype('int32')
            data_batch['shape'] = data_batch['shape'].astype('int32')
            data_batch['anno_box'] = data_batch['anno_box'].transpose((1, 0, 2, 3))
            data_batch['ind'] = data_batch['ind'].transpose((1, 0, 2)).astype('int32')
            data_batch['mask'] = data_batch['mask'].transpose((1, 0, 2))
            data_batch['cat'] = data_batch['cat'].transpose((1, 0, 2)).astype('int32')
            losses = train_net(data_batch)

            if idx % 50 == 0:
                mem = get_cpu_memory()
                logger.info("train loss(idx/steps_per_epoch/epoch/total_epoch/mem/lr/loss): %d/ %d / %d / %s / %f G used, %f G left / %f / %f", idx, steps_per_epoch,
                        epoch, cfg['total_epochs'], mem[0], mem[1], opt.get_lr(), losses)
                # TODO(review): rank 1 — not the conventional rank 0 — writes
                # record.log in multi-GPU runs; confirm this is intentional.
                if args.device_num == 1 or (get_rank() == 1 and args.device_num != 1):
                    step = epoch * steps_per_epoch + idx
                    # Context manager closes the file even if formatting raises.
                    with open(args.work_dir + "/record.log", "a") as writer:
                        writer.writelines("step:{}, loss:{}, lr:{}\n".format(step, losses, opt.get_lr()))
                    print('step {}, mem={}G'.format(idx, mem))
            idx += 1

        # Checkpoint names keep the global 20-epoch numbering when resuming.
        filename = "epoch_{}".format(epoch + 21 - cfg['total_epochs'])
        print(filename)
        savepath = os.path.join(output_dir, filename)
        save_checkpoint(model, savepath)
        save_checkpoint(opt, savepath + "_opt")


if __name__ == "__main__":
    # CLI entry point: build the argument parser and kick off training.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--config", default='/mind3d/configs/simtrack/simtrack.yaml')
    arg_parser.add_argument(
        "--work_dir", default='/mind3d/word_dirs/train/new2_copy2')
    arg_parser.add_argument('--device_target', default='GPU', help='device type')
    arg_parser.add_argument("--device_num", type=int, default=1, help="GPU number")
    arg_parser.add_argument(
        "--checkpoint", help="the dir to checkpoint which the model read from",
        default='/mind3d/word_dirs/train/new2_copy2')
    arg_parser.add_argument("--start_epoch", type=int, default=-1)
    train(arg_parser.parse_args())
