import os
import cv2
import mindspore.nn as nn
from mindspore import context
from utils.ms_utils import Config
import mindspore as ms
import argparse
import logging
# import mindspore.dataset as ds
from mindspore.common import set_seed
# from mindspore.train.model import Model
# from mindspore.context import ParallelMode
# from mindspore.nn.wrap import WithLossCell
from mindspore.train.callback import TimeMonitor, LossMonitor, CheckpointConfig, ModelCheckpoint
from mindspore.communication.management import init, get_group_size, get_rank
from src.ms_model_abinet_iter import ABINetIterModel
# from src.ms_losses_batch_192 import MultiLosses
from src.ms_losses import MultiLosses
from utils.ms_create_dataset_lmdb import image_dataset
# from src.ms_metrics import TextAccuracy
from utils.ms_config_init import config as config_init_env
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import composite as C
import faulthandler
from mindspore import profiler
import logging
# Gradient clipping configuration: 0 = clip by value, 1 = clip by norm.
GRADIENT_CLIP_TYPE = 1
GRADIENT_CLIP_VALUE = 20

# NOTE(review): this MultitypeFuncGraph is immediately shadowed by the plain
# `def clip_grad` below (its @clip_grad.register decorator is commented out),
# so the graph object created here is never used — confirm whether graph
# registration was intended for graph-mode HyperMap dispatch.
clip_grad = C.MultitypeFuncGraph("clip_grad")


# @clip_grad.register("Number", "Number", "Tensor")

def init_env(cfg):
    """Initialize the runtime environment: seed, device target, execution mode, parallelism.

    Mutates `cfg` in place: normalizes device_target/device_id/device_num and
    sets rank_id for both standalone and distributed runs.
    """
    ms.set_seed(1)

    # The literal string "None" means: let the framework auto-detect the device.
    if cfg.device_target != "None":
        if cfg.device_target not in ["Ascend", "GPU", "CPU"]:
            raise ValueError(f"Invalid device_target: {cfg.device_target}, "
                             f"should be in ['None', 'Ascend', 'GPU', 'CPU']")
        ms.set_context(device_target=cfg.device_target)

    # Execution mode: compiled graph or eager pynative.
    if cfg.context_mode not in ["graph", "pynative"]:
        raise ValueError(f"Invalid context_mode: {cfg.context_mode}, "
                         f"should be in ['graph', 'pynative']")
    if cfg.context_mode == "graph":
        ms.set_context(mode=ms.GRAPH_MODE)
    else:
        ms.set_context(mode=ms.PYNATIVE_MODE)

    cfg.device_target = ms.get_context("device_target")
    # CPU runs never use a multi-card setup.
    if cfg.device_target == "CPU":
        cfg.device_id = 0
        cfg.device_num = 1
        cfg.rank_id = 0

    # Pin this process to a specific card when one is configured.
    if isinstance(getattr(cfg, "device_id", None), int):
        ms.set_context(device_id=cfg.device_id)

    if cfg.device_num <= 1:
        cfg.device_num = 1
        cfg.rank_id = 0
        print("run standalone!", flush=True)
        return

    # Distributed: init() must be called before get_group_size()/get_rank(),
    # and works the same on Ascend and GPU.
    init()
    print("run distribute!", flush=True)
    world_size = get_group_size()
    if cfg.device_num != world_size:
        raise ValueError(f"the setting device_num: {cfg.device_num} not equal to the real group_size: {world_size}")
    cfg.rank_id = get_rank()
    ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.DATA_PARALLEL, gradients_mean=True)
    if hasattr(cfg, "all_reduce_fusion_config"):
        ms.set_auto_parallel_context(all_reduce_fusion_config=cfg.all_reduce_fusion_config)

def clip_grad(clip_type, clip_value, grad):
    """
    Clip gradients.

    Inputs:
        clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
        clip_value (float): Specifies how much to clip.
        grad (Tensor): A single gradient tensor (HyperMap applies this per-element).

    Outputs:
        Tensor, the clipped gradient.
    """
    # Unknown clip mode: pass the gradient through untouched.
    if clip_type not in (0, 1):
        return grad
    dt = F.dtype(grad)
    if clip_type == 0:
        # Element-wise clipping into [-clip_value, clip_value].
        new_grad = C.clip_by_value(grad, F.cast(F.tuple_to_array((-clip_value,)), dt),
                                   F.cast(F.tuple_to_array((clip_value,)), dt))
    else:
        # Clip by the L2 norm of this gradient tensor.
        new_grad = nn.ClipByNorm()(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
    return new_grad

class ABINetTrainOneStepCell(nn.TrainOneStepCell):
    """
    Encapsulation class of ABINet network training.

    Append an optimizer to the training network after that the construct
    function can be called to create the backward graph.

    Args:
        network (Cell): The training network. Note that loss function should have been added.
        optimizer (Optimizer): Optimizer for updating the weights.
        sens (Number): The adjust parameter (loss scaling sensitivity). Default: 1.0.
        enable_clip_grad (boolean): If True, clip gradients by norm before applying them. Default: False.
    """

    def __init__(self, network, optimizer, sens=1.0, enable_clip_grad=False):
        super(ABINetTrainOneStepCell, self).__init__(network, optimizer, sens)
        self.cast = P.Cast()
        # HyperMap applies clip_grad over every tensor in the gradient tuple.
        self.hyper_map = C.HyperMap()
        self.enable_clip_grad = enable_clip_grad
        self.enable_tuple_broaden = True

    def set_sens(self, value):
        """Override the loss-scaling sensitivity used as the initial backward gradient."""
        self.sens = value

    def clip_grads(self, grads):
        """Clip each gradient with the module-level clip_grad (norm clipping, value 20)."""
        grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
        return grads

    def construct(self, data, label, length,label_for_mask):
        """Defines the computation performed: forward loss, backward grads, optimizer step."""
        weights = self.weights
        loss = self.network( data, label, length,label_for_mask)
        # NOTE(review): the sensitivity is cast to float16 here; small losses may
        # underflow in half precision — confirm this matches the intended
        # mixed-precision setup (Model is built with amp_level="O0" in train()).
        grads = self.grad(self.network,weights)(data, 
                                                label, 
                                                length,
                                                label_for_mask,
                                                self.cast(F.tuple_to_array((self.sens,)),
                                                           ms.dtype.float16))   
        if self.enable_clip_grad:
            grads = self.clip_grads(grads)
        # Averages gradients across devices in distributed runs (identity otherwise).
        grads = self.grad_reducer(grads)
        self.optimizer(grads)
        # print("after optimizer")
        return loss


class CustomWithLossCell(nn.Cell):
    """Wrap a forward network together with its loss function into a single cell."""

    def __init__(self, backbone, loss_fn):
        """Store the forward network `backbone` and the loss function `loss_fn`."""
        super(CustomWithLossCell, self).__init__(auto_prefix=False)
        self._backbone = backbone
        self._loss_fn = loss_fn

    def construct(self, data, label, length, label_for_mask):
        # Forward pass, then score the outputs against the packed targets.
        output = self._backbone(data)
        loss = self._loss_fn(output, [label, length, label_for_mask])
        return loss

def _safe_rank():
    """Return the communication rank, or 0 when init() was never called (standalone run)."""
    try:
        return get_rank()
    except RuntimeError:
        return 0


def train(config):
    """Train ABINet end to end.

    Builds the model/loss/optimizer, restores pretrained weights, wraps them in
    the custom train-one-step cell, and runs the fit loop with per-rank
    checkpointing.

    Args:
        config: parsed YAML Config carrying model hyper-parameters
            (e.g. `optimizer_lr`).
    """
    set_seed(1)
    # The iterative model produces a very deep graph; raise the compiler's call-depth cap.
    ms.set_context(max_call_depth=100000000)
    dataset = image_dataset
    loss_fn = MultiLosses()

    net = ABINetIterModel(config=config)

    # Restore pretrained weights converted from the PyTorch release.
    # NOTE(review): hard-coded absolute path — should be moved into the YAML config.
    ckpt_file_name = "/home/data/zhangyh22/ABINet/ABINet_Mindspore/abinet_gpu/abinet_ascend/ms_abinet_from_py_ascend.ckpt"
    param_dict = ms.load_checkpoint(ckpt_file_name)
    param_not_load = ms.load_param_into_net(net, param_dict)

    loss_net = CustomWithLossCell(net, loss_fn)
    opt = nn.Adam(params=net.trainable_params(), learning_rate=config.optimizer_lr)
    loss_net_clip_grad = ABINetTrainOneStepCell(loss_net, opt)

    step_size = dataset.get_dataset_size()
    print('step_size: ', step_size)
    time_cb = TimeMonitor(data_size=step_size)

    # Checkpoint every 3000 steps, keep at most 100 files, one directory per rank.
    # get_rank() raises in standalone mode, so go through the safe helper.
    rank_id = _safe_rank()
    ckpoint_cf = CheckpointConfig(save_checkpoint_steps=3000, keep_checkpoint_max=100)
    ckpoint_cb = ModelCheckpoint(prefix="alldata_batch96_8card_lr1e-5_gpushuffle_graph",
                                 config=ckpoint_cf,
                                 directory="./alldata_batch96_8card_lr1e-5_gpushuffle_graph" + str(rank_id) + "_ckpt")

    model = ms.Model(network=loss_net_clip_grad, amp_level="O0")
    model.train(epoch=200, train_dataset=dataset,
                callbacks=[LossMonitor(), ckpoint_cb, time_cb],
                dataset_sink_mode=False)

def main():
    """CLI entry point: parse arguments, initialize the MindSpore environment,
    configure per-rank logging, and start training."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default='utils/configs/train_abinet.yaml',
                        help='path to config file')
    parser.add_argument('--cuda', type=int, default=-1)
    parser.add_argument('--model_eval', type=str, default='alignment',
                        choices=['alignment', 'vision', 'language'])
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--max_text_length', type=int, default=30)
    parser.add_argument('--device_target', type=str, default='GPU')
    parser.add_argument('--device_num', type=str, default='2')
    parser.add_argument('--context_mode', type=str, default='pynative')
    args = parser.parse_args()

    # Runtime environment (device/mode/parallel) comes from the shared env config
    # module, not from the per-run YAML config below.
    init_env(config_init_env)
    config = Config(args.config)

    # get_rank() raises a RuntimeError when init() was not called (standalone run);
    # fall back to rank 0 so single-card training still works.
    try:
        rank_id = get_rank()
    except RuntimeError:
        rank_id = 0

    # One log file per rank; create the directory first so basicConfig cannot
    # fail with FileNotFoundError on a fresh workspace.
    log_dir = './graphtrainlog'
    os.makedirs(log_dir, exist_ok=True)
    logging.basicConfig(level=logging.INFO,
                        format='%(filename)-15s[%(lineno)03d] %(message)s',
                        filename=log_dir + '/allshuffledata_batch96_8card_loss0.5e-5_new_' + str(rank_id) + '.log',
                        filemode='w')
    print("begin training")
    train(config)
    print("done")

if __name__ == '__main__':
    # Run training only when executed as a script (not on import).
    main()
    
# import os
# import cv2
# import mindspore.nn as nn
# from mindspore import context
# from utils.ms_utils import Config
# import mindspore as ms
# import argparse
# import mindspore.dataset as ds
# from mindspore.common import set_seed
# from mindspore.train.model import Model
# from mindspore.context import ParallelMode
# from mindspore.nn.wrap import WithLossCell
# from mindspore.train.callback import TimeMonitor, LossMonitor, CheckpointConfig, ModelCheckpoint
# from mindspore.communication.management import init, get_group_size, get_rank
# from src.ms_model_abinet_iter import ABINetIterModel
# from src.ms_losses import MultiLosses
# from utils.ms_create_dataset import ms_ImageDataset,image_dataset
# from src.ms_metrics import TextAccuracy
# from  utils.ms_config_init import get_config as config_init_env
# import faulthandler


# def init_env(cfg):
#     """初始化运行时环境."""
#     ms.set_seed(cfg.seed)
#     # 如果device_target设置是None，利用框架自动获取device_target，否则使用设置的。
#     if cfg.device_target != "None":
#         if cfg.device_target not in ["Ascend", "GPU", "CPU"]:
#             raise ValueError(f"Invalid device_target: {cfg.device_target}, "
#                              f"should be in ['None', 'Ascend', 'GPU', 'CPU']")
#         ms.set_context(device_target=cfg.device_target)

#     # 配置运行模式，支持图模式和PYNATIVE模式
#     if cfg.context_mode not in ["graph", "pynative"]:
#         raise ValueError(f"Invalid context_mode: {cfg.context_mode}, "
#                          f"should be in ['graph', 'pynative']")
#     context_mode = ms.GRAPH_MODE if cfg.context_mode == "graph" else ms.PYNATIVE_MODE
#     ms.set_context(mode=context_mode)

#     cfg.device_target = ms.get_context("device_target")
#     # 如果是CPU上运行的话，不配置多卡环境
#     if cfg.device_target == "CPU":
#         cfg.device_id = 0
#         cfg.device_num = 1
#         cfg.rank_id = 0

#     # 设置运行时使用的卡
#     if hasattr(cfg, "device_id") and isinstance(cfg.device_id, int):
#         ms.set_context(device_id=cfg.device_id)
#     if cfg.device_num > 1:
#         # init方法用于多卡的初始化，不区分Ascend和GPU，get_group_size和get_rank方法只能在init后使用
#         init()
#         print("run distribute!", flush=True)
#         group_size = get_group_size()
#         print("group_size",group_size)
#         if cfg.device_num != group_size:
#             raise ValueError(f"the setting device_num: {cfg.device_num} not equal to the real group_size: {group_size}")
#         cfg.rank_id = get_rank()
#         #ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.DATA_PARALLEL, gradients_mean=True)
#         ms.set_auto_parallel_context(device_num=cfg.device_num,parallel_mode='data_parallel', gradients_mean=True)
#         if hasattr(cfg, "all_reduce_fusion_config"):
#             ms.set_auto_parallel_context(all_reduce_fusion_config=cfg.all_reduce_fusion_config)
#     else:
#         cfg.device_num = 1
#         cfg.rank_id = 0
#         print("run standalone!", flush=True)



# class CustomWithLossCell(nn.Cell):
#     """连接前向网络和损失函数"""

#     def __init__(self, backbone, loss_fn):
#         """输入有两个,前向网络backbone和损失函数loss_fn"""
#         super(CustomWithLossCell, self).__init__(auto_prefix=False)
#         self._backbone = backbone
#         self._loss_fn = loss_fn
#         self.i = 1
        
#     def construct(self, data, label, length):

#         print(self.i)
#         self.i = self.i + 1
#         output = self._backbone(data)                 # 前向计算得到网络输出
#         loss = self._loss_fn(output, label, length)
#         print(loss)
#         return loss  # 得到多标签损失值

# def train(config):
#     set_seed(1)
#     dataset = image_dataset

#     loss_fn = MultiLosses()
    
#     net = ABINetIterModel(config=config)

#     #加载权重
#     ckpt_file_name = "./ms_abinet_from_py.ckpt"
#     param_dict = ms.load_checkpoint(ckpt_file_name)
#     param_not_load = ms.load_param_into_net(net, param_dict)

#     loss_net = CustomWithLossCell(net, loss_fn)
    
#     opt = nn.Adam(params=net.trainable_params(),learning_rate=config.optimizer_lr)#

#     step_size = dataset.get_dataset_size()
#     print('step_size: ' ,step_size)
#     time_cb = TimeMonitor(data_size=step_size)
#     # set and apply parameters of check point config.TRAIN_MODEL_SAVE_PATH
#     ckpoint_cf = CheckpointConfig(save_checkpoint_steps=30 ,keep_checkpoint_max=10)
#     ckpoint_cb = ModelCheckpoint(prefix="MS_ABINET_DISTRIBUTED_GPU",
#                                  config=ckpoint_cf,
#                                  directory="{}".format('./ms_abinet_distributed_gpu_ckpt'))

#     model = ms.Model(network=loss_net, optimizer=opt)
#     #model.train(epoch=5, train_dataset=dataset, callbacks=[LossMonitor(0.005)])
#     profiler = ms.Profiler()
#     model.train(epoch=200, train_dataset=dataset,callbacks = [LossMonitor(),ckpoint_cb,time_cb],dataset_sink_mode=False)
    
#     profiler.analyse()

# def main():
    
   
    #     parser = argparse.ArgumentParser()
#     parser.add_argument('--config', type=str, default='utils/configs/train_abinet.yaml',
#                         help='path to config file')
#     #parser.add_argument('--input', type=str, default='figs/test')
#     parser.add_argument('--cuda', type=int, default=-1)
#     #parser.add_argument('--checkpoint', type=str, default='workdir/train-abinet/best-train-abinet.pth')
#     parser.add_argument('--model_eval', type=str, default='alignment', 
#                         choices=['alignment', 'vision', 'language'])
#     parser.add_argument('--batch_size', type=int, default=1)
#     parser.add_argument('--max_text_length', type=int, default=30)
#     parser.add_argument('--device_target',type=str,default='GPU')
#     parser.add_argument('--device_num',type=str,default='8')
    
    
#     args = parser.parse_args()
    
#     cfg = args.config

#     cfg = config_init_env(args.device_target,args.device_num)
#     init_env(cfg)
#     config = Config(args.config)
#     #if args.checkpoint is not None: config.model_checkpoint = args.checkpoint
#     if args.model_eval is not None: config.model_eval = args.model_eval
#     config.global_phase = 'test'
#     config.model_vision_checkpoint, config.model_language_checkpoint = None, None
#     device = 'cpu' if args.cuda < 0 else f'cuda:{args.cuda}'

#     print("begin training")
#     train(config)
#     print("done")

# if __name__ == '__main__':
#     main()