"""crnn training"""
import os
import cv2
import mindspore.nn as nn
from mindspore import context
from ms_utils import Config
import mindspore as ms
import argparse
import mindspore.dataset as ds
from mindspore.common import set_seed
from mindspore.train.model import Model
from mindspore.context import ParallelMode
from mindspore.nn.wrap import WithLossCell
from mindspore.train.callback import TimeMonitor, LossMonitor, CheckpointConfig, ModelCheckpoint
from mindspore.communication.management import init, get_group_size, get_rank
from ms_model_abinet_iter import ABINetIterModel
from ms_losses import MultiLosses
from ms_create_dataset import ms_ImageDataset,image_dataset
from ms_metrics import TextAccuracy
import faulthandler
#from src.loss import CTCLoss

#from src.dataset import create_dataset

#from src.crnn import crnn
# from src.crnn_for_train import TrainOneStepCellWithGradClip
# from src.metric import CRNNAccuracy
# from src.eval_callback import EvalCallBack
# from src.model_utils.moxing_adapter import moxing_wrapper
# from src.model_utils.config import config
# from src.model_utils.device_adapter import get_rank_id, get_device_num, get_device_id




# def apply_eval(eval_param):
#     evaluation_model = eval_param["model"]
#     eval_ds = eval_param["dataset"]
#     metrics_name = eval_param["metrics_name"]
#     res = evaluation_model.eval(eval_ds)
#     return res[metrics_name]


# def modelarts_pre_process():
#     pass


# @moxing_wrapper(pre_process=modelarts_pre_process)
# def train():
#     if config.device_target == 'Ascend':
#         device_id = get_device_id()
#         context.set_context(device_id=device_id)

#     if config.model_version == 'V1' and config.device_target != 'Ascend':
#         raise ValueError("model version V1 is only supported on Ascend, pls check the config.")

#     # lr_scale = 1
#     if config.run_distribute:
#         if config.device_target == 'Ascend':
#             init()
#             # lr_scale = 1
#             device_num = get_device_num()
#             rank = get_rank_id()
#         else:
#             init()
#             # lr_scale = 1
#             device_num = get_group_size()
#             rank = get_rank()
#         context.reset_auto_parallel_context()
#         context.set_auto_parallel_context(device_num=device_num,
#                                           parallel_mode=ParallelMode.DATA_PARALLEL,
#                                           gradients_mean=True)
#     else:
#         device_num = 1
#         rank = 0

#     max_text_length = config.max_text_length
#     # create dataset
#     dataset = create_dataset(name=config.train_dataset, dataset_path=config.train_dataset_path,
#                              batch_size=config.batch_size,
#                              num_shards=device_num, shard_id=rank, config=config)
#     step_size = dataset.get_dataset_size()
#     print("step_size:", step_size)
#     # define lr
#     lr_init = config.learning_rate
#     lr = nn.dynamic_lr.cosine_decay_lr(0.0, lr_init, config.epoch_size * step_size, step_size, config.epoch_size)
#     loss = CTCLoss(max_sequence_length=config.num_step,
#                    max_label_length=max_text_length,
#                    batch_size=config.batch_size)
#     net = crnn(config, full_precision=config.device_target != 'Ascend')
#     opt = nn.SGD(params=net.trainable_params(), learning_rate=lr, momentum=config.momentum, nesterov=config.nesterov)

#     net_with_loss = WithLossCell(net, loss)
#     net_with_grads = TrainOneStepCellWithGradClip(net_with_loss, opt).set_train()
#     # define model
#     model = Model(net_with_grads)
#     # define callbacks
#     callbacks = [LossMonitor(per_print_times=config.per_print_time),
#                  TimeMonitor(data_size=step_size)]
#     save_ckpt_path = os.path.join(config.save_checkpoint_path, 'ckpt_' + str(rank) + '/')
#     if config.run_eval and rank == 0:
#         if config.train_eval_dataset_path is None or (not os.path.isdir(config.train_eval_dataset_path)):
#             raise ValueError("{} is not a existing path.".format(config.train_eval_dataset_path))
#         eval_dataset = create_dataset(name=config.train_eval_dataset,
#                                       dataset_path=config.train_eval_dataset_path,
#                                       batch_size=config.batch_size,
#                                       is_training=False,
#                                       config=config)
#         eval_model = Model(net, loss, metrics={'CRNNAccuracy': CRNNAccuracy(config, print_flag=False)})
#         eval_param_dict = {"model": eval_model, "dataset": eval_dataset, "metrics_name": "CRNNAccuracy"}
#         eval_cb = EvalCallBack(apply_eval, eval_param_dict, interval=config.eval_interval,
#                                eval_start_epoch=config.eval_start_epoch, save_best_ckpt=True,
#                                ckpt_directory=save_ckpt_path, best_ckpt_name="best_acc.ckpt",
#                                eval_all_saved_ckpts=config.eval_all_saved_ckpts, metrics_name="acc")
#         callbacks += [eval_cb]
#     if config.save_checkpoint and rank == 0:
#         config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_steps,
#                                      keep_checkpoint_max=config.keep_checkpoint_max)
#         ckpt_cb = ModelCheckpoint(prefix="crnn", directory=save_ckpt_path, config=config_ck)
#         callbacks.append(ckpt_cb)
#     model.train(config.epoch_size, dataset, callbacks=callbacks, dataset_sink_mode=config.device_target == 'Ascend')









class CustomWithLossCell(nn.Cell):
    """Connect the forward network and the loss function into one cell.

    Mirrors ``mindspore.nn.WithLossCell`` but forwards an extra ``length``
    argument to the loss function, which ``MultiLosses`` expects.

    Args:
        backbone: forward network; called as ``backbone(data)``.
        loss_fn: loss function; called as ``loss_fn(output, label, length)``.
    """

    def __init__(self, backbone, loss_fn):
        # auto_prefix=False keeps the wrapped parameters' original names,
        # so checkpoints saved from the bare backbone still load.
        super(CustomWithLossCell, self).__init__(auto_prefix=False)
        self._backbone = backbone
        self._loss_fn = loss_fn

    def construct(self, data, label, length):
        """Run the forward pass and return the loss value."""
        output = self._backbone(data)  # forward computation
        # NOTE(review): the original printed the loss on every step; dropped,
        # since LossMonitor in train() already reports it and a per-step
        # print slows down training.
        return self._loss_fn(output, label, length)

def train(config,
          ckpt_file_name="/home/data4/zyh/ABINet/ABINet_Mindspore/abinet_ms/ms_abinet_from_py.ckpt"):
    """Train the ABINet model described by *config*.

    Args:
        config: parsed YAML configuration (``ms_utils.Config``); reads
            ``config.optimizer_lr`` for the Adam learning rate and is
            passed through to ``ABINetIterModel``.
        ckpt_file_name: path of the initial checkpoint loaded into the
            network before training. Defaults to the original hard-coded
            path for backward compatibility; pass your own to avoid the
            machine-specific default.
    """
    set_seed(1)
    # Dump Python tracebacks on hard crashes (e.g. segfaults in C extensions).
    faulthandler.enable()

    # The training dataset is built in ms_create_dataset and exposed as the
    # module-level `image_dataset` (already batched).
    dataset = image_dataset

    loss_fn = MultiLosses()
    net = ABINetIterModel(config=config)

    # Load pretrained weights (converted from the PyTorch model).
    param_dict = ms.load_checkpoint(ckpt_file_name)
    param_not_load = ms.load_param_into_net(net, param_dict)
    if param_not_load:
        # Surface parameters that failed to load instead of silently
        # discarding the return value.
        print('parameters not loaded:', param_not_load)

    loss_net = CustomWithLossCell(net, loss_fn)
    opt = nn.Adam(params=net.trainable_params(), learning_rate=config.optimizer_lr)

    step_size = dataset.get_dataset_size()
    print('step_size: ', step_size)
    time_cb = TimeMonitor(data_size=step_size)

    # Checkpointing: save every 30 steps, keep at most the 10 newest files.
    ckpoint_cf = CheckpointConfig(save_checkpoint_steps=30, keep_checkpoint_max=10)
    ckpoint_cb = ModelCheckpoint(prefix="MS_ABINET_Without_Pretrain",
                                 config=ckpoint_cf,
                                 directory='./ms_abinet_wo_pretrain_ckpt')

    model = ms.Model(network=loss_net, optimizer=opt)
    model.train(epoch=30, train_dataset=dataset,
                callbacks=[LossMonitor(), ckpoint_cb, time_cb],
                dataset_sink_mode=False)


def main():
    """Parse CLI arguments, build the config, and launch training."""
    set_seed(1)
    # PyNative (eager) mode on GPU; computation graphs are not saved.
    context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU',
                        save_graphs=False)

    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str,
                        default='/home/data4/zyh/ABINet/configs/train_abinet.yaml',
                        help='path to config file')
    # The following arguments are kept for CLI compatibility; --input,
    # --cuda, --batch_size and --max_text_length are currently unused here.
    parser.add_argument('--input', type=str, default='figs/test')
    parser.add_argument('--cuda', type=int, default=-1)
    parser.add_argument('--model_eval', type=str, default='alignment',
                        choices=['alignment', 'vision', 'language'])
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--max_text_length', type=int, default=30)
    args = parser.parse_args()

    config = Config(args.config)
    if args.model_eval is not None:
        config.model_eval = args.model_eval
    # NOTE(review): phase is set to 'test' even though this script trains —
    # presumably intentional for the converted config; confirm.
    config.global_phase = 'test'
    # Do not auto-load the vision/language sub-model checkpoints from the
    # config file; train() loads a full converted checkpoint instead.
    config.model_vision_checkpoint, config.model_language_checkpoint = None, None

    print("begin training")
    train(config)
    print("done")

# Script entry point: run training only when executed directly.
if __name__ == '__main__':
    main()