import mindspore
import mindspore.nn as nn
from mindspore import context, ops
from mindspore.communication.management import init, get_rank

import os
# import gc
import time
from datetime import datetime

import numpy as np

# from dataset import Ctw1500Text
from network.loss import TextLoss
from network.textnet import TextNet
# from util.augmentation import Augmentation
from util.config import config as cfg, update_config, print_config
# from util.misc import AverageMeter
from util.misc import mkdirs
from util.option import BaseOptions
from util.summary import LogSummary
from dataset.ctw1500_text import CTW1500_train_dataset_creator,CTW1500_test_dataset_creator
import os



# Global step counter. NOTE(review): appears unused — train() defines a local
# function named `train_step` that shadows this, and nothing else reads it.
train_step = 0

def get_device_id():
    """Return the device index from the DEVICE_ID env var (defaults to 0)."""
    return int(os.environ.get('DEVICE_ID', '0'))

def save_model(model, epoch, lr, optimzer):
    """Write a checkpoint of `model` under cfg.save_dir/cfg.exp_name.

    Args:
        model: network cell to checkpoint; its `backbone_name` is used in the
            file name.
        epoch: epoch index embedded in the file name.
        lr, optimzer: accepted for call compatibility but not persisted
            (the checkpoint contains only the model parameters).
    """
    target_dir = os.path.join(cfg.save_dir, cfg.exp_name)
    if not os.path.exists(target_dir):
        mkdirs(target_dir)

    # NOTE(review): '.pth' is an unusual extension for a MindSpore checkpoint
    # (convention is '.ckpt'), kept as-is so existing load paths keep working.
    ckpt_name = 'textgraph_{}_{}.pth'.format(model.backbone_name, epoch)
    save_path = os.path.join(target_dir, ckpt_name)
    print('Saving to {}.'.format(save_path))
    mindspore.save_checkpoint(model, save_path)


def load_model(model, model_path):
    """Restore `model` weights from a MindSpore checkpoint file.

    Args:
        model: network cell whose parameters are loaded in place.
        model_path: path to a checkpoint written by mindspore.save_checkpoint.
    """
    print('Loading from {}'.format(model_path))
    state_dict = mindspore.load_checkpoint(model_path)
    # BUG FIX: load_param_into_net is a module-level function, not a Cell
    # method, and save_checkpoint(model, path) stores a flat
    # {param_name: Parameter} dict — there is no 'model' key, so the original
    # `model.load_param_into_net(state_dict['model'])` raised on both counts.
    mindspore.load_param_into_net(model, state_dict)

    

def train(model, dataset, criterion, optimizer, epoch):
    """Run one training epoch over `dataset` and checkpoint periodically.

    Args:
        model: TextNet cell; called as model(img, gt_roi) -> (output, gcn_data).
        dataset: MindSpore dataset yielding 8-tuples (img, train_mask, tr_mask,
            tcl_mask, radius_map, sin_map, cos_map, gt_roi).
        criterion: TextLoss cell producing the six component losses.
        optimizer: MindSpore optimizer applied to the computed gradients.
        epoch: current epoch index; a checkpoint is saved every cfg.save_freq.
    """

    def forward_fn(img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi):
        # Forward pass: detection maps plus the GCN branch data for the loss.
        output, gcn_data = model(img, gt_roi)

        tr_loss, tcl_loss, sin_loss, cos_loss, radii_loss, gcn_loss = criterion(output, gcn_data, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map)
        # Total loss is the unweighted sum of all six components.
        loss = tr_loss + tcl_loss + sin_loss + cos_loss + radii_loss + gcn_loss
        # NOTE(review): this prints every step, not every 100 batches like the
        # summary below — very noisy; confirm it is wanted.
        print(f"loss:{loss}, tr_loss: {tr_loss}, tcl_loss:{tcl_loss}, sin_loss:{sin_loss}, cos_loss:{cos_loss}, radii_loss:{radii_loss}, gcn_loss:{gcn_loss}")
        return loss, tr_loss, tcl_loss, sin_loss, cos_loss, radii_loss, gcn_loss

    # Differentiate w.r.t. optimizer.parameters only (positions=None).
    # has_aux=True: only the first return value (loss) is differentiated; the
    # remaining component losses are passed through as auxiliary outputs.
    grad_fn = ops.value_and_grad(forward_fn, None, optimizer.parameters, has_aux=True)


    # NOTE(review): this local name shadows the module-level `train_step = 0`.
    def train_step(img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi):
        (loss, tr_loss, tcl_loss, sin_loss, cos_loss, radii_loss, gcn_loss), grads = grad_fn(img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi)
        # ops.depend forces the optimizer update to execute before `loss` is
        # consumed, so the parameter update is not pruned from the graph.
        loss = ops.depend(loss, optimizer(grads))
        return loss, tr_loss, tcl_loss, sin_loss, cos_loss, radii_loss, gcn_loss


    size = dataset.get_dataset_size()
    model.set_train()

    t0 = time.time()

    iterater = dataset.create_tuple_iterator()
    for batch, (img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi) in enumerate(iterater):
        loss, tr_loss, tcl_loss, sin_loss, cos_loss, radii_loss, gcn_loss = train_step(img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi)
        if batch % 100 == 0:
            # Pull the scalar off-device only when logging.
            loss, current = loss.asnumpy(), batch
            print(f"loss: {loss:>7f}  [{current:>3d}/{size:>3d}]")
            
    print("batch time: {}".format(time.time() - t0))
    
    if epoch % cfg.save_freq == 0:
        save_model(model, epoch, optimizer.get_lr(), optimizer)



def test(model, dataset, criterion):
    """Evaluate `model` on `dataset` and print the mean total loss per batch.

    Args:
        model: TextNet cell; called as model(img, gt_roi) -> (output, gcn_data).
        dataset: MindSpore dataset yielding the same 8-tuples as training.
        criterion: TextLoss cell producing the six component losses.
    """
    num_batches = dataset.get_dataset_size()
    model.set_train(False)
    total, test_loss = 0, 0
    # BUG FIX: the original unpacked `batch, (img, ...)` straight from
    # create_tuple_iterator() without enumerate(); the iterator yields
    # 8-element tuples, so that unpacking raised on the first batch.
    for batch, (img, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map, gt_roi) \
            in enumerate(dataset.create_tuple_iterator()):
        output, gcn_data = model(img, gt_roi)
        total += len(img)  # running sample count (currently informational only)
        tr_loss, tcl_loss, sin_loss, cos_loss, radii_loss, gcn_loss \
            = criterion(output, gcn_data, train_mask, tr_mask, tcl_mask, radius_map, sin_map, cos_map)
        loss = tr_loss + tcl_loss + sin_loss + cos_loss + radii_loss + gcn_loss

        test_loss += loss.asnumpy()
    test_loss /= num_batches
    print(f"Avg loss: {test_loss:>8f} \n")




def main():
    """Build the dataset, model, loss, and optimizer from `cfg`, then train.

    Raises:
        ValueError: if cfg.exp_name names an unsupported dataset.
    """
    os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
    mindspore.dataset.config.set_num_parallel_workers(8)

    # NOTE(review): device target and save_graphs_path are hard-coded; consider
    # sourcing them from cfg (device_target was once read from there).
    context.set_context(mode=context.PYNATIVE_MODE,
                        device_target='GPU',
                        device_id=get_device_id(),
                        save_graphs=True,
                        save_graphs_path="/home/data/zby22/ms_DRRG/1")

    # Dataset. BUG FIX: the original only printed a message on an unknown name
    # and then crashed later with NameError on train_dataset — fail fast instead.
    if cfg.exp_name == 'Ctw1500':
        train_dataset = CTW1500_train_dataset_creator()
        test_dataset = CTW1500_test_dataset_creator()
    else:
        raise ValueError("dataset name is not correct: {!r} (expected 'Ctw1500')".format(cfg.exp_name))

    # Model (set_grad(True) returns the cell itself, so chaining is safe).
    model = TextNet(backbone=cfg.net, is_training=True).set_grad(True)

    # Loss
    criterion = TextLoss().set_grad(True)

    # Piecewise-constant LR schedule. (The original also read the scalar
    # cfg.lr first, but it was dead — immediately overwritten by this.)
    lr = nn.piecewise_constant_lr(cfg.milestone, cfg.learning_rates)

    # Optimizer. BUG FIX: cfg.momentum was read into `moment` but never passed
    # to SGD, silently training without momentum.
    moment = cfg.momentum
    if cfg.optim == "Adam":
        optimizer = nn.optim.Adam(model.trainable_params(), learning_rate=lr)
    else:
        optimizer = nn.optim.SGD(model.trainable_params(), learning_rate=lr, momentum=moment)

    print('Start training TextGraph.')
    # NOTE(review): the `+ 1` runs max_epoch + 1 epochs in total — confirm intended.
    for epoch in range(cfg.start_epoch, cfg.start_epoch + cfg.max_epoch + 1):
        print('epoch:' + str(epoch))
        train(model, train_dataset, criterion, optimizer, epoch)
        if epoch % 10 == 0:
            print("evaluate on test dataset ...")
            test(model, test_dataset, criterion)

    




if __name__ == "__main__":
    np.random.seed(2022)
    mindspore.set_seed(2022)
    # parse arguments
    option = BaseOptions()
    args = option.initialize()

    update_config(cfg, args)
    print_config(cfg)

    # main
    main()

