from dataset import MOTAMultiTaskDataset
import augmentations
import logging
import os, random, time
import argparse
from utils import set_configs, parse_losses, parse_datainfos, data_augs
import torch
import torch.utils.data as D
import torch.nn as nn
import numpy as np
from preprocessing import MTP_DataPreprocessor
from semantic_segmentation.encoder_decoder import MTP_SS_UperNet
# from instance_segmentation.mask_rcnn import MTP_IS_MaskRCNN
from rotated_detection.oriented_rcnn import MTP_RD_OrientedRCNN
from mmdet.models.utils import empty_instances
from mmengine.config import ConfigDict
from models import MutliTaskFramework, MutliTaskFramework_demt, TestFrameWork, AverageMeter
import torch.distributed as dist
from mmengine.optim import build_optim_wrapper
# from schduler import build_param_scheduler
from mmcv_custom.layer_decay_optimizer_constructor_vit import *
from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper
from torch.optim.sgd import SGD
# import timm
from torch.cuda.amp import autocast, GradScaler
from pprint import pprint
from semantic_segmentation.metric import MTP_SS_Metric
from instance_segmentation.metric import MTP_IS_Metric
from rotated_detection.metric import MTP_RD_Metric
from tqdm import tqdm
from train_utils import *
from utils import AveragePrecisionMeter, get_optimizer
# 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_tiny_patch4_window7_224_20220317-1cdeb081.pth'
#  'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window7_224_22k_20220412-aeecf2aa.pth'
# 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window7_224_20220317-e9b98025.pth'


def main(args):
    """Entry point: build datasets, samplers, model and optimizer, then train.

    Must be launched under torch.distributed (one process per GPU); relies on
    `init_distributed_mode` to populate `args.rank` / `args.world_size`.
    """
    set_seeds(args.seed)

    # Encode the task list into the checkpoint directory name, e.g. "ss_rd_".
    task_tag = ''.join(str(t) + '_' for t in args.tasks)
    lamb_tag = str(args.rd_lamb) + "_" + str(args.ss_lamb) + "_" + str(args.cls_lamb)
    args.save_path = os.path.join(args.save_path, args.backbone, task_tag,
                                  'interact_' + args.interact, lamb_tag)

    logger = init_logger(args)
    init_distributed_mode(args)
    num_classes = 16

    train_pipeline, valid_pipeline, batch_augments = set_configs(args.image_size)
    train_dataset, val_dataset = get_dataset(train_pipeline, valid_pipeline)

    # Distributed samplers shard the data across ranks.
    train_sampler = D.distributed.DistributedSampler(
        train_dataset, num_replicas=args.world_size, rank=args.rank)
    val_sampler = D.distributed.DistributedSampler(
        val_dataset, num_replicas=args.world_size, rank=args.rank)

    train_loader = D.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler,
        drop_last=True)
    val_loader = D.DataLoader(
        val_dataset, batch_size=args.batch_size_val, shuffle=False,
        num_workers=args.workers, pin_memory=True, sampler=val_sampler)

    if is_main_process():
        logger.info('train data length: {}'.format(train_dataset.length))
        logger.info('train batch size: {}'.format(args.batch_size * args.world_size))

    # 'demt' selects the decoder-interaction variant of the framework.
    if args.interact == 'demt':
        model = MutliTaskFramework_demt(args, classes=num_classes,
                                        batch_augments=batch_augments,
                                        logger=logger).cuda()
    else:
        model = MutliTaskFramework(args, classes=num_classes,
                                   batch_augments=batch_augments,
                                   logger=logger).cuda()

    if is_main_process():
        if 'ss' in args.tasks:
            logger.info('Implementing Sementic Segmentation Task!')
        if 'rd' in args.tasks:
            logger.info('Implementing Rotated Detection Task!')
        if 'cls' in args.tasks:
            logger.info('Implementing Scene Classification Task!')

    optimizer, scheduler = get_optimizer(args, model, train_loader)

    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[args.gpu], find_unused_parameters=True)

    if dist.get_rank() == 0:
        logger.info(args)
    # NOTE(review): presumably InternImage needs a static DDP graph
    # (e.g. checkpointing with unused parameters) — confirm against its docs.
    if 'internimage' in args.backbone:
        model._set_static_graph()

    train(args, train_sampler, train_dataset, train_loader, val_dataset,
          val_loader, model, optimizer, logger, scheduler)


def get_parse_args():
    """Build the command-line argument parser for multi-task training.

    Returns:
        argparse.ArgumentParser: parser covering model, data, optimization,
        distributed, and checkpointing options. Several boolean options are
        modeled as 'True'/'False' strings because the rest of the codebase
        compares them with string equality.
    """
    parser = argparse.ArgumentParser(description='PyTorch MutliTask', add_help=False)
    parser.add_argument('--backbone', type=str, default='vit_b_rvsa',
                        choices=['vit_b_rvsa', 'vit_l_rvsa', 'internimage_xl',
                                 'vitaev2_s', 'vit_b', 'vit_l', 'resnet50',
                                 'swin_t', 'swin_b', 'swin_l'], help='backbone name')
    parser.add_argument('--datasets', type=str, nargs='+', help='used dataset')
    # Fixed help text: was a copy-paste of '--datasets'.
    parser.add_argument('--tasks', type=str, nargs='+',
                        help='tasks to train (any of: ss, rd, cls)')
    # epoch / iteration bookkeeping
    parser.add_argument('--start_epoch', type=int, default=0, help='index of start epoch')
    parser.add_argument('--start_iter', type=int, default=0, help='index of start iteration')
    parser.add_argument('--end_iter', type=int, default=30000,
                        help='number of iterations to train')
    # batch size
    parser.add_argument('--batch_size', type=int, default=2,
                        help='input batch size for training')
    parser.add_argument('--batch_size_val', type=int, default=1,
                        help='input batch size for validation')
    parser.add_argument('--workers', type=int, default=4, help='workers num')
    parser.add_argument('--batch_mode', type=str, default='avg',
                        choices=['ratio', 'avg'], help='how to assign batch size')
    # learning rate / loss weighting
    parser.add_argument('--lr', type=float, default=None, help='actual learning rate')
    parser.add_argument('--weight_decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')
    parser.add_argument('--rd_lamb', type=float, default=1.0,
                        help='weight of the rotated-detection loss')
    parser.add_argument('--ss_lamb', type=float, default=1.0,
                        help='weight of the semantic-segmentation loss')
    parser.add_argument('--cls_lamb', type=float, default=1.0,
                        help='weight of the classification loss')

    # distributed
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')
    parser.add_argument('--distributed', type=str, default='True',
                        choices=['True', 'False'], help='distributed training')
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--gpu', type=int, default=0)

    # fine-tuning / resuming
    parser.add_argument('--ft', type=str, default='False',
                        choices=['True', 'False'], help='finetune model')
    # Fixed help text: was 'dataset name'.
    parser.add_argument('--resume', type=str, default=None,
                        help='path of checkpoint to resume from')
    # save
    parser.add_argument('--save_path', type=str, default='./rdonly_full',
                        help='path of saving model')
    # ignored label
    parser.add_argument('--ignore_label', type=int, default=255,
                        help='ignore index of loss')
    # validation interval
    parser.add_argument('--interval', type=int, default=2000, help='valid interval')
    # backbone initialization
    parser.add_argument('--init_backbone', type=str, default='mae',
                        choices=['imp', 'rsp', 'none', 'mae', 'beit'],
                        help='init model')
    # port
    parser.add_argument('--port', type=str, default=None, help='master ports')
    # Fixed help text: was a copy-paste of '--port'.
    parser.add_argument('--interact', type=str, default='None',
                        help='task-interaction mode (e.g. demt)')

    # input img size
    parser.add_argument('--image_size', type=int, default=1024, help='image size')
    # background
    parser.add_argument('--background', type=str, default='True',
                        choices=['True', 'False'], help='consider background')
    # Fixed help text: was a copy-paste of '--background'.
    parser.add_argument('--use_ckpt', type=str, default='False',
                        choices=['True', 'False'],
                        help='use gradient checkpointing')
    # Fixed help text: was a copy-paste of '--background'.
    parser.add_argument('--mixed_precision', type=str, default='True',
                        choices=['True', 'False'],
                        help='train with mixed precision (AMP)')
    parser.add_argument('--seed', type=int, default=1234, help='random seeds')
    # Fixed help text: was a copy-paste of '--seed'.
    parser.add_argument('--max_epoch', type=int, default=100,
                        help='number of epochs to train')
    parser.add_argument('--finetune', type=str, default=None)

    return parser

def train(args, train_sampler, train_dataset, train_loader, val_dataset, val_loader, model, optimizer,logger,scheduler):
    """Multi-task training loop with periodic validation and checkpointing.

    Trains until `args.max_epoch`, stepping the scheduler per iteration and
    per epoch. Validates every 2 epochs (plus epoch 1 and the final epoch)
    and, on rank 0, saves the best model once past the halfway point.

    Args:
        args: parsed CLI namespace (tasks, loss weights, AMP flag, paths...).
        train_sampler: DistributedSampler; `set_epoch` is called each epoch.
        train_dataset / train_loader: training data and its loader.
        val_dataset / val_loader: validation data, forwarded to `validation`.
        model: DDP-wrapped multi-task model.
        optimizer: optimizer over model parameters.
        logger: process-wide logger (only rank 0 writes progress).
        scheduler: LR scheduler(s), stepped via the `scheduler_after_*` helpers.
    """
    epoch = args.start_epoch
    best_acc = 0
    best_list = {'rd': -1, 'ss': -1, 'cls': -1}
    # Renamed from `iter` to avoid shadowing the builtin.
    iteration = args.start_iter
    tasks_str = ''.join(str(task) + '_' for task in args.tasks)

    if args.mixed_precision == 'True':
        scaler = GradScaler()

    while epoch < args.max_epoch:
        if dist.get_rank() == 0:
            logger.info('===========> Epoch: {:}, LR: {:.7f}, Previous best: {}'.format(
                epoch, optimizer.param_groups[0]['lr'], best_list))
        # Required so each rank sees a different shuffle every epoch.
        train_sampler.set_epoch(epoch)
        model.train()

        pbar = tqdm(train_loader) if dist.get_rank() == 0 else train_loader
        for id in pbar:
            datainfo_list = parse_datainfos(train_dataset, id)
            x = data_augs(train_dataset, datainfo_list)

            loss_ss = loss_rd = loss_cls = 0
            losses = model.forward(x)
            if 'ss' in args.tasks:
                loss_ss = parse_losses(losses['loss_ss'])
            if 'rd' in args.tasks:
                loss_rd = parse_losses(losses['loss_rd'])
            if 'cls' in args.tasks:
                loss_cls = losses['loss_cls']

            # Weighted sum of the per-task losses.
            loss = args.ss_lamb*loss_ss + args.rd_lamb*loss_rd + args.cls_lamb*loss_cls
            iteration += 1

            optimizer.zero_grad()
            if args.mixed_precision == 'True':
                scaler.scale(loss).backward()
                # Unscale before clipping so the norm is measured on true grads.
                scaler.unscale_(optimizer)
                nn.utils.clip_grad_norm_(model.parameters(), max_norm=10)
                scaler.step(optimizer)
                scaler.update()
            else:
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), max_norm=10)
                optimizer.step()
            torch.cuda.synchronize()

            if dist.get_rank() == 0:
                lr = optimizer.param_groups[0]['lr']
                pbar.set_description('Loss: %.3f, Loss_ss: %.3f, Loss_rd: %.3f, Loss_cls: %.3f, Lr: %.3e' % (loss, loss_ss, loss_rd, loss_cls, lr))
            # Free graph references before the next forward.
            del loss_rd, loss_ss, loss_cls, losses, loss
            scheduler_after_train_iter(scheduler)

        epoch += 1
        scheduler_after_train_epoch(scheduler)

        # Simplified from the original, which tested `epoch % 2 == 0`
        # separately for the first and second halves of training.
        if epoch % 2 == 0 or epoch == 1 or epoch == args.max_epoch:
            acc, acc_list = validation(args, logger, epoch, model, val_dataset, val_loader)
            best_list = {key: max(acc_list[key], best_list[key]) for key in acc_list}

            if best_acc < acc:
                best_acc = acc
                # Only rank 0 saves, and only in the second half of training.
                if (dist.get_rank() == 0) and (epoch >= args.max_epoch/2):
                    logger.info('>>>>>>>>>>>>>>>> Save model trained on iter {} >>>>>>>>>>>>>>>>'.format(iteration))
                    filename = os.path.join(args.save_path, 'Iter_{}_{}_{}model.pth'.format(iteration, args.backbone, tasks_str))
                    torch.save({'epoch': epoch, 'iteration': iteration,
                                'state_dict': model.module.state_dict(),
                                'acc': np.array(acc)}, filename)

@torch.no_grad()
def validation(args, logger, epoch, model, val_dataset, val_loader):
    """Evaluate the model on the validation set for the active tasks.

    Computes mIoU for 'ss', rotated-box mAP for 'rd', and micro-F1 for 'cls',
    then averages whichever are active into a single scalar.

    Args:
        args: parsed CLI namespace; `args.tasks` selects which metrics run.
        logger: logger (only rank 0 / main process writes).
        epoch: current epoch, for logging only.
        model: DDP-wrapped model; switched to eval() and back to train().
        val_dataset / val_loader: validation data and its loader.

    Returns:
        tuple: (mean accuracy over active tasks, dict of per-task scores).
    """
    model.eval()
    eval_length = 0

    ss_metric = MTP_SS_Metric(ignore_index=255, iou_metrics=['mIoU'])
    ss_metric.dataset_meta = val_loader.dataset.metainfo
    rd_metric = MTP_RD_Metric(metric='mAP', predict_box_type='rbox')
    rd_metric.dataset_meta = val_loader.dataset.metainfo

    pbar = tqdm(val_loader) if dist.get_rank() == 0 else val_loader
    it = cls_acc = 0

    F1_micro = AverageMeter()
    Recall_micro = AverageMeter()
    Accuracy_micro = AverageMeter()
    # Bug fix: the original bound the class itself (`AverageMeter`, no parens),
    # which would crash if the precision update below were re-enabled.
    Precision_micro = AverageMeter()
    acc_list = {}
    for id in pbar:
        it += 1
        datainfo_list = parse_datainfos(val_dataset, id)
        x = data_augs(val_dataset, datainfo_list)
        output = model.forward(x)
        if 'ss' in args.tasks:
            ss_metric.process(data_batch=x, data_samples=output['output_ss'])
        if 'rd' in args.tasks:
            rd_metric.process(data_batch=x, data_samples=output['output_rd'])
        if 'cls' in args.tasks:
            batch_size = output['batch_size']
            F1_micro.update(output['output_cls'].item(), batch_size)
            Recall_micro.update(output['cls_recall'].item(), batch_size)
            Accuracy_micro.update(output['cls_acc'].item(), batch_size)
            # Precision_micro.update(output['cls_precison'].item(), batch_size)
        eval_length += len(x['inputs'])

    ss_res = rd_res = -1
    acc = 0
    task_n = 0
    if 'ss' in args.tasks:
        ss_res = ss_metric.evaluate(eval_length)
        acc += ss_res['mIoU']
        task_n += 1
        if is_main_process():
            logger.info('SS mIOU: {:.2f}'.format(ss_res['mIoU']))
        acc_list['ss'] = ss_res['mIoU']
    if 'rd' in args.tasks:
        rd_res = rd_metric.evaluate(eval_length)
        # mAP is reported in [0, 1]; scale to percent like the other metrics.
        acc += rd_res['dota/mAP']*100
        task_n += 1
        if is_main_process():
            logger.info('RD rbox mAP: {:.4f}'.format(rd_res['dota/mAP']*100))
        acc_list['rd'] = rd_res['dota/mAP']*100
    if 'cls' in args.tasks:
        cls_acc = F1_micro.avg*100
        acc += cls_acc
        task_n += 1
        if is_main_process():
            logger.info('cls F1: {:.4f}, cls Recall: {:.4f}, cls Acc: {:.4f}'.format(cls_acc, 
                Recall_micro.avg*100, Accuracy_micro.avg*100,))
        acc_list['cls'] = cls_acc

    # Mean over however many tasks were evaluated.
    acc = acc / task_n

    if is_main_process():
        logger.info('Valid epoch {}, Average accuracy: {:.5f}'.format(epoch, acc))
    model.train()
    return acc, acc_list

# Script entry point: parse CLI arguments and launch training.
if __name__ == "__main__":
    cli_args = get_parse_args().parse_args()
    main(cli_args)
