from dataset import MOTAMultiTaskDataset
import augmentations
import logging
import os, random, time
import argparse
from utils import set_configs, parse_losses, parse_datainfos, data_augs
import torch
import torch.utils.data as D
import torch.nn as nn
import numpy as np
from preprocessing import MTP_DataPreprocessor
from semantic_segmentation.encoder_decoder import MTP_SS_UperNet
# from instance_segmentation.mask_rcnn import MTP_IS_MaskRCNN
from rotated_detection.oriented_rcnn import MTP_RD_OrientedRCNN
from mmdet.models.utils import empty_instances
from mmengine.config import ConfigDict
from models import MutliTaskPretrnFramework
import torch.distributed as dist
from mmengine.optim import build_optim_wrapper
from mmcv_custom.layer_decay_optimizer_constructor_vit import *
from torch.cuda.amp import autocast, GradScaler
from pprint import pprint
from semantic_segmentation.metric import MTP_SS_Metric
from instance_segmentation.metric import MTP_IS_Metric
from rotated_detection.metric import MTP_RD_Metric
from tqdm import tqdm

def main():
    """Entry point: parse CLI args, set up DDP, build data/model/optimizer, train.

    Side effects: seeds all RNGs, configures the shared logger, joins the NCCL
    process group, and launches the training loop.
    """
    args = parse_args()
    set_seeds(args.seed)
    logger = init_logger(args)
    LOCAL_RANK = init_distributed(args, logger)
    classes = 16  # number of semantic-segmentation classes (DOTA-derived labels)

    train_pipeline, valid_pipeline, batch_augments = set_configs(args.image_size)

    train_dataset, val_dataset = get_dataset(train_pipeline, valid_pipeline)

    train_sampler = D.distributed.DistributedSampler(
        train_dataset, num_replicas=args.world_size, rank=args.rank)
    val_sampler = D.distributed.DistributedSampler(
        val_dataset, num_replicas=args.world_size, rank=args.rank)

    train_loader = D.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)

    val_loader = D.DataLoader(
        val_dataset, batch_size=args.batch_size_val, shuffle=False,
        num_workers=args.workers, pin_memory=True, sampler=val_sampler)

    if main_process(args):
        logger.info('train data length: {}'.format(train_dataset.length))
        logger.info('train batch size: {}'.format(args.batch_size * args.world_size))

    model = MutliTaskPretrnFramework(args, classes=classes, batch_augments=batch_augments).cuda()

    if main_process(args):
        if 'ss' in args.tasks:
            logger.info('Implementing Sementic Segmentation Task!')
        if 'rd' in args.tasks:
            logger.info('Implementing Rotated Detection Task!')

    # Build the optimizer recipe per backbone family. AdamW everywhere, with a
    # layer-wise lr-decay constructor for ViT / InternImage; ViTAE only excludes
    # pos-embed / relative-position / norm params from weight decay.
    if 'vit_' in args.backbone:
        if 'vit_b_' in args.backbone:
            num_layers = 12  # ViT-B depth
        elif 'vit_l_' in args.backbone:
            num_layers = 24  # ViT-L depth
        else:
            raise NotImplementedError('unsupported ViT variant: {}'.format(args.backbone))
        optim_wrapper = dict(
            optimizer=dict(type='AdamW', lr=1e-4, betas=(0.9, 0.999), weight_decay=0.05),
            constructor='LayerDecayOptimizerConstructor_ViT',
            paramwise_cfg=dict(num_layers=num_layers, layer_decay_rate=0.9))
    elif 'internimage' in args.backbone:
        optim_wrapper = dict(
            optimizer=dict(type='AdamW', lr=2e-5, betas=(0.9, 0.999), weight_decay=0.05),
            constructor='CustomLayerDecayOptimizerConstructor_InternImage',
            paramwise_cfg=dict(num_layers=39, layer_decay_rate=0.94, depths=[5, 5, 24, 5]))
    elif 'vitae' in args.backbone:
        optim_wrapper = dict(
            optimizer=dict(type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05),
            paramwise_cfg=dict(
                custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
                             'relative_position_bias_table': dict(decay_mult=0.),
                             'norm': dict(decay_mult=0.)}))
    else:
        # Bug fix: previously `optimizer`/`scheduler` stayed unbound for e.g.
        # the 'resnet50' backbone choice, producing a confusing NameError later.
        raise NotImplementedError('no optimizer recipe for backbone: {}'.format(args.backbone))

    optimizer = build_optim_wrapper(model, optim_wrapper)
    # NOTE(review): T_max is max_epoch * len(train_dataset) while scheduler.step()
    # runs once per *batch* in train() — looks like it should be
    # max_epoch * len(train_loader); kept as-is to preserve the original schedule.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer.optimizer, args.max_epoch * len(train_dataset), eta_min=0, last_epoch=-1)

    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[LOCAL_RANK], find_unused_parameters=True)

    train(args, train_sampler, train_dataset, train_loader, val_dataset, val_loader,
          model, optimizer, logger, scheduler)

def distprint(*args, **kwargs):
    """Print only on the rank-0 process to avoid duplicated output under DDP."""
    if dist.get_rank() != 0:
        return
    print(*args, **kwargs)

def get_dataset(train_pipeline, valid_pipeline,
                base_dir='/data1/users/zhengzhiyu/mtp_workplace/dataset/MOTA/split_ss_dota1_0'):
    """Build the MOTA multi-task train/val datasets.

    Args:
        train_pipeline: mmengine-style transform pipeline for training samples.
        valid_pipeline: transform pipeline for validation samples.
        base_dir: root directory containing the 'train' and 'val' splits
            (parameterized instead of hard-coded; default preserves behavior).

    Returns:
        (train_dataset, val_dataset) tuple of MOTAMultiTaskDataset.
    """
    def _build(split, ann_name, pipeline):
        # One split = COCO-style json anns + per-image DOTA txt rotated boxes
        # + png semantic masks, all rooted under base_dir/<split>/.
        root = '{}/{}/'.format(base_dir, split)
        return MOTAMultiTaskDataset(
            data_root=root,
            data_prefix=dict(img='images/', seg='semantic_labels/'),
            ann_file=root + ann_name,
            rdet_ann_dir=root + 'labelTxt',
            seg_map_suffix='.png',
            rdet_post_fix='.txt',
            pipeline=pipeline)

    train_dataset = _build('train', 'DOTA_train1024_new.json', train_pipeline)
    val_dataset = _build('val', 'DOTA_val1024_new.json', valid_pipeline)
    return train_dataset, val_dataset


def parse_args():
    """Parse command-line arguments for multi-task pretraining.

    Fixes several copy-pasted help strings from the original (--tasks, --resume,
    --use_ckpt, --mixed_precision, --end_iter, --max_epoch); all names, defaults
    and choices are unchanged.
    """
    parser = argparse.ArgumentParser(description='PyTorch MutliTask')
    parser.add_argument('--backbone', type=str, default='vit_b_rvsa', choices=['vit_b_rvsa', 'vit_l_rvsa', 'internimage_xl', 'vitaev2_s', 'resnet50'], help='backbone name')
    parser.add_argument('--datasets', type=str, nargs='+', help='used dataset')
    parser.add_argument('--tasks', type=str, nargs='+', help='used tasks, e.g. ss rd')
    # epoch / iteration bookkeeping
    parser.add_argument('--start_epoch', type=int, default=0, help='index of start epoch')
    parser.add_argument('--start_iter', type=int, default=0, help='index of start iteration')
    parser.add_argument('--end_iter', type=int, default=30000, help='max number of training iterations')
    # batch size
    parser.add_argument('--batch_size', type=int, default=2, help='input batch size for training')
    parser.add_argument('--batch_size_val', type=int, default=1, help='input batch size for validation')
    parser.add_argument('--workers', type=int, default=4, help='workers num')
    parser.add_argument('--batch_mode', type=str, default='avg', choices=['ratio', 'avg'], help='how to assign batch size')
    # learning rate
    parser.add_argument('--lr', type=float, default=None, help='actual learning rate')
    parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)')
    # distributed
    parser.add_argument('--distributed', type=str, default='True', choices=['True', 'False'], help='distributed training')
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--local_rank', type=int, default=0)
    # finetune / resume
    parser.add_argument('--ft', type=str, default='False', choices=['True', 'False'], help='finetune model')
    parser.add_argument('--resume', type=str, default=None, help='path of checkpoint to resume from')
    # save
    parser.add_argument('--save_path', type=str, default='./rdonly_full', help='path of saving model')
    # ignored
    parser.add_argument('--ignore_label', type=int, default=255, help='ignore index of loss')
    # interval
    parser.add_argument('--interval', type=int, default=2000, help='valid interval')
    # init_backbone
    parser.add_argument('--init_backbone', type=str, default='mae', choices=['imp', 'rsp', 'none',
                                                                             'mae', 'beit'], help='init model')
    # port
    parser.add_argument('--port', type=str, default=None, help='master ports')
    # input img size
    parser.add_argument('--image_size', type=int, default=1024, help='image size')
    # background
    parser.add_argument('--background', type=str, default='True', choices=['True', 'False'], help='consider background')
    # checkpoint mechanism
    parser.add_argument('--use_ckpt', type=str, default='False', choices=['True', 'False'], help='use gradient checkpointing')
    # mixed precision
    parser.add_argument('--mixed_precision', type=str, default='True', choices=['True', 'False'], help='use mixed-precision (AMP) training')
    parser.add_argument('--seed', type=int, default=1234, help='random seed')
    parser.add_argument('--max_epoch', type=int, default=100, help='max number of training epochs')
    parser.add_argument('--finetune', type=str, default=None)

    return parser.parse_args()

def set_seeds(seed=2023):
    """Seed every RNG (python, hash, numpy, torch CPU + all CUDA devices).

    Args:
        seed: integer seed applied to all generators.
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    # Bug fix: benchmark=True lets cuDNN auto-tune algorithms, which is
    # non-deterministic and defeats deterministic=True set just above.
    torch.backends.cudnn.benchmark = False

def init_distributed(args, logger):
    """Join the NCCL process group using torchrun-style environment variables.

    Reads WORLD_SIZE/RANK from the environment (mutating args accordingly),
    pins this process to GPU `rank % device_count`, and initializes
    torch.distributed.

    Returns:
        The local rank (GPU index) for DistributedDataParallel placement.
    """
    args.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
    # Bug fix: a missing RANK previously raised KeyError; default to rank 0.
    args.rank = int(os.environ.get('RANK', 0))
    LOCAL_RANK = args.rank % torch.cuda.device_count()
    torch.cuda.set_device(LOCAL_RANK)  # one GPU per process
    # Bug fix: os.environ['MASTER_PORT'] = None raised TypeError when --port
    # was omitted (its default); only override when a port was given.
    if args.port is not None:
        os.environ['MASTER_PORT'] = args.port
    dist.init_process_group(backend='nccl', world_size=args.world_size, rank=args.rank)
    if dist.get_rank() == 0:
        logger.info('#################### Launch for DDP! ############################')
    return LOCAL_RANK

def init_logger(args):
    """Create (or return) the shared 'main-logger' writing to file and console.

    Args:
        args: namespace with `save_path`; the directory is created if missing
            and log lines are appended to <save_path>/log.txt.

    Returns:
        The configured logging.Logger.
    """
    os.makedirs(args.save_path, exist_ok=True)
    logger = logging.getLogger("main-logger")
    logger.setLevel(logging.INFO)
    # Bug fix: getLogger returns the same object on every call, so re-running
    # this function used to stack duplicate handlers and duplicate every line.
    if logger.handlers:
        return logger

    fh = logging.FileHandler(os.path.join(args.save_path, 'log.txt'), mode='a')
    fh.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
    logger.addHandler(fh)

    handler = logging.StreamHandler()
    fmt = "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
    handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(handler)
    return logger

def main_process(args):
    """Return True when this process should do logging/checkpoint I/O.

    Non-distributed runs always qualify; under DDP only the process whose
    rank is a multiple of the world size (i.e. rank 0) does.
    """
    if args.distributed != 'True':
        return True
    return args.rank % args.world_size == 0

def train(args, train_sampler, train_dataset, train_loader, val_dataset, val_loader, model, optimizer,logger,scheduler):
    """DDP training loop with optional AMP, per-batch LR stepping, periodic
    validation, and best-accuracy checkpointing (full model + encoder-only).
    """
    epoch = args.start_epoch
    best_acc = 0
    iter = args.start_iter  # NOTE(review): shadows the builtin `iter`
    tasks_str = ''

    # e.g. tasks ['ss', 'rd'] -> 'ss_rd_', used in checkpoint filenames below
    for task in args.tasks:
        tasks_str += str(task)+'_'

    if args.mixed_precision == 'True':
        scaler = GradScaler()  # AMP loss scaler
    while epoch < args.max_epoch:
        train_sampler.set_epoch(epoch)  # reshuffle shards each epoch under DDP
        start_time = time.time()
        model.train()
        optimizer.zero_grad()

        # progress bar only on rank 0 to keep multi-process logs readable
        if dist.get_rank() == 0:
            pbar = tqdm(train_loader)
        else :
            pbar = train_loader
        for id in pbar:
            # sample ids -> raw data infos -> augmented batch dict for the model
            datainfo_list = parse_datainfos(train_dataset, id)
            x = data_augs(train_dataset, datainfo_list)
            loss = 0
            loss_ss = loss_rd = -1  # placeholders; overwritten unconditionally below

            # forward returns per-task loss dicts: semantic seg, rotated detection
            loss1, loss2 = model.forward(x)
            loss_ss = parse_losses(loss1)
            loss_rd = parse_losses(loss2)

            loss = loss_ss + loss_rd
            iter +=1
            if args.mixed_precision == 'True':
                optimizer.zero_grad()
                scaler.scale(loss).backward()
                # unscale before clipping so max_norm applies to true gradients
                scaler.unscale_(optimizer)
                nn.utils.clip_grad_norm_(model.parameters(), max_norm=5)
                scaler.step(optimizer)
                scaler.update()
                torch.cuda.synchronize()
            else:
                optimizer.zero_grad()
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), max_norm=5)
                optimizer.step()
                torch.cuda.synchronize()
            if dist.get_rank() == 0:
                pbar.set_description('Loss: %.3f, Loss_ss: %.3f, Loss_rd: %.3f' % (loss, loss_ss, loss_rd))
            del loss1, loss2, loss_rd, loss_ss
            # NOTE(review): scheduler steps once per *batch*, but its T_max in
            # main() is max_epoch * len(train_dataset) — presumably should be
            # len(train_loader); confirm intended decay horizon.
            scheduler.step()

        epoch +=1

        # NOTE(review): the first two clauses are complementary halves of the
        # same test — the condition reduces to:
        # epoch % 2 == 0 or epoch == 1 or epoch == args.max_epoch
        if (epoch % 2 == 0 and epoch < args.max_epoch /2) or (epoch % 2 == 0 and epoch >= args.max_epoch/2) or epoch == 1 or epoch == args.max_epoch:
            acc = validation(args, logger, epoch, model,val_dataset, val_loader)
            if best_acc < acc:
                best_acc = acc
                # only persist improved checkpoints in the second half of training
                if main_process(args) and epoch >= args.max_epoch/2:
                    logger.info('>>>>>>>>>>>>>>>> Save model trained on iter {} >>>>>>>>>>>>>>>>'.format(iter))
                    filename = args.save_path + '/Iter_{}_{}_{}_model.pth'.format(iter, args.backbone, tasks_str)
                    torch.save({'epoch': epoch, 'iteration': iter, 'state_dict': model.module.state_dict(), 'optimizer': optimizer.state_dict(),'scheduler':scheduler.state_dict(), 'acc': np.array(acc)},
                                        filename)
                    # encoder-only weights for downstream finetuning
                    filename = args.save_path + '/Iter_{}_{}_{}_model_encoder.pth'.format(iter, args.backbone, tasks_str)
                    torch.save({'epoch': epoch, 'iteration': iter, 'state_dict': model.module.encoder.state_dict(), 'optimizer': optimizer.state_dict(),'scheduler':scheduler.state_dict(), 'acc': np.array(acc)}, filename)

@torch.no_grad()
def validation(args, logger, epoch, model, val_dataset, val_loader):
    """Run validation over val_loader and return the mean task accuracy.

    Computes mIoU for semantic segmentation and/or DOTA mAP for rotated
    detection (mAP scaled to percent), averaged over the enabled tasks.
    Restores model.train() before returning.
    """
    model.eval()
    eval_length = 0  # total number of evaluated samples across the loop

    ss_metric = MTP_SS_Metric(ignore_index=255, iou_metrics=['mIoU'])
    ss_metric.dataset_meta = val_loader.dataset.metainfo
    rd_metric = MTP_RD_Metric(metric='mAP', predict_box_type='rbox')
    rd_metric.dataset_meta = val_loader.dataset.metainfo

    # progress bar only on rank 0
    pbar = tqdm(val_loader) if dist.get_rank() == 0 else val_loader
    for id in pbar:
        datainfo_list = parse_datainfos(val_dataset, id)
        x = data_augs(val_dataset, datainfo_list)
        output_ss, output_rd = model.forward(x)

        ss_metric.process(data_batch=x, data_samples=output_ss)
        rd_metric.process(data_batch=x, data_samples=output_rd)

        eval_length += len(x['inputs'])

    scores = []
    if 'ss' in args.tasks:
        ss_res = ss_metric.evaluate(eval_length)
        if main_process(args):
            logger.info('SS mIOU: {:.2f}'.format(ss_res['mIoU']))
        scores.append(ss_res['mIoU'])
    if 'rd' in args.tasks:
        rd_res = rd_metric.evaluate(eval_length)
        if main_process(args):
            logger.info('RD rbox mAP: {:.4f}'.format(rd_res['dota/mAP']))
        scores.append(rd_res['dota/mAP'] * 100)  # scale mAP to percent like mIoU

    # Bug fix: the original unconditionally indexed both results, raising
    # TypeError when only one task was enabled (the other stayed the int -1).
    # For both tasks enabled this is identical to the original average.
    acc = sum(scores) / len(scores) if scores else 0

    if main_process(args):
        logger.info('Valid epoch {}, Average accuracy: {:.5f}'.format(epoch, acc))
    model.train()
    return acc

if __name__ =="__main__":
    main()
