import argparse
import logging
import os
import pprint
import torch
from torch import nn
import torch.backends.cudnn as cudnn
from torch.optim import SGD
from torch.utils.data import DataLoader
import yaml
from dataset.semi import SemiDataset
from model.semseg.deeplabv3plus import DeepLabV3Plus
from supervised import evaluate
from util.classes import CLASSES
from util.ohem import ProbOhemCrossEntropy2d
from util.utils import count_params, init_log, AverageMeter
from accelerate import Accelerator
import time
# Command-line interface; `parser` is read at module level by main().
parser = argparse.ArgumentParser(description='Revisiting Weak-to-Strong Consistency in Semi-Supervised Semantic Segmentation')
# Required path/config options, all plain strings.
for _flag in ('--config', '--labeled-id-path', '--unlabeled-id-path', '--save-path'):
    parser.add_argument(_flag, type=str, required=True)
# Distributed-launch compatibility options (unused in single-process runs).
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--port', default=None, type=int)
def main():
    """Train DeepLabV3+ with weak-to-strong consistency semi-supervised
    learning (UniMatch-style), using HF Accelerate for device placement and
    fp16 mixed precision.

    Reads the YAML config and id-list paths from the CLI (module-level
    ``parser``), trains for ``cfg['epochs']`` epochs, evaluates after each
    epoch, and keeps a resumable ``latest.pth`` checkpoint in
    ``args.save_path``.
    """
    accelerator = Accelerator(mixed_precision='fp16')
    accelerator.print(f'{accelerator.mixed_precision = }')
    print(f'using device {accelerator.device}')
    # cudnn.enabled = True
    cudnn.benchmark = True

    args = parser.parse_args()
    cfg = yaml.load(open(args.config, "r"), Loader=yaml.Loader)

    logger = init_log('global', logging.INFO)
    all_args = {**cfg, **vars(args)}
    logger.info('{}\n'.format(pprint.pformat(all_args)))

    os.makedirs(args.save_path, exist_ok=True)

    model = DeepLabV3Plus(cfg)
    # Backbone uses the base LR; the randomly initialized head gets lr * lr_multi.
    optimizer = SGD([{'params': model.backbone.parameters(), 'lr': cfg['lr']},
                    {'params': [param for name, param in model.named_parameters() if 'backbone' not in name],
                    'lr': cfg['lr'] * cfg['lr_multi']}], lr=cfg['lr'], momentum=0.9, weight_decay=1e-4)

    logger.info('Total params: {:.1f}M\n'.format(count_params(model)))
    # model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    # BUG FIX: the original built nn.CrossEntropyLoss and then unconditionally
    # overwrote it with ProbOhemCrossEntropy2d (dead store), so 'CELoss'
    # configs silently trained with OHEM. Select the supervised criterion by
    # its configured name instead.
    if cfg['criterion']['name'] == 'CELoss':
        criterion_l = nn.CrossEntropyLoss(**cfg['criterion']['kwargs'])
    elif cfg['criterion']['name'] == 'OHEM':
        criterion_l = ProbOhemCrossEntropy2d(**cfg['criterion']['kwargs'])
    else:
        raise NotImplementedError('%s criterion is not implemented' % cfg['criterion']['name'])
    # Per-pixel (unreduced) CE so unlabeled losses can be masked by confidence.
    criterion_u = nn.CrossEntropyLoss(reduction='none')

    trainset_u = SemiDataset(cfg['dataset'], cfg['data_root'], 'train_u',
                             cfg['crop_size'], args.unlabeled_id_path)
    # Oversample the labeled set so both loaders yield the same number of batches.
    trainset_l = SemiDataset(cfg['dataset'], cfg['data_root'], 'train_l',
                             cfg['crop_size'], args.labeled_id_path, nsample=len(trainset_u.ids))
    valset = SemiDataset(cfg['dataset'], cfg['data_root'], 'val')

    num_workers = 4
    trainloader_l = DataLoader(trainset_l, batch_size=cfg['batch_size'],
                               num_workers=num_workers, drop_last=True, persistent_workers=True)
    trainloader_u = DataLoader(trainset_u, batch_size=cfg['batch_size'],
                               num_workers=num_workers, drop_last=True, persistent_workers=True)
    valloader = DataLoader(valset, batch_size=cfg['batch_size'], num_workers=num_workers,
                           drop_last=False)

    total_iters = len(trainloader_u) * cfg['epochs']
    previous_best = 0.0
    epoch = -1

    logger.info(f'batches is {len(trainloader_l)}')
    model, trainloader_l, trainloader_u, valloader, optimizer = accelerator.prepare(
        model, trainloader_l, trainloader_u, valloader, optimizer)

    # Resume from the latest checkpoint if one exists. Loading happens after
    # prepare() so the state-dict keys match the prepared model; the save
    # below mirrors this by saving the prepared model's state dict.
    latest_ckpt = os.path.join(args.save_path, 'latest.pth')
    if os.path.exists(latest_ckpt):
        checkpoint = torch.load(latest_ckpt)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        epoch = checkpoint['epoch']
        previous_best = checkpoint['previous_best']
        logger.info('************ Load from checkpoint at epoch %i\n' % epoch)

    for epoch in range(epoch + 1, cfg['epochs']):
        logger.info('===========> Epoch: {:}, LR: {:.5f}, Previous best: {:.2f}'.format(
            epoch, optimizer.param_groups[0]['lr'], previous_best))

        total_loss = AverageMeter()
        total_loss_x = AverageMeter()
        total_loss_s = AverageMeter()
        total_loss_w_fp = AverageMeter()
        total_mask_ratio = AverageMeter()

        loader = zip(trainloader_l, trainloader_u)
        start_time = time.time()

        for i, ((img_x, mask_x),
                (img_u_w, img_u_s1, img_u_s2, ignore_mask, _, _)) in enumerate(loader):
            model.train()

            num_lb, num_ulb = img_x.shape[0], img_u_w.shape[0]

            # Joint forward on labeled + weakly-augmented unlabeled batch;
            # the second return value is the feature-perturbed prediction.
            preds, preds_fp = model(torch.cat((img_x, img_u_w)), True)
            pred_x, pred_u_w = preds.split([num_lb, num_ulb])
            pred_u_w_fp = preds_fp[num_lb:]

            pred_u_s1, pred_u_s2 = model(torch.cat((img_u_s1, img_u_s2))).chunk(2)

            # Pseudo-labels come from the (detached) weak-view prediction.
            pred_u_w = pred_u_w.detach()
            conf_u_w = pred_u_w.softmax(dim=1).max(dim=1)[0]
            mask_u_w = pred_u_w.argmax(dim=1)

            # Hoisted: valid-pixel mask and confidence gate, used by all three
            # unsupervised losses (previously recomputed three times).
            valid_mask = (ignore_mask != 255)
            conf_mask = (conf_u_w >= cfg['conf_thresh']) & valid_mask
            num_valid = valid_mask.sum().item()

            loss_x = criterion_l(pred_x, mask_x)

            loss_u_s1 = (criterion_u(pred_u_s1, mask_u_w) * conf_mask).sum() / num_valid
            loss_u_s2 = (criterion_u(pred_u_s2, mask_u_w) * conf_mask).sum() / num_valid
            loss_u_w_fp = (criterion_u(pred_u_w_fp, mask_u_w) * conf_mask).sum() / num_valid

            # UniMatch weighting: 0.5 supervised + 0.25/0.25 strong views + 0.5 FP view.
            loss = (loss_x + loss_u_s1 * 0.25 + loss_u_s2 * 0.25 + loss_u_w_fp * 0.5) / 2.0

            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()

            total_loss.update(loss.item())
            total_loss_x.update(loss_x.item())
            total_loss_s.update((loss_u_s1.item() + loss_u_s2.item()) / 2.0)
            total_loss_w_fp.update(loss_u_w_fp.item())

            # Fraction of valid pixels whose pseudo-label passes the
            # confidence threshold (pure float; no tensor/.item() mixing).
            mask_ratio = conf_mask.sum().item() / num_valid
            total_mask_ratio.update(mask_ratio)

            # Poly LR decay over the whole run.
            iters = epoch * len(trainloader_u) + i
            lr = cfg['lr'] * (1 - iters / total_iters) ** 0.9
            optimizer.param_groups[0]["lr"] = lr
            optimizer.param_groups[1]["lr"] = lr * cfg['lr_multi']

            # ETA estimated from this epoch's throughput extrapolated to the
            # remaining iterations of the whole run.
            elapsed_time = time.time() - start_time
            iterations_done = epoch * len(trainloader_u) + i + 1
            iterations_total = cfg['epochs'] * len(trainloader_u)
            remaining_iterations = iterations_total - iterations_done
            eta_seconds = remaining_iterations * (elapsed_time / iterations_done)
            eta_minutes = int(eta_seconds / 60)
            eta_seconds %= 60

            logger.info('Iters: {:}, Total loss: {:.3f}, Loss x: {:.3f}, Loss s: {:.3f}, Loss w_fp: {:.3f}, Mask ratio: '
                        '{:.3f}, ETA: {:02}:{:02}'.format(i, total_loss.avg, total_loss_x.avg, total_loss_s.avg,
                                        total_loss_w_fp.avg, total_mask_ratio.avg, eta_minutes, int(eta_seconds)))

        eval_mode = 'sliding_window' if cfg['dataset'] == 'cityscapes' else 'original'
        logger.info('evaluating')
        mIoU, iou_class = evaluate(model, valloader, eval_mode, cfg)
        logger.info(f'mIoU: {mIoU}')

        # BUG FIX: previous_best was never updated and no checkpoint was ever
        # written, so the resume branch above could never trigger.
        previous_best = max(previous_best, mIoU)
        if accelerator.is_main_process:
            torch.save({'model': model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'epoch': epoch,
                        'previous_best': previous_best},
                       latest_ckpt)
if __name__ == '__main__':
    main()
