import os
import hydra
import torch
from tqdm import tqdm
import torch.optim as optim
from core.utils.utils import InputPadder
from core.monster import Monster 
from omegaconf import OmegaConf
import torch.nn.functional as F
from accelerate import Accelerator
import core.stereo_datasets as datasets
from accelerate.utils import set_seed
from accelerate import DataLoaderConfiguration
from accelerate.utils import DistributedDataParallelKwargs
from core.warp import disp_warp



import matplotlib
import numpy as np
import swanlab
from pathlib import Path
import torch.distributed as dist
from swanlab.integration.accelerate import SwanLabTracker
from PIL import Image

def gray_2_colormap_np(img, cmap = 'rainbow', max = None):
    """Convert a single-channel torch tensor to an RGB uint8 colormap image.

    Args:
        img: 2-D (after squeeze) torch tensor of non-negative values
            (negatives are clamped to 0).
        cmap: matplotlib colormap name.
        max: optional normalization ceiling; when None the image maximum
            is used. (Parameter name shadows the builtin but is kept for
            backward compatibility with keyword callers.)

    Returns:
        (H, W, 3) uint8 numpy array; (near-)zero input pixels are black.
    """
    img = img.cpu().detach().numpy().squeeze()
    assert img.ndim == 2
    img[img < 0] = 0
    # Pixels that are effectively zero are treated as invalid and painted black.
    mask_invalid = img < 1e-10
    if max is None:  # fix: identity comparison with None (was `== None`)
        img = img / (img.max() + 1e-8)
    else:
        img = img / (max + 1e-8)

    norm = matplotlib.colors.Normalize(vmin=0, vmax=1.1)
    # NOTE(review): matplotlib.cm.get_cmap was removed in matplotlib 3.9;
    # switch to matplotlib.colormaps[cmap] when upgrading.
    cmap_m = matplotlib.cm.get_cmap(cmap)
    mappable = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap_m)  # renamed from `map` (shadowed builtin)
    colormap = (mappable.to_rgba(img)[:, :, :3] * 255).astype(np.uint8)
    colormap[mask_invalid] = 0

    return colormap

def sequence_loss(disp_preds, disp_init_pred, disp_gt, valid, loss_gamma=0.9, max_disp=192):
    """Exponentially-weighted loss over a sequence of disparity predictions.

    Later iterations receive higher weight (RAFT-style); the initial
    prediction contributes a smooth-L1 term with weight 1.0.

    Args:
        disp_preds: list of per-iteration disparity tensors, each (B, 1, H, W).
        disp_init_pred: initial disparity prediction, (B, 1, H, W).
        disp_gt: ground-truth disparity, (B, 1, H, W).
        valid: validity mask, (B, H, W); pixels with value >= 0.5 are used.
        loss_gamma: decay base for per-iteration weights.
        max_disp: pixels with GT magnitude >= this are excluded.

    Returns:
        (disp_loss, metrics) where metrics holds EPE and 1/3/5-px ratios
        of the final prediction over valid pixels.
    """
    n_predictions = len(disp_preds)
    assert n_predictions >= 1
    disp_loss = 0.0
    # GT disparity magnitude, used to drop out-of-range pixels.
    mag = torch.sum(disp_gt**2, dim=1).sqrt()
    valid = ((valid >= 0.5) & (mag < max_disp)).unsqueeze(1)
    assert valid.shape == disp_gt.shape, [valid.shape, disp_gt.shape]
    assert not torch.isinf(disp_gt[valid.bool()]).any()

    # Initial prediction: smooth-L1 over valid, non-NaN pixels.
    init_valid = valid.bool() & ~torch.isnan(disp_init_pred)
    disp_loss += 1.0 * F.smooth_l1_loss(disp_init_pred[init_valid], disp_gt[init_valid], reduction='mean')

    # BUGFIX: guard against ZeroDivisionError when n_predictions == 1
    # (the single prediction gets weight adjusted**0 == 1 regardless).
    # Also hoisted out of the loop — it is loop-invariant.
    if n_predictions > 1:
        adjusted_loss_gamma = loss_gamma**(15 / (n_predictions - 1))
    else:
        adjusted_loss_gamma = loss_gamma
    for i in range(n_predictions):
        i_weight = adjusted_loss_gamma**(n_predictions - i - 1)
        i_loss = (disp_preds[i] - disp_gt).abs()
        assert i_loss.shape == valid.shape, [i_loss.shape, valid.shape, disp_gt.shape, disp_preds[i].shape]
        disp_loss += i_weight * i_loss[valid.bool() & ~torch.isnan(i_loss)].mean()

    epe = torch.sum((disp_preds[-1] - disp_gt)**2, dim=1).sqrt()
    epe = epe.view(-1)[valid.view(-1)]

    if valid.bool().sum() == 0:
        # BUGFIX: was torch.Tensor([0.0]).cuda(), which crashes on CPU-only
        # runs; allocate on the same device as the inputs instead.
        epe = torch.zeros(1, device=disp_gt.device)

    metrics = {
        'train/epe': epe.mean(),
        'train/1px': (epe < 1).float().mean(),
        'train/3px': (epe < 3).float().mean(),
        'train/5px': (epe < 5).float().mean(),
    }
    return disp_loss, metrics

def fetch_optimizer(args, model):
    """Create the AdamW optimizer and OneCycle LR scheduler.

    The DPT feature decoder (``model.feat_decoder``) is trained at half the
    base learning rate; all remaining trainable parameters use ``args.lr``.

    Args:
        args: config with ``lr``, ``wdecay`` and ``total_step`` attributes.
        model: network exposing a ``feat_decoder`` submodule.

    Returns:
        (optimizer, scheduler) tuple.
    """
    # Use a set of ids for O(1) membership tests (was a list -> O(n) per param).
    decoder_param_ids = {id(p) for p in model.feat_decoder.parameters()}
    rest_params = [p for p in model.parameters()
                   if id(p) not in decoder_param_ids and p.requires_grad]

    params_dict = [{'params': model.feat_decoder.parameters(), 'lr': args.lr / 2.0},
                   {'params': rest_params, 'lr': args.lr}]
    optimizer = optim.AdamW(params_dict, lr=args.lr, weight_decay=args.wdecay, eps=1e-8)

    # +100 steps of headroom so the scheduler never runs past its horizon.
    scheduler = optim.lr_scheduler.OneCycleLR(optimizer, [args.lr / 2.0, args.lr], args.total_step + 100,
            pct_start=0.01, cycle_momentum=False, anneal_strategy='linear')

    return optimizer, scheduler


@hydra.main(version_base=None, config_path='config', config_name='train_mixall')
def main(cfg):
    """Train the Monster stereo model with HuggingFace Accelerate (bf16).

    Builds the mixed training dataloader plus one validation loader per
    dataset named in ``cfg.val_dataset``, optionally restores a checkpoint,
    then runs the optimize / log / validate / save loop until
    ``cfg.total_step`` steps have been taken.  Scalars and example images
    are logged through the SwanLab tracker.
    """
    set_seed(cfg.seed)
    Path(cfg.save_path).mkdir(exist_ok=True, parents=True)
    tracker = SwanLabTracker(cfg.project_name)
    # Parts of the model may be unused in a given forward pass under DDP.
    kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
    accelerator = Accelerator(mixed_precision='bf16', dataloader_config=DataLoaderConfiguration(use_seedable_sampler=True), log_with=tracker, kwargs_handlers=[kwargs], step_scheduler_with_optimizer=False)
    accelerator.init_trackers(project_name=cfg.project_name, config=OmegaConf.to_container(cfg, resolve=True))

    train_dataset = datasets.fetch_dataloader(cfg)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=cfg.batch_size//cfg.num_gpu,
        pin_memory=True, shuffle=True, num_workers=int(8), drop_last=True)

    aug_params = {}  # validation runs on un-augmented data
    val_loaders = {}

    for dataset_name in cfg.val_dataset:
        print(f"Preparing dataset and dataloader for: {dataset_name}")

        try:
            dataset_class = getattr(datasets, dataset_name)
        except AttributeError:
            raise ValueError(f"数据集类 {dataset_name} 不存在，请检查 datasets 模块")

        # Middlebury additionally needs a split/resolution selection.
        if dataset_name == 'Middlebury':
            val_dataset = dataset_class(aug_params, split=cfg.split, resolution=cfg.resolution)
        else:
            val_dataset = dataset_class(aug_params)
        val_loader = torch.utils.data.DataLoader(
            val_dataset,
            batch_size=1,
            pin_memory=True,
            shuffle=False,
            num_workers=4,
            drop_last=False
        )

        val_loaders[dataset_name] = val_loader
    # Let accelerate device-place / shard every validation loader.
    val_loaders = {
        name: accelerator.prepare(loader)
        for name, loader in val_loaders.items()
    }

    model = Monster(cfg)
    optimizer, lr_scheduler = fetch_optimizer(cfg, model)

    # BUGFIX: total_step was previously assigned only in the 'state_dict'
    # checkpoint branch and the no-checkpoint branch, so resuming from a
    # checkpoint keyed 'model' raised NameError at the first logging call.
    # Initialize it unconditionally instead.
    total_step = 0
    if not cfg.restore_ckpt.endswith("None"):
        assert cfg.restore_ckpt.endswith(".pth")
        print(f"Loading checkpoint from {cfg.restore_ckpt}")
        assert os.path.exists(cfg.restore_ckpt)
        checkpoint = torch.load(cfg.restore_ckpt, map_location='cpu')
        ckpt = dict()
        if 'state_dict' in checkpoint.keys():
            # Strip the DistributedDataParallel 'module.' prefix if present.
            checkpoint = checkpoint['state_dict']
            for key in checkpoint:
                if key.startswith("module."):
                    ckpt[key.replace('module.', '')] = checkpoint[key]
                else:
                    ckpt[key] = checkpoint[key]
            model.load_state_dict(ckpt, strict=True)
        elif 'model' in checkpoint.keys():
            checkpoint = checkpoint['model']
            for key in checkpoint:
                if key.startswith("module."):
                    ckpt[key.replace('module.', '')] = checkpoint[key]
                else:
                    ckpt[key] = checkpoint[key]
            model.load_state_dict(ckpt, strict=True)
        # NOTE(review): checkpoints store 'total_step', but training always
        # restarts the counter at 0 here — confirm that is intended.
        print(f"Loaded checkpoint from {cfg.restore_ckpt} successfully")
        del ckpt, checkpoint

    train_loader, model, optimizer, lr_scheduler = accelerator.prepare(train_loader, model, optimizer, lr_scheduler)

    should_keep_training = True
    try:
        while should_keep_training:
            active_train_loader = train_loader

            model.train()
            model.module.freeze_bn()  # keep BatchNorm statistics frozen during training

            for data in tqdm(active_train_loader, dynamic_ncols=True, disable=not accelerator.is_main_process):
                image_list, left, right, disp_gt, valid = [x for x in data]

                with accelerator.autocast():
                    disp_init_pred, disp_preds, depth_mono = model(left, right, iters=cfg.train_iters)

                loss, metrics = sequence_loss(disp_preds, disp_init_pred, disp_gt, valid, max_disp=cfg.max_disp)
                accelerator.backward(loss)
                accelerator.clip_grad_norm_(model.parameters(), 1.0)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

                total_step += 1
                # Average scalars across processes before logging.
                loss_val = accelerator.reduce(loss.detach(), reduction='mean')
                metrics = accelerator.reduce(metrics, reduction='mean')
                accelerator.log({'train/loss': loss_val, 'train/learning_rate': optimizer.param_groups[0]['lr']}, total_step)
                accelerator.log(metrics, total_step)

                # Visualize inputs, predictions and the mono-depth prior.
                if total_step % 100 == 0 and accelerator.is_main_process:
                    # PERF: the GT-warped right image is only needed for this
                    # visualization, so it is no longer computed every step.
                    warped_right, _ = disp_warp(right, disp_gt)
                    warped_right_masked = warped_right * valid.unsqueeze(1)
                    warped_img = torch.cat([warped_right_masked[0], left[0], right[0]], dim=1) / 255.0

                    image1_np = left[0].squeeze().cpu().numpy()
                    image1_np = (image1_np - image1_np.min()) / (image1_np.max() - image1_np.min()) * 255.0
                    image1_np = image1_np.astype(np.uint8)
                    image1_np = np.transpose(image1_np, (1, 2, 0))

                    depth_mono_np = gray_2_colormap_np(depth_mono[0].squeeze())
                    disp_preds_np = gray_2_colormap_np(disp_preds[-1][0].squeeze())
                    disp_gt_np = gray_2_colormap_np(disp_gt[0].squeeze())

                    accelerator.log({"left": swanlab.Image(image1_np, caption="step:{}".format(total_step))}, total_step)
                    accelerator.log({"warped_img": swanlab.Image(warped_img, caption="step:{}".format(total_step))}, total_step)
                    accelerator.log({"disp_pred": swanlab.Image(disp_preds_np, caption="step:{}".format(total_step))}, total_step)
                    accelerator.log({"disp_gt": swanlab.Image(disp_gt_np, caption="step:{}".format(total_step))}, total_step)
                    accelerator.log({"depth_mono": swanlab.Image(depth_mono_np, caption="step:{}".format(total_step))}, total_step)

                # Periodic checkpoint (weights + optimizer + scheduler state).
                if (total_step > 0) and (total_step % cfg.save_frequency == 0):
                    if accelerator.is_main_process:
                        save_path = Path(cfg.save_path + '/%d.pth' % (total_step))
                        model_save = accelerator.unwrap_model(model)
                        checkpoint = {
                            'model': model_save.state_dict(),
                            'optimizer': optimizer.state_dict(),
                            'total_step': total_step,
                            'scheduler': lr_scheduler.state_dict()
                        }
                        torch.save(checkpoint, save_path)
                        del model_save

                # Periodic validation over every configured dataset.
                if (total_step > 0) and (total_step % cfg.val_frequency == 0):
                    torch.cuda.empty_cache()
                    model.eval()
                    for name, val_loader in val_loaders.items():
                        if accelerator.is_main_process:
                            print(f"\nEvaluating on {name}...")
                        elem_num, total_epe, total_out = 0, 0, 0
                        # BUGFIX: choose the outlier threshold per dataset.
                        # Previously a Middlebury/ETH3D threshold leaked into
                        # whichever dataset happened to be evaluated next.
                        if name == 'Middlebury':
                            threshold = 2.0
                        elif name == 'ETH3D':
                            threshold = 1.0
                        else:
                            threshold = 3.0
                        for data in tqdm(val_loader, dynamic_ncols=True, disable=not accelerator.is_main_process):
                            (imageL_file, imageR_file, GT_file), left, right, disp_gt, valid = [x for x in data]
                            padder = InputPadder(left.shape, divis_by=32)
                            left, right = padder.pad(left, right)
                            with torch.no_grad():
                                disp_pred = model(left, right, iters=cfg.valid_iters, test_mode=True)
                            disp_pred = padder.unpad(disp_pred)
                            assert disp_pred.shape == disp_gt.shape, (disp_pred.shape, disp_gt.shape)
                            epe = torch.abs(disp_pred - disp_gt)
                            out = (epe > threshold).float()
                            epe = torch.squeeze(epe, dim=1)
                            out = torch.squeeze(out, dim=1)
                            # Both benchmarks score only non-occluded pixels.
                            if name == 'ETH3D':
                                occ_mask = Image.open(GT_file[0].replace('disp0GT.pfm', 'mask0nocc.png'))
                                occ_mask = np.ascontiguousarray(occ_mask)
                                occ_mask = torch.from_numpy(np.array(occ_mask)).to(valid.device)
                                valid = (valid >= 0.5) & (occ_mask == 255)
                            elif name == 'Middlebury':
                                occ_mask = Image.open(imageL_file[0].replace('im0.png', 'mask0nocc.png')).convert('L')
                                occ_mask = np.ascontiguousarray(occ_mask, dtype=np.float32)
                                occ_mask = torch.from_numpy(np.array(occ_mask)).to(valid.device)
                                valid = (valid >= 0.5) & (occ_mask == 255)
                            epe, out = accelerator.gather_for_metrics((epe[valid >= 0.5].mean(), out[valid >= 0.5].mean()))
                            elem_num += epe.shape[0]
                            for i in range(epe.shape[0]):
                                total_epe += epe[i]
                                total_out += out[i]
                        if accelerator.is_main_process:
                            accelerator.log({f'{name}/val_epe': total_epe / elem_num, f'{name}/val_d1': 100 * total_out / elem_num}, total_step)

                    model.train()
                    model.module.freeze_bn()

                if total_step == cfg.total_step:
                    should_keep_training = False
                    break

        if accelerator.is_main_process:
            save_path = Path(cfg.save_path + '/final.pth')
            model_save = accelerator.unwrap_model(model)
            torch.save(model_save.state_dict(), save_path)
            del model_save

        accelerator.end_training()
    finally:
        # Tear down the process group even if training crashed.
        if dist.is_initialized():
            dist.destroy_process_group()

if __name__ == '__main__':
    # Entry point: the @hydra.main decorator on main() parses CLI overrides
    # and loads config/train_mixall before running training.
    main()




