# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

'''
# Plan: This file should provide a trainer class, which provides
# 1. The init of DDP                                            Done
# 2. The init of optimizers, tb, timers, and so on               Done
# 3. A basic training framework (especially for finetuning)
#       self._train_epoch_                                     Done
#       self._process_batch_                                  Done
#       self._step_                                           Done
# 4. The training loop: more utils to be added
'''
import contextlib

import copy
import functools
import gc
import json
import logging
import math
import os
import sys
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
os.environ["MKL_THREADING_LAYER"] = "GNU"
os.environ["HYDRA_FULL_ERROR"] = "1"
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"

import random
import string
import time
from dataclasses import dataclass, field
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union

import numpy as np

import torch
import torch.distributed as dist
import torch.nn as nn
import torchvision


import fvcore
from einops import rearrange
from hydra.utils import instantiate
from iopath.common.file_io import g_pathmgr
from omegaconf import OmegaConf
from PIL import Image
from train_utils.csv_writer import CsvLogger

from datetime import timedelta

try:
    from filelock import FileLock
except ImportError:
    FileLock = None

import rootutils
rootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)
#
from train_utils.general import *
from train_utils.logging import setup_logging
from train_utils.distributed import get_machine_local_and_dist_rank
from train_utils.freeze import freeze_modules, unfreeze
from train_utils.optimizer import construct_optimizers
from train_utils.normalization import normalize_camera_extrinsics_and_points_batch
from train_utils.checkpoint import DDPCheckpointSaver

from eval_utils.eval_wrapper import eval_batch
from eval_utils.align_utils.align_camera import align_camera_and_points_batch_ext
from eval_utils.align_utils.umeyama_alignment import align_pred_to_gt_torch_batch, align_extrinsics_torch, align_pred_to_gt_torch_batch_roma
from eval_utils.normalize_utils.normalize_pc import normalize_depth_cam_extrinsics
from eval_utils.align_utils.depth_median_scaling import median_scale_depth_torch, median_scale_depth_torch_batch
from eval_utils.normalize_utils.normalize_pc import normalize_pointcloud_vggt, normalize_pr_pointcloud
from eval_utils.transform_utils import global_points_from_cam, cam_points_from_depth
from vggt.utils.pose_enc import pose_encoding_to_extri_intri
from dust3r.utils.camera import center_c2w_poses_batch, get_pred_world_to_gt_world_transforms

def get_amp_type(amp_dtype: str):
    """Map an AMP dtype name ("bfloat16" or "float16") to the torch dtype."""
    dtype_by_name = {
        "bfloat16": torch.bfloat16,
        "float16": torch.float16,
    }
    assert amp_dtype in dtype_by_name, f"Invalid Amp type: {amp_dtype}"
    return dtype_by_name[amp_dtype]


class Trainer:
    """
    Trainer supporting the DDP training strategies.
    """

    EPSILON = 1e-8

    def __init__(
        self,
        *,
        data: Dict[str, Any],
        model: Dict[str, Any],
        logging: Dict[str, Any],
        checkpoint: Dict[str, Any],
        max_epochs: int,
        mode: str = "train",
        device: str = "cuda",
        seed_value: int = 123,
        val_epoch_freq: int = 1,
        cuda: Dict[str, bool] = None,
        limit_train_batches: Optional[int] = None,
        limit_val_batches: Optional[int] = None,
        optim: Optional[Dict[str, Any]] = None,
        loss: Optional[Dict[str, Any]] = None,
        env_variables: Optional[Dict[str, Any]] = None,
        accum_steps: int = 1,
        postprocess: Dict[str, Any],
        **kwargs,
    ):
        """Build the trainer: env vars, log dir, device, model, data, optimizers.

        All arguments are keyword-only. The `data`/`model`/`loss`/`logging`/
        `checkpoint`/`postprocess` arguments are config nodes (Hydra/OmegaConf)
        that get instantiated later in `_setup_components` / `_setup_dataloaders`.
        `mode` is "train" or "val". NOTE: the `logging` parameter shadows the
        stdlib `logging` module inside this method.
        """
        self._setup_env_variables(env_variables)
        self._setup_timers()

        # Keep the raw config nodes; they are instantiated in the setup methods.
        self.data_conf = data
        self.model_conf = model
        self.loss_conf = loss
        self.logging_conf = logging
        self.checkpoint_conf = checkpoint
        self.postprocess_conf = postprocess

        # hyperparameters
        self.accum_steps = accum_steps
        self.max_epochs = max_epochs
        self.mode = mode
        self.val_epoch_freq = val_epoch_freq
        self.limit_train_batches = limit_train_batches
        self.limit_val_batches = limit_val_batches
        self.optim_conf = optim

        # Fraction of total training completed, in [0, 1]; updated in train_epoch.
        self.where = 0.0
        self.seed_value = seed_value

        # Derive a log subdirectory name from which train-time alignment
        # postprocess options are enabled; falls back to "vggt" when none are.
        log_dir = self.logging_conf.log_dir
        def get_subdir_str(key='pred_center'):
            # Returns "_<key>" when postprocess.train.align.<key>.enabled is truthy.
            return f"_{key}" if self.postprocess_conf.get('train', {}).get('align', {}).get(key, {}).get('enabled') else ''
        subdir = get_subdir_str('pred_center') + get_subdir_str('center_world') + get_subdir_str('pts_align_to_gt')
        if subdir == '':
            subdir = 'vggt'
        log_dir = os.path.join(log_dir, subdir)
        self.logging_conf.log_dir = log_dir
        safe_makedirs(log_dir)

        print(log_dir)

        # --- Acquire a file lock to ensure exclusive GPU usage ---
        if FileLock:
            lock_path = "/tmp/gpu.lock"
            self.gpu_lock = FileLock(lock_path)
            print(f"Attempting to acquire lock on '{lock_path}'...")
            # Blocks until the lock is free; held for the process lifetime.
            self.gpu_lock.acquire()
            print("Lock acquired. It is safe to proceed.")
            # The lock will be automatically released when the script exits.
        else:
            print("`filelock` library not found, skipping lock. Run `pip install filelock`.")

        # Remove the local closure so it does not leak into the locals() snapshot
        # captured below for the saved config (also popped defensively there).
        del get_subdir_str

        self._setup_device(device)
        self._setup_cuda_backend(cuda)
        setup_logging(
            __name__,
            output_dir=self.logging_conf.log_dir,
            rank=0,
            log_level_primary=self.logging_conf.log_level_primary,
            log_level_secondary=self.logging_conf.log_level_secondary,
            all_ranks=self.logging_conf.all_ranks,
        )
        set_seeds(seed_value, self.max_epochs, 0)
        self.amp_type = get_amp_type(self.optim_conf.amp.amp_dtype)

        self._setup_components()  # Except Optimizer everything is setup here.
        self._setup_dataloaders()

        self.model.to(self.device)
        if self.scaler:
            copy_data_to_device(self.scaler, self.device)

        self.time_elapsed_meter = DurationMeter("Time Elapsed", self.device, ":.4f")

        # Optimizers are only needed when training; "val" mode skips them.
        if self.mode != "val":
            self.optims = construct_optimizers(
                self.model,
                self.optim_conf,
            )

        self.csv_logger = None
        if self.logging_conf.get("csv_writer") and self.logging_conf.csv_writer.get("enabled"):
            csv_conf = self.logging_conf.csv_writer
            csv_path = os.path.join(csv_conf.path, csv_conf.filename)

            self.csv_logger = CsvLogger(csv_path)


        # Save the full config for reproducibility.
        # Snapshot the constructor arguments via locals(); non-argument entries
        # (self, the helper closure, kwargs wrapper) are stripped first.
        init_params = locals()
        kwargs_dict = init_params.pop('kwargs', {})
        init_params.pop('self', None)
        init_params.pop('__class__', None)
        init_params.pop('get_subdir_str', None)
        init_params.update(kwargs_dict)

        conf_to_save = OmegaConf.create(init_params)
        config_path = os.path.join(self.logging_conf.log_dir, "trainer_config.yaml")
        with g_pathmgr.open(config_path, "w") as f:
            f.write(OmegaConf.to_yaml(conf_to_save))
        print(f"Saved trainer config to {config_path}")

        ################################
        # If you want to force to resume from a specific checkpoint, you can do so by setting the resume_checkpoint_path in the config
        if self.checkpoint_conf.resume_checkpoint_path is not None:
            self._load_resuming_checkpoint(self.checkpoint_conf.resume_checkpoint_path)
        else:
            # Otherwise, auto-resume from the latest checkpoint in save_dir, if any.
            ckpt_path = get_resume_checkpoint(self.checkpoint_conf.save_dir)
            if ckpt_path is not None:
                self._load_resuming_checkpoint(ckpt_path)

    def _setup_timers(self):
        """Record the wall-clock start time and reset the resumed-time offset."""
        # ckpt_time_elapsed is added to the wall clock when resuming from a checkpoint.
        self.ckpt_time_elapsed = 0
        self.start_time = time.time()


    def _get_meters(self, phase_filters=None):
        """Flatten self.meters into a {"<phase>_<key>/<name>": meter} dict,
        optionally restricted to the phases listed in phase_filters."""
        if self.meters is None:
            return {}
        return {
            f"{phase}_{key}/{name}": meter
            for phase, phase_meters in self.meters.items()
            if phase_filters is None or phase in phase_filters
            for key, key_meters in phase_meters.items()
            for name, meter in key_meters.items()
        }


    def _setup_env_variables(self, env_variables_conf) -> None:
        """Export the configured environment variables into os.environ.

        Args:
            env_variables_conf: optional mapping of variable name -> value.
                Values are coerced to str because os.environ only accepts
                strings (config files often carry ints/bools here).
        """
        if env_variables_conf is not None:
            for variable_name, value in env_variables_conf.items():
                os.environ[variable_name] = str(value)
        # NOTE(review): this prints the entire environment (which may include
        # credentials) to stdout — confirm that is intended for these logs.
        print(f"Environment:\n{json.dumps(dict(os.environ), sort_keys=True, indent=2)}")

    def _setup_cuda_backend(self, cuda_conf) -> None:
        """Configure cuDNN/TF32 backend flags from the cuda config (no-op without CUDA)."""
        self.rank = 0  # single-process setup: always rank 0
        if not torch.cuda.is_available():
            return
        torch.backends.cudnn.deterministic = cuda_conf.cudnn_deterministic
        torch.backends.cudnn.benchmark = cuda_conf.cudnn_benchmark
        # TF32 is toggled together for matmul and cuDNN convolutions.
        torch.backends.cuda.matmul.allow_tf32 = cuda_conf.allow_tf32
        torch.backends.cudnn.allow_tf32 = cuda_conf.allow_tf32

    @staticmethod
    def _update_ckpt_keys_revised(ckpt, heads_to_keep=None, heads_to_discard=None, default_keep=True):
        """Selectively keep, discard, and rename keys of a checkpoint state_dict.

        Args:
            ckpt: mapping of parameter name -> tensor (or any value).
            heads_to_keep: list of (old_prefix, new_prefix) rename rules; the
                first matching rule wins, and an old_prefix of "*" prepends
                new_prefix to every key.
            heads_to_discard: list of key prefixes to drop entirely.
            default_keep: whether keys matched by no rule are kept unchanged.

        Returns:
            A new dict with the filtered/renamed entries.
        """
        if heads_to_keep is None:
            heads_to_keep = []
        if heads_to_discard is None:
            heads_to_discard = []

        filtered = {}
        for key, value in ckpt.items():
            # Discard rules take precedence over everything else.
            if any(key.startswith(prefix) for prefix in heads_to_discard):
                continue

            renamed = None
            for old_prefix, new_prefix in heads_to_keep:
                if old_prefix == "*":
                    # Wildcard rule: prefix every key with new_prefix.
                    renamed = f"{new_prefix}{key}"
                    break
                if key.startswith(old_prefix):
                    # Rename only the first occurrence of the prefix.
                    renamed = key.replace(old_prefix, new_prefix, 1)
                    break

            if renamed is not None:
                filtered[renamed] = value
            elif default_keep:
                filtered[key] = value

        return filtered

    def _load_resuming_checkpoint(self, ckpt_path: str):
        """Resume model weights and (best-effort) training state from a checkpoint.

        Model weights are loaded with strictness from checkpoint_conf.strict,
        optionally filtered/renamed via checkpoint_conf.filter_keys. Optimizer,
        epoch, step counters, elapsed time, and AMP scaler state are restored
        best-effort: a failure there is logged but does not abort the resume.
        """
        logging.info(f"Resuming training from {ckpt_path}")

        # Load on CPU first; the model is moved to the target device elsewhere.
        with g_pathmgr.open(ckpt_path, "rb") as f:
            checkpoint = torch.load(f, map_location="cpu")

        # Support both wrapped ({"model": state_dict, ...}) and bare state dicts.
        model_state_dict = checkpoint["model"] if "model" in checkpoint else checkpoint

        if self.checkpoint_conf.get("filter_keys") and self.checkpoint_conf.filter_keys.get("enabled"):
            filter_conf = self.checkpoint_conf.filter_keys
            logging.info("Filtering checkpoint keys before loading.")
            model_state_dict = self._update_ckpt_keys_revised(
                model_state_dict,
                heads_to_keep=filter_conf.get("heads_to_keep"),
                heads_to_discard=filter_conf.get("heads_to_discard"),
                default_keep=filter_conf.get("default_keep", True),
            )

        missing_keys, unexpected_keys = self.model.load_state_dict(
            model_state_dict, strict=self.checkpoint_conf.strict
        )

        if missing_keys:
            logging.warning(f"Missing keys when loading model state dict: {missing_keys}")
        else:
            logging.info(f"No missing keys when loading model state dict")

        if unexpected_keys:
            logging.warning(f"Unexpected keys when loading model state dict: {unexpected_keys}")
        else:
            logging.info(f"No unexpected keys when loading model state dict")

        logging.info(f"Loading the optimizer state dict")
        try:
            if "optimizer" in checkpoint:
                # NOTE(review): save_checkpoint stores a list (or a single dict when
                # there is one optimizer); confirm self.optims.optimizer matches that.
                self.optims.optimizer.load_state_dict(checkpoint["optimizer"])

            if "epoch" in checkpoint:
                self.epoch = checkpoint["epoch"]

            self.steps = checkpoint["steps"] if "steps" in checkpoint else {"train": 0, "val": 0}
            self.ckpt_time_elapsed = checkpoint.get("time_elapsed", 0)

            if self.optim_conf.amp.enabled and "scaler" in checkpoint:
                self.scaler.load_state_dict(checkpoint["scaler"])
        except Exception as e:
            # Previously a bare `except: pass`, which silently swallowed every
            # error (including KeyboardInterrupt). Keep the best-effort behavior
            # but surface the failure in the logs.
            logging.warning(f"Failed to fully restore training state from checkpoint: {e}")


    def _setup_device(self, device):
        """Bind self.device to the CPU or the local CUDA device (local rank 0).

        Raises:
            ValueError: if `device` is neither "cuda" nor "cpu".
        """
        self.local_rank = 0
        if device == "cpu":
            self.device = torch.device("cpu")
        elif device == "cuda":
            self.device = torch.device("cuda", self.local_rank)
            torch.cuda.set_device(self.local_rank)
        else:
            raise ValueError(f"Unsupported device: {device}")


    def _setup_components(self):
        """Instantiate the TB writer, model (with optional freezing), loss,
        AMP grad scaler, and gradient clipper; optimizers are built later."""
        logging.info("Setting up components: Model, loss, optim, meters etc.")
        self.epoch = 0
        # Per-phase global step counters, incremented during training/validation.
        self.steps = {'train': 0, 'val': 0}
        self.meters = None

        self.tb_writer = instantiate(self.logging_conf.tensorboard_writer, _recursive_=False)
        self.model = instantiate(self.model_conf, _recursive_=False)
        # Freeze configured module-name patterns now, before the optimizers are
        # constructed in __init__.
        if getattr(self.optim_conf, "frozen_module_names", None):
            logging.info(
                f"[Start] Freezing modules: {self.optim_conf.frozen_module_names}"
            )
            self.model = freeze_modules(
                self.model,
                patterns=self.optim_conf.frozen_module_names,
            )
            logging.info(
                f"[Done] Freezing modules: {self.optim_conf.frozen_module_names}"
            )

        # Dump a human-readable model summary next to the logs.
        model_summary_path = os.path.join(self.logging_conf.log_dir, "model.txt")
        model_summary(self.model, log_file=model_summary_path)
        logging.info(f"Model summary saved to {model_summary_path}")

        # TODO: Remind myself to finish this
        # Clean the dirty loss and build a single object
        self.loss = instantiate(self.loss_conf, _recursive_=False)


        # Use standard Gradient Scaler for DDP; it is a no-op when AMP is disabled.
        self.scaler = torch.cuda.amp.GradScaler(enabled=self.optim_conf.amp.enabled)
        self.gradient_clipper = instantiate(self.optim_conf.gradient_clip)

        logging.info("Successfully initialized all training components: model, loss function, optimizer, and etc.")



    def _setup_dataloaders(self):
        """Instantiate the data module and build the loaders required by self.mode."""
        self.train_loader = None
        self.val_loader = None

        data_module = instantiate(self.data_conf.data_module, _recursive_=False)
        data_module.seed = self.seed_value

        # Validation runs in both "train" and "val" modes; training only in "train".
        if self.mode in ("train", "val"):
            self.val_loader = data_module.val_dataloader()
        if self.mode == "train":
            self.train_loader = data_module.train_dataloader()


    def _move_to_device(self):
        """Move the model, loss, and scaler onto self.device and point the
        meters' sync target at it."""
        print(f"Moving components to device {self.device}.")

        self.model.to(self.device)
        if self.loss:
            copy_data_to_device(self.loss, self.device)
        if self.scaler:
            copy_data_to_device(self.scaler, self.device)

        # Meters stay where they are; they only need to know the sync device.
        for meter in self._get_meters().values():
            meter.set_sync_device(self.device)

        print(f"Done moving components to device {self.device}.")

    def save_checkpoint(self, epoch, checkpoint_names=None):
        """Save model/optimizer/scaler/progress state for `epoch`.

        Args:
            epoch: epoch index just completed; saving only happens when
                (epoch + 1) is a multiple of checkpoint_conf.save_freq (> 0).
            checkpoint_names: optional list of checkpoint base names; defaults
                to ["checkpoint"]. (Bug fix: this argument was previously
                overwritten unconditionally and thus silently ignored.)
        """
        checkpoint_folder = self.checkpoint_conf.save_dir
        safe_makedirs(checkpoint_folder)
        if checkpoint_names is None:
            checkpoint_names = ["checkpoint"]
        # Respect the configured save frequency; skip all other epochs.
        if not (self.checkpoint_conf.save_freq > 0 and int(epoch + 1) % self.checkpoint_conf.save_freq == 0):
            return

        checkpoint_content = {
            "prev_epoch": epoch,
            "steps": self.steps,
            "time_elapsed": self.time_elapsed_meter.val,
            "optimizer": [optim.optimizer.state_dict() for optim in self.optims],
        }

        # A single optimizer is stored as a bare dict for backward compatibility.
        if len(self.optims) == 1:
            checkpoint_content["optimizer"] = checkpoint_content["optimizer"][0]
        if self.optim_conf.amp.enabled:
            checkpoint_content["scaler"] = self.scaler.state_dict()

        # Save the checkpoint for DDP only
        saver = DDPCheckpointSaver(
            checkpoint_folder,
            checkpoint_names=checkpoint_names,
            rank=0,
            epoch=epoch,
        )

        saver.save_checkpoint(
            model=self.model,
            ema_models=None,
            skip_saving_parameters=[],
            **checkpoint_content,
        )



    def _get_train_dataset_checkpoint_state(self):
        """Return None: dataloader checkpoint state is not tracked in this setup."""
        return None


    def _get_scalar_log_keys(self, phase):
        """Return the configured scalar keys to log for `phase` (empty if unset)."""
        keys_conf = self.logging_conf.scalar_keys_to_log
        if keys_conf is None:
            return []
        return keys_conf[phase].keys_to_log



    def _init_model_initializer(self):
        # Instantiate the (possibly None) pretrained-weight initializer from config.
        return instantiate(self.checkpoint_conf.model_weight_initializer)

    def _call_model_initializer(self, model_weight_initializer):
        """Apply the pretrained-weight initializer to self.model, if configured."""
        if model_weight_initializer is None:
            return
        logging.info(
            f"Loading pretrained checkpoint from {self.checkpoint_conf.model_weight_initializer}"
        )
        self.model = model_weight_initializer(model=self.model)

    def is_intermediate_val_epoch(self, epoch):
        """True when `epoch` hits the validation frequency and is not the final epoch."""
        hits_val_freq = epoch % self.val_epoch_freq == 0
        return hits_val_freq and epoch < self.max_epochs - 1

    def run(self):
        """Entry point: train (then validate) or validate only, per self.mode.

        Raises:
            ValueError: if self.mode is not "train" or "val".
        """
        mode = self.mode
        if mode == "train":
            self.run_train()
            self.run_val()
        elif mode == "val":
            self.run_val()
        else:
            # Previously guarded by an `assert`, which both duplicated this check
            # and disappears under `python -O`; the raise is the single check now.
            raise ValueError(f"Invalid mode: {mode}")

        print(f"log_dir: {self.logging_conf.log_dir}")

    def _log_epoch_metrics_to_csv(self, phase, metrics):
        """Write one row of epoch metrics to the CSV logger (no-op when disabled)."""
        if not self.csv_logger:
            return

        row = {'epoch': self.epoch, 'phase': phase}
        # Meters are reduced to their running average; plain scalars pass through.
        row.update({k: (v.avg if hasattr(v, 'avg') else v) for k, v in metrics.items()})

        self.csv_logger.log(row, val=(phase == 'val'))
    
    def end_warmup(self):
        """Unfreeze the whole model once exactly warmup_epochs epochs have passed."""
        if self.epoch != self.optim_conf.warmup_epochs:
            return
        unfreeze(self.model, True)

    def run_train(self):
        """Main training loop: one train_epoch per epoch, with checkpointing
        and CUDA memory cleanup between epochs."""
        while self.epoch < self.max_epochs:
            self.end_warmup()
            # Re-seed per epoch so each epoch is distinct yet reproducible.
            set_seeds(self.seed_value + self.epoch * 100, self.max_epochs, 0)

            self.train_epoch(self.train_loader)

            # Checkpoint before validation so a failed eval does not lose the epoch.
            self.save_checkpoint(self.epoch)

            gc.collect()
            torch.cuda.empty_cache()
            torch.cuda.reset_peak_memory_stats()

            self.epoch += 1

        # Leave self.epoch pointing at the last epoch actually trained.
        self.epoch -= 1

    @torch.no_grad()
    def _dump_model_stats_for_tests(self):
        """Append a one-line JSON summary (epoch, rank, parameter sum) used by
        unit tests to detect divergence between runs."""
        logging.info("Dumping stats of the trained model")
        param_sum = sum(p.sum() for p in self.model.parameters()).item()
        stats = {
            "epoch": self.epoch,
            "rank": 0,
            "model": param_sum,
        }
        stats_path = os.path.join(
            self.logging_conf.log_dir,
            "unit_tests_model_stats.json",
        )
        with g_pathmgr.open(stats_path, "a") as f:
            f.write(json.dumps(stats) + "\n")

    def run_val(self):
        """Run one validation epoch and log results to TensorBoard, CSV, and JSON."""
        if not self.val_loader:
            return

        # CombinedLoader has no real "fresh epoch" notion; always treat it as fresh.
        outs = self.val_epoch(self.val_loader, is_fresh_epoch=True)
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()

        self.tb_writer.log_dict(outs, self.epoch)  # Logged only on rank 0

        # Only plain numeric scalars go to the CSV file.
        numeric_outs = {k: v for k, v in outs.items() if isinstance(v, (int, float))}
        self._log_epoch_metrics_to_csv("val", numeric_outs)

        stats_path = os.path.join(self.logging_conf.log_dir, "val_stats.json")
        with g_pathmgr.open(stats_path, "a") as f:
            f.write(json.dumps(outs) + "\n")

    def val_epoch(self, val_loader, is_fresh_epoch: bool):
        """Run validation over every dataloader in `val_loader` and return a flat
        dict of per-dataset averaged metrics plus trainer-state scalars.

        Args:
            val_loader: iterable of dataloaders (one per validation dataset).
            is_fresh_epoch: currently unused here; kept for interface parity.
        """
        curr_phases = ['val']
        curr_models = [self.model]
        phase = curr_phases[0]

        for model in curr_models:
            model.eval()
            if hasattr(model, "on_validation_epoch_start"):
                model.on_validation_epoch_start()

        all_metrics = {}

        # One pass per validation dataloader; meters are rebuilt for each.
        for dl_idx, current_val_loader in enumerate(val_loader):
            batch_time = AverageMeter("Batch Time", self.device, ":.4f")
            data_time = AverageMeter("Data Time", self.device, ":.4f")
            mem = AverageMeter("Mem (GB)", self.device, ":.4f")

            iters_per_epoch = len(current_val_loader)

            loss_names = ["objective"] + self._get_scalar_log_keys(phase)
            loss_names = [f"{phase}_{name}" for name in loss_names]

            loss_meters = {
                name: AverageMeter(name, self.device, ":.4f") for name in loss_names
            }

            progress = ProgressMeter(
                iters_per_epoch,
                [batch_time, data_time, mem,
                self.time_elapsed_meter,
                *loss_meters.values(),],
                self._get_meters(curr_phases),
                prefix=f"Val Epoch: [{self.epoch}]",
            )

            end = time.time()

            # NOTE(review): computed but the early-break below is commented out,
            # so limit_val_batches currently has no effect.
            limit_val_batches = (
                iters_per_epoch
                if self.limit_val_batches is None
                else self.limit_val_batches
            )
            dataset_name = None

            for data_iter, batch in enumerate(current_val_loader):
                # if data_iter > limit_val_batches:
                #     break

                data_time.update(time.time() - end)
                # Name of the dataset backing this loader, read from the first
                # batch; presumably batch['dataset'] is a nested list of names
                # — TODO confirm against the data module.
                if dataset_name is None:
                    dataset_name = batch['dataset'][0][0]
                    progress.prefix = f"Val Epoch: [{self.epoch}] ({dataset_name})"

                #     with torch.amp.autocast(device_type='cuda', enabled=False):
                #         batch = self._process_batch(batch)
                batch = copy_data_to_device(batch, self.device)

                with torch.no_grad():
                    with torch.amp.autocast(
                        device_type='cuda',
                        enabled=self.optim_conf.amp.enabled,
                        dtype=self.amp_type,
                    ):
                        # NOTE: this loop variable shadows the outer `phase`;
                        # harmless while there is a single 'val' phase.
                        for phase, model in zip(curr_phases, curr_models):
                            self._val_step(
                                batch,
                                model,
                                phase,
                                loss_meters,
                            )

                batch_time.update(time.time() - end)
                end = time.time()

                self.time_elapsed_meter.update(
                    time.time() - self.start_time + self.ckpt_time_elapsed
                )

                if torch.cuda.is_available():
                    mem.update(torch.cuda.max_memory_allocated() // 1e9)

                if data_iter % self.logging_conf.log_freq == 0:
                    progress.display(data_iter)

            # Collect metrics for this dataloader, keyed as
            # "<phase>_<dataset>_<metric>" with the phase prefix stripped from
            # the meter name.
            for k, v in loss_meters.items():
                metric_name = k.split('/')[-1].replace(f"{phase}_", "")
                all_metrics[f"{phase}_{dataset_name}_{metric_name}"] = v.avg

            self._reset_meters(curr_phases)

        for model in curr_models:
            if hasattr(model, "on_validation_epoch_end"):
                model.on_validation_epoch_end()

        # Append trainer progress scalars (where/epoch/steps) for logging.
        for phase in curr_phases:
            all_metrics.update(self._get_trainer_state(phase))

        logging.info(f"Meters: {all_metrics}")
        return all_metrics

    def _get_trainer_state(self, phase):
        """Scalar trainer-progress entries logged alongside the metric meters."""
        state = {
            "Trainer/where": self.where,
            "Trainer/epoch": self.epoch,
        }
        state[f"Trainer/steps_{phase}"] = self.steps[phase]
        return state



    def train_epoch(self, train_loader):
        """Run one full training epoch over `train_loader`.

        Per iteration: move the batch to device, forward/backward over gradient-
        accumulation chunks, step the LR schedulers by training progress, clip
        gradients, and take the (AMP-scaled) optimizer step. Returns True.
        """
        batch_time = AverageMeter("Batch Time", self.device, ":.4f")
        data_time = AverageMeter("Data Time", self.device, ":.4f")
        mem = AverageMeter("Mem (GB)", self.device, ":.4f")
        data_times = []
        phase = 'train'

        loss_names = self._get_scalar_log_keys(phase)
        loss_names = [f"{phase}_{name}" for name in loss_names]
        loss_meters = {
            name: AverageMeter(name, self.device, ":.4f") for name in loss_names
        }

        # One gradient-norm meter per clipper config, keyed by its module names.
        for config in self.gradient_clipper.configs:
            param_names = ",".join(config['module_names'])
            loss_meters[f"Grad/{param_names}"] = AverageMeter(f"Grad/{param_names}", self.device, ":.4f")


        progress = ProgressMeter(
            num_batches=len(train_loader),
            meters=[
                batch_time,
                data_time,
                mem,
                self.time_elapsed_meter,
                *loss_meters.values(),
            ],
            real_meters={},
            prefix="Train Epoch: [{}]".format(self.epoch),
        )

        self.model.train()
        end = time.time()

        iters_per_epoch = int(len(train_loader))
        # NOTE(review): the early-break below is commented out, so this value
        # only affects the `where` progress computation, not the batch count.
        limit_train_batches = (
            iters_per_epoch
            if self.limit_train_batches is None
            else self.limit_train_batches
        )

        if self.gradient_clipper is not None:
            # setup gradient clipping at the beginning of training
            self.gradient_clipper.setup_clipping(self.model)

        for data_iter, batch in enumerate(train_loader):
            # if data_iter > limit_train_batches:
            #     break

            # measure data loading time
            data_time.update(time.time() - end)
            data_times.append(data_time.val)

            #     with torch.cuda.amp.autocast(enabled=False):
            #         batch = self._process_batch(batch)

            batch = copy_data_to_device(batch, self.device, non_blocking=True)

            accum_steps = self.accum_steps

            # Split the batch for gradient accumulation when accum_steps > 1.
            if accum_steps==1:
                chunked_batches = [batch]
            else:
                chunked_batches = chunk_batch_for_accum_steps(batch, accum_steps)

            # Forward/backward over all chunks; gradients accumulate in-place.
            self._run_steps_on_batch_chunks(
                chunked_batches, phase, loss_meters
            )

            # compute gradient and do SGD step
            # assert data_iter <= limit_train_batches  # allow for off by one errors
            # `where` is the fraction of total training completed, in [0, 1];
            # it drives the per-iteration scheduler updates below.
            exact_epoch = self.epoch + float(data_iter) / limit_train_batches
            self.where = float(exact_epoch) / self.max_epochs

            assert self.where <= 1 + self.EPSILON
            if self.where < 1.0:
                for optim in self.optims:
                    optim.step_schedulers(self.where)
            else:
                logging.warning(
                    f"Skipping scheduler update since the training is at the end, i.e, {self.where} of [0,1]."
                )

            # Log schedulers
            if self.steps[phase] % self.logging_conf.log_freq == 0:
                for i, optim in enumerate(self.optims):
                    for j, param_group in enumerate(optim.optimizer.param_groups):
                        for option in optim.schedulers[j]:
                            # Prefix with the optimizer index (multiple optims)
                            # or the param-group index (multiple groups).
                            optim_prefix = (
                                f"{i}_"
                                if len(self.optims) > 1
                                else (
                                    "" + f"{j}_"
                                    if len(optim.optimizer.param_groups) > 1
                                    else ""
                                )
                            )
                            self.tb_writer.log(
                                os.path.join("Optim", f"{optim_prefix}", option),
                                param_group[option],
                                self.steps[phase],
                            )
                self.tb_writer.log(
                    os.path.join("Optim", "where"),
                    self.where,
                    self.steps[phase],
                )

            # Clipping gradients and detecting diverging gradients
            if self.gradient_clipper is not None:
                # Unscale first so clipping operates on true gradient magnitudes.
                for optim in self.optims:
                    self.scaler.unscale_(optim.optimizer)

                grad_norm_dict = self.gradient_clipper(model=self.model)

                for key, grad_norm in grad_norm_dict.items():
                    loss_meters[f"Grad/{key}"].update(grad_norm)

            # Optimizer step (scaler skips it if gradients are inf/nan).
            for optim in self.optims:
                self.scaler.step(optim.optimizer)
            self.scaler.update()

            # Measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            self.time_elapsed_meter.update(
                time.time() - self.start_time + self.ckpt_time_elapsed
            )
            mem.update(torch.cuda.max_memory_allocated() // 1e9)

            if data_iter % self.logging_conf.log_freq == 0:
                progress.display(data_iter)

        # Log metrics to CSV
        self._log_epoch_metrics_to_csv("train", loss_meters)

        return True



    def _run_steps_on_batch_chunks(
        self,
        chunked_batches: List[Any],
        phase: str,
        loss_meters: Dict[str, AverageMeter],
    ):
        """Forward/backward once per chunk, accumulating gradients across chunks.

        Gradients are zeroed once up front; each chunk's loss is divided by the
        number of chunks so the accumulated gradient matches a full-batch step.
        On a non-finite loss the remaining chunks are abandoned.
        """
        for optim in self.optims:
            optim.zero_grad(set_to_none=True)

        num_chunks = len(chunked_batches)

        for chunk in chunked_batches:
            with torch.amp.autocast(
                device_type='cuda',
                enabled=self.optim_conf.amp.enabled, dtype=self.amp_type
            ):
                loss_dict = self._step(chunk, self.model, phase, loss_meters)

            objective = loss_dict["objective"]
            chunk_size = chunk["img"].shape[0]

            if not math.isfinite(objective.item()):
                logging.error(f"Loss is {objective.item()}, attempting to stop training")
                return

            # Normalize for accumulation before backprop through the AMP scaler.
            objective /= num_chunks
            self.scaler.scale(objective).backward()
            loss_meters[f"{phase}_loss_objective"].update(objective.item(), chunk_size)



    def _reset_meters(self, phases: str) -> None:
        for meter in self._get_meters(phases).values():
            meter.reset()



    def _apply_batch_repetition(self, batch: Mapping) -> Mapping:
        tensor_keys = [
            "img", "depthmap", "camera_pose", "intrinsics", 
            "pts3d_cam", "pts3d", "valid_mask", 
        ]        
        string_keys = ["seq_name"]
        
        for key in tensor_keys:
            if key in batch:
                original_tensor = batch[key]
                batch[key] = torch.concatenate([original_tensor, 
                                                torch.flip(original_tensor, dims=[1])], 
                                                dim=0)
        
        for key in string_keys:
            if key in batch:
                batch[key] = batch[key] + batch[key]
        
        return batch


    # def _process_batch(self, batch: Mapping):      
    #     # if self.data_conf.train.common_config.repeat_batch:
    #     #     batch = self._apply_batch_repetition(batch)
        
    #     # Normalize camera extrinsics and points. The function returns new tensors.
    #     normalized_extrinsics, normalized_cam_points, normalized_world_points, normalized_depths = \
    #         normalize_camera_extrinsics_and_points_batch(
    #             extrinsics=batch["camera_pose"],
    #             cam_points=batch["pts3d_cam"],
    #             world_points=batch["pts3d"],
    #             depths=batch["depthmap"],
    #             point_masks=batch["valid_mask"],
    #         )

    #     # Replace the original values in the batch with the normalized ones.
    #     batch["camera_pose"] = normalized_extrinsics
    #     batch["pts3d_cam"] = normalized_cam_points
    #     batch["pts3d"] = normalized_world_points
    #     batch["depthmap"] = normalized_depths

        # return batch
    def _postprocess(self, batch, pred, pp_conf, pred_data_keys):
        """Apply the configured post-processing pipeline to GT and predictions.

        Mutates both ``batch`` (ground truth) and ``pred`` (model outputs) in
        place, with each stage gated by flags in ``pp_conf``. Stages run in
        order: pose decoding, camera/point alignment, world-space point
        reconstruction, scale normalization, and pred-to-GT alignment.
        ``pred_data_keys`` names entries of ``pred``;
        ``self.data_conf.data_keys`` names entries of ``batch``.

        Args:
            batch: ground-truth mapping (images, extrinsics, points, masks, ...).
            pred: prediction mapping; derived entries are added to it.
            pp_conf: post-processing config section (e.g. train or val).
            pred_data_keys: attribute-style key container for ``pred``.
        """
        data_keys = self.data_conf.data_keys 
        # Decode the predicted pose encoding into extrinsics and intrinsics.
        if pp_conf.get('to_extri'):
           # The image tensor `batch['img']` has a shape of (B, S, C, H, W).
           # We extract the last two dimensions (Height, Width) for the image size.
           image_size_hw = batch['img'].shape[-2:]
           pred[pred_data_keys.extrinsics], pred[pred_data_keys.intrinsics]= pose_encoding_to_extri_intri(pred["pose_enc"], image_size_hw=image_size_hw, build_intrinsics=True)

        # Re-express GT cameras (and optionally GT points) relative to the first camera.
        if pp_conf.align.get('to_first_cam', {}).get('enabled'):
            with torch.no_grad():
                if pp_conf.align.to_first_cam.get("points"):
                    batch[data_keys.extrinsics], batch[data_keys.world_points] = align_camera_and_points_batch_ext(batch[data_keys.extrinsics], batch[data_keys.world_points])
                else:
                    batch[data_keys.extrinsics], _ = align_camera_and_points_batch_ext(batch[data_keys.extrinsics])

        # The same alignment applied to the predictions (gradient-free).
        if pp_conf.align.get('pr_align_cam', {}).get('enabled') and pred[pred_data_keys.extrinsics] is not None:
            with torch.no_grad():
                if pp_conf.align.pr_align_cam.points:
                    pred[pred_data_keys.extrinsics], pred[pred_data_keys.world_points] = align_camera_and_points_batch_ext(pred[pred_data_keys.extrinsics], pred[pred_data_keys.world_points])
                else:
                    pred[pred_data_keys.extrinsics], _ = align_camera_and_points_batch_ext(poses=pred[pred_data_keys.extrinsics])

        # Re-center predictions using the mean pred-world-to-GT-world transform.
        if pp_conf.align.get('pred_center', {}).get('enabled') and pred[pred_data_keys.extrinsics] is not None:
            aligned_to_center_key = pred_data_keys.get("global_aligned_to_center", "global_aligned_to_center")
            with torch.no_grad():
                pred_to_gt_transform = get_pred_world_to_gt_world_transforms(batch[data_keys.extrinsics], pred[pred_data_keys.extrinsics])
                mean_pose_in_old_world, old_world_to_mean_pose, _ = center_c2w_poses_batch(c2w_poses=pred_to_gt_transform, return_poses=False)
                pred[pred_data_keys.extrinsics], pred[aligned_to_center_key] = align_camera_and_points_batch_ext(pred[pred_data_keys.extrinsics], pred[pred_data_keys.world_points], mean_pose_in_old_world.detach())

        # Re-center the GT world frame around the mean GT camera pose.
        # NOTE(review): only `batch` is transformed here; the matching `pred`
        # transform is commented out below — confirm this asymmetry is intended.
        if pp_conf.align.get('center_world', {}).get('enabled') and pred[pred_data_keys.extrinsics] is not None:
            center_world_key = pred_data_keys.get("global_to_center_world", "global_to_center_world")
            with torch.no_grad():
                mean_pose_in_old_world, old_world_to_mean_pose, _ = center_c2w_poses_batch(c2w_poses=batch[data_keys.extrinsics], return_poses=True)
                batch[data_keys.extrinsics], batch[data_keys.world_points] = align_camera_and_points_batch_ext(batch[data_keys.extrinsics], batch[data_keys.world_points], old_world_to_mean_pose)
                # pred[pred_data_keys.extrinsics], pred[center_world_key] = align_camera_and_points_batch_ext(pred[pred_data_keys.extrinsics], pred[pred_data_keys.world_points], mean_pose_in_old_world.detach())

        # World points from predicted extrinsics + GT camera-frame points.
        if pp_conf.transform.get('global_from_cam') and pred[pred_data_keys.extrinsics] is not None:
            # Detach pts3d_cam to prevent exploding gradients.
            # The gradient for the camera pose is proportional to the magnitude of the points.
            # Detaching breaks this unstable feedback loop.
            # NOTE(review): GT cam points are used here; the commented lines
            # below are earlier variants using predicted cam points.
            pred[pred_data_keys.global_from_cam] = global_points_from_cam(pred[pred_data_keys.extrinsics], batch[data_keys.pts3d_cam])
            # pred[pred_data_keys.global_from_cam] = global_points_from_cam(pred[pred_data_keys.extrinsics], pred[pred_data_keys.pts3d_cam])
            # pred[pred_data_keys.global_from_cam] = global_points_from_cam(pred[pred_data_keys.extrinsics], pred[pred_data_keys.pts3d_cam].detach())

        # World points from GT extrinsics + predicted camera-frame points.
        if pp_conf.transform.get('global_from_cam_detach_pose') and pred[pred_data_keys.extrinsics] is not None:
            pred[pred_data_keys.global_from_cam_detach_pose] = global_points_from_cam(batch[data_keys.extrinsics], pred[pred_data_keys.pts3d_cam])

        # Unproject (detached) predicted depths to camera points, optionally to world.
        if (pp_conf.transform.get('cam_from_depth') or pp_conf.transform.get('global_from_depth')) and pred[pred_data_keys.intrinsics] is not None:
            pred[pred_data_keys.cam_from_depth] = cam_points_from_depth(pred[pred_data_keys.intrinsics], pred[pred_data_keys.depths].detach())

            if pp_conf.transform.global_from_depth  and pred[pred_data_keys.extrinsics] is not None:
                pred[pred_data_keys.global_from_depth] = global_points_from_cam(pred[pred_data_keys.extrinsics], pred[pred_data_keys.cam_from_depth].detach())

        # Normalize GT point-cloud scale and propagate it to depths/cam points/extrinsics.
        if pp_conf.normalize.get('gt_pts'):
            batch[data_keys.world_points], gt_avg_scale = normalize_pointcloud_vggt(batch[data_keys.world_points], batch[data_keys.valid_mask])
            # print(f"Normalize with scale {avg_scale}")

            batch[data_keys.depths], batch[data_keys.pts3d_cam], batch[data_keys.extrinsics] = normalize_depth_cam_extrinsics(gt_avg_scale, batch[data_keys.depths], batch[data_keys.pts3d_cam], batch[data_keys.extrinsics])

            # Optionally normalize predictions with the GT (metric) scale or their own.
            if pp_conf.normalize.pr_pts.get('enabled'):
                if pp_conf.normalize.pr_pts.metric:
                    pred[pred_data_keys.world_points], norm_factor_pr = normalize_pr_pointcloud(pred[pred_data_keys.world_points], gt_avg_scale, batch[data_keys.valid_mask], not_metric_mask=None)
                else:
                    pred[pred_data_keys.world_points], norm_factor_pr = normalize_pointcloud_vggt(pred[pred_data_keys.world_points], batch[data_keys.valid_mask])

                pred["depth"], _, pred[pred_data_keys.extrinsics] = normalize_depth_cam_extrinsics(norm_factor_pr, pred["depth"], None, pred[pred_data_keys.extrinsics])


        # Rigid (optionally scaled) alignment of predicted world points to GT, gradient-free.
        pts_align_conf = pp_conf.align.get("pts_align_to_gt", {})
        if pts_align_conf.get("enabled"):
            with torch.no_grad():
                # aligned_global_from_cam_key = pred_data_keys.get("aligned_global_from_cam", "aligned_global_from_cam")
                # if pred_data_keys.global_from_cam in pred:
                #     pred[aligned_global_from_cam_key], transform_params = align_pred_to_gt_torch_batch(pred[pred_data_keys.global_from_cam], batch[data_keys.world_points], batch[data_keys.valid_mask], pred["cam_points_conf"], conf_percentage=pp_conf.align.pts_align_to_gt.conf_percentage, with_scale=pp_conf.align.pts_align_to_gt.with_scale)

                # aligned_global_from_depth_key = pred_data_keys.get("aligned_global_from_depth", "aligned_global_from_depth")
                # if pred_data_keys.global_from_depth in pred:
                #     pred[aligned_global_from_depth_key], transform_params = align_pred_to_gt_torch_batch(pred[pred_data_keys.global_from_depth], batch[data_keys.world_points], batch[data_keys.valid_mask], pred["depth_conf"], conf_percentage=pp_conf.align.pts_align_to_gt.conf_percentage, with_scale=pp_conf.align.pts_align_to_gt.with_scale)

                aligned_world_points_key = pred_data_keys.get("aligned_world_points", "aligned_world_points")
                if pred_data_keys.world_points in pred:                
                    pred[aligned_world_points_key], transform_params = align_pred_to_gt_torch_batch_roma(pred[pred_data_keys.world_points], batch[data_keys.world_points], batch[data_keys.valid_mask], pred["world_points_conf"], conf_percentage=pp_conf.align.pts_align_to_gt.conf_percentage, with_scale=pp_conf.align.pts_align_to_gt.with_scale)
                    if pp_conf.align.pts_align_to_gt.align_pose:
                        # Apply the same rigid transform to the predicted extrinsics.
                        with_scale = pp_conf.align.pts_align_to_gt.normalize_pose and pp_conf.align.pts_align_to_gt.with_scale
                        pred[pred_data_keys.extrinsics] = align_extrinsics_torch(extrinsics=pred[pred_data_keys.extrinsics], transform_params=transform_params, with_scale=with_scale)

                    if pts_align_conf.get("normalize_pose"):
                        _, _, pred[pred_data_keys.extrinsics] = normalize_depth_cam_extrinsics(extrinsics=pred[pred_data_keys.extrinsics],  norm_factor=(1/transform_params['scale']))
                    if pts_align_conf.get("normalize_depth"):
                        pred["depth"], _, _ = normalize_depth_cam_extrinsics(norm_factor=(1/transform_params['scale']), depths=pred["depth"])

        # Median-scale predicted depths to GT; optionally rescale extrinsics to match.
        if pp_conf.align.get('depth_align_to_gt', {}).get('enabled'):
            pred["depth"], batch_median_pred, batch_median_gt = median_scale_depth_torch_batch(pred["depth"], batch[data_keys.depths], batch[data_keys.valid_mask], pred["depth_conf"], conf_percentage=pp_conf.align.depth_align_to_gt.conf_percentage)
            if pp_conf.align.depth_align_to_gt.pose:
                _, _, pred[pred_data_keys.extrinsics] = normalize_depth_cam_extrinsics(extrinsics=pred[pred_data_keys.extrinsics],  norm_factor=(batch_median_pred/batch_median_gt))


    def _step(
        self,
        batch,
        model: nn.Module,
        phase: str,
        loss_meters: dict[str, AverageMeter],
    ):
        # Forward run of the model
        y_hat = model(images = batch["img"])

        self._postprocess(batch, y_hat, self.postprocess_conf.train, self.postprocess_conf.data_keys)

        # Compute the loss
        loss_dict = self.loss(y_hat, batch, data_keys=self.data_conf.data_keys, pred_data_keys=self.postprocess_conf.data_keys)
        
        # concatenate y_hat, loss_dict and batch for visualizations
        y_hat_batch = {**y_hat, **loss_dict, **batch}

        self._update_and_log_scalars(y_hat_batch, phase, self.steps[phase], loss_meters)

        self._log_tb_visuals(y_hat_batch, phase, self.steps[phase])

        self.steps[phase] += 1

        return loss_dict


    def _val_step(
        self,
        batch,
        model: nn.Module,
        phase: str,
        loss_meters: dict[str, "AverageMeter"],
    ):
        """Run one validation forward pass, post-process, and log metrics.

        Unlike `_step`, no training loss is computed; `eval_batch` merges
        predictions with the ground truth and attaches the configured
        evaluation metrics before logging.
        """
        predictions = model(images=batch["img"])

        self._postprocess(
            batch, predictions, self.postprocess_conf.val, self.postprocess_conf.data_keys
        )

        # Merge preds + GT and compute the configured validation metrics.
        merged = eval_batch(
            y_hat=predictions,
            batch=batch,
            metrics_conf=self.postprocess_conf.val.metrics,
            data_keys=self.data_conf.data_keys,
            pred_data_keys=self.postprocess_conf.data_keys,
        )

        current_step = self.steps[phase]
        self._update_and_log_scalars(
            merged, phase, current_step, loss_meters, batch_size=batch["img"].shape[0]
        )
        self._log_tb_visuals(merged, phase, current_step)
        self.steps[phase] = current_step + 1


    def _update_and_log_scalars(
        self,
        batch: Mapping,
        phase: str,
        step: int,        
        loss_meters: dict[str, AverageMeter],
        batch_size: int = 1,
    ) -> None:
        keys_to_log = self._get_scalar_log_keys(phase)
        if "camera_pose" in batch:
            batch_size = batch["camera_pose"].shape[0]
        for key in keys_to_log:
            if key in batch:
                value = batch[key].item() if torch.is_tensor(batch[key]) else batch[key]
                loss_meters[f"{phase}_{key}"].update(value, batch_size)
                if step % self.logging_conf.log_freq == 0:
                    self.tb_writer.log(f"Values/{phase}/{key}", value, step)


    def _log_tb_visuals(self, batch: Mapping, phase: str, step: int) -> None:
        if not (
            self.logging_conf.log_visuals
            and (phase in self.logging_conf.log_visual_frequency)
            and self.logging_conf.log_visual_frequency[phase] > 0
            and (step % self.logging_conf.log_visual_frequency[phase] == 0)
            and (self.logging_conf.visuals_keys_to_log is not None)
        ):
            return

        if phase in self.logging_conf.visuals_keys_to_log:
            keys_to_log = self.logging_conf.visuals_keys_to_log[phase][
                "keys_to_log"
            ]
            assert (
                len(keys_to_log) > 0
            ), "Need to include some visual keys to log"
            modality = self.logging_conf.visuals_keys_to_log[phase][
                "modality"
            ]
            assert modality in [
                "image",
                "video",
            ], "Currently only support video or image logging"

            name = f"Visuals/{phase}"

            visuals_to_log = torchvision.utils.make_grid(
                [
                    torchvision.utils.make_grid(
                        batch[key][0],  # Ensure batch[key][0] is tensor and has at least 3 dimensions
                        nrow=self.logging_conf.visuals_per_batch_to_log,
                    )
                    for key in keys_to_log if key in batch and batch[key][0].dim() >= 3
                ],
                nrow=1,
            ).clamp(-1, 1)

            visuals_to_log = visuals_to_log.cpu()
            if visuals_to_log.dtype == torch.bfloat16:
                visuals_to_log = visuals_to_log.to(torch.float16)
            visuals_to_log = visuals_to_log.numpy()

            self.tb_writer.log_visuals(
                name, visuals_to_log, step, self.logging_conf.video_logging_fps
            )



def chunk_batch_for_accum_steps(batch, accum_steps: int):
    """Split `batch` into `accum_steps` equal chunks, one per accumulation step."""
    return [
        get_chunk_from_data(batch, chunk_id, accum_steps)
        for chunk_id in range(accum_steps)
    ]


def is_sequence_of_primitives(data):
    """Return True for a non-empty, non-string sequence whose first element is
    a primitive (str/int/float/bool).

    Only the first element is inspected; per-sample metadata lists are assumed
    homogeneous.
    """
    if isinstance(data, str) or not isinstance(data, Sequence):
        return False
    return len(data) > 0 and isinstance(data[0], (str, int, float, bool))


def get_chunk_from_data(data, chunk_id, num_chunks):
    """
    Recursively splits all the tensors inside the passed data object into num_chunks.

    Tensors and sequences of primitives are sliced along their first axis;
    mappings and generic sequences are traversed recursively; strings and any
    other objects are returned unchanged (shared across chunks). When a length
    is not divisible by num_chunks, the remainder elements are dropped.
    """
    if isinstance(data, torch.Tensor) or is_sequence_of_primitives(data):
        chunk_len = len(data) // num_chunks
        start = chunk_id * chunk_len
        return data[start:start + chunk_len]
    if isinstance(data, Mapping):
        return {
            key: get_chunk_from_data(value, chunk_id, num_chunks)
            for key, value in data.items()
        }
    if isinstance(data, str):
        # NOTE: strings (e.g. tags) are broadcast to every chunk unchanged.
        return data
    if isinstance(data, Sequence):
        return [get_chunk_from_data(item, chunk_id, num_chunks) for item in data]
    return data