# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from copy import deepcopy
import gc
import os
import sys
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import re
import roma
import torch
from pytorch_lightning import LightningModule
from pytorch_lightning.loggers.wandb import WandbLogger
from torchmetrics import MaxMetric, MeanMetric, MinMetric, SumMetric, Metric
from fast3r.dust3r.utils.geometry import inv
from fast3r.dust3r.heads.camera import pose_encoding_to_camera, camera_from_preds
from fast3r.croco.utils.misc import adjust_learning_rate2, adjust_learning_rate_by_step
from fast3r.croco.utils import misc
from fast3r.dust3r.model import FlashDUSt3R
from fast3r.models.fast3r import Fast3R
import time

from fast3r.eval.utils import align_local_pts3d_to_global, AccumulatedSum, correct_preds_orientation, estimate_camera_poses, process_poses, process_recon, align_global_pts3d_to_local, align_local_pts3d_to_global_with_cam
from fast3r.eval.utils import estimate_cam_pose_one_sample, estimate_focal, estimate_focal_knowing_depth_and_confidence_mask
from fast3r.eval.tools_cut3r import depth_evaluation
from concurrent.futures import ThreadPoolExecutor

from safetensors.torch import load_file

from fast3r.utils import pylogger
from fast3r.utils.muon import SingleDeviceMuonWithAuxAdam, MuonWithAuxAdam, muon_update

log = pylogger.RankedLogger(__name__, rank_zero_only=True)

from fast3r.dust3r.losses import MultiLoss

class MultiViewDUSt3RLitModule(LightningModule):
    def __init__(
        self,
        net: torch.nn.Module,
        compile: bool,
        train_criterion: Optional[torch.nn.Module] = None,
        validation_criterion: Optional[torch.nn.Module] = None,
        optimizer: Optional[torch.optim.Optimizer] = None,
        scheduler: torch.optim.lr_scheduler = None,
        pretrained: Optional[str] = None,
        resume_from_checkpoint: Optional[str] = None,
        eval_use_pts3d_from_local_head: bool = True,
        eval_pts3d_from_depth: bool = True,
        log_detail: bool = False,
        log_summary: bool = False,
        weight_decay: float = 0.05,
        layer_decay: float = 1.0,
        scale_keywords: List[str] = [],
        lr_scales: List[float] = [],
        pretrain_keywords: List[str] = ['encoder'],
        vggt: bool = False,
        **kargs,
    ) -> None:
        super().__init__()

        self.save_hyperparameters(logger=False, ignore=['net', 'train_criterion', 'validation_criterion'])
        self.log_detail = log_detail
        self.log_summary = log_summary
        self.pretrain_keywords = pretrain_keywords
        self.vggt = vggt
        self.accumulate_grad_batches = self.hparams.get('accumulate_grad_batches', 1.0)

        self.net = net
        self.train_criterion = train_criterion
        print(self.train_criterion)
        self.validation_criterion = validation_criterion
        self.pretrained = pretrained
        self.resume_from_checkpoint = resume_from_checkpoint
        self.eval_use_pts3d_from_local_head = eval_use_pts3d_from_local_head
        self.eval_pts3d_from_depth = eval_pts3d_from_depth
        if self.net.local_head is None:
            self.eval_use_pts3d_from_local_head = False

        # use register_buffer to save these with checkpoints
        # so that when we resume training, these bookkeeping variables are preserved
        self.register_buffer("epoch_fraction", torch.tensor(0.0, dtype=torch.float32, device=self.device))
        self.register_buffer("train_total_samples", torch.tensor(0, dtype=torch.long, device=self.device))
        self.register_buffer("train_total_images", torch.tensor(0, dtype=torch.long, device=self.device))

        self.train_total_samples_per_step = AccumulatedSum()  # these need to be reduced across GPUs, so use Metric
        self.train_total_images_per_step = AccumulatedSum()  # these need to be reduced across GPUs, so use Metric

        self.val_loss = MeanMetric()

        # Initialize metrics
        self.RRA_thresholds = [5, 15, 30] #5, 15, 30
        self.RTA_thresholds = [5, 15, 30]
        # Initialize RRA and RTA metrics as attributes
        # for tau in self.RRA_thresholds:
        #     setattr(self, f'val_RRA_{tau}', MeanMetric())
        # for tau in self.RTA_thresholds:
        #     setattr(self, f'val_RTA_{tau}', MeanMetric())

        # self.val_mAA = MeanMetric()

        # Reconstruction evaluation metrics
        self.dataset_names_with_samples_of_uneven_num_of_views = ['dtu', '7scenes', 'nrgbd']
        # self.reconstruction_metrics_per_epoch = {}  # Accumulate all reconstruction metrics by dataset and scene for the epoch
        # New dictionary to store detailed losses for datasets with uneven number of views
        self.uneven_view_detailed_losses = {}
        self.lr = 0.0
        self.metric_keys = set()

    @classmethod
    def load_for_inference(cls, net: Fast3R):
        lit_module = cls(net=net, train_criterion=None, validation_criterion=None, optimizer=None, compile=False)
        lit_module.eval()
        return lit_module

    def forward(self, views: List[Dict[str, torch.Tensor]]) -> Any:
        return self.net(views)

    def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        # Legacy: if the checkpoint does not contain the epoch_fraction, train_total_samples, and train_total_images
        # we manually add them to the checkpoint
        # if self.trainer.strategy.strategy_name != "deepseed":
        #     if checkpoint["state_dict"].get("epoch_fraction") is None:
        #         checkpoint["state_dict"]["epoch_fraction"] = self.epoch_fraction
        #     if checkpoint["state_dict"].get("train_total_samples") is None:
        #         checkpoint["state_dict"]["train_total_samples"] = self.train_total_samples
        #     if checkpoint["state_dict"].get("train_total_images") is None:
        #         checkpoint["state_dict"]["train_total_images"] = self.train_total_images
        pass

    def on_train_start(self) -> None:
        """Lightning hook that is called when training begins."""
        # by default lightning executes validation step sanity checks before training starts,
        # so it's worth to make sure validation metrics don't store results from these checks
        # self.val_loss.reset()

        # the wandb logger lives in self.loggers
        # find the wandb logger and watch the model and gradients
        for logger in self.loggers:
            if isinstance(logger, WandbLogger):
                self.wandb_logger = logger
                # log gradients, parameter histogram and model topology
                self.wandb_logger.watch(self.net, log="all", log_freq=500, log_graph=False)

    def on_train_epoch_start(self) -> None:
        # our custom dataset and sampler has to have epoch set by calling set_epoch
        if hasattr(self.trainer.train_dataloader, "dataset") and hasattr(self.trainer.train_dataloader.dataset, "set_epoch"):
            self.trainer.train_dataloader.dataset.set_epoch(self.current_epoch)
        if hasattr(self.trainer.train_dataloader, "sampler") and hasattr(self.trainer.train_dataloader.sampler, "set_epoch"):
            self.trainer.train_dataloader.sampler.set_epoch(self.current_epoch)
   
    def on_validation_epoch_start(self) -> None:
        """Reset the validation loss and every dynamically-created validation metric."""
        self.val_loss.reset()
        for metric_name in self.metric_keys:
            metric = getattr(self, metric_name, None)
            if metric is not None:
                metric.reset()
        # Re-arm the loss's first-batch behavior for this validation epoch.
        MultiLoss.first_batch = True

    # def on_test_epoch_start(self) -> None:
    #     # our custom dataset and sampler has to have epoch set by calling set_epoch
    #     for loader in self.trainer.test_dataloaders:
    #         if hasattr(loader, "dataset") and hasattr(loader.dataset, "set_epoch"):
    #             loader.dataset.set_epoch(0)
    #         if hasattr(loader, "sampler") and hasattr(loader.sampler, "set_epoch"):
    #             loader.sampler.set_epoch(0)

    def on_after_backward(self):
        # called *after* gradients have been computed, before optimizer step
        if self.net.decoder is not None and self.net.decoder.camera_token.grad is not None:
            # compute the L2 norm of the gradient
            grad_norm = self.net.decoder.camera_token.grad.norm()
            # log it; you can choose on_step/on_epoch as you like
            self.log(
                'grad1', 
                grad_norm, 
                on_step=True, 
                on_epoch=False, 
                prog_bar=True
            )
    def model_step(
        self, batch: List[Dict[str, torch.Tensor]], criterion: torch.nn.Module,
    ) -> Tuple[torch.Tensor, Dict]:
        device = self.device

        # Move data to device
        for view in batch:
            for name in "img pts3d valid_mask camera_pose camera_intrinsics F_matrix corres".split():
                if name in view:
                    view[name] = view[name].to(device, non_blocking=True)

        views = batch

        preds = self.forward(views)

        if criterion is not None:
        # Compute the loss in higher precision
            with torch.autocast(device_type=self.device.type, dtype=torch.float32):
                loss, loss_details = criterion(views, preds)

            return views, preds, loss, loss_details
        else:
            return views, preds

    def training_step(
        self, batch: List[Dict[str, torch.Tensor]], batch_idx: int
    ) -> torch.Tensor:
        """One training step: forward + loss, then log loss details and throughput counters.

        Returns the scalar training loss, or ``None`` when the criterion produced
        something that is not a tensor/dict (deliberately breaking the DDP loop).
        """
        views, preds, loss, loss_details = self.model_step(batch, self.train_criterion)

        if not isinstance(loss, (torch.Tensor, dict, type(None))):  # this will cause a lightning.fabric.utilities.exceptions.MisconfigurationException
            # log loss and the batch information to help debugging
            # use print instead of log because the logger only logs on rank 0, but this could happen on any rank
            print(f"Loss is not a tensor or dict but {type(loss)}, value: {loss}")
            print(f"Loss details: {loss_details}")
            print(f"Batch: {batch}")
            print(f"Batch index: {batch_idx}")
            print(f"Views: {views}")
            print(f"Preds: {preds}")
            loss = None  # set loss to None will still break the training loop in DDP, this is intended - we should fix the data to avoid nan loss in the first place
            return loss

        # Assigning a plain tensor to a registered-buffer name updates the buffer,
        # so this fractional-epoch counter is checkpointed with the model.
        self.epoch_fraction = torch.tensor(self.trainer.current_epoch + batch_idx / self.trainer.num_training_batches, device=self.device)

        # self.log("trainer/epoch", self.epoch_fraction, on_step=True, on_epoch=False, prog_bar=False)
        # self.log("trainer/lr", self.trainer.lr_scheduler_configs[0].scheduler.get_last_lr()[0], on_step=True, on_epoch=False, prog_bar=True)
        self.log("trainer/lr", self.lr, on_step=False, on_epoch=True, prog_bar=False)
        self.log("train/loss", loss, on_step=False, on_epoch=True, prog_bar=True)
        # log the details of the loss
        if loss_details is not None:
            # if "pose_loss" in loss_details:
            #     self.log("train/ploss", loss_details['pose_loss'], on_step=True, on_epoch=True, prog_bar=True)
            # if "cam_loss" in loss_details:
            #     self.log("train/camloss", loss_details['cam_loss'], on_step=False, on_epoch=True, prog_bar=False)
            for key, value in loss_details.items():
                # Non-scalar tensors cannot be passed to self.log — skip them.
                if  torch.is_tensor(value) and value.ndim>0:
                    continue

                if self.log_detail:
                    self.log(f"train_detail_{key}", value, on_step=False, on_epoch=True, prog_bar=False)

                # Keys with a trailing "/<1-2 digits>" suffix (presumably a per-view
                # index — TODO confirm against the criterion) are summarized under
                # the stripped key; all other keys are logged per-step.
                match = re.search(r'/(\d{1,2})$', key)
                if match:
                    if self.log_summary:
                        stripped_key = key[:match.start()]
                        self.log(f"train/{stripped_key}", value, on_step=False, on_epoch=True, prog_bar=False)
                else:
                    self.log(f"train/{key}", value, on_step=True, on_epoch=False, prog_bar=True)

        # Log the total number of samples seen so far
        batch_size = views[0]["img"].shape[0]
        self.train_total_samples_per_step(batch_size)  # aggregate across all GPUs
        self.train_total_samples += self.train_total_samples_per_step.compute()  # accumulate across all steps
        self.train_total_samples_per_step.reset()
        # self.log("trainer/total_samples", self.train_total_samples, on_step=True, on_epoch=False, prog_bar=False)

        # Log the total number of images seen so far
        num_views = len(views)
        n_image_cur_step = batch_size * num_views
        self.train_total_images_per_step(n_image_cur_step)  # aggregate across all GPUs
        self.train_total_images += self.train_total_images_per_step.compute()  # accumulate across all steps
        self.train_total_images_per_step.reset()
        # self.log("trainer/total_images", self.train_total_images, on_step=True, on_epoch=False, prog_bar=False)

        return loss

    def validation_step(
        self, batch: List[Dict[str, torch.Tensor]], batch_idx: int, dataloader_idx: int = 0,
    ) :
        """One validation step: compute the validation loss and log its details per dataset.

        Loss details are routed three ways: fine-grained keys (``log_detail``),
        view-index-stripped summaries (``log_summary``), and per-dataset means
        plus cross-dataset MeanMetrics via ``put2metric``.
        """
        views, preds, loss, loss_details = self.model_step(batch, self.validation_criterion)

        # Extract the dataset name and batch size
        dataset_name = views[0]['dataset'][0]  # all views should have the same dataset name because we use "sequential" mode of CombinedLoader
        batch_size = views[0]["img"].shape[0]

        # loss_value = loss.detach().cpu().item()
        # Log the overall validation loss
        # self.log("val/loss", loss, on_step=False, on_epoch=True, prog_bar=True, reduce_fx="mean", sync_dist=False, add_dataloader_idx=True, batch_size=batch_size)
        # Accumulate into the MeanMetric; logged once in on_validation_epoch_end.
        self.val_loss(loss)
        # self.log(f"val/loss_{dataset_name}", loss, on_step=False, on_epoch=True, prog_bar=True, reduce_fx="mean", sync_dist=False, add_dataloader_idx=False, batch_size=batch_size)

        # Log the details of the loss with dataset name and view number in the key
        if loss_details is not None:
            for key, value in loss_details.items():
                # Non-scalar tensors cannot go through self.log; print the shape
                # for visibility and skip them.
                if torch.is_tensor(value) and value.ndim>0:
                    print(key, value.shape)
                    continue
                if  self.log_detail:
                    self.log(
                        f"val_detail_{dataset_name}_{key}",
                        value,
                        on_step=False,
                        on_epoch=True,
                        prog_bar=False,
                        reduce_fx="mean",
                        sync_dist=False,
                        add_dataloader_idx=False,
                        batch_size=batch_size,
                    )
                # A trailing "/<digits>" suffix (presumably a per-view index —
                # TODO confirm) is stripped for the summary variant.
                match = re.search(r'/(\d{1,2})$', key)
                if match:
                    if self.log_summary:
                        stripped_key = key[:match.start()]
                        self.log(f"val/{dataset_name}_{stripped_key}", value, on_step=False, on_epoch=True, prog_bar=False, reduce_fx="mean", sync_dist=False, add_dataloader_idx=False, batch_size=batch_size)
                else:
                    # Also feed the dataset-agnostic MeanMetric for the epoch summary.
                    self.put2metric(f'val_{key}', value)
                    self.log(f'val/{dataset_name}_{key}', value, on_step=False, on_epoch=True, prog_bar=False, reduce_fx="mean", sync_dist=False, add_dataloader_idx=False, batch_size=batch_size)

        # Free references eagerly; validation batches can be large.
        del batch, views, preds, loss, loss_details
        torch.cuda.empty_cache()
        MultiLoss.first_batch = False

    def put2metric(self, key, value):
        """Accumulate ``value`` into a lazily-created ``MeanMetric`` stored as ``self.<key>``.

        The key is remembered in ``self.metric_keys`` so the metric can be reset
        and logged by the epoch hooks.
        """
        self.metric_keys.add(key)
        if not hasattr(self, key):
            setattr(self, key, MeanMetric().to(self.device))
        metric = getattr(self, key)
        metric(value)

    def on_validation_epoch_end(self) -> None:
        """Log the epoch-level validation loss and every dynamically-tracked metric.

        Metrics created via ``put2metric`` are stored under ``val_<name>`` attributes
        and surfaced here as ``val/<name>``.
        """
        for key in self.metric_keys:
            if hasattr(self, key):
                # FIX: the f-string previously nested double quotes
                # (f"...{key.removeprefix("val_")}"), which is a SyntaxError on
                # Python < 3.12 (quote reuse is a PEP 701 feature). Single quotes
                # keep the identical logged key on all supported versions.
                self.log(f"val/{key.removeprefix('val_')}", getattr(self, key), prog_bar=True, metric_attribute=key)
        self.log("val/loss", value=self.val_loss, prog_bar=True)

        # Gather + log per-scene reconstruction losses accumulated across ranks.
        self.aggregate_and_log_reconstruction_detail_losses()

    def on_test_epoch_start(self) -> None:
        self.peek = True

    # def on_validation_epoch_start(self) -> None:
    def test_step(
        self, batch: List[Dict[str, torch.Tensor]], batch_idx: int, dataloader_idx: int = 0,
    ) -> None:
        """One test step: run inference (no loss) and dispatch every applicable evaluation.

        Depending on which keys the predictions contain, this evaluates:
        reconstruction from the local and global heads, depth (from the depth
        head and from local points), camera poses (VGGT and standard), and
        optionally reconstruction from depth-derived points.
        """
        views, preds = self.model_step(batch, None)
        # Select the prediction/confidence key names; the VGGT variant writes
        # its outputs under "*_vggt" keys.
        if self.vggt:
            conf_keys = {'global': "conf_vggt", 'local': "conf_self_vggt",'depth':'conf_depth'}
            result_keys = {'local':"pts3d_in_self_vggt", 'global':"pts3d_in_other_view_vggt",'depth':'depth_vggt','fuv':'camera_intrinsics'}
        else:
            conf_keys = {'global': "conf", 'local': "conf_self",'depth':'conf_depth'}
            result_keys = {'local':"pts3d_in_self_view", 'global':"pts3d_in_other_view",'depth':'depth','fuv':'camera_intrinsics'}

        dataset_name = views[0]['dataset'][0]
        # Evaluate point clouds only for the reconstruction datasets (DTU, 7-Scenes, and NRGBD)
        # eval only every 5 epochs because it's slow
        # current_precision = self.trainer.precision
        # if current_precision == 'bf16-mixed' or current_precision == 'bf16':
        #     dtype = torch.bfloat16
        # elif current_precision == '16-mixed' or current_precision == '16':
        #     dtype = torch.float16
        # else: # '32-true' or '64-true' or other
        #     dtype = None # Or handle as an error/default to float32 operations
        # dtype=torch.float32
        # Run all evaluations in float32 regardless of inference precision.
        with torch.autocast(device_type=self.device.type, enabled=True, dtype=torch.float32):
            # Reconstruction from the local head (aligned to global), if present.
            # NOTE(review): evaluate_reconstruction may rewrite result_keys['global']
            # in place, which affects the checks below — confirm this is intended.
            if result_keys["local"] in preds[0]:
                self.evaluate_reconstruction(views, preds, dataset_name=dataset_name,
                    use_pts3d_from_local_head=True,
                    min_conf_thr_percentile_for_local_alignment_and_icp=85,
                    min_conf_thr_percentile_for_metric_cacluation=0, conf_keys=conf_keys, result_keys=result_keys)  # use only the very confident points for alignment and use all of the points for metric calculation
            # Reconstruction from the global head, if present.
            if result_keys["global"] in preds[0]:
                self.evaluate_reconstruction(views, preds, dataset_name=dataset_name,
                    use_pts3d_from_local_head= False,
                    min_conf_thr_percentile_for_local_alignment_and_icp=85,
                    min_conf_thr_percentile_for_metric_cacluation=0, conf_keys=conf_keys, result_keys=result_keys)


            # Depth from the dedicated depth head ('1'-suffixed metrics)...
            if result_keys["depth"] in preds[0]:
                self.evaluate_depth(views, preds, dataset_name=dataset_name, conf_keys=conf_keys, result_keys=result_keys, depth_key=result_keys["depth"])
                # torch.cuda.empty_cache()
            # ...and depth taken as the z-channel of the local-head points.
            if result_keys["local"] in preds[0]:
                self.evaluate_depth(views, preds, dataset_name=dataset_name, conf_keys=conf_keys, result_keys=result_keys)
            # self.evaluate_depth(views, preds, dataset_name=dataset_name, conf_keys=conf_keys, result_keys=result_keys, use_pts3d_from_global_head=True, min_conf_thr_percentile_for_local_alignment_and_icp=85)
            # gc.collect()
            torch.cuda.empty_cache()
            # Evaluate metrics for camera poses
            if 'vggt_camera_pose' in preds[0]:
                self.evaluate_camera_poses_with_key(views, preds, conf_keys=conf_keys, result_keys=result_keys, pose_key='vggt_camera_pose', fov_key='vggt_fov')
            if 'camera_pose' in preds[0]:
                self.evaluate_camera_poses_with_key(views, preds, conf_keys=conf_keys, result_keys=result_keys, pose_key='camera_pose', fov_key='fov', align=True)
            # else:
            # gc.collect()
            torch.cuda.empty_cache()
            # Optionally score a reconstruction rebuilt from predicted depth +
            # camera poses instead of the point heads.
            if self.eval_pts3d_from_depth and result_keys['depth'] in preds[0]:
                from fast3r.eval.utils import get_local_pts3d_from_depth_views
                from fast3r.dust3r.losses import MultiLoss
                # NOTE(review): duplicated assignment target — both sides set
                # 'global'; the second was likely meant to be conf_keys['local'].
                # Confirm intent before changing.
                conf_keys['global'] = conf_keys['global'] = conf_keys['depth']
                result_keys['local'] = 'local_from_depth'
                result_keys['global'] = 'depth_aligned_to_global'
                pose_key='camera_pose'
                get_local_pts3d_from_depth_views(pred_views=preds, result_key=result_keys['local'], fuv_key=result_keys['fuv'], depth_key=result_keys['depth'], fuv_scaler=MultiLoss.fuv_scaler)

                align_local_pts3d_to_global_with_cam(preds, pose_key=pose_key, pred_key=result_keys['local'], result_key=result_keys['global'])

                self.evaluate_reconstruction(views, preds, dataset_name=dataset_name,
                    use_pts3d_from_local_head= False,
                    min_conf_thr_percentile_for_local_alignment_and_icp=85,
                    min_conf_thr_percentile_for_metric_cacluation=0, conf_keys=conf_keys, result_keys=result_keys)
            gc.collect()
            # self.evaluate_camera_poses(views, preds, niter_PnP=100, focal_length_estimation_method='first_view_from_global_head', conf_keys=conf_keys, result_keys=result_keys)

        # Evaluation allocates large intermediates — release aggressively.
        del views, preds, batch
        gc.collect()
        torch.cuda.empty_cache()

    def evaluate_depth(self, views, preds, dataset_name, conf_keys=None, result_keys=None, depth_key=None, use_pts3d_from_global_head=False, min_conf_thr_percentile_for_local_alignment_and_icp=0):
        if use_pts3d_from_global_head:
            align_global_pts3d_to_local(preds, views, min_conf_thr_percentile=min_conf_thr_percentile_for_local_alignment_and_icp, conf_keys=conf_keys, result_keys=result_keys)
        num_views = len(preds)
        if result_keys["local"] in preds[0]:
            B, H, W, _ = preds[0][result_keys["local"]].shape  # Get batch size and dimensions
        else:
            B, H, W = preds[0][result_keys["depth"]].shape
        def process_sample(i):
            # scene_name = "/".join(views[0]['label'][i].split('/')[:-1]) if "label" in views[0] else "unknown" 
            depth_pred_list = []
            depth_gt_list = []
            for view, pred in zip(views, preds):
                if use_pts3d_from_global_head:
                    depth_pred = pred['pts3d_in_global_view_aligned_to_self'][i][..., 2]
                elif depth_key is not None:
                    depth_pred = pred[depth_key][i].squeeze(-1)
                else:
                    depth_pred = pred[result_keys["local"]][i][..., 2]
                depth_gt = view['depthmap'][i]
                assert depth_pred.shape == depth_gt.shape
                depth_pred_list.append(depth_pred)
                depth_gt_list.append(depth_gt)
                # print(depth_pred.shape, depth_gt.shape)
            depth_pred_all = torch.stack(depth_pred_list, dim=0)           # Shape: (N_pred, H, W)
            depth_gt_all = torch.stack(depth_gt_list, dim=0)           # Shape: (N_pred, H, W)
            results, _, _, _ = depth_evaluation(depth_pred_all, depth_gt_all)
            del depth_pred_all, depth_gt_all
            return results
        with ThreadPoolExecutor() as executor:
            batch_results = list(executor.map(process_sample, range(B)))
        v = '1' if depth_key is not None else ''
        if use_pts3d_from_global_head:
            v = 'g'
        for results in batch_results:
            for key in results.keys():
                if not hasattr(self, f'{key}{v}'):
                    setattr(self, f'{key}{v}', MeanMetric().to(self.device))
                getattr(self, f'{key}{v}')(results[key])
                self.log(f"val_metric_{dataset_name}/{key}{v}", getattr(self, f'{key}{v}'), on_step=False, on_epoch=True, prog_bar=True, reduce_fx="mean", sync_dist=False, add_dataloader_idx=False, metric_attribute=key)
        del batch_results 
        gc.collect()

    def aggregate_and_log_reconstruction_detail_losses(self):
        """Gather the uneven-view detailed losses from every rank and log their means on rank 0.

        Each rank holds ``{dataset: {loss_key: [values...]}}``; rank 0 merges
        all of them and logs the mean of each loss key.
        """
        if not torch.distributed.is_initialized():
            return

        # Only the destination rank allocates the receive buffer.
        world_size = torch.distributed.get_world_size()
        gathered = [None] * world_size if self.global_rank == 0 else None
        torch.distributed.gather_object(self.uneven_view_detailed_losses, gathered, dst=0)

        if self.global_rank == 0:
            # Merge: dataset -> loss key -> concatenated value lists.
            merged = {}
            for per_rank in gathered:
                for dataset_name, loss_dict in per_rank.items():
                    dataset_losses = merged.setdefault(dataset_name, {})
                    for key, values in loss_dict.items():
                        dataset_losses.setdefault(key, []).extend(values)

            # Log the mean of every aggregated loss key.
            for loss_dict in merged.values():
                for key, values in loss_dict.items():
                    self.log(key, np.mean(values), rank_zero_only=True)

        # Start the next epoch with a clean slate on every rank.
        self.uneven_view_detailed_losses.clear()

    def evaluate_reconstruction(self, views, preds, dataset_name,
                                min_conf_thr_percentile_for_local_alignment_and_icp=0,
                                min_conf_thr_percentile_for_metric_cacluation=0,
                                use_pts3d_from_local_head=True, conf_keys=None, result_keys=None):
        # align the local head output to the global output
        # and populate the preds with "pts3d_in_self_view_aligned_to_global"
        if use_pts3d_from_local_head:
            if  'camera_pose' in preds[0]:# True or result_keys['global'] not in preds[0] :
                align_local_pts3d_to_global_with_cam(preds, pose_key='camera_pose', pred_key=result_keys['local'], result_key='pts3d_in_self_view_aligned_to_global')
                result_keys['global']='pts3d_in_self_view_aligned_to_global'
            else:
                align_local_pts3d_to_global(preds, views, min_conf_thr_percentile=min_conf_thr_percentile_for_local_alignment_and_icp, conf_keys=conf_keys, result_keys=result_keys)


        assert min_conf_thr_percentile_for_local_alignment_and_icp >= min_conf_thr_percentile_for_metric_cacluation # Ensure that the confidence threshold for ICP is higher than the one for metrics
        # Define the function to process a single sample

        results_list = process_recon(views, preds, 
                  min_conf_thr_percentile_for_local_alignment_and_icp, 
                  min_conf_thr_percentile_for_metric_cacluation, 
                  use_pts3d_from_local_head, conf_keys=conf_keys, result_keys=result_keys)
        # Aggregate results from all processed samples into epoch metrics by dataset and scene
        for results in results_list:
            scene_name = results[0]
            v = '' if use_pts3d_from_local_head else 'g'
            for key in results[1].keys():
                if not hasattr(self, f'{key}{v}'):
                    setattr(self, f'{key}{v}', MeanMetric().to(self.device))
                getattr(self, f'{key}{v}')(results[1][key])
                self.log(f"val_metric_{dataset_name}/{key}{v}", getattr(self, f'{key}{v}'), on_step=False, on_epoch=True, prog_bar=True, reduce_fx="mean", sync_dist=False, add_dataloader_idx=False, metric_attribute=f'{key}{v}')
                if self.log_detail:
                    detail_key =  f'{scene_name}{key}{v}'
                    if not hasattr(self, detail_key):
                        setattr(self, detail_key, MeanMetric().to(self.device))
                    getattr(self, detail_key)(results[1][key])
                    self.log(f"val_metric_{dataset_name}_{scene_name}/{key}{v}", getattr(self, detail_key), on_step=False, on_epoch=True, prog_bar=True, reduce_fx="mean", sync_dist=False, add_dataloader_idx=False, metric_attribute=key)
            # # if dataset_name not in self.reconstruction_metrics_per_epoch:
            #     self.reconstruction_metrics_per_epoch[dataset_name] = {}
            # self.reconstruction_metrics_per_epoch[dataset_name].update(result)  # Accumulate per dataset for the epoch
        del results_list
    
    def evaluate_camera_poses_with_key(self, views, preds, conf_keys=None, result_keys=None, pose_key=None, fov_key=None, align=False):
        """Evaluate predicted camera poses decoded directly from the network's pose outputs.

        Computes relative pose metrics (RRA/RTA/mAA via ``process_poses``) between
        decoded predictions and ground-truth c2w poses, and logs them as
        epoch-aggregated MeanMetrics namespaced by dataset. Metric names are
        suffixed with the first character of ``pose_key`` so different pose heads
        do not collide on the same metric attribute.

        Args:
            views: per-view batch dicts; each must contain 'camera_pose' (batched c2w
                matrices) and 'dataset' (used only for the metric namespace).
            preds: per-view prediction dicts; ``pred[pose_key]`` holds the pose encoding.
            conf_keys, result_keys, fov_key: unused in this method; kept for signature
                parity with the PnP-based ``evaluate_camera_poses``.
            pose_key: key of the pose encoding inside each prediction dict.
            align: if True, express GT poses relative to the first view's camera and
                decode predictions with ``camera_from_preds``; otherwise compare
                absolute poses decoded with ``pose_encoding_to_camera``.
        """
        dataset_name = views[0]['dataset'][0]
        
        if align:
            # Express ground-truth c2w poses in the first view's camera frame: inv(pose_0) @ pose_i.
            in_camera1 = inv(views[0]['camera_pose'])            
            poses_c2w_gt = [in_camera1 @ view['camera_pose'] for view in views]
            poses_pred = camera_from_preds(preds, pose_key)
            # poses_pred = [pose_encoding_to_camera(pred[pose_key]) for pred in preds]
            # pred_camera1 = inv(poses_pred[0])
            # poses_pred = [pred_camera1@pose for pose in poses_pred]            
        else:
            poses_c2w_gt = [view['camera_pose'] for view in views]
            poses_pred = [pose_encoding_to_camera(pred[pose_key]) for pred in preds]       

        # Convert poses to tensors
        device = self.device
        
        gt_cameras = torch.stack(poses_c2w_gt).transpose(0, 1)  # (B, num_views, 4, 4)
        pred_cameras = torch.stack(poses_pred, dim=1).to(gt_cameras.dtype).to(device)  # Shape (B, num_views, 4, 4)
        # pred_cameras[:, 0, :, :] = gt_cameras[:, 0, :, :]
        # One-shot debug dump of a GT/pred pose pair (scaled to integers for readability);
        # self.peek is cleared so this logs only once per run.
        if self.peek:
            log.info('gt_cameras * 1000') 
            log.info((gt_cameras[0, 1]*1000).to(torch.int))
            log.info( (pred_cameras[0, 1]*1000).to(torch.int))    
            self.peek = False      
        # compute the metrics: RRA, RTA, mAA
        # Ensure we have enough poses to compute relative errors
        if pred_cameras.shape[1] >= 2:

            batch_results = process_poses(pred_cameras, gt_cameras, self.RRA_thresholds, self.RTA_thresholds)

            # Metric-name suffix disambiguating this pose head from others.
            v = pose_key[0]
            # Update metrics for all samples in the batch
            for results in batch_results:
                for key in results.keys():
                    # Lazily register a MeanMetric attribute per metric name so
                    # Lightning can aggregate it over the epoch (metric_attribute
                    # must match the attribute name used here).
                    if not hasattr(self, f'{key}{v}'):
                        setattr(self, f'{key}{v}', MeanMetric().to(self.device))
                    getattr(self, f'{key}{v}')(results[key])
                    self.log(f"val_metric_{dataset_name}/{f'{key}{v}'}", getattr(self, f'{key}{v}'), on_step=False, on_epoch=True, prog_bar=True, reduce_fx="mean", sync_dist=False, add_dataloader_idx=False, metric_attribute=f'{key}{v}')

        else:
            log.warning("Not enough camera poses to compute relative errors.")

    def evaluate_camera_poses(self, views, preds, niter_PnP=10, focal_length_estimation_method='individual', conf_keys=None, result_keys=None):
        """Evaluate camera poses and focal lengths recovered from predicted points via fast_pnp.

        Focal_length_estimation_method can be 'individual' or 'first_view_from_local_head'
        or 'first_view_from_global_head'.

        Pipeline: (1) optionally lift local-head points into the global frame,
        (2) undo the data loader's landscape transpose on ``preds`` in place,
        (3) run PnP (``estimate_camera_poses``) to obtain c2w poses and focals,
        (4) compute RRA/RTA/mAA with ``process_poses`` and log per-dataset MeanMetrics.

        Args:
            views: per-view batch dicts with 'camera_pose' (c2w) and 'dataset'.
            preds: per-view prediction dicts; modified in place by the orientation fix.
            niter_PnP: number of PnP iterations passed through to the estimator.
            focal_length_estimation_method: see summary above.
            conf_keys / result_keys: key mappings into ``preds`` for the confidence
                and point maps ('local' / 'global' entries).
        """
        dataset_name = views[0]['dataset'][0]
        # If focal_length_estimation_method is 'first_view_from_local_head', align local pts3d to global
        if focal_length_estimation_method == 'first_view_from_local_head':
            if result_keys['global'] not in preds[0]:
                # No global-head output present: lift local points with the predicted
                # camera pose instead. NOTE: result_keys is mutated here and the
                # change is visible to the caller.
                align_local_pts3d_to_global_with_cam(preds, pose_key='camera_pose', pred_key=result_keys['local'], result_key='pts3d_in_self_view_aligned_to_global')
                result_keys['global']='pts3d_in_self_view_aligned_to_global'
            else:
                align_local_pts3d_to_global(preds, views, conf_keys=conf_keys, result_keys=result_keys)

        # in-place correction of the orientation of the predicted points and confidence maps
        # this is because the data loader transposed the input images and valid_masks to landscape
        correct_preds_orientation(preds, views, conf_keys=conf_keys, result_keys=result_keys, focal_length_estimation_method=focal_length_estimation_method)

        # Estimate camera poses using the provided function
        # (estimated_focals is currently unused; only the poses feed the metrics below)
        poses_c2w_estimated, estimated_focals = estimate_camera_poses(preds=preds, views=views, niter_PnP=niter_PnP, focal_length_estimation_method=focal_length_estimation_method, conf_keys=conf_keys, result_keys=result_keys)
        
        # Get ground truth poses
        poses_c2w_gt = [view['camera_pose'] for view in views]

        # Convert poses to tensors
        device = self.device
        pred_cameras = torch.tensor(np.stack(poses_c2w_estimated), dtype=poses_c2w_gt[0].dtype, device=device)  # Shape (B, num_views, 4, 4)
        gt_cameras = torch.stack(poses_c2w_gt).transpose(0, 1)  # (B, num_views, 4, 4)
        # One-shot debug dump of a GT/pred pose pair; self.peek is cleared so it logs once.
        if self.peek:
            log.info('gt_cameras * 1000') 
            log.info((gt_cameras[0, 1]*1000).to(torch.int))
            log.info( (pred_cameras[0, 1]*1000).to(torch.int))    
            self.peek = False      

        # compute the metrics: RRA, RTA, mAA
        # Ensure we have enough poses to compute relative errors
        if pred_cameras.shape[1] >= 2:

            batch_results = process_poses(pred_cameras, gt_cameras, self.RRA_thresholds, self.RTA_thresholds)

            # Update metrics for all samples in the batch
            for results in batch_results:
                for key in results.keys():
                    # Lazily register a MeanMetric attribute per metric name for
                    # epoch-level aggregation by Lightning.
                    if not hasattr(self, key):
                        setattr(self, key, MeanMetric().to(self.device))
                    getattr(self, key)(results[key])
                    self.log(f"val_metric_{dataset_name}/{key}", getattr(self, key), on_step=False, on_epoch=True, prog_bar=True, reduce_fx="mean", sync_dist=False, add_dataloader_idx=False, metric_attribute=key)

        else:
            log.warning("Not enough camera poses to compute relative errors.")
        
        # return batch_results

    # Note: the fast_pnp-based pose estimation helper itself lives in
    # fast3r.eval.utils (``estimate_camera_poses``), used by the method above.

    def configure_optimizers(self) -> Dict[str, Any]:
        """Build the optimizer (AdamW or Muon-with-aux-Adam) over scaled parameter groups.

        ``self.hparams.optimizer`` is expected to be a callable optimizer *factory*
        (e.g. a functools.partial produced by the config system) rather than an
        optimizer instance, since it is invoked below with ``params=``.

        Returns:
            Dict with the single key "optimizer". No "lr_scheduler" entry is
            returned because the learning rate is adjusted manually in
            ``optimizer_step``.
        """
        params, names = misc.get_parameter_groups(self.net, self.hparams.weight_decay, self.hparams.scale_keywords, self.hparams.lr_scales, return_name=True)
        # BUG FIX: the previous check ``isinstance(self.hparams.optimizer, torch.optim.AdamW)``
        # compared the factory (a partial/class) against the *instance* type, which is
        # always False, making the AdamW branch unreachable and forcing Muon for every
        # config. Unwrap a functools.partial via ``.func`` to inspect the underlying
        # optimizer class; the original isinstance check is kept as a fallback.
        factory = self.hparams.optimizer
        target = getattr(factory, "func", factory)
        wants_adamw = (
            isinstance(factory, torch.optim.AdamW)
            or (isinstance(target, type) and issubclass(target, torch.optim.AdamW))
        )
        if wants_adamw:
            optimizer = factory(params=params)
        else:
            # Muon path: matrix params get Muon updates, the rest an auxiliary Adam;
            # head parameters run at a reduced LR (head_lr_scale=0.5).
            final_groups = misc.refine_and_split_groups_for_muon(
                params,
                names,
                head_lr_scale=0.5,
                muon_lr_scale=1,
                betas=(0.9, 0.95)
            )
            optimizer = SingleDeviceMuonWithAuxAdam(final_groups)

        return {"optimizer": optimizer}
    # def configure_optimizers(self) -> Dict[str, Any]:
    #     params = misc.get_parameter_groups(self.net, self.hparams.weight_decay, self.hparams.scale_keywords, self.hparams.lr_scales)
    #     optimizer = self.hparams.optimizer(params=params)

    #     return {"optimizer": optimizer}
    
    def optimizer_step(self,
                       epoch: int,
                       batch_idx: int,
                       optimizer,
                       optimizer_closure=None):
        """Manually drive one optimization step with a step-indexed LR schedule.

        The learning rate is recomputed from the trainer's global step by
        ``adjust_learning_rate_by_step`` (which updates the optimizer's param
        groups in place); the resulting value is stored on ``self.lr``. The
        optimizer step and gradient reset then run as usual.
        """
        sched_cfg = self.hparams.scheduler
        global_step = self.trainer.global_step
        # Update every param group's LR in place and keep the value for inspection.
        self.lr = adjust_learning_rate_by_step(
            optimizer,
            global_step,
            base_lr=sched_cfg.lr,
            min_lr=sched_cfg.min_lr,
            warmup_steps=sched_cfg.warmup_steps,
            total_steps=sched_cfg.total_steps,
        )
        optimizer.step(closure=optimizer_closure)
        optimizer.zero_grad()

    def setup(self, stage: str) -> None:
        """Lightning setup hook: optionally compile the net and load pretrained weights.

        ``torch.compile`` is applied only for the "fit" stage when enabled via
        hparams. Pretrained weights are loaded only when not resuming: if
        ``resume_from_checkpoint`` is set, the Trainer itself restores the full
        state, so that flag is consulted here purely to skip the pretrained load.
        """
        if stage == "fit" and self.hparams.compile:
            self.net = torch.compile(self.net)

        should_load_pretrained = bool(self.pretrained) and not self.resume_from_checkpoint
        if should_load_pretrained:
            self._load_pretrained_weights()

    def _load_pretrained_weights(self) -> None:
        """Load pretrained weights into ``self.net``, dispatching on model type and checkpoint format.

        Supported cases:
          * FlashDUSt3R: load a two-head checkpoint, keeping only head 1 (renamed).
          * Fast3R:
              - a DUSt3R ``DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth`` checkpoint
                (subset load via ``load_from_dust3r_checkpoint``),
              - a ``.safetensors`` state dict (non-strict load),
              - a directory (delegates to ``from_pretrained``),
              - a path suffixed ``_vggtdino``: a VGGT checkpoint whose keys are
                remapped into encoder / decoder / heads, gated by
                ``self.pretrain_keywords``,
              - a path suffixed ``_depthanything``: encoder weights remapped from
                a DepthAnything-style checkpoint,
              - otherwise: a Fast3R Lightning checkpoint ('net.'-prefixed keys).
        All loads are non-strict; incompatible keys are printed for inspection.
        """
        log.info(f"Loading pretrained: {self.pretrained}")
        if isinstance(self.net, FlashDUSt3R):  # if the model is FlashDUSt3R, use the weights of the first head only
            ckpt = torch.load(self.pretrained)
            ckpt = self._update_ckpt_keys(ckpt, new_head_name='downstream_head', head_to_keep='downstream_head1', head_to_discard='downstream_head2')
            self.net.load_state_dict(ckpt["model"], strict=False)
            del ckpt  # in case it occupies memory
        elif isinstance(self.net, Fast3R):
            if self.pretrained.endswith("DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth"):
                # if the model is Fast3R and the pretrained model is DUSt3R, load a subset of the weights into the net
                self.net.load_from_dust3r_checkpoint(self.pretrained)
            elif self.pretrained.endswith(".safetensors"):
                try:
                    state_dict = load_file(self.pretrained)
                    keys = list(state_dict.keys())
                    # for k in keys:
                    #     if not "encoder" in k:
                    #         state_dict.pop(k)
                    IncompatibleKeys = self.net.load_state_dict(state_dict, strict=False)
                    print(IncompatibleKeys)
                    print(f"Successfully loaded state_dict from {self.pretrained}")
                    del state_dict
                except Exception as e:
                    print(f"Error loading safetensors file: {e}")
            elif os.path.isdir(self.pretrained):
                self.net.from_pretrained(self.pretrained)
            elif self.pretrained.endswith("_vggtdino"):
                # The "_vggtdino" suffix is a marker on the configured path, not part of
                # the file name on disk — strip it before loading. NOTE: this mutates
                # self.pretrained.
                self.pretrained = self.pretrained.removesuffix("_vggtdino")

                ckpt = torch.load(self.pretrained)
                keys = list(ckpt.keys())
                # Selectively remap VGGT checkpoint keys into our submodules,
                # controlled by which keywords appear in self.pretrain_keywords.
                if 'encoder' in self.pretrain_keywords:
                    # aggregator.patch_embed.* -> model.* for the encoder.
                    ckpt_encoder = self._update_ckpt_keys_revised(ckpt, heads_to_keep=[('aggregator.patch_embed.', 'model.')], heads_to_discard=[], default_keep=False)
                    IncompatibleKeys = self.net.encoder.load_state_dict(ckpt_encoder, strict=False)
                    print('encoder', IncompatibleKeys)
                if 'decoder' in self.pretrain_keywords:
                    # aggregator.* (minus patch_embed) -> decoder, prefix stripped.
                    ckpt_decoder = self._update_ckpt_keys_revised(ckpt, heads_to_keep=[('aggregator.', '')], heads_to_discard=['aggregator.patch_embed'], default_keep=False)
                    IncompatibleKeys = self.net.decoder.load_state_dict(ckpt_decoder, strict=False)
                    print('decoder', IncompatibleKeys)
                if 'head' in self.pretrain_keywords:
                    # Load each VGGT head that exists on the net from its counterpart
                    # prefix in the checkpoint.
                    vheads = ['vggt_cam_head', 'vggt_point_head', 'vggt_depth_head']
                    # , 'vggt_local_point_head'
                    oheads = ['camera_head.', 'point_head.', 'depth_head.']
                    for i in range(len(oheads)):
                        if hasattr(self.net, vheads[i]) and getattr(self.net, vheads[i]) is not None:
                            ckpt_decoder = self._update_ckpt_keys_revised(ckpt, heads_to_keep=[(oheads[i], '')], heads_to_discard=['aggregator.'], default_keep=False)
                            IncompatibleKeys = getattr(self.net, vheads[i]).load_state_dict(ckpt_decoder, strict=False)
                            print(vheads[i], IncompatibleKeys)
                
                del ckpt
                # pos_weight = ckpt['model.pos_embed']
                # pos_weight = pos_weight.reshape(1, 1370, -1, 2).mean(dim=3)
                # pos_emb = {'pos_embed': pos_weight}
                # IncompatibleKeys = self.net.decoder.load_state_dict(pos_emb, strict=False)
                # print(IncompatibleKeys.unexpected_keys)
                # seems not helpful

                
            elif self.pretrained.endswith("_depthanything"):
                # Same marker-suffix convention as "_vggtdino" above; mutates self.pretrained.
                self.pretrained = self.pretrained.removesuffix("_depthanything")

                ckpt = torch.load(self.pretrained)
                keys = list(ckpt.keys())
                # pretrained.* -> model.* for the encoder weights.
                ckpt = self._update_ckpt_keys_revised(ckpt, heads_to_keep=[('pretrained.', 'model.')], heads_to_discard=[], default_keep=False)
                IncompatibleKeys = self.net.encoder.load_state_dict(ckpt, strict=False)
                print(IncompatibleKeys)      
                del ckpt          
            else:
                # if the checkpoint is also Fast3R, load all weights
                log.info(f"Loading pretrained weights from {self.pretrained}")
                checkpoint = torch.load(self.pretrained,weights_only=False)
                filtered_state_dict = {k: v for k, v in checkpoint['state_dict'].items() if k.startswith('net.')}
                # Remove the 'net.' prefix from the keys
                filtered_state_dict = {k[len('net.'):]: v for k, v in filtered_state_dict.items()}
                # Load the filtered state_dict into the model
                IncompatibleKeys = self.net.load_state_dict(filtered_state_dict, strict=False)
                print(IncompatibleKeys)   
                del checkpoint             
        else:
            log.error("Unsupported model type. Only Fast3R and FlashDUSt3R are supported.")
        # Checkpoints can be large; reclaim memory promptly after loading.
        gc.collect()

    @staticmethod
    def _update_ckpt_keys(ckpt, new_head_name='downstream_head', head_to_keep='downstream_head1', head_to_discard='downstream_head2'):
        """Helper function to use the weights of a model with multiple heads in a model with a single head.
        specifically, keep only the weights of the first head and delete the weights of the second head.
        """
        new_ckpt = {'model': {}}

        for key, value in ckpt['model'].items():
            if key.startswith(head_to_keep):
                new_key = key.replace(head_to_keep, new_head_name)
                new_ckpt['model'][new_key] = value
            elif key.startswith(head_to_discard):
                continue
            else:
                new_ckpt['model'][key] = value

        return new_ckpt
    
    
    @staticmethod
    def _update_ckpt_keys_revised(ckpt, heads_to_keep=[('downstream_head1', 'downstream_head')], heads_to_discard=['downstream_head2'], default_keep=True):
        """Helper function to use the weights of a model with multiple heads in a model with a single head.
        specifically, keep only the weights of the first head and delete the weights of the second head.
        """
        new_ckpt = {}

        for key, value in ckpt.items():
            discard = False
            for head in heads_to_discard:
                if key.startswith(head):
                    discard = True                 
                    break 
            if discard:
                continue
            for head in heads_to_keep:
                if key.startswith(head[0]):
                    new_key = key.replace(head[0], head[1])
                    new_ckpt[new_key] = value
                    continue                
            if default_keep:                 
                new_ckpt[key] = value

        return new_ckpt
