import gc
import torch
from pytorch_lightning import Callback
# import logging as log
from fast3r.utils import RankedLogger
log = RankedLogger(__name__, rank_zero_only=True)

class MemoryCleanupCallback(Callback):
    """Lightning callback that releases CPU and GPU memory at loop boundaries.

    At the end of training / validation / test phases it clears stale
    ``trainer.callback_metrics``, tears down dataloaders through the
    datamodule, forces a garbage-collection pass, and — when CUDA is
    available — returns cached GPU memory to the driver via
    ``torch.cuda.empty_cache()``.
    """

    @staticmethod
    def _release_memory() -> None:
        """Force a GC pass and, if CUDA is available, empty the CUDA cache."""
        gc.collect()  # reclaim CPU memory held by unreachable objects
        if torch.cuda.is_available():
            # Return cached (but unused) GPU allocations to the driver so
            # other processes / later stages can use them.
            torch.cuda.empty_cache()

    def on_train_end(self, trainer, pl_module):
        """Called once when training ends; release fit-stage resources."""
        # Drop metrics accumulated during fit so they don't pin tensors.
        trainer.callback_metrics.clear()
        if trainer.datamodule:
            # Release training dataloader / dataset resources.
            trainer.datamodule.teardown("fit")
        gc.collect()  # CPU-side cleanup only; GPU cache is handled by later hooks

    def on_validation_end(self, trainer, pl_module):
        """Called after the validation loop ends; release validation resources."""
        log.info("Cleaning up after validation loop...")
        if trainer.datamodule:
            trainer.datamodule.teardown("validate")  # Release validation dataloader resources
        trainer.callback_metrics.clear()
        self._release_memory()

    def on_test_start(self, trainer, pl_module):
        """Called before testing starts; begin from a clean memory state."""
        self._release_memory()

    def on_validation_epoch_end(self, trainer, pl_module) -> None:
        """Called when the validation epoch ends."""
        log.info("Cleaning up after validation epoch...")
        self._release_memory()

    def on_test_epoch_end(self, trainer, pl_module) -> None:
        """Called when the test epoch ends."""
        log.info("Cleaning up after test epoch...")
        self._release_memory()