# Copyright 2021-2023 @ Shenzhen Bay Laboratory &
#                       Peking University &
#                       Huawei Technologies Co., Ltd
#
# This code is a part of Cybertron package.
#
# The Cybertron is open-source software based on the AI-framework:
# PyTorch (https://pytorch.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Callback functions
"""

import os
from typing import Optional, Dict, Any, Union, List
import torch
from torch import Tensor
import logging
import datetime

__all__ = [
    "Callback",
    "TrainMonitor",
]


class Callback:
    """Abstract hook interface invoked at key points of a training loop.

    Subclasses override any subset of these hooks; every default is a no-op,
    so overriding only the hooks you need is safe.
    """

    def begin(self, run_context: Dict[str, Any]) -> None:
        """Hook invoked once, just before training begins.

        Args:
            run_context (Dict[str, Any]): Training context information.
        """

    def step_end(self, run_context: Dict[str, Any]) -> None:
        """Hook invoked after every training step.

        Args:
            run_context (Dict[str, Any]): Training context information.
        """

    def epoch_end(self, run_context: Dict[str, Any]) -> None:
        """Hook invoked after every training epoch.

        Args:
            run_context (Dict[str, Any]): Training context information.
        """


class TrainMonitor(Callback):
    """Callback that logs training progress, runs evaluation, and manages checkpoints.

    Args:
        model (torch.nn.Module): Model being trained. NOTE(review): checkpointing
            reads ``model._network.state_dict()``, so ``model`` is assumed to be a
            wrapper exposing a ``_network`` sub-module — confirm against the trainer.
        optimizer (torch.optim.Optimizer): Optimizer whose state is checkpointed.
        save_dir (str): Directory for checkpoints; created if it does not exist.
        eval_dataloader: Optional dataloader for end-of-epoch evaluation. Each
            batch is unpacked into ``model(*batch)``, which is expected to return
            ``(loss, outputs, labels, atom_mask)``.
        writer: Optional TensorBoard-style writer (``add_scalar`` / ``close``).
        metrics (dict): Mapping of metric name -> metric object. Each metric is
            expected to provide ``__call__(loss, outputs, labels, atom_mask)``,
            ``eval()``, ``update_best()`` and ``clear()``.
        main_metric (str): Metric name that decides "best model" saves. When
            ``None``, the evaluation loss is used instead.
        checkpoint_interval (int): Save a periodic checkpoint every N epochs.
        max_checkpoints (int): Maximum number of periodic checkpoints retained.
        prefix (str): Filename prefix for best-metric checkpoints.
    """

    # Number of most recent epoch losses kept for the running average.
    _AVG_WINDOW = 5

    def __init__(self,
                 model: torch.nn.Module,
                 optimizer: torch.optim.Optimizer,
                 save_dir: str,
                 eval_dataloader=None,
                 writer=None,
                 metrics=None,
                 main_metric=None,
                 checkpoint_interval=10,
                 max_checkpoints=5,
                 prefix="best"):
        super().__init__()
        self.model = model
        self.optimizer = optimizer
        self.save_dir = save_dir
        self.eval_dataloader = eval_dataloader
        self.writer = writer
        self.metrics = metrics or {}
        self.main_metric = main_metric
        self.best_loss = float('inf')  # best (lowest) eval loss seen so far
        self.prefix = prefix

        # Periodic checkpoint settings.
        self.checkpoint_interval = checkpoint_interval
        self.max_checkpoints = max_checkpoints
        self.checkpoint_list = []  # periodic checkpoint paths, oldest first

        os.makedirs(save_dir, exist_ok=True)

        # Rolling window of the most recent epoch losses (see _AVG_WINDOW).
        self.avg_loss = []

    def begin(self, run_context):
        """Record the wall-clock start time and log it."""
        self.start_time = datetime.datetime.now()
        logging.info(f"Training starts - {self.start_time.strftime('%Y-%m-%d %H:%M:%S')}")

    def end(self, run_context):
        """Log total elapsed training time and close the summary writer."""
        end_time = datetime.datetime.now()
        # BUGFIX: the original used timedelta.seconds, which drops whole days
        # and under-reports any run longer than 24 hours; total_seconds() is
        # the full duration.
        elapsed = int((end_time - self.start_time).total_seconds())
        hours, rem = divmod(elapsed, 3600)
        minutes, seconds = divmod(rem, 60)

        logging.info(f"Training ends - {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
        logging.info(f"Total training time: {hours:02d}:{minutes:02d}:{seconds:02d}")

        if self.writer:
            self.writer.close()

    def step_end(self, run_context):
        """Log the per-step training loss to the writer, if one is attached."""
        step = run_context['step']
        loss = run_context['loss']

        if isinstance(loss, Tensor):
            loss = loss.item()

        if self.writer:
            self.writer.add_scalar('Loss/train_step', loss, step)

    def epoch_end(self, run_context):
        """Log epoch statistics, optionally evaluate, and manage checkpoints."""
        epoch = run_context['epoch']
        train_loss = run_context['epoch_loss']
        lr = run_context['lr']
        total_steps = run_context['step']

        # Maintain the rolling average over the last _AVG_WINDOW epoch losses.
        self.avg_loss.append(train_loss)
        if len(self.avg_loss) > self._AVG_WINDOW:
            self.avg_loss.pop(0)
        recent_avg_loss = sum(self.avg_loss) / len(self.avg_loss)

        info = f"Epoch {epoch} (Steps {total_steps}): lr={lr:.10f}, current_loss={train_loss:.6f}, avg_loss={recent_avg_loss:.6f}"

        if self.eval_dataloader is not None:
            info = self._run_evaluation(info, epoch, total_steps)

        # BUGFIX: periodic checkpointing was nested inside the evaluation
        # branch, so training without an eval dataloader never saved any
        # periodic checkpoint.
        if (epoch + 1) % self.checkpoint_interval == 0:
            self._save_periodic_checkpoint(epoch, total_steps)

        logging.info(info)
        self.model.train()

    def _run_evaluation(self, info, epoch, total_steps):
        """Evaluate on eval_dataloader, update metrics, and save best models.

        Returns the ``info`` log string extended with evaluation results.
        """
        self.model.eval()
        eval_loss = 0.0
        num_batches = 0

        with torch.no_grad():
            for batch in self.eval_dataloader:
                loss, outputs, labels, atom_mask = self.model(*batch)

                if isinstance(loss, Tensor):
                    eval_loss += loss.item()

                # Accumulate each metric's internal state; results are read
                # back later via metric.eval().
                for metric in self.metrics.values():
                    metric(loss, outputs, labels, atom_mask)
                num_batches += 1

        # BUGFIX: guard against an empty dataloader (ZeroDivisionError).
        eval_loss /= max(num_batches, 1)
        info += f", eval_loss={eval_loss:.6f}"

        # Record each metric and save the model when the main metric improves.
        saved_best = False
        for name, metric in self.metrics.items():
            value = metric.eval()
            info += f", {name}={value:.6f}"

            if self.writer:
                self.writer.add_scalar(f'Metrics/{name}', value, total_steps)

            if metric.update_best():
                info += f" (best {name})"
                if name == self.main_metric:
                    saved_best = True
                    checkpoint = {
                        'epoch': epoch,
                        'total_steps': total_steps,
                        'model_state_dict': self.model._network.state_dict(),
                        'optimizer_state_dict': self.optimizer.state_dict(),
                    }
                    torch.save(checkpoint, f"{self.save_dir}/{self.prefix}_{name}_best_model.pt")
            metric.clear()

        if saved_best:
            info += " (best model saved)"

        if self.writer:
            self.writer.add_scalar('Loss/eval', eval_loss, total_steps)

        # When no main metric is specified, fall back to eval loss as the
        # "best model" criterion.
        if not self.main_metric and eval_loss < self.best_loss:
            self.best_loss = eval_loss
            checkpoint = {
                'epoch': epoch,
                'total_steps': total_steps,
                'model_state_dict': self.model._network.state_dict(),
            }
            torch.save(checkpoint, f"{self.save_dir}/best_model.pt")
            info += " (best model saved)"

        return info

    def _save_periodic_checkpoint(self, epoch, total_steps):
        """Save a numbered checkpoint and prune the oldest beyond max_checkpoints."""
        checkpoint = {
            'epoch': epoch,
            'total_steps': total_steps,
            'model_state_dict': self.model._network.state_dict(),
            # Optimizer state is required to resume training; the original
            # periodic checkpoints omitted it (the best-metric path saved it).
            'optimizer_state_dict': self.optimizer.state_dict(),
        }

        checkpoint_path = f"{self.save_dir}/checkpoint_epoch_{epoch+1}.pt"
        torch.save(checkpoint, checkpoint_path)
        self.checkpoint_list.append(checkpoint_path)

        # Keep only the last max_checkpoints checkpoints on disk.
        if len(self.checkpoint_list) > self.max_checkpoints:
            oldest_checkpoint = self.checkpoint_list.pop(0)
            # Best-effort removal: the file may have been deleted externally.
            if os.path.exists(oldest_checkpoint):
                os.remove(oldest_checkpoint)