# Copyright 2021-2023 @ Shenzhen Bay Laboratory &
#                       Peking University &
#                       Huawei Technologies Co., Ltd
#
# This code is a part of Cybertron package.
#
# The Cybertron is open-source software based on the AI-framework:
# PyTorch (https://pytorch.org/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Module for model
"""
import torch
from .callback import TrainMonitor
from torch.utils.tensorboard import SummaryWriter
from torch import Tensor

class Model:
    """Epoch-based training driver.

    Wires together a dataset (exposing ``train_loader``/``eval_loader``),
    a training network, an evaluation network, an optimizer, an optional
    LR scheduler and a callback with ``begin``/``epoch_end``/``end`` hooks.
    """

    def __init__(self, 
                 dataset,
                 train_network,
                 eval_network,
                 optimizer,
                 scheduler=None,
                 callback=None,
                 save_dir="checkpoints",
                 prefix="best",
                 metrics=None,
                 main_metric=None,
                 writer_path=None):
        """
        Args:
            dataset: Dataset object exposing ``train_loader`` and
                ``eval_loader`` iterables of batches.
            train_network: Network used for training; invoked as
                ``train_network(*batch)`` and expected to return a loss.
            eval_network: Network used for evaluation / checkpointing.
            optimizer: Optimizer stepping ``train_network``'s parameters.
            scheduler: Optional learning-rate scheduler, stepped once per epoch.
            callback: Optional callback object with ``begin``, ``epoch_end``
                and ``end`` hooks; when ``None`` a default ``TrainMonitor``
                is constructed from the other arguments.
            save_dir: Directory where checkpoints are saved.
            prefix: File-name prefix for the best-model checkpoint.
            metrics: dict, evaluation metrics ``{name: metric_instance}``.
            main_metric: str, metric name used to select the best model.
            writer_path: Optional TensorBoard log directory; when given, a
                ``SummaryWriter`` is created and passed to the default callback.
        """
        self.dataset = dataset
        self.train_network = train_network
        self.eval_network = eval_network
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.save_dir = save_dir
        self.metrics = metrics or {}
        self.main_metric = main_metric
        # Global batch counter, accumulated across all epochs.
        self.total_steps = 0

        # Only create a TensorBoard writer when a log path is supplied.
        if writer_path is None:
            writer = None
        else:
            writer = SummaryWriter(writer_path)

        # Fall back to the default TrainMonitor when no callback is given.
        if callback is None:
            self.callback = TrainMonitor(
                model=self.eval_network,
                optimizer=self.optimizer,
                save_dir=self.save_dir,
                eval_dataloader=self.dataset.eval_loader,
                writer=writer,
                metrics=self.metrics,
                main_metric=self.main_metric,
                prefix=prefix
            )
        else:
            self.callback = callback

    def _train_epoch(self):
        """Run one full pass over ``dataset.train_loader``.

        Returns:
            float: Mean loss over all batches in the epoch, counting
            batches whose loss was not a ``Tensor`` as 0; returns 0.0
            when the loader yields no batches.
        """
        self.train_network.train()
        total_loss = 0
        num_batches = 0

        for batch in self.dataset.train_loader:
            self.optimizer.zero_grad()
            loss = self.train_network(*batch)

            # The network may return non-Tensor diagnostics; only
            # backpropagate when an actual loss tensor was produced.
            if isinstance(loss, Tensor):
                loss.backward()
                # Clip the gradient norm to stabilize training.
                torch.nn.utils.clip_grad_norm_(self.train_network.parameters(), max_norm=5.0)
                self.optimizer.step()
                total_loss += loss.item()

            num_batches += 1
            self.total_steps += 1

        # Guard against an empty loader: the original code raised
        # ZeroDivisionError when no batch was yielded.
        avg_loss = total_loss / num_batches if num_batches else 0.0
        # The scheduler is stepped once per epoch, not per batch.
        if self.scheduler is not None:
            self.scheduler.step()

        return avg_loss

    def train(self, num_epochs=100):
        """Train the model for ``num_epochs`` epochs.

        Invokes ``callback.begin`` once before training, ``callback.epoch_end``
        after every epoch with a context dict, and ``callback.end`` after the
        final epoch (with the last epoch's context, or ``None`` when
        ``num_epochs`` is 0).

        Args:
            num_epochs: Number of epochs to run.
        """
        run_context = None
        self.callback.begin(run_context)

        for epoch in range(num_epochs):
            epoch_loss = self._train_epoch()

            # State snapshot handed to the callback after each epoch.
            run_context = {
                'epoch': epoch,
                'step': self.total_steps,
                'epoch_loss': epoch_loss,
                'lr': self.optimizer.param_groups[0]['lr']
            }

            self.callback.epoch_end(run_context)

        self.callback.end(run_context)