#!/usr/bin/env python

# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
from pathlib import Path
from typing import Any, Dict, Optional, Union

import torch
import wandb

from lerobot.common.utils.logging_utils import MetricsTracker


class Logger:
    """Lightweight logger for recording training metrics and saving checkpoints.

    Metrics are echoed through the standard ``logging`` module and, optionally,
    mirrored to Weights & Biases when a run name is provided at construction.
    """

    def __init__(self, cfg: Any, output_dir: Union[str, Path], wandb_run_name: Optional[str] = None):
        """Initialize the logger.

        Args:
            cfg: Configuration object (forwarded verbatim to ``wandb.init``).
            output_dir: Directory where logs and checkpoints are written.
            wandb_run_name: Name of the wandb run; if None, wandb is disabled.
        """
        self.cfg = cfg
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Checkpoints live in a dedicated sub-directory of the output dir.
        self.checkpoint_dir = self.output_dir / "checkpoints"
        self.checkpoint_dir.mkdir(parents=True, exist_ok=True)

        # wandb is enabled only when an explicit run name is supplied.
        self.use_wandb = wandb_run_name is not None
        if self.use_wandb:
            wandb.init(project="lerobot", name=wandb_run_name, config=cfg)

    def log_dict(self, metrics: Dict[str, Any], step: int, mode: str = "train") -> None:
        """Log a dictionary of metrics.

        Args:
            metrics: Mapping of metric name to value.
            step: Current training step.
            mode: Logging mode ("train" or "eval"); used as a metric prefix.
        """
        if self.use_wandb:
            # Namespace metrics by mode so train/eval curves stay separate in wandb.
            wandb_metrics = {f"{mode}/{k}": v for k, v in metrics.items()}
            wandb_metrics["step"] = step
            wandb.log(wandb_metrics)

        # Echo to the console/file log; floats are shortened for readability.
        metrics_str = ", ".join(
            f"{k}: {v:.4f}" if isinstance(v, float) else f"{k}: {v}" for k, v in metrics.items()
        )
        logging.info(f"[{mode.upper()}] Step {step}: {metrics_str}")

    def save_checkpoint(
        self,
        train_step: int,
        policy: Any,
        optimizer: Any,
        scheduler: Any = None,
        identifier: Optional[str] = None,
    ) -> None:
        """Save a training checkpoint (model, optimizer, scheduler and step).

        Args:
            train_step: Current training step, recorded alongside the weights.
            policy: Policy model; must implement ``save_pretrained``.
            optimizer: Optimizer whose state dict is saved to ``optimizer.pt``.
            scheduler: Optional LR scheduler whose state dict is saved when given.
            identifier: Checkpoint directory name; defaults to the zero-padded step.
        """
        # Zero-pad numeric names so lexicographic and numeric order agree.
        dir_name = f"{train_step:06d}" if identifier is None else identifier
        checkpoint_dir = self.checkpoint_dir / dir_name
        checkpoint_dir.mkdir(parents=True, exist_ok=True)

        # Model weights (HuggingFace-style "save_pretrained" directory).
        policy.save_pretrained(checkpoint_dir / "pretrained_model")

        # Optimizer state.
        torch.save(optimizer.state_dict(), checkpoint_dir / "optimizer.pt")

        # Scheduler state (optional).
        if scheduler is not None:
            torch.save(scheduler.state_dict(), checkpoint_dir / "scheduler.pt")

        # Record the training step so a resumed run can pick up where it left off.
        (checkpoint_dir / "step.txt").write_text(str(train_step))

        logging.info(f"Saved checkpoint to {checkpoint_dir}")

    @staticmethod
    def get_last_checkpoint_dir(output_dir: Union[str, Path]) -> Path:
        """Return the most recent checkpoint directory under ``output_dir``.

        Args:
            output_dir: Training output directory (parent of "checkpoints").

        Returns:
            The latest checkpoint directory. When no checkpoints exist, the
            (possibly non-existent) "checkpoints" directory itself is returned,
            so callers should check ``.exists()`` before using the result.
        """
        checkpoint_dir = Path(output_dir) / "checkpoints"
        if not checkpoint_dir.exists():
            return checkpoint_dir

        checkpoints = [d for d in checkpoint_dir.iterdir() if d.is_dir()]
        if not checkpoints:
            return checkpoint_dir

        # Prefer numeric ordering for step-named checkpoints; fall back to
        # modification time when any directory name is not an integer.
        try:
            return max(checkpoints, key=lambda d: int(d.name))
        except ValueError:
            return max(checkpoints, key=lambda d: d.stat().st_mtime)
