"""
FSDP PPO Trainer with Ray-based single controller.
Adapted from the excellently written verl implementation.
"""

import json
import os
from re import T
import uuid
from collections import defaultdict
from contextlib import contextmanager
from copy import deepcopy
from dataclasses import dataclass, field
from enum import Enum
from pprint import pprint
from typing import Dict, Optional, Type

import numpy as np
import ray
import torch
from omegaconf import OmegaConf, open_dict
from torch.utils.data import Dataset, Sampler
from torchdata.stateful_dataloader import StatefulDataLoader
from tqdm import tqdm

from verl import DataProto
from verl.protocol import pad_dataproto_to_divisor, unpad_dataproto
from verl.single_controller.base import Worker
from verl.single_controller.ray import (
    RayClassWithInitArgs,
    RayResourcePool,
    RayWorkerGroup,
)
from verl.single_controller.ray.base import create_colocated_worker_cls
from verl.trainer.ppo import core_algos
from verl.trainer.ppo.metric_utils import (
    compute_data_metrics,
    compute_throughout_metrics,
    compute_timing_metrics,
    process_validation_metrics,
    reduce_metrics,
)
from verl.trainer.ppo.reward import compute_reward, compute_reward_async
from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path
from verl.utils.seqlen_balancing import (
    get_seqlen_balanced_partitions,
    log_seqlen_unbalance,
)
from verl.utils.torch_functional import masked_mean
from verl.utils.tracking import ValidationGenerationsLogger

WorkerType = Type[Worker]

from verl.utils.debug import marked_timer
from verl.trainer.ppo.ray_trainer import (
    Role,
    ResourcePoolManager,
    compute_response_mask,
    apply_kl_penalty,
    AdvantageEstimator,
)
from verl.trainer.ppo.ray_trainer import RayPPOTrainer as VerlRayPPOTrainer

import torch
from verl.utils.torch_functional import masked_mean

from ragen.llm_agent.agent_proxy import ApiCallingWrapperWg, LLMAgentProxy
from ragen.utils import GenerationsLogger


def compute_advantage(
    data: DataProto,
    adv_estimator,
    gamma=1.0,
    lam=1.0,
    num_repeat=1,
    multi_turn=False,
    norm_adv_by_std_in_grpo=True,
    bi_level_gae=False,
    high_level_gamma=1.0,
):
    """Compute advantages and returns in-place on ``data`` for the chosen estimator.

    Args:
        data: batch carrying ``token_level_rewards`` plus the estimator-specific
            fields (``values`` for GAE, ``uid`` for group-based estimators,
            ``reward_baselines`` for REMAX, ``loss_mask`` for multi-turn GRPO).
        adv_estimator: an ``AdvantageEstimator`` member selecting the algorithm.
        gamma: discount factor (used by GAE and REINFORCE++).
        lam: GAE lambda.
        num_repeat: unused here; kept for call-site compatibility.
        multi_turn: for GRPO, use the response-length tail of ``loss_mask`` so
            only assistant tokens contribute to the group statistics.
        norm_adv_by_std_in_grpo: whether GRPO normalizes by in-group std.
        bi_level_gae: accepted for interface compatibility; currently unused.
        high_level_gamma: accepted for interface compatibility; currently unused.

    Returns:
        The same ``data`` object with ``advantages`` and ``returns`` populated.

    Raises:
        NotImplementedError: if ``adv_estimator`` is not supported here.
    """
    # Back-compatible with trainers that do not compute response mask in fit.
    if "response_mask" not in data.batch:
        data.batch["response_mask"] = compute_response_mask(data)

    if adv_estimator == AdvantageEstimator.GAE:
        advantages, returns = core_algos.compute_gae_advantage_return(
            token_level_rewards=data.batch["token_level_rewards"],
            values=data.batch["values"],
            response_mask=data.batch["response_mask"],
            gamma=gamma,
            lam=lam,
        )
    elif adv_estimator == AdvantageEstimator.GRPO:
        grpo_calculation_mask = data.batch["response_mask"]
        if multi_turn:
            # Multi-turn rollouts interleave env/user tokens in the response;
            # the tail of loss_mask marks the tokens GRPO should score.
            response_length = grpo_calculation_mask.size(1)
            grpo_calculation_mask = data.batch["loss_mask"][:, -response_length:]
        advantages, returns = core_algos.compute_grpo_outcome_advantage(
            token_level_rewards=data.batch["token_level_rewards"],
            response_mask=grpo_calculation_mask,
            index=data.non_tensor_batch["uid"],
            norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo,
        )
    elif adv_estimator == AdvantageEstimator.REINFORCE_PLUS_PLUS_BASELINE:
        advantages, returns = (
            core_algos.compute_reinforce_plus_plus_baseline_outcome_advantage(
                token_level_rewards=data.batch["token_level_rewards"],
                response_mask=data.batch["response_mask"],
                index=data.non_tensor_batch["uid"],
            )
        )
    elif adv_estimator == AdvantageEstimator.REINFORCE_PLUS_PLUS:
        advantages, returns = core_algos.compute_reinforce_plus_plus_outcome_advantage(
            token_level_rewards=data.batch["token_level_rewards"],
            response_mask=data.batch["response_mask"],
            gamma=gamma,
        )
    elif adv_estimator == AdvantageEstimator.REMAX:
        advantages, returns = core_algos.compute_remax_outcome_advantage(
            token_level_rewards=data.batch["token_level_rewards"],
            reward_baselines=data.batch["reward_baselines"],
            response_mask=data.batch["response_mask"],
        )
    elif adv_estimator == AdvantageEstimator.RLOO:
        advantages, returns = core_algos.compute_rloo_outcome_advantage(
            token_level_rewards=data.batch["token_level_rewards"],
            response_mask=data.batch["response_mask"],
            index=data.non_tensor_batch["uid"],
        )
    else:
        # Name the offending estimator instead of a bare NotImplementedError.
        raise NotImplementedError(f"advantage estimator not supported: {adv_estimator}")

    # Single write-back shared by all branches (was duplicated per branch).
    data.batch["advantages"] = advantages
    data.batch["returns"] = returns
    return data


class RayAgentTrainer(VerlRayPPOTrainer):
    """
    Note that this trainer runs on the driver process on a single CPU/GPU node.
    """

    # TODO: support each role have individual ray_worker_group_cls,
    # i.e., support different backend of different role
    def __init__(
        self,
        config,
        tokenizer,
        role_worker_mapping: dict[Role, WorkerType],
        resource_pool_manager: ResourcePoolManager,
        ray_worker_group_cls: RayWorkerGroup = RayWorkerGroup,
        processor=None,
        reward_fn=None,
        val_reward_fn=None,
        data_collector=None,
    ):
        """Extend the verl PPO trainer with a generations logger and an
        optional data collector for persisting rollouts."""
        super().__init__(
            config,
            tokenizer,
            role_worker_mapping,
            resource_pool_manager,
            ray_worker_group_cls,
            processor,
            reward_fn,
            val_reward_fn,
        )
        # When LoRA is active the reference policy lives inside the actor.
        lora_rank = config.actor_rollout_ref.model.get("lora_rank", 0)
        self.ref_in_actor = lora_rank > 0
        # Use our own generations logger instead of the parent's val logger.
        self.generations_logger = GenerationsLogger()
        # Collector used to persist training/validation data to disk (may be None).
        self.data_collector = data_collector

    def compute_control_parameter(self, current_step, total_steps):
        """Compute the control parameter u balancing process vs. outcome rewards.

        Schedule:
            - First 2/3 of training: u decays linearly from 1.0 to 0.1
              (favor process rewards first, gradually shift to outcome rewards).
            - Last 1/3 of training: u stays fixed at 0.1
              (mostly outcome rewards).

        Args:
            current_step: current training step.
            total_steps: total number of training steps.

        Returns:
            u, clamped to [0.0, 1.0], used to mix process and outcome rewards.
        """
        # Step at which the linear decay ends (2/3 of the way through training).
        transition_point = total_steps * 2 / 3

        if transition_point > 0 and current_step <= transition_point:
            # Decay phase: u goes linearly from 1.0 down to 0.1.
            progress = current_step / transition_point
            u = 1.0 - 0.9 * progress
        else:
            # Plateau phase. The transition_point > 0 guard also covers
            # total_steps == 0, which previously divided by zero.
            u = 0.1

        # Clamp as a safety net against float drift.
        return max(0.0, min(1.0, u))

    def _save_batch_data(self, batch: DataProto, data_type: str, timestamp_str: str, metrics: Optional[dict] = None):
        """
        Persist a training or validation batch through the configured data collector.

        Parameters:
            batch: DataProto holding the batch tensors and non-tensor fields.
            data_type: "train" or "val"; also gates saving via the
                `data_saving` config section.
            timestamp_str: timestamp string used to organize the save path.
            metrics: optional metric dict stored alongside the data.

        Failures are logged and swallowed: saving must never abort training.
        """
        # No-op when no collector is attached or collection is disabled.
        if self.data_collector is None or not self.data_collector.is_enabled():
            return
        
        # Honor per-type opt-outs from the optional `data_saving` config section.
        data_config = getattr(self.config, 'data_saving', None)
        if data_config:
            if data_type == "train" and not data_config.get('save_train', True):
                return
            if data_type == "val" and not data_config.get('save_val', True):
                return
        
        try:
            # Extract messages_list (the per-sample conversation histories).
            messages_list = batch.non_tensor_batch.get("messages_list", [])
            if messages_list is None or len(messages_list) == 0:
                print(f"⚠️  [{data_type}] messages_list为空，跳过保存")
                return
            
            # Environment info and per-sample metrics, if the rollout provided them.
            env_infos = batch.non_tensor_batch.get("env_infos", None)
            sample_metrics = batch.non_tensor_batch.get("sample_metrics", None)
            
            # Compute each sample's mean reward over the valid token positions.
            sample_rewards = None
            if "rm_scores" in batch.batch or "token_level_scores" in batch.batch:
                rm_scores = batch.batch.get("rm_scores", batch.batch.get("token_level_scores", None))
                if rm_scores is not None and hasattr(rm_scores, 'shape'):
                    # Prefer loss_mask, fall back to response_mask, to locate valid positions.
                    loss_mask = batch.batch.get("loss_mask", batch.batch.get("response_mask", None))
                    if loss_mask is not None:
                        # Zero out positions outside the mask.
                        masked_scores = rm_scores * loss_mask
                        # Mean over mask==1 positions only; clamp avoids division by zero.
                        sample_rewards = (masked_scores.sum(dim=-1) / loss_mask.sum(dim=-1).clamp(min=1)).cpu().numpy()
                    else:
                        # No mask available: average over all positions.
                        sample_rewards = rm_scores.mean(dim=-1).cpu().numpy()
            
            # Clear previously collected data so each save is independent.
            self.data_collector.collected_data.clear()
            self.data_collector.collect_from_messages_list(
                messages_list, 
                env_infos=env_infos, 
                sample_metrics=sample_metrics, 
                sample_rewards=sample_rewards
            )
            
            # Assemble metadata for the saved artifact.
            model_path = self.config.actor_rollout_ref.model.path
            sample_count = len(messages_list)
            
            # Snapshot of the model/actor configuration for traceability.
            model_info = {
                "model_path": str(model_path),
                "model_type": self.config.actor_rollout_ref.model.get("type", "unknown"),
                "strategy": self.config.actor_rollout_ref.actor.strategy,
                "lora_rank": self.config.actor_rollout_ref.model.get("lora_rank", 0),
                "ppo_mini_batch_size": self.config.actor_rollout_ref.actor.ppo_mini_batch_size,
            }
            
            # Add summary reward statistics when sample rewards are available.
            metrics_with_stats = metrics.copy() if metrics else {}
            if sample_rewards is not None and len(sample_rewards) > 0:
                metrics_with_stats.update({
                    "avg_reward": float(np.mean(sample_rewards)),
                    "max_reward": float(np.max(sample_rewards)),
                    "min_reward": float(np.min(sample_rewards)),
                    "std_reward": float(np.std(sample_rewards)),
                })
            
            metadata = {
                "sample_count": sample_count,
                "data_type": data_type,
                "global_step": self.global_steps,
                "model_path": str(model_path),
                "metrics": metrics_with_stats,
                "model_info": model_info,
            }
            
            # Persist through the collector.
            self.data_collector.save_training_data(
                metadata=metadata,
                date_time_dir=timestamp_str,
                model_name=f"step_{self.global_steps}_{data_type}",
            )
            print(f"✅ [{data_type.upper()}] 第 {self.global_steps} 步的数据已保存 (样本数: {sample_count})")
            
        except Exception as e:
            # Best-effort: log the failure with a traceback and keep training.
            print(f"⚠️  [{data_type.upper()}] 第 {self.global_steps} 步保存数据失败: {e}")
            import traceback
            traceback.print_exc()

    def _create_dataloader(self, train_dataset, val_dataset, collate_fn, train_sampler):
        assert (
            self.config.trainer.total_training_steps is not None
        ), "must determine total training steps"
        total_training_steps = self.config.trainer.total_training_steps

        self.total_training_steps = total_training_steps
        print(f"Total training steps: {self.total_training_steps}")

        try:
            OmegaConf.set_struct(self.config, True)
            with open_dict(self.config):
                if OmegaConf.select(self.config, "actor_rollout_ref.actor.optim"):
                    self.config.actor_rollout_ref.actor.optim.total_training_steps = (
                        total_training_steps
                    )
                if OmegaConf.select(self.config, "critic.optim"):
                    self.config.critic.optim.total_training_steps = total_training_steps
        except Exception as e:
            print(
                f"Warning: Could not set total_training_steps in config. Structure missing? Error: {e}"
            )
        # val_start = 100000
        # self.train_seeds = [seed for seed in range(0, self.config.trainer.total_training_steps * 1000, 1000)]
        # self.val_seeds = [seed for seed in range(val_start, val_start + self.config.trainer.validation_steps)]

    def init_agent_proxy(self):
        """Create the rollout agent proxy and, depending on the CHORD mode,
        an expert-data proxy (local file replay or API calls).

        Sets `self.agent_proxy` always, and `self.api_agent_proxy` to a proxy
        in `sft_only`/`hybrid` mode or to None in `rl_only` mode.
        """
        self.agent_proxy = LLMAgentProxy(
            config=self.config,
            actor_rollout_wg=self.actor_rollout_wg,
            tokenizer=self.tokenizer,
        )
        
        # Read the CHORD framework configuration.
        chord_config = self.config.chord
        chord_mode = chord_config.get('mode', 'hybrid')  # defaults to hybrid mode
        local_mode = chord_config.get('local_mode', False)
        expert_dataset_path = chord_config.get('expert_dataset_path', None)
        
        # An expert-data proxy is only needed in sft_only or hybrid mode.
        if chord_mode in ['sft_only', 'hybrid']:
            if local_mode:
                # Replay expert trajectories from a local dataset file.
                print(f"🎓 启用本地模式，数据集路径: {expert_dataset_path}")
                from ragen.llm_agent.local_file_wrapper import LocalFileWrapperWg
                actor_wg = LocalFileWrapperWg(
                    self.config,
                    self.tokenizer, 
                    expert_dataset_path
                )
            else:
                # Fetch expert data by calling an external API.
                print("📞 使用API调用模式获取专家数据")
                actor_wg = ApiCallingWrapperWg(self.config, self.tokenizer)
            
            self.api_agent_proxy = LLMAgentProxy(
                config=self.config,
                actor_rollout_wg=actor_wg,
                tokenizer=self.tokenizer,
            )
        else:
            # rl_only mode: no expert-data proxy required.
            print("🚀 仅RL模式，无需专家数据代理")
            self.api_agent_proxy = None

    def _maybe_log_generations(self, inputs, outputs, scores, env_infos, _type="val"):
        """Log a table of validation samples to the configured logger (wandb or swanlab)"""

        generations_to_log = self.config.trainer.generations_to_log_to_wandb[_type]

        if generations_to_log == 0:
            return

        import numpy as np

        # Create tuples of (input, output, score) and sort by input text
        samples = list(zip(inputs, outputs, scores, env_infos))
        samples.sort(key=lambda x: x[0])  # Sort by input text

        # Use fixed random seed for deterministic shuffling
        rng = np.random.RandomState(42)
        rng.shuffle(samples)

        # Take first N samples after shuffling
        samples = samples[:generations_to_log]

        # Log to each configured logger
        self.generations_logger.log(
            self.config.trainer.logger, samples, self.global_steps, _type
        )

    def _validate(self):
        """Run validation rollouts, aggregate reward and environment metrics,
        optionally persist generations, and return a flat metric dict.

        Returns:
            dict mapping metric names (prefixed val-core/, val-aux/, val-env/)
            to scalar values.
        """
        from datetime import datetime
        
        data_source_lst = []
        reward_extra_infos_dict: dict[str, list] = defaultdict(list)

        # Lists to collect samples for the table
        sample_inputs = []
        sample_outputs = []
        sample_scores = []

        env_metric_dict = {}
        
        # Single timestamp to group all saves from this validation pass.
        val_timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        
        for step in range(self.config.trainer.validation_steps):
            # Store original inputs (prompts are produced by the env rollout,
            # so inputs are placeholders here — one per env instance).
            input_texts = [
                ""
                for _ in range(
                    self.config.es_manager.val.env_groups
                    * self.config.es_manager.val.group_size
                )
            ]
            sample_inputs.extend(input_texts)

            meta_info = {
                "eos_token_id": self.tokenizer.eos_token_id,
                "pad_token_id": self.tokenizer.pad_token_id,
                "recompute_log_prob": False,
                "do_sample": self.config.actor_rollout_ref.rollout.val_kwargs.do_sample,
                "validate": True,
            }
            test_gen_batch = DataProto(
                batch=None, non_tensor_batch=None, meta_info=meta_info
            )
            print(f"test_gen_batch meta info: {test_gen_batch.meta_info}")

            # pad to be divisible by dp_size
            import time

            start_time = time.time()
            test_batch = self.agent_proxy.rollout(test_gen_batch, val=True)
            end_time = time.time()
            print(f"validation generation time: {end_time - start_time} seconds")
            # Accumulate env metrics under a val-env/ prefix, one list per key.
            for key, value in test_batch.meta_info["metrics"].items():
                if "val-env/" + key not in env_metric_dict:
                    env_metric_dict["val-env/" + key] = []
                env_metric_dict["val-env/" + key].append(value)

            # Store generated outputs
            output_ids = test_batch.batch["responses"]
            output_texts = [
                self.tokenizer.decode(ids, skip_special_tokens=True)
                for ids in output_ids
            ]
            sample_outputs.extend(output_texts)

            # evaluate using reward_function
            result = self.val_reward_fn(test_batch, return_dict=True)
            reward_tensor = result["reward_tensor"]
            scores = reward_tensor.sum(-1).cpu().tolist()
            sample_scores.extend(scores)

            reward_extra_infos_dict["reward"].extend(scores)
            if "reward_extra_info" in result:
                for key, lst in result["reward_extra_info"].items():
                    reward_extra_infos_dict[key].extend(lst)

            data_source_lst.append(
                test_batch.non_tensor_batch.get(
                    "data_source", ["unknown"] * reward_tensor.shape[0]
                )
            )
            
            # Save validation data.
            # Attach reward_tensor to the batch so _save_batch_data can extract rewards.
            test_batch.batch["rm_scores"] = reward_tensor
            
            # Collect the full environment metrics for the current batch.
            current_batch_metrics = {"val_step": step}
            # Copy every metric from test_batch.meta_info["metrics"].
            if "metrics" in test_batch.meta_info:
                for key, value in test_batch.meta_info["metrics"].items():
                    # Prefix with val-env/ to stay consistent with the logged names.
                    metric_key = f"val-env/{key}"
                    current_batch_metrics[metric_key] = value
            
            self._save_batch_data(
                batch=test_batch,
                data_type="val",
                timestamp_str=val_timestamp,
                metrics=current_batch_metrics  # now includes the full env metrics
            )
        # NOTE(review): uses the last iteration's test_batch; this raises
        # NameError if validation_steps == 0 — confirm it is always >= 1.
        env_infos = test_batch.non_tensor_batch["env_infos"].tolist()

        self._maybe_log_generations(
            inputs=sample_inputs,
            outputs=sample_outputs,
            scores=sample_scores,
            env_infos=env_infos,
            _type="val",
        )

        # dump generations
        val_data_dir = self.config.trainer.get("validation_data_dir", None)
        if val_data_dir:
            self._dump_generations(
                inputs=sample_inputs,
                outputs=sample_outputs,
                scores=sample_scores,
                reward_extra_infos_dict=reward_extra_infos_dict,
                dump_path=val_data_dir,
            )

        # Every extra-info list must be empty or aligned with the sample count.
        for key_info, lst in reward_extra_infos_dict.items():
            assert len(lst) == 0 or len(lst) == len(
                sample_scores
            ), f"{key_info}: {len(lst)=}, {len(sample_scores)=}"

        data_sources = np.concatenate(data_source_lst, axis=0)

        data_src2var2metric2val = process_validation_metrics(
            data_sources, sample_inputs, reward_extra_infos_dict
        )
        metric_dict = reduce_metrics(env_metric_dict)

        # Route each metric into val-core (the headline metric at the largest
        # @n) or val-aux (everything else).
        for data_source, var2metric2val in data_src2var2metric2val.items():
            core_var = "acc" if "acc" in var2metric2val else "reward"
            for var_name, metric2val in var2metric2val.items():
                # Largest @n suffix present among metric names, e.g. mean@8 -> 8.
                n_max = max(
                    [
                        int(name.split("@")[-1].split("/")[0])
                        for name in metric2val.keys()
                    ]
                )
                for metric_name, metric_val in metric2val.items():
                    if (
                        (var_name == core_var)
                        and any(
                            metric_name.startswith(pfx)
                            for pfx in ["mean", "maj", "best"]
                        )
                        and (f"@{n_max}" in metric_name)
                    ):
                        metric_sec = "val-core"
                    else:
                        metric_sec = "val-aux"
                    pfx = f"{metric_sec}/{data_source}/{var_name}/{metric_name}"
                    metric_dict[pfx] = metric_val

        return metric_dict

    def init_workers(self):
        """Init resource pool and worker group.

        Creates Ray resource pools, registers the role worker classes
        (actor_rollout, critic, ref, rm) on their pools, spawns colocated
        worker groups, and initializes each model. The actor/rollout worker
        is initialized last so vLLM can better estimate free KV-cache memory.
        """
        self.resource_pool_manager.create_resource_pool()

        # Map each resource pool to the worker classes that will share it.
        self.resource_pool_to_cls = {
            pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()
        }

        # create actor and rollout
        if self.hybrid_engine:
            resource_pool = self.resource_pool_manager.get_resource_pool(
                Role.ActorRollout
            )
            actor_rollout_cls = RayClassWithInitArgs(
                cls=self.role_worker_mapping[Role.ActorRollout],
                config=self.config.actor_rollout_ref,
                role="actor_rollout",
            )
            self.resource_pool_to_cls[resource_pool][
                "actor_rollout"
            ] = actor_rollout_cls
        else:
            # Only the hybrid (colocated actor+rollout) engine is supported.
            raise NotImplementedError

        # create critic
        if self.use_critic:
            resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic)
            critic_cls = RayClassWithInitArgs(
                cls=self.role_worker_mapping[Role.Critic], config=self.config.critic
            )
            self.resource_pool_to_cls[resource_pool]["critic"] = critic_cls

        # create reference policy if needed (skipped when LoRA keeps it in the actor)
        if self.use_reference_policy and not self.ref_in_actor:
            resource_pool = self.resource_pool_manager.get_resource_pool(Role.RefPolicy)
            ref_policy_cls = RayClassWithInitArgs(
                self.role_worker_mapping[Role.RefPolicy],
                config=self.config.actor_rollout_ref,
                role="ref",
            )
            self.resource_pool_to_cls[resource_pool]["ref"] = ref_policy_cls

        # create a reward model if reward_fn is None
        if self.use_rm:
            # we create a RM here
            resource_pool = self.resource_pool_manager.get_resource_pool(
                Role.RewardModel
            )
            rm_cls = RayClassWithInitArgs(
                self.role_worker_mapping[Role.RewardModel],
                config=self.config.reward_model,
            )
            self.resource_pool_to_cls[resource_pool]["rm"] = rm_cls

        # initialize WorkerGroup
        # NOTE: if you want to use a different resource pool for each role, which can support different parallel size,
        # you should not use `create_colocated_worker_cls`.
        # Instead, directly pass different resource pool to different worker groups.
        # See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information.
        all_wg = {}
        self.wg_dicts = []
        wg_kwargs = {}  # Setting up kwargs for RayWorkerGroup
        if (
            OmegaConf.select(self.config.trainer, "ray_wait_register_center_timeout")
            is not None
        ):
            wg_kwargs["ray_wait_register_center_timeout"] = (
                self.config.trainer.ray_wait_register_center_timeout
            )

        for resource_pool, class_dict in self.resource_pool_to_cls.items():
            worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
            wg_dict = self.ray_worker_group_cls(
                resource_pool=resource_pool,
                ray_cls_with_init=worker_dict_cls,
                **wg_kwargs,
            )
            spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
            all_wg.update(spawn_wg)
            # keep the referece of WorkerDict to support ray >= 2.31. Ref: https://github.com/ray-project/ray/pull/45699
            self.wg_dicts.append(wg_dict)

        if self.use_critic:
            self.critic_wg = all_wg["critic"]
            self.critic_wg.init_model()

        if self.use_reference_policy and not self.ref_in_actor:
            self.ref_policy_wg = all_wg["ref"]
            self.ref_policy_wg.init_model()

        if self.use_rm:
            self.rm_wg = all_wg["rm"]
            self.rm_wg.init_model()

        # we should create rollout at the end so that vllm can have a better estimation of kv cache memory
        self.actor_rollout_wg = all_wg["actor_rollout"]
        self.actor_rollout_wg.init_model()

        # create async rollout manager and request scheduler
        # (async rollout is not used by this trainer)
        self.async_rollout_mode = False

    def _save_checkpoint(self):
        """
        Different from VerlRayPPOTrainer, we have no dataloader so we won"t save it. Other logic is the same.
        """
        # path: given_path + `/global_step_{global_steps}` + `/actor`
        local_global_step_folder = os.path.join(
            self.config.trainer.default_local_dir, f"global_step_{self.global_steps}"
        )

        print(f"local_global_step_folder: {local_global_step_folder}")
        actor_local_path = os.path.join(local_global_step_folder, "actor")

        actor_remote_path = (
            None
            if self.config.trainer.default_hdfs_dir is None
            else os.path.join(
                self.config.trainer.default_hdfs_dir,
                f"global_step_{self.global_steps}",
                "actor",
            )
        )

        remove_previous_ckpt_in_save = self.config.trainer.get(
            "remove_previous_ckpt_in_save", False
        )
        if remove_previous_ckpt_in_save:
            print(
                "Warning: remove_previous_ckpt_in_save is deprecated,"
                + " set max_actor_ckpt_to_keep=1 and max_critic_ckpt_to_keep=1 instead"
            )
        max_actor_ckpt_to_keep = (
            self.config.trainer.get("max_actor_ckpt_to_keep", None)
            if not remove_previous_ckpt_in_save
            else 1
        )
        max_critic_ckpt_to_keep = (
            self.config.trainer.get("max_critic_ckpt_to_keep", None)
            if not remove_previous_ckpt_in_save
            else 1
        )

        self.actor_rollout_wg.save_checkpoint(
            actor_local_path,
            actor_remote_path,
            self.global_steps,
            max_ckpt_to_keep=max_actor_ckpt_to_keep,
        )

        if self.use_critic:
            critic_local_path = os.path.join(local_global_step_folder, "critic")
            critic_remote_path = (
                None
                if self.config.trainer.default_hdfs_dir is None
                else os.path.join(
                    self.config.trainer.default_hdfs_dir,
                    f"global_step_{self.global_steps}",
                    "critic",
                )
            )
            self.critic_wg.save_checkpoint(
                critic_local_path,
                critic_remote_path,
                self.global_steps,
                max_ckpt_to_keep=max_critic_ckpt_to_keep,
            )

        # latest checkpointed iteration tracker (for atomic usage)
        local_latest_checkpointed_iteration = os.path.join(
            self.config.trainer.default_local_dir, "latest_checkpointed_iteration.txt"
        )
        with open(local_latest_checkpointed_iteration, "w") as f:
            f.write(str(self.global_steps))

    def fit(self):
        """
        The training loop of PPO.
        The driver process only need to call the compute functions of the worker group through RPC
         to construct the PPO dataflow.
        The light-weight advantage computation is done on the driver process.
        """

        from omegaconf import OmegaConf

        from verl.utils.tracking import Tracking

        logger = Tracking(
            project_name=self.config.trainer.project_name,
            experiment_name=self.config.trainer.experiment_name,
            default_backend=self.config.trainer.logger,
            config=OmegaConf.to_container(self.config, resolve=True),
        )

        self.global_steps = 0

        # load checkpoint before doing anything
        self._load_checkpoint()

        # perform validation before training
        # currently, we only support validation using the reward_function.
        if self.val_reward_fn is not None and self.config.trainer.get(
            "val_before_train", True
        ):
            val_metrics = self._validate()
            pprint(f"Initial validation metrics: {val_metrics}")
            logger.log(data=val_metrics, step=self.global_steps)
            if self.config.trainer.get("val_only", False):
                return

        # add tqdm
        progress_bar = tqdm(
            total=self.total_training_steps,
            initial=self.global_steps,
            desc="Training Progress",
        )

        # we start from step 1
        self.global_steps += 1
        last_val_metrics = None
        rollout_only_mode = self.config.trainer.get("rollout_only", False)

        def _process_batch_for_logging(batch):
            inputs = batch.batch["input_ids"]
            inputs = [
                self.tokenizer.decode(input_ids, skip_special_tokens=True)
                for input_ids in inputs
            ]
            outputs = [""] * len(inputs)
            scores = batch.batch["rm_scores"].sum(-1).cpu().tolist()
            return inputs, outputs, scores

        def _filter_rollout(batch):
            """filter rollout based on in-group max - in-group mean. We want those groups to have high-quality rollouts that deviates significantly from the mean"""
            num_groups, group_size = (
                self.config.es_manager.train.env_groups,
                self.config.es_manager.train.group_size,
            )

            rm_scores = (
                batch.batch["original_rm_scores"]
                .sum(dim=-1)
                .view(num_groups, group_size)
            )
            in_group_std = rm_scores.std(dim=-1)
            in_group_max = rm_scores.max(dim=-1).values
            in_group_mean = rm_scores.mean(dim=-1)
            return batch, {
                "rollout/in_group_std": in_group_std.mean(),
                "rollout/in_group_max": in_group_max.mean(),
                "rollout/in_group_mean": in_group_mean.mean(),
                "rollout/chosen_in_group_std": in_group_std.mean(),
                "rollout/chosen_in_group_max": in_group_max.mean(),
                "rollout/chosen_in_group_mean": in_group_mean.mean(),
            }

        import time
        from datetime import datetime

        self.start_time = time.time()
        # Create a single timestamp shared by all save operations of this training session
        session_timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        
        for step in range(self.total_training_steps):
            # metrics = {}
            timing_raw = {}

            batch: DataProto = DataProto()
            is_last_step = self.global_steps >= self.total_training_steps
            
            # Fetch the CHORD configuration
            chord_config = self.config.chord
            chord_mode = chord_config.get('mode', 'hybrid')
            
            # Compute the control parameter u that balances process and outcome rewards
            control_parameter_u = self.compute_control_parameter(
                current_step=self.global_steps,
                total_steps=self.total_training_steps
            )
            
            # Record the current step, chord_mode, and control parameter u in the batch meta_info
            batch.meta_info["current_step"] = self.global_steps
            batch.meta_info["chord_mode"] = chord_mode
            batch.meta_info["control_parameter_u"] = control_parameter_u
            
            print(f"步骤 {self.global_steps}/{self.total_training_steps}: 控制参数u = {control_parameter_u:.4f}")
            print(f"  - 过程奖励权重: {control_parameter_u:.4f}")
            print(f"  - 结果奖励权重: {1.0 - control_parameter_u:.4f}")

            with marked_timer("step", timing_raw):
                # generate a batch
                with marked_timer("gen", timing_raw):
                    # Choose the rollout strategy according to chord_mode
                    api_batch = None
                    if chord_mode in ['sft_only', 'hybrid'] and self.api_agent_proxy:
                        api_batch = self.api_agent_proxy.rollout(batch, val=False)
                    
                    # In sft_only mode a base batch is still generated to keep the data structure consistent
                    if chord_mode in ['rl_only', 'hybrid']:
                        batch = self.agent_proxy.rollout(batch, val=False)
                        batch, metrics = _filter_rollout(batch)
                        metrics.update(
                            {
                                "train/" + key: value
                                for key, value in batch.meta_info["metrics"].items()
                            }
                        )
                    elif chord_mode == 'sft_only':
                        # In sft_only mode, build a simplified batch structure;
                        # basic tensor fields are still needed for downstream processing
                        if api_batch is not None:
                            # Copy the base structure from api_batch into batch
                            batch = DataProto(
                                batch=api_batch.batch.copy() if hasattr(api_batch.batch, 'copy') else api_batch.batch,
                                non_tensor_batch=api_batch.non_tensor_batch,
                                meta_info=api_batch.meta_info.copy() if hasattr(api_batch.meta_info, 'copy') else dict(api_batch.meta_info)
                            )
                            # Make sure the required meta_info fields are present
                            batch.meta_info["temperature"] = batch.meta_info.get("temperature", self.config.actor_rollout_ref.rollout.temperature)
                            batch.meta_info["micro_batch_size"] = batch.meta_info.get("micro_batch_size", self.config.actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu)
                            batch.meta_info["use_dynamic_bsz"] = batch.meta_info.get("use_dynamic_bsz", self.config.actor_rollout_ref.actor.get("use_dynamic_bsz", False))
                            batch.meta_info["max_token_len"] = batch.meta_info.get("max_token_len", self.config.actor_rollout_ref.actor.get("ppo_max_token_len_per_gpu", 4096))
                            # Drop non-essential fields, keeping only the required ones
                            metrics = {"train/mode": "sft_only"}
                        else:
                            raise ValueError("sft_only模式下必须有专家数据")

                    # Attach chord_mode to the batch meta_info (ensure every mode carries this field)
                    batch.meta_info["chord_mode"] = chord_mode
                    
                    # Ensure every mode has the required meta_info fields
                    if "temperature" not in batch.meta_info:
                        batch.meta_info["temperature"] = self.config.actor_rollout_ref.rollout.temperature
                    
                    # Do not filter api_batch, so it stays aligned one-to-one with batch.
                    # Note: api_batch (expert data) and batch (RL data) have the same size
                    # but may come from different environments; this guarantees one-to-one
                    # pairing when computing the SFT loss.
                    
                    if chord_mode in ['rl_only', 'hybrid']:
                        inputs, outputs, scores = _process_batch_for_logging(batch)
                        train_env_infos = batch.non_tensor_batch["env_infos"].tolist()

                        self._maybe_log_generations(
                            inputs=inputs,
                            outputs=outputs,
                            scores=scores,
                            env_infos=train_env_infos,
                            _type="train",
                        )

                # if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX:
                #     # TODO: check if this is correct. Not tested yet
                #     logger.log(
                #         "[NotImplemented] REMAX implementation is not tested yet in RAGEN. Exiting."
                #     )
                #     exit()
                #     with marked_timer("gen_max", timing_raw):
                #         gen_baseline_batch = deepcopy(batch)
                #         gen_baseline_batch.meta_info["do_sample"] = False
                #         gen_baseline_output = self.agent_proxy.rollout(
                #             gen_baseline_batch, val=False
                #         )

                #         batch = batch.union(gen_baseline_output)
                #         reward_baseline_tensor = self.reward_fn(batch)
                #         reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1)

                #         batch.pop(batch_keys=list(gen_baseline_output.batch.keys()))

                #         batch.batch["reward_baselines"] = reward_baseline_tensor

                #         del gen_baseline_batch, gen_baseline_output

                # batch.non_tensor_batch["uid"] = np.array([str(uuid.uuid4()) for _ in range(len(batch.batch))],
                # dtype=object)
                # repeat to align with repeated responses in rollout
                # batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
                # batch = batch.union(gen_batch_output)

                # NOTE reward normalization already done in ctx_manager, so set group size = 1 here
                batch.non_tensor_batch["uid"] = np.array(
                    [str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object
                )
                # batch.non_tensor_batch["uid"] = batch.non_tensor_batch["group_ids"]

                # batch.batch["response_mask"] = compute_response_mask(batch)
                batch.batch["response_mask"] = batch.batch["loss_mask"]
                if api_batch is not None and hasattr(api_batch, 'batch') and api_batch.batch is not None:
                    api_batch.batch["response_mask"] = api_batch.batch["loss_mask"]
                
                # Only run the subsequent RL-related computation in RL modes
                if chord_mode in ['rl_only', 'hybrid']:
                    # balance the number of valid tokens on each dp rank.
                    # Note that this breaks the order of data inside the batch.
                    # Please take care when you implement group based adv computation such as GRPO and rloo
                    if self.config.trainer.balance_batch:
                        self._balance_batch(batch, metrics=metrics)

                    # compute global_valid tokens
                    batch.meta_info["global_token_num"] = torch.sum(
                        batch.batch["attention_mask"], dim=-1
                    ).tolist()

                    if self.use_rm:
                        with marked_timer("reward", timing_raw):
                            # compute reward model score
                            reward_tensor = self.rm_wg.compute_rm_score(batch)
                            batch = batch.union(reward_tensor)

                    if self.config.reward_model.launch_reward_fn_async:
                        future_reward = compute_reward_async.remote(
                            batch, self.config, self.tokenizer
                        )
                    else:
                        reward_tensor, reward_extra_infos_dict = compute_reward(
                            batch, self.reward_fn
                        )
                    
                    # rollout_only mode: after computing rewards, save directly and continue the loop, skipping training
                    if rollout_only_mode:
                        if self.config.reward_model.launch_reward_fn_async:
                            reward_tensor, reward_extra_infos_dict = ray.get(future_reward)
                        batch.batch["token_level_scores"] = reward_tensor
                        batch.batch["rm_scores"] = reward_tensor
                        if reward_extra_infos_dict:
                            batch.non_tensor_batch.update(
                                {k: np.array(v) for k, v in reward_extra_infos_dict.items()}
                            )
                        self._save_batch_data(
                            batch=batch,
                            data_type="train",
                            timestamp_str=session_timestamp,
                            metrics=metrics
                        )
                        metrics.update({"timing_s/total": time.time() - self.start_time})
                        logger.log(data=metrics, step=self.global_steps)
                        progress_bar.update(1)
                        self.global_steps += 1
                        continue

                    # recompute old_log_probs
                    with marked_timer("old_log_prob", timing_raw):
                        old_log_prob = self.actor_rollout_wg.compute_log_prob(batch)
                        batch = batch.union(old_log_prob)
                        avg_old_log_prob = masked_mean(
                            old_log_prob.batch["old_log_probs"],
                            batch.batch["response_mask"],
                        )
                        metrics.update({"rollout/old_log_prob": avg_old_log_prob})

                    if self.use_reference_policy:
                        # compute reference log_prob
                        with marked_timer("ref", timing_raw):
                            if not self.ref_in_actor:
                                ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(
                                    batch
                                )
                            else:
                                ref_log_prob = self.actor_rollout_wg.compute_ref_log_prob(
                                    batch
                                )
                            batch = batch.union(ref_log_prob)
                            avg_ref_log_prob = masked_mean(
                                ref_log_prob.batch["ref_log_prob"],
                                batch.batch["response_mask"],
                            )
                            metrics.update({"rollout/ref_log_prob": avg_ref_log_prob})

                    # compute values
                    if self.use_critic:
                        with marked_timer("values", timing_raw):
                            values = self.critic_wg.compute_values(batch)
                            batch = batch.union(values)

                    with marked_timer("adv", timing_raw):
                        # we combine with rule-based rm
                        reward_extra_infos_dict: dict[str, list]
                        if self.config.reward_model.launch_reward_fn_async:
                            reward_tensor, reward_extra_infos_dict = ray.get(future_reward)
                        batch.batch["token_level_scores"] = reward_tensor

                        print(f"{list(reward_extra_infos_dict.keys())=}")
                        if reward_extra_infos_dict:
                            batch.non_tensor_batch.update(
                                {k: np.array(v) for k, v in reward_extra_infos_dict.items()}
                            )

                        # compute rewards. apply_kl_penalty if available
                        if self.config.algorithm.use_kl_in_reward:
                            batch, kl_metrics = apply_kl_penalty(
                                batch,
                                kl_ctrl=self.kl_ctrl_in_reward,
                                kl_penalty=self.config.algorithm.kl_penalty,
                                multi_turn=True,
                            )
                            metrics.update(kl_metrics)
                        else:
                            batch.batch["token_level_rewards"] = batch.batch[
                                "token_level_scores"
                            ]

                        # compute advantages, executed on the driver process

                        norm_adv_by_std_in_grpo = self.config.algorithm.get(
                            "norm_adv_by_std_in_grpo", True
                        )  # GRPO adv normalization factor

                        batch = compute_advantage(
                            batch,
                            adv_estimator=self.config.algorithm.adv_estimator,
                            gamma=self.config.algorithm.gamma,
                            lam=self.config.algorithm.lam,
                            num_repeat=self.config.actor_rollout_ref.rollout.n,
                            norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo,
                            multi_turn=True,
                            high_level_gamma=self.config.algorithm.high_level_gamma,
                            bi_level_gae=self.config.algorithm.bi_level_gae,
                        )
                elif chord_mode == 'sft_only':
                    # sft_only mode skips all RL computation; only set up the basic meta_info
                    batch.meta_info["global_token_num"] = torch.sum(
                        batch.batch["attention_mask"], dim=-1
                    ).tolist()
                    reward_extra_infos_dict = {}

                ##### A very different setting, just here for testing: Can I normalize the advantages to have a mean of 0?
                if (
                    self.config.algorithm.adv_estimator == AdvantageEstimator.GRPO
                    and self.config.grpo_advantage_length_weight
                ):
                    response_mask = batch.batch["response_mask"]
                    advantages = batch.batch["advantages"]
                    response_relative_lengths = (
                        torch.sum(response_mask, dim=-1) + 1e-6
                    ) / torch.sum(response_mask, dim=-1).float().mean()
                    advantages = advantages / response_relative_lengths.unsqueeze(-1)
                    batch.batch["advantages"] = advantages

                # Update the critic (RL modes only)
                if self.use_critic and chord_mode in ['rl_only', 'hybrid']:
                    with marked_timer("update_critic", timing_raw):
                        critic_output = self.critic_wg.update_critic(batch)
                    critic_output_metrics = reduce_metrics(
                        critic_output.meta_info["metrics"]
                    )
                    metrics.update(critic_output_metrics)

                # implement critic warmup
                if self.config.trainer.critic_warmup <= self.global_steps:
                    # update actor
                    with marked_timer("update_actor", timing_raw):
                        batch.meta_info["multi_turn"] = True
                        # Add the parameters required by the CHORD framework
                        batch.meta_info["global_step"] = self.global_steps
                        batch.meta_info["total_steps"] = self.total_training_steps
                        batch.meta_info["chord_mode"] = chord_mode
                        
                        # Pass the appropriate data for each mode
                        if chord_mode == 'rl_only':
                            # rl_only mode does not pass an sft_batch
                            actor_output = self.actor_rollout_wg.update_actor(batch)
                        elif chord_mode == 'sft_only':
                            # sft_only mode mainly uses api_batch, but batch is still passed to keep the interface consistent
                            actor_output = self.actor_rollout_wg.update_actor(batch, sft_batch=api_batch)
                        elif chord_mode == 'hybrid':
                            # hybrid mode passes the full data
                            actor_output = self.actor_rollout_wg.update_actor(batch, sft_batch=api_batch)
                        else:
                            raise ValueError(f"未知的chord模式: {chord_mode}")
                            
                    actor_output_metrics = reduce_metrics(
                        actor_output.meta_info["metrics"]
                    )
                    metrics.update(actor_output_metrics)
                
                # Save training data (only in rl_only and hybrid modes)
                if chord_mode in ['rl_only', 'hybrid']:
                    self._save_batch_data(
                        batch=batch,
                        data_type="train",
                        timestamp_str=session_timestamp,
                        metrics=metrics
                    )

                # Log rollout generations if enabled
                rollout_data_dir = self.config.trainer.get("rollout_data_dir", None)
                if rollout_data_dir:
                    with marked_timer("dump_rollout_generations", timing_raw):
                        print(batch.batch.keys())
                        inputs = self.tokenizer.batch_decode(
                            batch.batch["prompts"], skip_special_tokens=True
                        )
                        outputs = self.tokenizer.batch_decode(
                            batch.batch["responses"], skip_special_tokens=True
                        )
                        scores = (
                            batch.batch["token_level_scores"].sum(-1).cpu().tolist()
                        )
                        self._dump_generations(
                            inputs=inputs,
                            outputs=outputs,
                            scores=scores,
                            reward_extra_infos_dict=reward_extra_infos_dict,
                            dump_path=rollout_data_dir,
                        )

                # validate
                if (
                    self.val_reward_fn is not None
                    and self.config.trainer.test_freq > 0
                    and (
                        is_last_step
                        or self.global_steps % self.config.trainer.test_freq == 0
                    )
                ):
                    with marked_timer("testing", timing_raw):
                        val_metrics: dict = self._validate()
                        if is_last_step:
                            last_val_metrics = val_metrics
                    metrics.update(val_metrics)

                if self.config.trainer.save_freq > 0 and (
                    is_last_step
                    or self.global_steps % self.config.trainer.save_freq == 0
                ):
                    with marked_timer("save_checkpoint", timing_raw):
                        self._save_checkpoint()

            # collect metrics
            # metrics.update(
            #     compute_data_metrics(batch=batch, use_critic=self.use_critic)
            # )
            # metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))
            # # TODO: implement actual tflpo and theoretical tflpo
            # n_gpus = self.resource_pool_manager.get_n_gpus()
            # metrics.update(
            #     compute_throughout_metrics(
            #         batch=batch, timing_raw=timing_raw, n_gpus=n_gpus
            #     )
            # )

            # add another timing metric: total time
            metrics.update({"timing_s/total": time.time() - self.start_time})
            # TODO: make a canonical logger that supports various backend
            logger.log(data=metrics, step=self.global_steps)

            if is_last_step:
                pprint(f"Final validation metrics: {last_val_metrics}")
                progress_bar.close()
                return

            progress_bar.update(1)
            self.global_steps += 1
