
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import uuid
from copy import deepcopy
from pprint import pprint
import os
import logging
from typing import Tuple, Dict
import numpy as np
import ray
import torch
from tqdm import tqdm

from verl import DataProto
from verl.experimental.dataset.sampler import AbstractCurriculumSampler
from verl.trainer.ppo.core_algos import AdvantageEstimator, agg_loss
from verl.trainer.ppo.metric_utils import (
    _compute_response_info,
    compute_data_metrics,
    compute_throughout_metrics,
    compute_timing_metrics,
)
from verl.trainer.ppo.ray_trainer import (
    RayPPOTrainer,
    apply_kl_penalty,
    compute_advantage,
    compute_response_mask,
    )
from verl.trainer.ppo.reward import compute_reward, compute_reward_async
from verl.utils.checkpoint.checkpoint_manager import should_save_ckpt_esi
from verl.utils.debug import marked_timer
from verl.utils.metric import reduce_metrics
from verl.utils.rollout_skip import RolloutSkip

from .utils.down_sample import reject_equal_reward, resample_of_correct


class RStar2AgentRayTrainer(RayPPOTrainer):
    def _down_sample_batch(
        self, batch: DataProto, reward_extra_infos_dict: dict, history_dict: dict
    ) -> Tuple[DataProto, dict, dict]:
        """Down-sample rollout traces before the PPO update.

        Two stages run in order:
          1. ``reject_equal_reward`` -- drop traces of prompts whose rollouts
             all received the same reward (no learning signal).
          2. ``resample_of_correct`` -- weighted re-sampling of the survivors
             according to ``down_sampling_config``.

        Args:
            batch: rollout batch to filter.
            reward_extra_infos_dict: per-trace reward metadata; the sampling
                helpers keep it aligned with ``batch``.
            history_dict: rolling per-prompt history consumed by
                ``resample_of_correct``.

        Returns:
            ``(batch, reward_extra_infos_dict, metrics)``. ``batch`` and
            ``reward_extra_infos_dict`` are ``None`` when every trace was
            filtered out; ``metrics`` is always populated.
        """
        do_down_sampling = self.config.augmentation.do_down_sampling
        down_sampling_config = self.config.augmentation.down_sampling_config
        world_size = self.actor_rollout_wg.world_size
        metrics = {
            "down_sampling/before_sampling_trace_num": len(batch),
        }

        def check_batch_is_empty(batch: DataProto, down_sampling_stage: str) -> bool:
            # An empty batch means the caller must skip this training step.
            if batch is None or len(batch) == 0:
                print(
                    f"Batch is empty after {down_sampling_stage}, skipping the training step."
                )
                # Always log the metrics collected so far so the skip is
                # diagnosable, regardless of which stage emptied the batch.
                logging.error(f"metrics: {metrics}")
                return True
            return False

        # reject rollout trace of the same prompt with equal rewards
        do_reject_equal_reward = (
            down_sampling_config.get("reject_equal_reward", False) and do_down_sampling
        )
        batch, reward_extra_infos_dict, _metrics = reject_equal_reward(
            batch, reward_extra_infos_dict, do_reject_equal_reward, world_size
        )
        metrics.update(_metrics)
        if check_batch_is_empty(batch, "reject_equal_reward"):
            return None, None, metrics

        # weighted sampling
        config = {
            "roc_error_ratio": down_sampling_config.get("roc_error_ratio", False)
            and do_down_sampling,
            "roc_answer_format": down_sampling_config.get("roc_answer_format", False)
            and do_down_sampling,
            "min_zero_reward_trace_num": down_sampling_config.get(
                "min_zero_reward_trace_num", -1
            ),
            "min_non_zero_reward_trace_num": down_sampling_config.get(
                "min_non_zero_reward_trace_num", -1
            ),
            "down_sample_to_n": down_sampling_config.get("down_sample_to_n", -1),
        }
        batch, reward_extra_infos_dict, _metrics = resample_of_correct(
            batch,
            reward_extra_infos_dict,
            self.tokenizer,
            config,
            do_down_sampling,
            world_size=world_size,
            history_dict=history_dict,
        )
        metrics.update(_metrics)
        if check_batch_is_empty(batch, "fused_weighted_sampling"):
            return None, None, metrics

        metrics["down_sampling/after_sampling_trace_num"] = len(batch)
        return batch, reward_extra_infos_dict, metrics

    def fit(self):
        """
        The training loop of PPO.
        The driver process only need to call the compute functions of the worker group through RPC
        to construct the PPO dataflow.
        The light-weight advantage computation is done on the driver process.

        Most logic is same with RayPPOTrainer; the main additions are rStar
        down-sampling (``self._down_sample_batch``), threading
        ``self.history_dict`` through generation / reward computation, and
        extra debug dumping when a whole batch is filtered out.
        """
        from omegaconf import OmegaConf

        from verl.utils.tracking import Tracking

        logger = Tracking(
            project_name=self.config.trainer.project_name,
            experiment_name=self.config.trainer.experiment_name,
            default_backend=self.config.trainer.logger,
            config=OmegaConf.to_container(self.config, resolve=True),
        )

        self.global_steps = 0

        # load checkpoint before doing anything
        self._load_checkpoint()

        # perform validation before training
        # currently, we only support validation using the reward_function.
        if self.val_reward_fn is not None and self.config.trainer.get(
            "val_before_train", True
        ):
            val_metrics = self._validate()
            assert val_metrics, f"{val_metrics=}"
            pprint(f"Initial validation metrics: {val_metrics}")
            logger.log(data=val_metrics, step=self.global_steps)
            if self.config.trainer.get("val_only", False):
                return

        if self.config.actor_rollout_ref.rollout.get("skip_rollout", False):
            rollout_skip = RolloutSkip(self.config, self.actor_rollout_wg)
            rollout_skip.wrap_generate_sequences()

        # add tqdm
        progress_bar = tqdm(
            total=self.total_training_steps,
            initial=self.global_steps,
            desc="Training Progress",
        )

        # we start from step 1
        self.global_steps += 1
        last_val_metrics = None
        self.max_steps_duration = 0

        # Profiling state: track the previous/current/next step so continuous
        # profiling windows start and stop exactly once.
        prev_step_profile = False
        curr_step_profile = (
            self.global_steps in self.config.global_profiler.steps
            if self.config.global_profiler.steps is not None
            else False
        )
        next_step_profile = False

        for epoch in range(self.config.trainer.total_epochs):
            max_step = self.config.trainer.get("max_steps_per_epoch", None)
            total_steps_now = 0
            for batch_dict in self.train_dataloader:
                if max_step is not None and total_steps_now >= max_step:
                    print(
                        f"Reached max_steps_per_epoch={max_step}, moving to next epoch."
                    )
                    break
                total_steps_now += 1
                metrics = {}
                timing_raw = {}

                with marked_timer("start_profile", timing_raw):
                    self._start_profiling(
                        not prev_step_profile and curr_step_profile
                        if self.config.global_profiler.profile_continuous_steps
                        else curr_step_profile
                    )

                batch: DataProto = DataProto.from_single_dict(batch_dict)

                # add uid to batch so rollouts of the same prompt can be grouped
                batch.non_tensor_batch["uid"] = np.array(
                    [str(uuid.uuid4()) for _ in range(len(batch.batch))], dtype=object
                )

                gen_batch = self._get_gen_batch(batch)

                # pass global_steps to trace
                gen_batch.meta_info["global_steps"] = self.global_steps
                gen_batch = gen_batch.repeat(
                    repeat_times=self.config.actor_rollout_ref.rollout.n,
                    interleave=True,
                )

                is_last_step = self.global_steps >= self.total_training_steps

                with marked_timer("step", timing_raw):
                    # generate a batch
                    with marked_timer("gen", timing_raw, color="red"):
                        if not self.async_rollout_mode:
                            gen_batch_output = self.actor_rollout_wg.generate_sequences(
                                gen_batch
                            )
                        else:
                            #################### add history_dict and output_image_dir ####################
                            output_image_dir = self.config.trainer.get(
                                "output_image_dir", None
                            )
                            if output_image_dir is not None:
                                # one image directory per (epoch, step)
                                output_image_dir = os.path.join(
                                    output_image_dir,
                                    "epochs_"
                                    + str(epoch)
                                    + "_step_"
                                    + str(self.global_steps),
                                )
                                os.makedirs(output_image_dir, exist_ok=True)
                            gen_batch_output = (
                                self.async_rollout_manager.generate_sequences(
                                    gen_batch,
                                    history_dict=self.history_dict,
                                    output_image_dir=output_image_dir,
                                )
                            )
                        ###############################################################################################
                        timing_raw.update(gen_batch_output.meta_info["timing"])
                        gen_batch_output.meta_info.pop("timing", None)

                    if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX:
                        if self.reward_fn is None:
                            raise ValueError(
                                "A reward_fn is required for REMAX advantage estimation."
                            )

                        # REMAX needs a greedy (do_sample=False) baseline rollout.
                        with marked_timer("gen_max", timing_raw, color="purple"):
                            gen_baseline_batch = deepcopy(gen_batch)
                            gen_baseline_batch.meta_info["do_sample"] = False
                            if not self.async_rollout_mode:
                                gen_baseline_output = (
                                    self.actor_rollout_wg.generate_sequences(
                                        gen_baseline_batch
                                    )
                                )
                            else:
                                gen_baseline_output = (
                                    self.async_rollout_manager.generate_sequences(
                                        gen_baseline_batch, self.history_dict
                                    )
                                )
                            batch = batch.union(gen_baseline_output)
                            reward_baseline_tensor = self.reward_fn(
                                batch, history_dict=self.history_dict
                            )
                            reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1)

                            batch.pop(batch_keys=list(gen_baseline_output.batch.keys()))

                            batch.batch["reward_baselines"] = reward_baseline_tensor

                            del gen_baseline_batch, gen_baseline_output

                    # repeat to align with repeated responses in rollout
                    batch = batch.repeat(
                        repeat_times=self.config.actor_rollout_ref.rollout.n,
                        interleave=True,
                    )
                    batch = batch.union(gen_batch_output)

                    if "response_mask" not in batch.batch.keys():
                        batch.batch["response_mask"] = compute_response_mask(batch)

                    # NOTE(review): the batch is balanced again after
                    # down-sampling (see below); this first balance looks like a
                    # leftover of the pre-down-sampling flow -- confirm whether
                    # it is still needed.
                    if self.config.trainer.balance_batch:
                        self._balance_batch(batch, metrics=metrics)

                    # compute global_valid tokens
                    batch.meta_info["global_token_num"] = torch.sum(
                        batch.batch["attention_mask"], dim=-1
                    ).tolist()

                    with marked_timer("reward", timing_raw, color="yellow"):
                        # compute reward model score
                        if self.use_rm:
                            reward_tensor = self.rm_wg.compute_rm_score(batch)
                            batch = batch.union(reward_tensor)

                        if self.config.reward_model.launch_reward_fn_async:
                            future_reward = compute_reward_async.remote(
                                data=batch,
                                reward_fn=self.reward_fn,
                                history_dict=self.history_dict,
                            )
                            # Down-sampling is disabled with async rewards
                            # (asserted below); provide an empty dict so the
                            # pass-through call to _down_sample_batch does not
                            # hit an unbound name.
                            reward_extra_infos_dict = {}
                        else:
                            reward_tensor, reward_extra_infos_dict = compute_reward(
                                batch, self.reward_fn, history_dict=self.history_dict
                            )
                            batch.batch["token_level_scores"] = reward_tensor

                    ################################### rStar Down-sampling ###################################
                    with marked_timer("down_sample", timing_raw, color="yellow"):
                        assert not (
                            self.config.reward_model.launch_reward_fn_async
                            and self.config.augmentation.do_down_sampling
                        ), "Down-sampling cannot be combined with async reward function for now."

                        # Keep a copy so we can log/dump the pre-sampling batch.
                        origin_batch = deepcopy(batch)

                        batch, reward_extra_infos_dict, down_sampling_metrics = self._down_sample_batch(
                            batch, reward_extra_infos_dict, history_dict=self.history_dict
                        )
                        metrics.update(down_sampling_metrics)

                        if batch is None:
                            # This block is for debugging when the entire batch is filtered out.
                            if self.config.trainer.get("rollout_data_dir", None):
                                inputs = self.tokenizer.batch_decode(origin_batch.batch["prompts"], skip_special_tokens=True)
                                outputs = self.tokenizer.batch_decode(origin_batch.batch["responses"], skip_special_tokens=True)
                                scores = origin_batch.batch["token_level_scores"].sum(-1).cpu().tolist()
                                sample_gts = [item.non_tensor_batch.get("reward_model", {}).get("ground_truth", None) for item in origin_batch]

                                # Re-compute the original extra info dict for dumping
                                _, original_extra_info = compute_reward(origin_batch, self.reward_fn, history_dict=self.history_dict)
                                if self.config.trainer.get("output_image_dir", None) is not None:
                                    if "tool_outputs" in gen_batch_output.non_tensor_batch:
                                        original_extra_info["tool_outputs"] = origin_batch.non_tensor_batch["tool_outputs"]
                                # Dump next to the configured rollout data dir
                                # instead of a hard-coded machine-local path.
                                self._dump_generations(
                                    inputs=inputs, outputs=outputs, gts=sample_gts, scores=scores,
                                    reward_extra_infos_dict=original_extra_info,
                                    dump_path=os.path.join(
                                        self.config.trainer.get("rollout_data_dir"),
                                        "down_sample_empty",
                                    ),
                                )
                            continue
                        else:
                            # Log metrics from the original batch before down-sampling
                            with torch.no_grad():
                                original_sequence_score = origin_batch.batch["token_level_scores"].sum(-1)
                                response_info = _compute_response_info(origin_batch)
                                original_response_length = response_info["response_length"]

                                original_metrics = {
                                    "original_batch/score/mean": torch.mean(original_sequence_score).item(),
                                    "original_batch/score/max": torch.max(original_sequence_score).item(),
                                    "original_batch/score/min": torch.min(original_sequence_score).item(),
                                    "original_batch/response_length/mean": torch.mean(original_response_length).item(),
                                }
                                metrics.update(original_metrics)
                    ##########################################################################################

                    reward_info_keys = list(reward_extra_infos_dict.keys()) if reward_extra_infos_dict else []

                    if reward_extra_infos_dict:
                        # Convert all lists to numpy arrays with dtype=object for safe handling
                        # of complex items (like dicts) inside DataProto.
                        safe_reward_infos = {
                            k: np.array(v, dtype=object) for k, v in reward_extra_infos_dict.items() if v is not None
                        }
                        batch.non_tensor_batch.update(safe_reward_infos)

                    # Now, when we balance the batch, the reward info will be reordered correctly with the tensors.
                    if self.config.trainer.balance_batch:
                        self._balance_batch(batch, metrics=metrics)

                    # For functions that need the old dict format, we can reconstruct them from the
                    # now-correctly-ordered batch object.
                    if reward_info_keys:
                        # Re-extract the complex dicts for metric/history functions.
                        dicts_in_reward_extra_info = {
                            k: batch.non_tensor_batch[k].tolist()
                            for k in reward_info_keys
                            if k in batch.non_tensor_batch and len(batch.non_tensor_batch[k]) > 0 and isinstance(batch.non_tensor_batch[k][0], dict)
                        }

                        self.history_dict = self._update_history_dict(batch, dicts_in_reward_extra_info, len_of_steps=10)
                        metrics.update(self._compute_tool_metrics(batch, dicts_in_reward_extra_info))
                    # Move the balance logic after down sampling

                    #############################################################################

                    # recompute old_log_probs
                    with marked_timer("old_log_prob", timing_raw, color="blue"):
                        old_log_prob = self.actor_rollout_wg.compute_log_prob(batch)
                        entropys = old_log_prob.batch["entropys"]
                        response_masks = batch.batch["response_mask"]
                        loss_agg_mode = (
                            self.config.actor_rollout_ref.actor.loss_agg_mode
                        )
                        entropy_agg = agg_loss(
                            loss_mat=entropys,
                            loss_mask=response_masks,
                            loss_agg_mode=loss_agg_mode,
                        )
                        old_log_prob_metrics = {
                            "actor/entropy": entropy_agg.detach().item()
                        }
                        metrics.update(old_log_prob_metrics)
                        old_log_prob.batch.pop("entropys")
                        batch = batch.union(old_log_prob)

                        if "rollout_log_probs" in batch.batch.keys():
                            from verl.utils.debug.metrics import calculate_debug_metrics
                            metrics.update(calculate_debug_metrics(batch))

                    if self.use_reference_policy:
                        # compute reference log_prob
                        with marked_timer("ref", timing_raw, color="olive"):
                            if not self.ref_in_actor:
                                ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(
                                    batch
                                )
                            else:
                                ref_log_prob = (
                                    self.actor_rollout_wg.compute_ref_log_prob(batch)
                                )
                            batch = batch.union(ref_log_prob)

                    if self.use_critic:
                        # compute values
                        with marked_timer("values", timing_raw, color="cyan"):
                            values = self.critic_wg.compute_values(batch)
                            batch = batch.union(values)

                    with marked_timer("adv", timing_raw, color="brown"):
                        if self.config.reward_model.launch_reward_fn_async:
                            reward_tensor, reward_extra_infos_dict = ray.get(
                                future_reward
                            )
                            batch.batch["token_level_scores"] = reward_tensor
                            if reward_extra_infos_dict:
                                batch.non_tensor_batch.update(
                                    {
                                        k: np.array(v)
                                        for k, v in reward_extra_infos_dict.items()
                                    }
                                )
                            reward_extra_infos_dict_keys = list(
                                reward_extra_infos_dict.keys()
                            )
                            reward_extra_infos_dict = {
                                key: batch.non_tensor_batch[key].tolist()
                                for key in reward_extra_infos_dict_keys
                                if key in batch.non_tensor_batch # ##### FIX IS HERE #####
                            }
                        ################################################################################

                        # compute rewards. apply_kl_penalty if available
                        if self.config.algorithm.use_kl_in_reward:
                            batch, kl_metrics = apply_kl_penalty(
                                batch,
                                kl_ctrl=self.kl_ctrl_in_reward,
                                kl_penalty=self.config.algorithm.kl_penalty,
                            )
                            metrics.update(kl_metrics)
                        else:
                            batch.batch["token_level_rewards"] = batch.batch[
                                "token_level_scores"
                            ]

                        # compute advantages, executed on the driver process

                        norm_adv_by_std_in_grpo = self.config.algorithm.get(
                            "norm_adv_by_std_in_grpo", True
                        )  # GRPO adv normalization factor

                        batch = compute_advantage(
                            batch,
                            adv_estimator=self.config.algorithm.adv_estimator,
                            gamma=self.config.algorithm.gamma,
                            lam=self.config.algorithm.lam,
                            num_repeat=self.config.actor_rollout_ref.rollout.n,
                            norm_adv_by_std_in_grpo=norm_adv_by_std_in_grpo,
                            config=self.config.algorithm,
                        )

                    if self.use_critic:
                        with marked_timer("update_critic", timing_raw, color="pink"):
                            critic_output = self.critic_wg.update_critic(batch)
                        critic_output_metrics = reduce_metrics(critic_output.meta_info["metrics"])
                        metrics.update(critic_output_metrics)

                    # update actor once the critic warmup period is over
                    if self.config.trainer.critic_warmup <= self.global_steps:
                        with marked_timer("update_actor", timing_raw, color="red"):
                            batch.meta_info["multi_turn"] = self.config.actor_rollout_ref.rollout.multi_turn.enable
                            actor_output = self.actor_rollout_wg.update_actor(batch)
                        actor_output_metrics = reduce_metrics(actor_output.meta_info["metrics"])
                        metrics.update(actor_output_metrics)

                    rollout_data_dir = self.config.trainer.get("rollout_data_dir", None)
                    if rollout_data_dir:
                        with marked_timer("dump_rollout_generations", timing_raw, color="green"):
                            inputs = self.tokenizer.batch_decode(batch.batch["prompts"], skip_special_tokens=True)
                            outputs = self.tokenizer.batch_decode(batch.batch["responses"], skip_special_tokens=True)
                            scores = batch.batch["token_level_scores"].sum(-1).cpu().tolist()
                            sample_gts = [item.non_tensor_batch.get("reward_model", {}).get("ground_truth", None) for item in batch]

                            # FIX: Reconstruct the full reward info dict FROM THE BATCH for dumping.
                            # This ensures it has the same (potentially reordered) order as the batch data.
                            full_reward_infos_for_dumping = {
                                k: batch.non_tensor_batch[k].tolist() for k in reward_info_keys if k in batch.non_tensor_batch
                            }
                            # Attach tool outputs to the dict that is actually
                            # dumped (previously they were assigned to
                            # reward_extra_infos_dict and never written out).
                            full_reward_infos_for_dumping["tool_outputs"] = batch.non_tensor_batch.get("tool_outputs", [None]*len(batch))
                            self._dump_generations(
                                inputs=inputs, outputs=outputs, gts=sample_gts, scores=scores,
                                reward_extra_infos_dict=full_reward_infos_for_dumping,
                                dump_path=rollout_data_dir,
                            )
                    if (self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and
                       (is_last_step or self.global_steps % self.config.trainer.test_freq == 0)):
                        with marked_timer("testing", timing_raw, color="green"):
                            val_metrics: dict = self._validate()
                            if is_last_step:
                                last_val_metrics = val_metrics
                        metrics.update(val_metrics)

                    # Force an early checkpoint if the ESI (spot) instance is
                    # close to expiring, on top of the regular save_freq cadence.
                    esi_close_to_expiration = should_save_ckpt_esi(
                        max_steps_duration=self.max_steps_duration,
                        redundant_time=self.config.trainer.esi_redundant_time,
                    )
                    if self.config.trainer.save_freq > 0 and (
                        is_last_step or self.global_steps % self.config.trainer.save_freq == 0 or esi_close_to_expiration):
                        if esi_close_to_expiration:
                            print("Force saving checkpoint: ESI instance expiration approaching.")
                        with marked_timer("save_checkpoint", timing_raw, color="green"):
                            self._save_checkpoint()

                with marked_timer("stop_profile", timing_raw):
                    next_step_profile = (self.global_steps + 1 in self.config.global_profiler.steps
                                         if self.config.global_profiler.steps is not None else False)
                    self._stop_profiling(
                        curr_step_profile and not next_step_profile
                        if self.config.global_profiler.profile_continuous_steps else curr_step_profile
                    )
                    prev_step_profile = curr_step_profile
                    curr_step_profile = next_step_profile

                steps_duration = timing_raw["step"]
                self.max_steps_duration = max(self.max_steps_duration, steps_duration)

                metrics.update({
                    "training/global_step": self.global_steps,
                    "training/epoch": epoch,
                })

                metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic, prefix=""))
                metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))
                n_gpus = self.resource_pool_manager.get_n_gpus()
                metrics.update(compute_throughout_metrics(batch=batch, timing_raw=timing_raw, n_gpus=n_gpus))

                if isinstance(self.train_dataloader.sampler, AbstractCurriculumSampler):
                    self.train_dataloader.sampler.update(batch=batch)

                logger.log(data=metrics, step=self.global_steps)
                progress_bar.update(1)
                self.global_steps += 1

                if (hasattr(self.config.actor_rollout_ref.actor, "profiler") and
                   self.config.actor_rollout_ref.actor.profiler.tool == "torch_memory"):
                    self.actor_rollout_wg.dump_memory_snapshot(
                        tag=f"post_update_step{self.global_steps}", sub_dir=f"step{self.global_steps}")

                if is_last_step:
                    pprint(f"Final validation metrics: {last_val_metrics}")
                    progress_bar.close()
                    return

                if hasattr(self.train_dataset, "on_batch_end"):
                    self.train_dataset.on_batch_end(batch=batch)