"""
This is the environment state manager for the LLM agent.
author: Pingyue Zhang
date: 2025-03-30
"""

from dataclasses import dataclass, field
import json
from typing import Dict, List, Optional, Any, Union
import PIL.Image
import hydra
import random
import numpy as np

from ragen.env import REGISTERED_ENVS, REGISTERED_ENV_CONFIGS
from ragen.utils import register_resolvers

register_resolvers()

import multiprocessing
from functools import partial

use_parallel = True


@dataclass
class EnvStatus:
    """Mutable per-environment status, replaced on every reset and updated each turn."""

    truncated: bool = False  # done but not success
    terminated: bool = False  # done and success
    num_actions: int = 0  # current action step (single action)
    rewards: List[float] = field(default_factory=list)  # rewards for each turn
    seed: Optional[int] = None  # what seed is used to reset this environment


class EnvStateManager:
    """Manager for the environment state
    The class is responsible for managing multiple (kinds of) environments

    """

    def __init__(self, config, mode: str = "train"):
        """Create the manager and instantiate all environments for *mode*.

        Args:
            config: full system config; ``config.es_manager.<mode>`` carries the
                per-mode settings (env_groups, group_size, env_configs).
            mode: which sub-config to use, e.g. "train" or "val".
        """
        self.sys_config = config
        self.mode = mode
        self.config = getattr(config.es_manager, mode)
        self.env_groups = int(self.config.env_groups)
        self.group_size = self.config.group_size
        # Optional per-mode base seed under config.seed.<mode>; None when absent.
        seed_section = getattr(config, "seed", None)
        self.base_seed = None if seed_section is None else seed_section.get(mode, None)
        self.seed_counter = 0
        self.n_cpu = multiprocessing.cpu_count()
        self._init_envs()
        self.rollout_cache = None

    def _init_envs(self):
        """Initialize the environments. train_envs and val_envs are lists of envs:
        Input: tags: ["SimpleSokoban", "HarderSokoban"]; n_groups: [1, 1]; group_size: 16
        Output: envs: List[Dict], each **entry** is a dict with keys: tag, group_id, env_id, env, env_config, status
        Example: [{"tag": "SimpleSokoban", "group_id": 0, "env_id": 0, "env": env, "config": env_config, "status": EnvStatus()},
            ...
            {"tag": "SimpleSokoban", "group_id": 0, "env_id": 15 (group_size - 1), ...},
            {"tag": "HarderSokoban", "group_id": 1, "env_id": 16, ...}
            ...]
        """
        assert (
            sum(self.config.env_configs.n_groups) == self.env_groups
        ), f"Sum of n_groups must equal env_groups. Got sum({self.config.env_configs.n_groups}) != {self.env_groups}"
        assert len(self.config.env_configs.tags) == len(
            self.config.env_configs.n_groups
        ), f"Number of tags must equal number of n_groups. Got {len(self.config.env_configs.tags)} != {len(self.config.env_configs.n_groups)}"

        global use_parallel
        if use_parallel:
            env_configs = self.config.env_configs
            # BUG FIX: previously `self.envs = pool.map(...)` ran inside the
            # per-tag loop, so each tag OVERWROTE the previous tag's envs and
            # only the last tag survived. Accumulate across tags instead.
            envs = []
            done_groups = 0
            for tag, n_group in zip(env_configs.tags, env_configs.n_groups):
                func = partial(
                    self._init_one_env,
                    sys_config=self.sys_config,
                    group_size=self.group_size,
                    tag=tag,
                )
                # Only use 1/4 of the cores to avoid conflicts; clamp to >= 1
                # so small machines don't hit Pool(0) -> ValueError.
                with multiprocessing.Pool(max(1, self.n_cpu // 4)) as pool:
                    envs.extend(
                        pool.map(
                            func,
                            range(
                                done_groups * self.group_size,
                                (done_groups + n_group) * self.group_size,
                            ),
                        )
                    )
                done_groups += n_group
            self.envs = envs
            return

        self.envs = self._init_env_instances(self.config)

    @staticmethod
    def _create_env_config(cfg_template, env_class, sys_config):
        """Build the env config object, merging envs.yaml fields with global settings.

        Args:
            cfg_template: per-tag template from ``sys_config.custom_envs``.
            env_class: key into ``REGISTERED_ENV_CONFIGS``.
            sys_config: full system config (source of agent_proxy parameters).

        Returns:
            An instantiated config from ``REGISTERED_ENV_CONFIGS[env_class]``.
        """
        # Copy into a fresh dict so repeated calls never mutate the shared
        # template (the original wrote max_turn etc. straight into it).
        params = dict(cfg_template.env_config or {})

        # Shared turn-control parameters come from the global agent_proxy config.
        agent_proxy = sys_config.agent_proxy
        params['max_turn'] = agent_proxy.max_turn
        params['mid_turn_ratio'] = agent_proxy.mid_turn_ratio
        params['max_actions_per_turn'] = agent_proxy.max_actions_per_turn

        # Forward env-specific fields declared in envs.yaml into the env config.
        for attr in ['env_instruction', 'mid_turn_instruction', 'final_turn_instruction', 'tools', 'stage_tools']:
            if hasattr(cfg_template, attr):
                params[attr] = getattr(cfg_template, attr)
        return REGISTERED_ENV_CONFIGS[env_class](**params)

    @staticmethod
    def _init_one_env(env_id, sys_config, group_size, tag):
        """Build one env entry dict for *tag* at global index *env_id*.

        Static so it can be pickled for ``multiprocessing.Pool.map``.
        """
        template = sys_config.custom_envs[tag]
        env_type = template.env_type
        env_config = EnvStateManager._create_env_config(template, env_type, sys_config)
        environment = REGISTERED_ENVS[env_type](env_config)
        entry = {
            "tag": tag,
            "group_id": env_id // group_size,
            "env_id": env_id,
            "env": environment,
            "config": env_config,
            "status": EnvStatus(),
            "max_actions_per_traj": template.max_actions_per_traj,
            "max_turn": sys_config.agent_proxy.max_turn,
        }
        return entry

    def _init_env_instances(self, config):
        """Sequentially build the env entry list (non-parallel path).

        Delegates each entry to ``_init_one_env`` so the serial and parallel
        paths construct identical entries (previously this method duplicated
        the construction code verbatim).
        """
        env_list = []
        done_groups = 0
        for tag, n_group in zip(config.env_configs.tags, config.env_configs.n_groups):
            start = done_groups * self.group_size
            stop = (done_groups + n_group) * self.group_size
            for env_id in range(start, stop):
                env_list.append(
                    self._init_one_env(env_id, self.sys_config, self.group_size, tag)
                )
            done_groups += n_group
        return env_list

    def reset(self, seed: Optional[int] = None):
        """Reset all environments and build the initial rollout cache.

        Each group g uses seed ``base + g`` (shared within the group so the
        group's members reset identically).

        Args:
            seed: base seed. If None, train mode draws a random one and val
                mode falls back to ``self.base_seed`` (or 123).

        Returns:
            List[Dict] rollout cache entries:
            [{"env_id": int, "history": List[Dict], "group_id": int,
              "tag": str, "penalty": 0}, ...]
        """

        def _expand_seed(base: int):
            # [[base]*group_size, [base+1]*group_size, ...] flattened
            per_group = [[base + i] * self.group_size for i in range(self.env_groups)]
            return sum(per_group, [])

        envs = self.envs
        rollout_cache = [
            {
                "env_id": entry["env_id"],
                "history": [],
                "group_id": entry["group_id"],
                "tag": entry["tag"],
                "penalty": 0,
            }
            for entry in envs
        ]

        # Pick the base seed for this reset.
        if seed is None:
            if self.mode == "train":
                seed = random.randint(0, 1000000)
            else:
                seed = 123 if self.base_seed is None else self.base_seed
        else:
            if self.mode == "train" and self.base_seed is not None:
                # Track how far we have advanced past the configured base seed.
                self.seed_counter = seed - self.base_seed + 1
        seeds = _expand_seed(seed)
        # Use a distinct loop variable: the original shadowed the `seed` argument here.
        for env_seed, entry in zip(seeds, envs):
            entry["env"].reset(seed=env_seed, mode=self.mode)
            entry["status"] = EnvStatus(seed=env_seed)

        # Record each env's initial observation in its rollout cache entry.
        for cache, entry in zip(rollout_cache, envs):
            next_state = self._handle_mm_state(entry["env"].render())
            cache["history"] = self._update_cache_history(
                cache["history"],
                next_state=next_state,
                actions_left=entry["max_actions_per_traj"],
                num_actions_info=None,
            )

        self.rollout_cache = rollout_cache
        return rollout_cache
    

    
    def step(self, all_env_inputs: List[Dict]):
        """Step the environments.
        1. extract valid actions from the action lookup table (if exists) and execute the actions, and update rollout cache
        2. Since rollout does not need to act over done envs, whenever the environment is done, we only update rollout cache, but not output env_outputs.
        Input:
        all_env_inputs: List[Dict]
            {env_id: int, llm_response: str, actions: List[str]}
            NOTE: should use env_id as index for existing some already done envs
        env_outputs: List[Dict]
            {env_id: int, history: List[Dict][{state: str, actions: List[str], reward: float, info: Dict, llm_response: str, llm_raw_response: str, (Optional)images: List[PIL.Image.Image]}]}
        """

        def _execute_actions(env, actions, turn_number=0):
            # Execute actions sequentially on `env`, accumulating reward and
            # stopping early as soon as the env reports done.
            acc_reward, turn_info, turn_done = 0, {}, False
            executed_actions = []
            # If the action list is empty, inject a single "empty" action so the env still steps once.
            injected_empty_action = False
            empty_action = None
            if not actions or len(actions) == 0:
                # Build the JSON string for the injected empty action.
                empty_action = json.dumps({
                    "tool_call": "empty",
                    "args": {}
                })
                actions = [empty_action]
                injected_empty_action = True

                print(f"警告: 传入的动作列表为空，添加empty动作")
                turn_info["empty_action"] = True
            for action in actions:
                _, reward, done, info = env.step(
                    action, turn_number=turn_number
                )
                acc_reward += reward
                turn_info.update(info)  # NOTE: currently use last info for multi-action
                # Auto-injected empty actions are not counted in executed_actions.
                if not (injected_empty_action and empty_action is not None and action == empty_action):
                    executed_actions.append(action)
                if done:
                    turn_done = True
                    break

            return acc_reward, turn_info, turn_done, executed_actions

        def _log_env_state(
            status,
            history,
            cur_obs,
            max_actions_per_traj,
            executed_actions,
            all_actions,
            acc_reward,
            turn_done,
            turn_info,
            env_input,
        ):
            # Fold this turn's outcome into `status` and `history`.
            obs = self._handle_mm_state(cur_obs)
            status.num_actions += len(executed_actions)
            status.rewards.append(acc_reward)  # NOTE use turn-wise acc_reward
            actions_left = max_actions_per_traj - status.num_actions
            if turn_done:
                status.terminated = (
                    True  # TODO check terminated definition in gymnasium
                )
                status.truncated = not turn_info.get("success", False)
            if turn_done:
                # Turn finished: update the last history entry in place; do not append a new state.
                if len(history) > 0:
                    history[-1].update({
                        "actions": executed_actions,
                        "reward": acc_reward,
                        "info": turn_info,
                        "llm_response": env_input["llm_response"],
                        "llm_raw_response": env_input["llm_raw_response"],
                    })
            else:
                # Turn not finished: append the new state as usual.
                history = self._update_cache_history(
                    history,
                    next_state=obs,
                    actions_left=actions_left,
                    num_actions_info={
                        "actions": executed_actions,
                        "reward": acc_reward,
                        "info": turn_info,
                        "llm_response": env_input["llm_response"],
                        "llm_raw_response": env_input["llm_raw_response"],
                    },
                )
            # filter out invalid actions
            # history = [content for content in history[:-1] if content['actions']] + [history[-1]]
            return status, history

        global use_parallel
        # Parallel path: only used for large batches (pickling each env entry
        # to worker processes has real overhead).
        if use_parallel and len(all_env_inputs) > 120:
            parallel_env_inputs = [
                {
                    **env_input,
                    "env": self.envs[env_input["env_id"]],
                    "rollout_cache_single": self.rollout_cache[env_input["env_id"]],
                }
                for env_input in all_env_inputs
            ]

            func = partial(self._step_env, sys_config=self.sys_config)
            with multiprocessing.Pool(self.n_cpu // 8) as pool:
                results = pool.map(func, parallel_env_inputs)

            env_outputs = []
            # Write the workers' mutated env entries and caches back into self.
            for turn_done, env, rollout_cache in results:
                self.envs[env["env_id"]] = env
                self.rollout_cache[env["env_id"]] = rollout_cache
                if not turn_done:
                    env_outputs.append(rollout_cache)
            return env_outputs

        envs = self.envs
        env_outputs = []

        for env_input in all_env_inputs:
            acc_reward, turn_info, turn_done = 0, {}, False
            entry = envs[env_input["env_id"]]
            env_id, env = entry["env_id"], entry["env"]
            actions_left_before = (
                entry["max_actions_per_traj"] - entry["status"].num_actions
            )

            # execute actions in envs
            valid_actions = self._extract_map_valid_actions(entry, env_input["actions"])
            acc_reward, turn_info, turn_done, executed_actions = _execute_actions(
                env,
                valid_actions[:actions_left_before],
                turn_number=env_input["turn_number"],
            )
            # Penalize responses whose actions were dropped by the lookup (or were empty).
            if len(valid_actions) != len(env_input["actions"]) or not valid_actions:
                self.rollout_cache[env_id][
                    "penalty"
                ] += self.sys_config.es_manager.format_penalty

            # NOTE(review): think_content is read but currently unused — the
            # reward bonus below is commented out.
            if env_input['think_content'] is not None:
                think_content = env_input['think_content']
                # acc_reward += self.sys_config.es_manager.think_reward
            status, history = _log_env_state(
                entry["status"],
                self.rollout_cache[env_id]["history"],
                entry["env"].render(),
                entry["max_actions_per_traj"],
                executed_actions,
                valid_actions,
                acc_reward,
                turn_done,
                turn_info,
                env_input,
            )
            entry["status"] = status
            # Force-finish trajectories that exhausted their action budget.
            if (
                entry["status"].num_actions >= entry["max_actions_per_traj"]
                and not turn_done
            ):
                entry["status"].truncated = True
                entry["status"].terminated = True
                turn_done = True
            self.rollout_cache[env_id]["history"] = history
            if (
                not turn_done
            ):  # NOTE done environments are not sent for further llm generation (for efficiency)
                env_outputs.append(self.rollout_cache[env_id])

        return env_outputs

    @staticmethod
    def _step_env(env_input, sys_config):
        """Step one environment entry; used by the parallel branch of ``step``.

        Static and self-contained so it can be pickled into a
        ``multiprocessing.Pool`` worker; the nested helpers deliberately
        mirror the serial-path helpers defined in ``step`` and on the class.

        Args:
            env_input: the per-env input dict plus "env" (the env entry) and
                "rollout_cache_single" (that env's rollout cache entry).
            sys_config: full system config (source of the format penalty).

        Returns:
            Tuple of (turn_done, updated env entry, updated cache entry).
        """
        def _extract_map_valid_actions(entry: Dict, actions: List[str]):
            """extract valid actions from the action lookup table (if exists)"""
            mapped_actions = []
            action_lookup = getattr(entry["env"].config, "action_lookup", None)
            if action_lookup is None:
                mapped_actions = actions
            else:  # the envs have pre-defined action lookup
                rev_action_lookup = {v.lower(): k for k, v in action_lookup.items()}
                actions = [action.lower() for action in actions]
                mapped_actions = [
                    rev_action_lookup[action]
                    for action in actions
                    if action in rev_action_lookup
                ]
            return mapped_actions

        def _execute_actions(env, actions, turn_number=0):
            # Execute actions sequentially, accumulating reward until done.
            acc_reward, turn_info, turn_done = 0, {}, False
            executed_actions = []
            # If the action list is empty, inject a single "empty" action so the env still steps once.
            injected_empty_action = False
            empty_action = None
            if not actions or len(actions) == 0:
                # Build the JSON string for the injected empty action.
                empty_action = json.dumps({
                    "tool_call": "empty",
                    "args": {}
                })
                actions = [empty_action]
                injected_empty_action = True

                print(f"警告: 传入的动作列表为空，添加empty动作")
                turn_info["empty_action"] = True
            for action in actions:
                _, reward, done, info = env.step(
                    action, turn_number=turn_number
                )
                acc_reward += reward
                turn_info.update(info)  # NOTE: currently use last info for multi-action
                # Auto-injected empty actions are not counted in executed_actions.
                if not (injected_empty_action and empty_action is not None and action == empty_action):
                    executed_actions.append(action)
                if done:
                    turn_done = True
                    break

            return acc_reward, turn_info, turn_done, executed_actions

        def _log_env_state(
            status,
            history,
            cur_obs,
            max_actions_per_traj,
            executed_actions,
            all_actions,
            acc_reward,
            turn_done,
            turn_info,
            env_input,
        ):
            def _handle_mm_state(state: Union[str, np.ndarray, list[np.ndarray]]):
                """Handle the state from the environment"""
                if isinstance(state, str):  # text state
                    return state
                elif isinstance(
                    state, np.ndarray
                ):  # when env state is a single image, convert it to a list to unify output format
                    state = [state]
                results = [PIL.Image.fromarray(_state, mode="RGB") for _state in state]
                return results

            def _update_cache_history(
                history: List[Dict],
                next_state,
                actions_left,
                num_actions_info: Optional[Dict] = None,
            ):
                """
                Update last step info and append state to history
                """
                if num_actions_info is not None:  # update last step info
                    assert len(history), "History should not be empty"
                    history[-1].update(num_actions_info)

                entry = {}  # append state to history
                if isinstance(next_state, str):  # text state
                    entry["state"] = next_state
                else:  # multimodal state
                    entry["state"] = "<images>" * len(next_state)
                    entry["images"] = next_state
                entry["actions_left"] = actions_left
                history.append(entry)
                return history

            obs = _handle_mm_state(cur_obs)
            status.num_actions += len(executed_actions)
            status.rewards.append(acc_reward)  # NOTE use turn-wise acc_reward
            actions_left = max_actions_per_traj - status.num_actions
            if turn_done:
                status.terminated = (
                    True  # TODO check terminated definition in gymnasium
                )
                status.truncated = not turn_info.get("success", False)
            if turn_done:
                # Turn finished: update the last history entry in place; do not append a new state.
                if len(history) > 0:
                    history[-1].update({
                        "actions": executed_actions,
                        "reward": acc_reward,
                        "info": turn_info,
                        "llm_response": env_input["llm_response"],
                        "llm_raw_response": env_input["llm_raw_response"],
                    })
            else:
                # Turn not finished: append the new state as usual.
                history = _update_cache_history(
                    history,
                    next_state=obs,
                    actions_left=actions_left,
                    num_actions_info={
                        "actions": executed_actions,
                        "reward": acc_reward,
                        "info": turn_info,
                        "llm_response": env_input["llm_response"],
                        "llm_raw_response": env_input["llm_raw_response"],
                    },
                )
            # filter out invalid actions
            # history = [content for content in history[:-1] if content['actions']] + [history[-1]]
            return status, history

        acc_reward, turn_info, turn_done = 0, {}, False
        entry = env_input["env"]
        env_id, env = entry["env_id"], entry["env"]
        actions_left_before = (
            entry["max_actions_per_traj"] - entry["status"].num_actions
        )

        # execute actions in envs
        valid_actions = _extract_map_valid_actions(entry, env_input["actions"])
        rollout_cache_single = env_input["rollout_cache_single"]
        acc_reward, turn_info, turn_done, executed_actions = _execute_actions(
            env,
            valid_actions[:actions_left_before],
            turn_number=env_input["turn_number"],
        )
        # Penalize responses whose actions were dropped by the lookup (or were empty).
        if len(valid_actions) != len(env_input["actions"]) or not valid_actions:
            rollout_cache_single["penalty"] += sys_config.es_manager.format_penalty

        status, history = _log_env_state(
            entry["status"],
            rollout_cache_single["history"],
            entry["env"].render(),
            entry["max_actions_per_traj"],
            executed_actions,
            valid_actions,
            acc_reward,
            turn_done,
            turn_info,
            env_input,
        )
        entry["status"] = status
        # Force-finish trajectories that exhausted their action budget.
        if (
            entry["status"].num_actions >= entry["max_actions_per_traj"]
            and not turn_done
        ):
            entry["status"].truncated = True
            entry["status"].terminated = True
            turn_done = True
        rollout_cache_single["history"] = history

        return turn_done, entry, rollout_cache_single

    def get_rollout_states(self):
        """Collect the final rollout outputs for all environments.

        Attaches per-env metrics (success, num_actions, aggregated custom
        metrics, MRR) to each rollout cache entry and returns the cache.
        """
        envs = self.envs
        rollout_cache = self.rollout_cache
        # Turn-level metrics: these are averaged over turns.
        TURN_LVL_METRICS = [
            "action_is_effective",
            "action_is_valid",
        ]

        # Accumulated metrics: these are summed over turns.
        SUM_METRICS = [
            "reward",
        ]

        # State metrics: only the last value is kept, with these defaults.
        STATE_METRICS = {
            "top_1": 0.0,
            "top_3": 0.0,
            "top_5": 0.0,
            "root_cause_rank": 999,
            "success": 0.0,
            "turn_number": 0
        }

        # Attach metrics to every rollout cache entry.
        for entry, cache in zip(envs, rollout_cache):
            status = entry["status"]

            # Re-fetch env_info so it includes the latest debug info (after the rollout ends).
            try:
                updated_env_info = entry["env"].get_env_info()
                cache["env_info"] = updated_env_info
            except Exception as e:
                print(f"更新env_info失败 (env_id: {entry['env_id']}): {e}")
                # Keep the existing env_info.
                pass

            # Build the per-env metric dict.
            env_metric = {
                "success": float(
                    status.terminated and (not status.truncated)
                ),  # success flag
                "num_actions": status.num_actions,  # number of actions taken
            }

            # Collect custom metrics from each turn's info dict.
            custom_metric = {}
            for turn in cache["history"]:
                for k, v in turn.get("info", {}).items():
                    if k == "success":
                        continue
                    if k not in custom_metric:
                        custom_metric[k] = []
                    custom_metric[k].append(float(v))

            # Aggregate custom metrics according to their category.
            for k, v in custom_metric.items():
                # NOTE(review): the second clause is redundant — it is subsumed
                # by the `k in TURN_LVL_METRICS` check on the left.
                if k in TURN_LVL_METRICS or ("Webshop" in k and k in TURN_LVL_METRICS):
                    # Turn-level metric: average over turns.
                    env_metric[k] = np.sum(v) / max(1, len(v))
                elif k in SUM_METRICS:
                    # Accumulated metric: sum over turns.
                    env_metric[k] = np.sum(v)
                elif k in STATE_METRICS:
                    # State metric: keep only the last observed value.
                    env_metric[k] = v[-1] if v else STATE_METRICS[k]
                else:
                    # Default: sum when the category is unknown.
                    env_metric[k] = np.sum(v)

            # Fill in defaults for state metrics that never appeared.
            for k, default_val in STATE_METRICS.items():
                if k not in env_metric:
                    env_metric[k] = default_val

            # Update the cache with raw and tag-prefixed metrics.
            cache["history"][-1]["metrics"] = custom_metric
            env_metric = {
                f"{entry['tag']}/{k}": v for k, v in env_metric.items()
            }  # prefix each metric with the env tag
            cache["metrics"] = env_metric

            # Attach the ground-truth answer for MetamathQA environments.
            if entry["tag"] == "MetamathQA":
                cache["correct_answer"] = entry["env"].correct_answer

        # Compute MRR (Mean Reciprocal Rank):
        # collect every env's root_cause_rank and take the reciprocal of valid ranks.
        reciprocal_ranks = []
        for entry, cache in zip(envs, rollout_cache):
            tag = entry["tag"]
            # Fetch this env's root_cause_rank, if recorded.
            rank_key = f"{tag}/root_cause_rank"
            if rank_key in cache["metrics"]:
                rank = cache["metrics"][rank_key]
                if rank > 0 and rank < 999:  # valid rank
                    reciprocal_rank = 1.0 / rank
                else:
                    reciprocal_rank = 0.0  # correct root cause was not found
                reciprocal_ranks.append(reciprocal_rank)

        # Add the MRR to every env's metrics.
        if reciprocal_ranks:
            mrr = np.mean(reciprocal_ranks)
            for entry, cache in zip(envs, rollout_cache):
                tag = entry["tag"]
                cache["metrics"][f"{tag}/mrr"] = float(mrr)
        else:
            # No valid rank data: MRR is 0.
            for entry, cache in zip(envs, rollout_cache):
                tag = entry["tag"]
                cache["metrics"][f"{tag}/mrr"] = 0.0

        return rollout_cache

    def _update_cache_history(
        self,
        history: List[Dict],
        next_state,
        actions_left,
        num_actions_info: Optional[Dict] = None,
    ):
        """Append a new state record to *history* and return it.

        When ``num_actions_info`` is provided, it is first merged into the
        most recent record (finalizing the previous turn) before the new
        state record is appended.
        """
        if num_actions_info is not None:
            # Finalize the previous turn's record with action/reward details.
            assert len(history), "History should not be empty"
            history[-1].update(num_actions_info)

        if isinstance(next_state, str):
            # Plain-text observation.
            record = {"state": next_state}
        else:
            # Multimodal observation: one <images> placeholder per image.
            record = {"state": "<images>" * len(next_state), "images": next_state}
        record["actions_left"] = actions_left
        history.append(record)
        return history

    def _extract_map_valid_actions(self, entry: Dict, actions: List[str]):
        """extract valid actions from the action lookup table (if exists)"""
        mapped_actions = []
        action_lookup = getattr(entry["env"].config, "action_lookup", None)
        if action_lookup is None:
            mapped_actions = actions
        else:  # the envs have pre-defined action lookup
            rev_action_lookup = {v.lower(): k for k, v in action_lookup.items()}
            actions = [action.lower() for action in actions]
            mapped_actions = [
                rev_action_lookup[action]
                for action in actions
                if action in rev_action_lookup
            ]
        return mapped_actions

    def _handle_mm_state(self, state: Union[str, np.ndarray, list[np.ndarray]]):
        """Normalize an env observation.

        Text observations pass through untouched; image observations (a
        single array or a list of arrays) come back as a list of RGB
        ``PIL.Image`` objects.
        """
        if isinstance(state, str):
            return state
        # Wrap a lone frame in a list so both cases share one output format.
        frames = [state] if isinstance(state, np.ndarray) else state
        return [PIL.Image.fromarray(frame, mode="RGB") for frame in frames]

    def render(self):
        rendered_list = [entry["env"].render() for entry in self.envs]
        return rendered_list

    def close(self):
        """Close every managed environment, releasing its resources."""
        for item in self.envs:
            item["env"].close()
