# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ast
import asyncio
import os
import re
import signal
from typing import Any, Callable

import ray
import torch
from ray.exceptions import GetTimeoutError

from recipe.simpletir.agent_utils import count_lines
from recipe.simpletir.utils.reward_score import _default_compute_score
from verl import DataProto

if os.getenv("SANDBOX_ENDPOINT", None) is not None:
    from sandbox.local_sandbox import parallel_sandbox
else:
    from sandbox.internal_sandbox import parallel_sandbox


# Keep this outside the main wrapper function for clarity and efficiency.
def _timeout_handler(signum, frame):
    """Signal handler function to raise a TimeoutError."""
    # print("Signal handler called!") # Debugging
    raise TimeoutError("Operation timed out!")


@ray.remote
def reward_func_timeout_ray(
    func: Callable, timeout_seconds: int, *args: Any, **kwargs: Any
):
    """Run ``func(*args, **kwargs)`` in a Ray task with a hard wall-clock timeout.

    The timeout is enforced with ``signal.alarm`` + a SIGALRM handler, so this
    only works on Unix and must run in the task's main thread.

    Args:
        func: The scoring callable to execute.
        timeout_seconds: Seconds to allow ``func`` to run before aborting.
        *args: Positional arguments forwarded to ``func``.
        **kwargs: Keyword arguments forwarded to ``func``.

    Returns:
        Whatever ``func`` returns, or ``{"score": 0.0, "extra_info":
        {"is_filter": 1}}`` if the call timed out (flagged so the caller can
        filter the sample out).
    """
    old_handler = signal.getsignal(signal.SIGALRM)
    signal.signal(signal.SIGALRM, _timeout_handler)
    signal.alarm(timeout_seconds)
    try:
        return func(*args, **kwargs)
    except TimeoutError:
        # Use int 1 (not the string "1") so the is_filter flag has the same
        # type as default_fail_score in MathRewardExecManager.__call__.
        return {"score": 0.0, "extra_info": {"is_filter": 1}}
    finally:
        # Cancel the pending alarm and restore the previous handler so we do
        # not leak a SIGALRM into unrelated code in this worker process.
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old_handler)


def is_only_final_answer(code_str: str) -> bool:
    """Return True iff *code_str* is nothing but one ``final_answer(...)`` call.

    A leading module docstring is ignored. Any parse failure (or any other
    unexpected error) yields False.
    """
    try:
        body = ast.parse(code_str).body

        # Skip an optional leading docstring: a bare string-constant expression.
        if body and isinstance(body[0], ast.Expr):
            leading = body[0].value
            if isinstance(leading, ast.Constant) and isinstance(leading.value, str):
                body = body[1:]

        # Exactly one statement must remain, and it must be a bare expression.
        if len(body) != 1 or not isinstance(body[0], ast.Expr):
            return False

        value = body[0].value
        return (
            isinstance(value, ast.Call)
            and isinstance(value.func, ast.Name)
            and value.func.id == "final_answer"
        )
    except Exception:
        return False


class MathRewardExecManager:
    """Math reward manager that executes code blocks in a sandbox and scores
    only the code's stdout against the ground truth.

    For each response, the first fenced code block is extracted, prefixed with
    a ``final_answer`` shim, and run through ``parallel_sandbox``. The tail of
    the sandbox stdout becomes the "solution string", which is scored (with a
    per-sample timeout) by ``compute_score`` running as a Ray task. Samples
    whose code is only a bare ``final_answer(...)`` call receive a 0.5 score
    penalty (no "true" tool use).
    """

    def __init__(self, tokenizer, num_examine, compute_score=None) -> None:
        self.tokenizer = tokenizer
        # The number of batches of decoded responses to print to the console.
        self.num_examine = num_examine
        self.compute_score = compute_score or _default_compute_score
        # Per-sample wall-clock budget (seconds) for scoring.
        self.timeout_seconds = 5
        # Only the last max_stdout_len characters of stdout are scored.
        self.max_stdout_len = 1000

    def __call__(self, data: DataProto):
        """Compute rewards for a batch.

        Args:
            data: Batch with ``prompts``/``responses``/``attention_mask``
                tensors and ``reward_model``/``data_source``/``extra_info``
                non-tensor fields.

        Returns:
            ``data.batch["rm_scores"]`` unchanged if present; otherwise a dict
            with ``reward_tensor`` (score placed at the last valid response
            token) and ``extra_info`` (per-key lists aligned with the batch).
        """
        # If there is rm score, we directly return rm score. Otherwise, we
        # compute via rm_score_fn.
        if "rm_scores" in data.batch.keys():
            return data.batch["rm_scores"]

        reward_tensor = torch.zeros_like(data.batch["responses"], dtype=torch.float32)

        already_print_data_sources = {}

        response_ids = data.batch["responses"]
        sequences_strs = self.tokenizer.batch_decode(
            response_ids, skip_special_tokens=True
        )
        ground_truths = [
            data_item.non_tensor_batch["reward_model"]["ground_truth"]
            for data_item in data
        ]
        data_sources = data.non_tensor_batch["data_source"]
        extra_infos = [
            data_item.non_tensor_batch.get("extra_info", None) for data_item in data
        ]

        assert len(sequences_strs) == len(ground_truths) == len(data_sources)

        # Parse code actions from responses: the first fenced ``` block
        # (optionally tagged py/python) in each decoded response.
        pattern = r"```(?:py|python)?\n(.*?)\n```"
        use_code = [0] * len(data)
        no_bug_code = [0] * len(data)
        true_tool_use = [0] * len(data)
        total_lines = [0] * len(data)
        code_lines = [0] * len(data)
        solution_strs = [""] * len(data)
        response_strs = []
        code_actions = []  # list of (sample_index, extracted_code)
        for i in range(len(data)):
            data_item = data[i]  # DataProtoItem

            prompt_ids = data_item.batch["prompts"]
            prompt_length = prompt_ids.shape[-1]

            response_ids = data_item.batch["responses"]
            valid_response_length = data_item.batch["attention_mask"][
                prompt_length:
            ].sum()
            valid_response_ids = response_ids[:valid_response_length]

            # decode only the valid (unpadded) response tokens
            response_str = self.tokenizer.decode(
                valid_response_ids, skip_special_tokens=True
            )
            response_strs.append(response_str)

            match = re.search(pattern, response_str, re.DOTALL)
            if match:
                code = match.group(1).strip()
                use_code[i] = 1
                code_actions.append((i, code))

        # Execute code actions in parallel in the sandbox.
        if len(code_actions) > 0:
            tasks = []
            for _, code in code_actions:
                # Prepend a final_answer shim so sandboxed code can emit a
                # \boxed{...} answer on stdout.
                tasks.append(
                    """
def final_answer(result):
    print(f"\\\\boxed{{{result}}}")

"""
                    + code
                )

            sandbox_success, sandbox_stdout, sandbox_stderr = asyncio.run(
                parallel_sandbox(tasks, num_processes=256)
            )
            for j, (sample_idx, code) in enumerate(code_actions):
                stdout = str(sandbox_stdout[j])
                stderr = str(sandbox_stderr[j])
                if len(stdout) > 0:
                    # Keep only the tail of stdout as the solution string.
                    solution_strs[sample_idx] += stdout[-self.max_stdout_len :]
                if len(stderr) == 0:
                    no_bug_code[sample_idx] = 1
                    # A bare final_answer(...) call does not count as real
                    # tool use.
                    if not is_only_final_answer(code):
                        true_tool_use[sample_idx] = 1

                # FIX: assign per-sample instead of appending. The original
                # appended to lists pre-initialized to [0] * len(data), which
                # left the batch-aligned entries at 0 and grew the lists past
                # the batch size.
                total_line, code_line = count_lines(code)
                total_lines[sample_idx] = total_line
                code_lines[sample_idx] = code_line

        # Compute rewards.
        scores: list[float] = [0.0] * len(solution_strs)
        extra_info_dict: dict[
            str, list[float]
        ] = {}  # Key -> list of values for the batch
        extra_info_dict["use_code"] = use_code
        extra_info_dict["no_bug_code"] = no_bug_code
        extra_info_dict["true_tool_use"] = true_tool_use
        extra_info_dict["total_lines"] = total_lines
        extra_info_dict["code_lines"] = code_lines
        print(
            f"Scoring process started over {len(solution_strs)} samples, waiting for results..."
        )

        # Launch one Ray scoring task per sample; each enforces its own
        # SIGALRM timeout inside the task.
        futures = []
        for i, response_str in enumerate(response_strs):
            ground_truth = ground_truths[i]
            solution_str = solution_strs[i]
            data_source = data_sources[i]
            extra_info = extra_infos[i]

            future = reward_func_timeout_ray.remote(
                self.compute_score,
                self.timeout_seconds,
                data_source,
                solution_str,
                ground_truth,
                extra_info,
            )
            futures.append(future)

        default_fail_score = {
            "score": 0.0,
            "extra_info": {"is_filter": 1},
        }  # Default on error which should be filtered

        for i, future in enumerate(futures):
            try:
                task_result = ray.get(future, timeout=self.timeout_seconds)

                if isinstance(task_result, dict):
                    assert "extra_info" in task_result, (
                        f"Extra info missing in task_result dict for item {i}. Result: {task_result}"
                    )
                    score_result = task_result
                    if "is_filter" not in task_result["extra_info"]:
                        score_result["extra_info"].update({"is_filter": 0})
                elif isinstance(task_result, (int, float)):
                    # Plain numeric score: wrap it in the dict protocol.
                    score_result = {
                        "score": float(task_result),
                        "extra_info": {"is_filter": 0},
                    }
                else:
                    print(
                        f"Unexpected task_result type for item {i}: {type(task_result)}. Using default score. Result: {task_result}"
                    )
                    ray.cancel(future, force=True)
                    score_result = default_fail_score
            except GetTimeoutError:
                print(
                    f"Timeout processing item {i} (gold='{str(ground_truths[i])[:50]}...', target='{str(solution_strs[i])[:50]}...'). Using default score."
                )
                score_result = default_fail_score
            except Exception as e:
                print(
                    f"Error processing item {i} (gold='{str(ground_truths[i])[:50]}...', target='{str(solution_strs[i])[:50]}...'): {e}"
                )
                import traceback

                traceback.print_exc()
                ray.cancel(future, force=True)
                score_result = default_fail_score

            scores[i] = float(score_result.get("score", 0.0))

            if "extra_info" in score_result and isinstance(
                score_result["extra_info"], dict
            ):
                for key, value in score_result["extra_info"].items():
                    if key not in extra_info_dict:
                        extra_info_dict[key] = [0.0] * len(solution_strs)
                    extra_info_dict[key][i] = value

        # batched scoring: place each score at the last valid response token
        prompt_ids = data.batch["prompts"]
        prompt_length = prompt_ids.shape[-1]
        valid_response_length = data.batch["attention_mask"][:, prompt_length:].sum(
            dim=-1
        )
        data_sources = data.non_tensor_batch["data_source"]

        for i in range(len(data)):
            data_source = data_sources[i]
            # Halve the score when the sample showed no true tool use.
            reward_tensor[i, valid_response_length[i].item() - 1] = (
                scores[i] * 0.5 if true_tool_use[i] == 0 else scores[i]
            )

            # FIX: the increment block was copy-pasted twice, double-counting
            # examined samples per data source; keep a single increment.
            if data_source not in already_print_data_sources:
                already_print_data_sources[data_source] = 0

            if already_print_data_sources[data_source] < self.num_examine:
                already_print_data_sources[data_source] += 1

        return {"reward_tensor": reward_tensor, "extra_info": extra_info_dict}
