import asyncio
import json
import logging
import os
import copy
from typing import Any
from uuid import uuid4

from jinja2 import Template
from tokenizers import Tokenizer
from verl.experimental.agent_loop.agent_loop import AgentLoopBase, AgentLoopOutput, register
from verl.experimental.agent_loop.tool_parser import FunctionCall, ToolParser
from verl.tools.utils.tool_registry import initialize_tools_from_config
from verl.utils.profiler import simple_timer
from verl.utils.rollout_trace import rollout_trace_op

from agentic_system.environments.env_package.swe.swe.swe_bench_agent_env import SWEBenchShellEnv
import agentic_system.environments.env_package.swe.projection as proj
from agentic_system.environments.prompts.swe_bench import *

from swebench.harness.run_evaluation import run_instance
from swebench.harness.test_spec.test_spec import make_test_spec, TestSpec
import docker


# Module logger; level is configurable via the VERL_LOGGING_LEVEL env var
# (defaults to WARN). Use __name__ (not __file__) so the logger participates
# in the standard dotted logger hierarchy instead of being keyed by file path.
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))

class AgentLoopOutputWithScore(AgentLoopOutput):
    """AgentLoopOutput extended with a reward-model score for the rollout."""

    # Reward score assigned to this rollout by the reward model.
    rm_score: float

@register("swe_agent_1")
class SWEAgentLoop(AgentLoopBase):
    @classmethod
    def init_class(cls, config, tokenizer:Tokenizer, **kwargs):
        if cls._class_initialized:
            return
        cls._class_initialized = True
        print("Performing class-level ToolAgentLoop initialization")

        # Initialize tools from config file
        cls.tokenizer = tokenizer
        cls.max_user_turns = config.actor_rollout_ref.rollout.multi_turn.max_user_turns
        cls.max_assistant_turns = config.actor_rollout_ref.rollout.multi_turn.max_assistant_turns
        cls.max_parallel_calls = config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls
        cls.max_tool_response_length = config.actor_rollout_ref.rollout.multi_turn.max_tool_response_length
        cls.tool_response_truncate_side = config.actor_rollout_ref.rollout.multi_turn.tool_response_truncate_side
        tool_config_path = config.actor_rollout_ref.rollout.multi_turn.tool_config_path
        tool_list = initialize_tools_from_config(tool_config_path) if tool_config_path else []
        cls.tools = {tool.name: tool for tool in tool_list}
        cls.tool_schemas = [tool.tool_schema.model_dump(exclude_unset=True, exclude_none=True) for tool in tool_list]
        cls.tool_parser = ToolParser.get_tool_parser(config.actor_rollout_ref.rollout.multi_turn.format, cls.tokenizer)
        print(f"Initialized tools: {cls.tools}")

        cls.prompt_length = config.actor_rollout_ref.rollout.prompt_length
        cls.response_length = config.actor_rollout_ref.rollout.response_length
        cls.system_prompt = tokenizer.apply_chat_template([{}], add_generation_prompt=False, tokenize=True)
        cls.env = SWEBenchShellEnv()
    # def batch_step(env_list, inst):


    @rollout_trace_op
    async def run(self, sampling_params: dict[str, Any], problem_statement, inst, **kwargs) -> AgentLoopOutputWithScore:
        
        # breakpoint()
        # print(f"### kwargs is {kwargs}")
        # print(f"### kwargs is {kwargs.keys()}")
        instance = json.loads(inst[0])
        image_data = copy.deepcopy(kwargs.get("multi_modal_data", {}).get("image", None))

        # print(f"#### instance {instance}")
        await self.loop.run_in_executor(
            None, self.env.reset, instance
        )
        # self.env.reset(instance)
        
        messages = []
        messages.append({"role":"system","content":SYSTEM_TEMPLATE})
        messages.append({"role":"user","content":Template(instance_template).render(task=instance["problem_statement"], **{}, **os.environ)})
        # messages.append({"role":"user","content":"你好"})
        # messages.append({"role":"assistant","content":Template(instance_template).render(task=instance["problem_statement"], **{}, **os.environ)})
        # print(f"@@@ message : {messages}")

        metrics = {}
        request_id = uuid4().hex
        prompt_ids = await self.loop.run_in_executor(
            None,
            lambda: self.tokenizer.apply_chat_template(
                messages, add_generation_prompt=True, tokenize=True
            ),
        )
        response_mask = []
        sampling_params["top_k"] = 1

        user_turns, assistant_turns = 0, 0
        reward = 0
        while True:
            with simple_timer("generate_sequences", metrics):
                output = await self.server_manager.generate(
                    request_id=request_id, prompt_ids=prompt_ids, sampling_params=sampling_params, image_data=None
                )
                # output1 = await self.server_manager.generate(
                #     request_id=request_id, prompt_ids=prompt_ids, sampling_params=sampling_params, image_data=None
                # )
                # print(f"@@@@@@@@@@@@@@out@@@@@@@@@@@@@@\n output ids: {output.token_ids}\noutput1 ids: {output1.token_ids}\n@@@###@@@@@@@out@@@@@@###@@@@")
                # print(f"output equal {output.token_ids == output1.token_ids}")
            response_ids = output.token_ids
            req_str = self.tokenizer.decode(prompt_ids)
            resp_str = self.tokenizer.decode(response_ids)
            # print(f"@@@@@@@@@@@@@@req@@@@@@@@@@@@@@\n prompt ids: {prompt_ids}\nprompt str: {req_str}\nsampling params :{sampling_params}\nrequest_id :{request_id}\nresponse :{resp_str}\noutput ids : {output.token_ids}\n###################################")
            
            prompt_ids += response_ids
            response_mask += [1] * len(response_ids)
            assistant_turns += 1

            # reach max response length
            if len(response_mask) >= self.response_length:
                # print(f"@@@ break-max length, break!! content:{self.tokenizer.decode(response_ids)} \n### req {req_str}")
                break

            # reach max assistant turns
            if self.max_assistant_turns and assistant_turns >= self.max_assistant_turns:
                # print(f"@@@ break-max_assistant_turns, break!! content:{self.tokenizer.decode(response_ids)} \n### req {req_str}")
                break

            # reach max user turns
            if self.max_user_turns and user_turns >= self.max_user_turns:
                # print(f"@@@ break-max_user_turns, break!! content:{self.tokenizer.decode(response_ids)} \n### req {req_str}")
                break

            # no tool calls
            response = await self.loop.run_in_executor(
                None,
                lambda: self.tokenizer.decode(
                    response_ids
                ),
            )
            actions_results, valids = proj.swe_bench_projection([response])
            if not valids[0]:
                # print(f"@@@ break-not valids, break!! content:{response} \n### req {req_str}")
                break
            
            with simple_timer("tool_calls", metrics):
                # res, _reward, is_done, info  = self.env.step(actions_results[0],valids[0])
                res, _reward, is_done, info  = await self.loop.run_in_executor(
                    None,
                    lambda: self.env.step(
                        actions_results[0],valids[0]
                    ),
                )
                reward += _reward
            # print(f"@@@ response : {res} \n### req {req_str}")

            # append tool_response_ids
            tool_response_ids = await self.loop.run_in_executor(
                None,
                lambda messages=[{"role":"tool", "content": res}]: self.tokenizer.apply_chat_template(
                    messages, add_generation_prompt=True, tokenize=True
                ),
            )
            tool_response_ids = tool_response_ids[len(self.system_prompt) :]
            if "COMPLETE_TASK_AND_SUBMIT_FINAL_OUTPUT" in res:
                # instance = json.loads(data_source[0])
                lines=res.lstrip().splitlines()
                instance_id = uuid4().hex
                pred= {"model_name_or_path":"fake_model","model_patch":"\n".join(lines[1:]),"instance_id":instance_id}
                test_spec = make_test_spec(instance=instance,namespace="swebench", instance_image_tag="latest")
                res = run_instance(test_spec=test_spec,pred=pred,rm_image=False,force_rebuild=False,client=docker.from_env(),run_id=instance_id,timeout=3000,rewrite_reports=False)
                if res:
                    instance_id, report = res
                    reward += 20 if report[instance_id]["resolved"] else 0
                break

            # NOTE: last turn should not be user turn, or the EOS token reward
            # can't be propagated to previous token in GAE.
            if len(response_mask) + len(tool_response_ids) >= self.response_length:
                break

            prompt_ids += tool_response_ids
            response_mask += [0] * len(tool_response_ids)
            user_turns += 1

        response_ids = prompt_ids[-len(response_mask) :]
        prompt_ids = prompt_ids[: len(prompt_ids) - len(response_mask)]

        output = AgentLoopOutput(
            prompt_ids=prompt_ids,
            response_ids=response_ids[: self.response_length],
            response_mask=response_mask[: self.response_length],
            num_turns=user_turns + assistant_turns + 1,
            metrics=metrics,
            reward_score=reward,
            extra_fields={}
        )
        output.extra_fields["self"] = output
        # output["rm_scores"] = reward
        return output

    async def _call_tool(self, tool_call: FunctionCall) -> dict[str, str]:
        """Call tool and return tool response."""
        tool, instance_id = None, None
        try:
            # TODO: append malformed tool_call to the prompt: invalid function name or arguments
            tool_name = tool_call.name
            tool_args = json.loads(tool_call.arguments)
            tool = self.tools[tool_name]

            instance_id = await tool.create()
            tool_response, _, _ = await tool.execute(instance_id, tool_args)
        except Exception as e:
            logger.exception(f"Error when executing tool: {e}")
            return e
        finally:
            if tool and instance_id:
                await tool.release(instance_id)

        if len(tool_response) > self.max_tool_response_length:
            if self.tool_response_truncate_side == "left":
                tool_response = tool_response[: self.max_tool_response_length] + "...(truncated)"
            elif self.tool_response_truncate_side == "right":
                tool_response = "(truncated)..." + tool_response[-self.max_tool_response_length :]
            else:
                length = self.max_tool_response_length // 2
                tool_response = tool_response[:length] + "...(truncated)..." + tool_response[-length:]

        return {
            "role": "tool",
            "content": tool_response,
        }
