import asyncio
import json
import logging
import os
import copy
from typing import Any
from uuid import uuid4

from jinja2 import Template
from tokenizers import Tokenizer
from verl.experimental.agent_loop.agent_loop import AgentLoopBase, AgentLoopOutput, register
from verl.experimental.agent_loop.tool_parser import FunctionCall, ToolParser
from verl.tools.utils.tool_registry import initialize_tools_from_config
from verl.utils.profiler import simple_timer
from verl.utils.rollout_trace import rollout_trace_op

# from agentic_system.environments.env_package.swe.swe.swe_bench_agent_env import SWEBenchShellEnv
# import agentic_system.environments.env_package.swe.projection as proj
# from agentic_system.environments.prompts.swe_bench import *
from agentic_system.environments.prompts.tool_bench_memory import SYSTEM_TEMPLATE,tool_mem_template,TOOL_BENCH_TEMPLATE_NO_HIS

# from swebench.harness.run_evaluation import run_instance
# from swebench.harness.test_spec.test_spec import make_test_spec, TestSpec
# import docker

from agentic_system.environments.env_package.mirrorapi.mirrirapi.mirrorapi_agent_env import MirrirAPIAgentSiteEnv
import agentic_system.environments.env_package.mirrorapi.projection as proj


# Module logger. Using __name__ (not __file__) keeps this logger inside the
# package logger hierarchy so handlers/levels configured on parent loggers
# apply; the level is overridable via the VERL_LOGGING_LEVEL env var.
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))

class AgentLoopOutputWithScore(AgentLoopOutput):
    """Agent-loop output extended with a scalar reward-model score."""

    # Reward score assigned to this rollout.
    rm_score: float

@register("toolbench_agent")
class ToolBenchMTAgentLoop(AgentLoopBase):
    """Multi-turn agent loop for ToolBench tasks backed by a MirrorAPI environment.

    Each :meth:`run` call resets the shared environment for one task, then
    alternates model generation and environment steps until the episode ends
    or a length/turn budget is exhausted. One ``AgentLoopOutput`` is recorded
    per turn; after the rollout, every recorded output is stamped with the
    final cumulative reward and turn count so earlier turns share the
    episode-level outcome.
    """

    @classmethod
    def init_class(cls, config, tokenizer: Tokenizer, **kwargs):
        """One-time class-level initialization (tools, budgets, env, log file).

        Idempotent: subsequent calls return immediately.

        Args:
            config: verl trainer config (rollout budgets, tool config path,
                optional ``trainer.validation_data_dir`` for rollout logs).
            tokenizer: tokenizer used for chat templating and decoding.
        """
        if cls._class_initialized:
            return
        cls._class_initialized = True
        print("Performing class-level ToolAgentLoop initialization")

        # Tokenizer and per-rollout budgets.
        cls.tokenizer = tokenizer
        cls.max_user_turns = config.actor_rollout_ref.rollout.multi_turn.max_user_turns
        cls.max_assistant_turns = config.actor_rollout_ref.rollout.multi_turn.max_assistant_turns
        cls.max_parallel_calls = config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls
        cls.max_tool_response_length = config.actor_rollout_ref.rollout.multi_turn.max_tool_response_length
        cls.tool_response_truncate_side = config.actor_rollout_ref.rollout.multi_turn.tool_response_truncate_side

        # Initialize tools from the (optional) tool config file.
        tool_config_path = config.actor_rollout_ref.rollout.multi_turn.tool_config_path
        tool_list = initialize_tools_from_config(tool_config_path) if tool_config_path else []
        cls.tools = {tool.name: tool for tool in tool_list}
        cls.tool_schemas = [tool.tool_schema.model_dump(exclude_unset=True, exclude_none=True) for tool in tool_list]
        cls.tool_parser = ToolParser.get_tool_parser(config.actor_rollout_ref.rollout.multi_turn.format, cls.tokenizer)
        print(f"Initialized tools: {cls.tools}")

        cls.prompt_length = config.actor_rollout_ref.rollout.prompt_length
        cls.response_length = config.actor_rollout_ref.rollout.response_length
        # Token ids of an empty chat turn; previously used to strip the chat
        # template's system prefix from tool responses (currently unused by run).
        cls.system_prompt = tokenizer.apply_chat_template([{}], add_generation_prompt=False, tokenize=True)
        cls.env = MirrirAPIAgentSiteEnv(max_steps=5)
        if config.trainer.validation_data_dir:
            os.makedirs(config.trainer.validation_data_dir, exist_ok=True)
            log_file_name = os.path.join(config.trainer.validation_data_dir, f"{uuid4().hex}.log")
            cls.log_file = open(log_file_name, "w", encoding="utf-8")
            print(f"agent loop log file {log_file_name}")
        else:
            # run() writes to cls.log_file unconditionally; fall back to a
            # null sink so a missing validation_data_dir does not crash rollouts.
            cls.log_file = open(os.devnull, "w", encoding="utf-8")

    def _clip_output(self, prompt_ids, response_ids, response_mask, num_turns, metrics, reward):
        """Build one AgentLoopOutput, clipping ids/mask to configured budgets.

        Prompts keep their most recent tokens (left-truncate); responses and
        masks keep their earliest tokens (right-truncate).
        """
        return AgentLoopOutput(
            prompt_ids=prompt_ids if len(prompt_ids) < self.prompt_length else prompt_ids[-self.prompt_length:],
            response_ids=response_ids if len(response_ids) < self.response_length else response_ids[:self.response_length],
            response_mask=response_mask if len(response_mask) < self.response_length else response_mask[:self.response_length],
            num_turns=num_turns,
            metrics=metrics,
            reward_score=reward,
            extra_fields=dict(),
        )

    @rollout_trace_op
    async def run(self, sampling_params: dict[str, Any], **kwargs):
        """Roll out one multi-turn episode.

        Args:
            sampling_params: generation parameters passed to the server.
            **kwargs: must contain ``task`` and ``api_list_ori``; may contain
                ``index`` and ``multi_modal_data``.

        Returns:
            list[AgentLoopOutput]: one output per turn, all stamped with the
            final cumulative reward and turn count.
        """
        image_data = copy.deepcopy(kwargs.get("multi_modal_data", {}).get("image", None))

        # Reset the environment for this task; blocking call kept off the event loop.
        await self.loop.run_in_executor(
            None,
            lambda: self.env.reset(task=kwargs['task'], api_list=kwargs['api_list_ori'], index=kwargs.get('index', 0)),
        )

        user_turns, assistant_turns = 0, 0
        reward = 0
        mt_res = []
        task_id = uuid4().hex

        while True:
            # Each turn re-renders the full prompt from the templates; the
            # response mask therefore only covers this turn's response tokens.
            response_mask = []
            messages = [
                {"role": "system", "content": SYSTEM_TEMPLATE},
                {"role": "user", "content": Template(tool_mem_template).render(
                    api_list=kwargs['api_list_ori'], task=kwargs['task'])},
            ]
            metrics = {}
            request_id = uuid4().hex
            prompt_ids = await self.loop.run_in_executor(
                None,
                lambda: self.tokenizer.apply_chat_template(
                    messages, add_generation_prompt=True, tokenize=True
                ),
            )
            # Keep the most recent tokens when the prompt exceeds the budget.
            prompt_ids = prompt_ids if len(prompt_ids) <= self.prompt_length else prompt_ids[-self.prompt_length:]

            with simple_timer("generate_sequences", metrics):
                output = await self.server_manager.generate(
                    request_id=request_id, prompt_ids=prompt_ids, sampling_params=sampling_params, image_data=image_data
                )
            response_ids = output.token_ids

            # Debug diagnostics (kept at debug/warning level instead of
            # unconditional prints): flag out-of-vocab token ids and show how
            # the leading tokens decode.
            logger.debug("raw token_ids=%s vocab_size=%s", response_ids, self.tokenizer.vocab_size)
            out_of_range = [tid for tid in response_ids if tid >= self.tokenizer.vocab_size]
            if out_of_range:
                logger.warning("Out of range token IDs: %s", out_of_range)
            if response_ids:
                logger.debug(
                    "first tokens decode: %s",
                    self.tokenizer.decode(response_ids[:10], skip_special_tokens=True),
                )

            response = await self.loop.run_in_executor(
                None,
                lambda: self.tokenizer.decode(
                    response_ids,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=True,
                ),
            )
            self.log_file.write(f"+++++++++++++++++++++++\n@@@@ TASK: {task_id}--\n@@@@ PROMPT: {messages[-1]['content']}\n@@@@ RESPONSE: {response}\n------------------------")
            response_mask += [1] * len(response_ids)
            assistant_turns += 1

            # Stop when the response-length budget is exhausted.
            if len(response_mask) >= self.response_length:
                self.log_file.write(f"TASK :{task_id} ---- break length {len(response_mask)}\n")
                break

            # Stop at the assistant-turn budget.
            if self.max_assistant_turns and assistant_turns >= self.max_assistant_turns:
                self.log_file.write(f"TASK :{task_id} ---- break max_assistant_turns {assistant_turns}\n")
                break

            # Stop at the user-turn budget.
            if self.max_user_turns and user_turns >= self.max_user_turns:
                self.log_file.write(f"TASK :{task_id} ---- break max_user_turns {user_turns}\n")
                break

            # Project the raw model response into an environment action.
            action_lst, invoke_lst, _memory_lst, valid_lst = proj.mirrorapi_projection([response])
            action, invoke, valid = action_lst[0], invoke_lst[0], valid_lst[0]
            if not valid:
                self.log_file.write(f"TASK :{task_id} ---- break valid {response}\n")
                break

            with simple_timer("tool_calls", metrics):
                res, _reward, is_done, _info = await self.loop.run_in_executor(
                    None,
                    lambda: self.env.step(action, invoke, valid, assistant_turns),
                )
                reward += _reward
                self.log_file.write(f"TASK :{task_id} ---- step reward res : {res}, reward : {_reward}\n")

            # NOTE: last turn should not be a user turn, or the EOS token reward
            # can't be propagated to previous tokens in GAE.
            if is_done:
                break

            user_turns += 1
            self.log_file.write(f"TASK :{task_id} prompt ids len {len(prompt_ids)} response ids len {len(response_ids)}\n")
            mt_res.append(self._clip_output(
                prompt_ids, response_ids, response_mask,
                user_turns + assistant_turns + 1, metrics, reward,
            ))

        # Snapshot of the final turn (the one that triggered the break).
        mt_res.append(self._clip_output(
            prompt_ids, response_ids, response_mask,
            user_turns + assistant_turns + 1, metrics, reward,
        ))

        # Broadcast the episode-level outcome onto every per-turn output.
        for o in mt_res:
            o.reward_score = mt_res[-1].reward_score
            o.extra_fields["count"] = len(mt_res)
            o.num_turns = mt_res[-1].num_turns
        self.log_file.write(f"TASK :{task_id} swe mt len : {len(mt_res)}\n")
        self.log_file.flush()
        return mt_res

    async def _call_tool(self, tool_call: FunctionCall) -> dict[str, str]:
        """Execute one tool call and return a tool-role chat message.

        The tool instance is always released in ``finally``. On any failure
        (unknown tool, malformed JSON arguments, execution error) a tool-role
        error message is returned instead of raising.
        """
        tool, instance_id = None, None
        try:
            # TODO: append malformed tool_call to the prompt: invalid function name or arguments
            tool_name = tool_call.name
            tool_args = json.loads(tool_call.arguments)
            tool = self.tools[tool_name]

            instance_id = await tool.create()
            tool_response, _, _ = await tool.execute(instance_id, tool_args)
        except Exception as e:
            logger.exception(f"Error when executing tool: {e}")
            # BUGFIX: the original returned the exception object itself,
            # violating the declared dict[str, str] contract; return a
            # well-formed tool message so callers can append it to the chat.
            return {"role": "tool", "content": f"Error when executing tool: {e}"}
        finally:
            if tool and instance_id:
                await tool.release(instance_id)

        # Truncate oversized tool output on the configured side.
        if len(tool_response) > self.max_tool_response_length:
            if self.tool_response_truncate_side == "left":
                tool_response = tool_response[: self.max_tool_response_length] + "...(truncated)"
            elif self.tool_response_truncate_side == "right":
                tool_response = "(truncated)..." + tool_response[-self.max_tool_response_length :]
            else:
                length = self.max_tool_response_length // 2
                tool_response = tool_response[:length] + "...(truncated)..." + tool_response[-length:]

        return {
            "role": "tool",
            "content": tool_response,
        }