import asyncio
import json
import logging
import os
import copy
from typing import Any
from uuid import uuid4

from jinja2 import Template
from tokenizers import Tokenizer
from verl.experimental.agent_loop.agent_loop import AgentLoopBase, AgentLoopOutput, register
from verl.experimental.agent_loop.tool_parser import FunctionCall, ToolParser
from verl.tools.utils.tool_registry import initialize_tools_from_config
from verl.utils.profiler import simple_timer
from verl.utils.rollout_trace import rollout_trace_op

from agentic_system.environments.env_package.swe.swe.swe_bench_agent_env import SWEBenchShellEnv
import agentic_system.environments.env_package.swe.projection as proj
from agentic_system.environments.prompts.swe_bench import *
from agentic_system.environments.prompts.swe_bench_memory import *

from swebench.harness.run_evaluation import run_instance
from swebench.harness.test_spec.test_spec import make_test_spec, TestSpec
import docker

from agentic_system.environments.env_package.swe.swe_bench_reward import SWEBenchReward

# Module-level logger; verbosity is controlled via the VERL_LOGGING_LEVEL
# environment variable (defaults to WARN), mirroring verl's convention.
logger = logging.getLogger(__file__)
logger.setLevel(os.environ.get("VERL_LOGGING_LEVEL", "WARN"))

class AgentLoopOutputWithScore(AgentLoopOutput):
    """``AgentLoopOutput`` extended with a scalar reward-model score.

    NOTE(review): this class is not referenced elsewhere in this file —
    presumably consumed by downstream training code; confirm before removing.
    """

    rm_score: float
    """reward score"""

@register("swe_agent")
class SWEMTAgentLoop(AgentLoopBase):
    @classmethod
    def init_class(cls, config, tokenizer:Tokenizer, **kwargs):
        if cls._class_initialized:
            return
        cls._class_initialized = True
        print("Performing class-level ToolAgentLoop initialization")

        # Initialize tools from config file
        cls.tokenizer = tokenizer
        cls.max_user_turns = config.actor_rollout_ref.rollout.multi_turn.max_user_turns
        cls.max_assistant_turns = config.actor_rollout_ref.rollout.multi_turn.max_assistant_turns
        cls.max_parallel_calls = config.actor_rollout_ref.rollout.multi_turn.max_parallel_calls
        cls.max_tool_response_length = config.actor_rollout_ref.rollout.multi_turn.max_tool_response_length
        cls.tool_response_truncate_side = config.actor_rollout_ref.rollout.multi_turn.tool_response_truncate_side
        tool_config_path = config.actor_rollout_ref.rollout.multi_turn.tool_config_path
        tool_list = initialize_tools_from_config(tool_config_path) if tool_config_path else []
        cls.tools = {tool.name: tool for tool in tool_list}
        cls.tool_schemas = [tool.tool_schema.model_dump(exclude_unset=True, exclude_none=True) for tool in tool_list]
        cls.tool_parser = ToolParser.get_tool_parser(config.actor_rollout_ref.rollout.multi_turn.format, cls.tokenizer)
        print(f"Initialized tools: {cls.tools}")

        cls.prompt_length = config.actor_rollout_ref.rollout.prompt_length
        cls.response_length = config.actor_rollout_ref.rollout.response_length
        cls.system_prompt = tokenizer.apply_chat_template([{}], add_generation_prompt=False, tokenize=True)
        cls.env = SWEBenchShellEnv()
        cls.reward = SWEBenchReward()   # add for steps reward enhancement
        if config.trainer.validation_data_dir:
            log_file_name = os.path.join(config.trainer.validation_data_dir,f"{uuid4().hex}.log")
            if not os.path.exists(config.trainer.validation_data_dir):
                os.makedirs(config.trainer.validation_data_dir, exist_ok=True)
            cls.log_file = open(log_file_name, "w")
            print(f"agent loop log file {log_file_name}")
    # def batch_step(env_list, inst):


    @rollout_trace_op
    async def run(self, sampling_params: dict[str, Any], problem_statement, inst, **kwargs) :
        
        # breakpoint()
        # print(f"### kwargs is {kwargs}")
        # print(f"### kwargs is {kwargs.keys()}")
        instance = json.loads(inst[0])
        image_data = copy.deepcopy(kwargs.get("multi_modal_data", {}).get("image", None))

        # print(f"#### instance {instance}")
        await self.loop.run_in_executor(
            None, self.env.reset, instance
        )
        # self.env.reset(instance)

        # sampling_params["top_k"] = 1

        user_turns, assistant_turns = 0, 0
        reward = 0
        mt_res = []
        memory = "尚无记忆内容"
        observation = "本轮为第一轮，无执行结果"
        is_break = False
        task_id = uuid4().hex
        steps = ""   # add for steps reward enhancement

        while True:
            response_mask = []
            messages = []
            messages.append({"role":"system","content":SYSTEM_TEMPLATE})
            messages.append({"role":"user","content":Template(instance_mem_template).render(task=instance["problem_statement"],
                                                                                        memory=memory,
                                                                                        observation=observation,
                                                                                        turn=assistant_turns,
                                                                                        **{},
                                                                                        **os.environ)})

            metrics = {}
            request_id = uuid4().hex
            prompt_ids = await self.loop.run_in_executor(
                None,
                lambda: self.tokenizer.apply_chat_template(
                    messages, add_generation_prompt=True, tokenize=True
                ),
            )
            prompt_ids = prompt_ids if len(prompt_ids) <= self.prompt_length else prompt_ids[-self.prompt_length:]
            # prompt_str = await self.loop.run_in_executor(
            #     None,
            #     lambda: self.tokenizer.apply_chat_template(
            #         messages, add_generation_prompt=True, tokenize=False
            #     ),
            # )
            with simple_timer("generate_sequences", metrics):
                output = await self.server_manager.generate(
                    request_id=request_id, prompt_ids=prompt_ids, sampling_params=sampling_params, image_data=image_data
                )
            response_ids = output.token_ids
            # no tool calls
            response = await self.loop.run_in_executor(
                None,
                lambda: self.tokenizer.decode(
                    response_ids
                ),
            )
            self.log_file.write(f"+++++++++++++++++++++++ TASK: {task_id} +++++++++++++++++++++++\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@ PROMPT @@@@@@@@@@@@@@@@@@@@@@@@@@@@\n {messages[-1]['content']}\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@ RESPONSE @@@@@@@@@@@@@@@@@@@@@@@@@@@@\n{response}\n---------------------------------------------------------------------------\n")
            # print(f"@TASK:{task_id}--\n@prompt:{messages[-1]['content']}\n@response:{response}")
            # print(f"@TASK:{task_id}--\n@prompt:{prompt_str}@prompt_ids:{prompt_ids}\n@response:{response}\n@ids: {output.token_ids}")
            
            # prompt_ids += response_ids
            response_mask += [1] * len(response_ids)
            assistant_turns += 1

            # reach max response length
            if len(response_mask) >= self.response_length:
                self.log_file.write(f"+++++++++++++++++++++++ TASK :{task_id} ---- break lenght {len(response_mask)}\n")
                break

            # reach max assistant turns
            if self.max_assistant_turns and assistant_turns >= self.max_assistant_turns:
                self.log_file.write(f"+++++++++++++++++++++++ TASK :{task_id} ---- break max_assistant_turns {assistant_turns}\n")
                break

            # reach max user turns
            if self.max_user_turns and user_turns >= self.max_user_turns:
                self.log_file.write(f"+++++++++++++++++++++++ TASK :{task_id} ---- break max_user_turns {user_turns}\n")
                break


            action, valid = proj.swe_bench_mem_projection(response)
            memory = action["memory"]
            if not valid:
                self.log_file.write(f"+++++++++++++++++++++++ TASK :{task_id} ---- break vaild {response}\n")
                break
            
            with simple_timer("tool_calls", metrics):
                res, _reward, is_done, info  = await self.loop.run_in_executor(
                    None,
                    lambda: self.env.step(
                        action, valid
                    ),
                )
                reward += _reward
                # if _reward <= 0:
                self.log_file.write(f"+++++++++++++++++++++++ TASK :{task_id} ---- step reward : {_reward}\n res : {res}\n--------------------------------------------\n")
                res_limited = res[:256] + f"\ncontent is truncated for too long({len(res)})" if len(res) > 256 else res
                steps += Template(steps_template).render(turn=assistant_turns, action=action.get("command", ""), result=res_limited)   # add for steps reward enhancement
                #     break
            # memory reward
            memory_score, memory_score_reponse = await self.loop.run_in_executor(
                None, 
                self.reward.memory_reward, 
                messages[-1]['content'], response
            )
            reward += memory_score
            self.log_file.write(f"+++++++++++++++++++++++ TASK :{task_id} ---- memory score reward : {memory_score}, score detail: {memory_score_reponse}\n--------------------------------------------\n")
            # self.log_file.write(f"@@@ response : {res} \n### req {req_str}")

            # append tool_response_ids
            # tool_response_ids = await self.loop.run_in_executor(
            #     None,
            #     lambda messages=[{"role":"tool", "content": res}]: self.tokenizer.apply_chat_template(
            #         messages, add_generation_prompt=True, tokenize=True
            #     ),
            # )
            observation = res
            
            # tool_response_ids = tool_response_ids[len(self.system_prompt) :]
            if "COMPLETE_TASK_AND_SUBMIT_FINAL_OUTPUT" in res:
                # instance = json.loads(data_source[0])
                lines=res.lstrip().splitlines()
                instance_id = uuid4().hex
                pred= {"model_name_or_path":"fake_model","model_patch":"\n".join(lines[1:]),"instance_id":instance_id}
                test_spec = make_test_spec(instance=instance,namespace="swebench", instance_image_tag="latest")
                res = run_instance(test_spec=test_spec,pred=pred,rm_image=False,force_rebuild=False,client=docker.from_env(),run_id=instance_id,timeout=3000,rewrite_reports=False)
                if res:
                    # instance_id, report = res
                    reward += self.max_assistant_turns * 1.2 if res["completed"] and res["resolved"] else assistant_turns * 1 + 1
                    reward -= assistant_turns * 1
                    self.log_file.write(f"+++++++++++++++++++++++ TASK :{task_id} ---- break final reward : {reward}\n res : {res}, \n--------------------------------------------\n")
                break

            # NOTE: last turn should not be user turn, or the EOS token reward
            # can't be propagated to previous token in GAE.
            # if len(response_mask) + len(tool_response_ids) >= self.response_length:
            #     is_break = True
            #     break

            # prompt_ids += tool_response_ids
            # response_mask += [0] * len(tool_response_ids)
            user_turns += 1

            # response_ids = prompt_ids[-len(response_mask) :]
            # prompt_ids = prompt_ids[: len(prompt_ids) - len(response_mask)]
            self.log_file.write(f"+++++++++++++++++++++++ TASK :{task_id} prompt ids len {len(prompt_ids)} response ids len {len(response_ids)}\n")
            output = AgentLoopOutput(
                prompt_ids=prompt_ids if len(prompt_ids) < self.prompt_length else prompt_ids[-self.prompt_length:],
                response_ids=response_ids if len(response_ids) < self.response_length else response_ids[:self.response_length],
                response_mask=response_mask if len(response_mask) < self.response_length else response_mask[:self.response_length],
                num_turns=user_turns + assistant_turns + 1,
                metrics=metrics,
                reward_score=reward,
                extra_fields=dict()
            )
            mt_res.append(output)
            
            # self.log_file.write(f"prompt ids len {len(prompt_ids)} response ids len {len(response_ids)}\n")
        # add for steps reward enhancement
        procedure_score, single_step_scores, steps_score_reponse = await self.loop.run_in_executor(
            None, 
            self.reward.steps_reward, 
            instance["problem_statement"], steps
        )
        reward += procedure_score
        self.log_file.write(f"+++++++++++++++++++++++ TASK :{task_id} ---- procedure score reward : {procedure_score}, single step scores reward : {single_step_scores}, score detail: {steps_score_reponse}\n--------------------------------------------\n")
        self.log_file.write(f"+++++++++++++++++++++++ TASK :{task_id} ---- final (with steps) reward : {reward}\n--------------------------------------------\n")

        output = AgentLoopOutput(
            prompt_ids=prompt_ids if len(prompt_ids) < self.prompt_length else prompt_ids[-self.prompt_length:],
            response_ids=response_ids if len(response_ids) < self.response_length else response_ids[:self.response_length],
            response_mask=response_mask if len(response_mask) < self.response_length else response_mask[:self.response_length],
            num_turns=user_turns + assistant_turns + 1,
            metrics=metrics,
            reward_score=reward,
            extra_fields=dict()
        )
        mt_res.append(output)
        
        index = 0
        for o in mt_res:
            single_step_qa = single_step_scores[index] if index < len(single_step_scores) else 1
            ratio = 1 if single_step_qa == 1 else 0.8
            index += 1
            # 如果该步骤是重复冗余动作，则仅获得部分final_reward
            o.reward_score = mt_res[-1].reward_score * ratio
            o.extra_fields["count"] = len(mt_res)
            o.num_turns=mt_res[-1].num_turns
        self.log_file.write(f"+++++++++++++++++++++++TASK :{task_id} swe mt len : {len(mt_res)}\n")
        self.log_file.flush()
        return mt_res

    async def _call_tool(self, tool_call: FunctionCall) -> dict[str, str]:
        """Call tool and return tool response."""
        tool, instance_id = None, None
        try:
            # TODO: append malformed tool_call to the prompt: invalid function name or arguments
            tool_name = tool_call.name
            tool_args = json.loads(tool_call.arguments)
            tool = self.tools[tool_name]

            instance_id = await tool.create()
            tool_response, _, _ = await tool.execute(instance_id, tool_args)
        except Exception as e:
            logger.exception(f"Error when executing tool: {e}")
            return e
        finally:
            if tool and instance_id:
                await tool.release(instance_id)

        if len(tool_response) > self.max_tool_response_length:
            if self.tool_response_truncate_side == "left":
                tool_response = tool_response[: self.max_tool_response_length] + "...(truncated)"
            elif self.tool_response_truncate_side == "right":
                tool_response = "(truncated)..." + tool_response[-self.max_tool_response_length :]
            else:
                length = self.max_tool_response_length // 2
                tool_response = tool_response[:length] + "...(truncated)..." + tool_response[-length:]

        return {
            "role": "tool",
            "content": tool_response,
        }
