# from agentic_system.reward_manager.base_reward import RewardBase
import logging
import re
from jinja2 import Template
import os
from agentic_system.environments.prompts.swe_bench_memory import *
import agentic_system.environments.env_package.swe.utils as utils

# Module-level logger. Keyed on __name__ so it participates in the standard
# dotted-module logger hierarchy (getLogger(__file__) would key it on an
# absolute file path, which varies per machine and defeats hierarchy config).
logger = logging.getLogger(__name__)
logger.setLevel(os.getenv("VERL_LOGGING_LEVEL", "WARN"))

class SWEBenchReward(object):
    """Reward computation for SWE-bench agent trajectories.

    Only ``steps_reward`` and ``memory_reward`` are implemented; the other
    ``*_reward`` hooks are placeholders that return ``None``.  Both
    implemented methods render a reward-model prompt (templates come from
    ``swe_bench_memory``), send it to the chat API via
    ``utils.call_chat_api`` and parse integer dimension scores out of the
    model's Chinese free-text response (e.g. "完整性维度3分").
    """

    # Pre-compiled response patterns, hoisted so they are built once instead
    # of on every call.  Each captured group is the integer before "分".
    _STEPS_SCORE_RE = re.compile(
        r".*?完整性维度(\d+)分.*?高效性维度(\d+)分.*?精准性维度(\d+)分.*?",
        flags=re.DOTALL,
    )
    _TURN_SCORE_RE = re.compile(r"<turn\d+>(\d+)分</turn\d+>")
    _MEMORY_SCORE_RE = re.compile(
        r".*?完整性维度(\d+)分.*?相关性维度(\d+)分.*?清晰性维度(\d+)分.*?行动导向维度(\d+)分.*?一致性维度(\d+)分.*?",
        flags=re.DOTALL,
    )

    def __init__(self, **kwargs):
        # kwargs accepted for interface compatibility; nothing is configured yet.
        self.reward = None

    def format_reward(self, response):
        """Placeholder: format-compliance reward (not implemented)."""
        return

    def response_reward(self, response, reward_client, resp_sys_prompt):
        """Placeholder: response-quality reward (not implemented)."""
        return

    def information_gain_reward(self, response, reward_client, info_gain_sys_prompt):
        """Placeholder: information-gain reward (not implemented)."""
        return

    def action_reward(self, response):
        """Placeholder: action reward (not implemented)."""
        return

    def _call_reward_model(self, prompt, reward_label):
        """Send *prompt* to the chat API and return the response text.

        Returns "" (and logs the exception) on any API failure, so callers
        can treat an empty response as "no scores available".  The model's
        thought segment is discarded.
        """
        try:
            _thought, response = utils.call_chat_api(prompt)
            return response
        except Exception as e:
            logger.exception(f"Error when executing {reward_label} rewarding: {e}")
            return ""

    # add for steps reward enhancement
    def steps_reward(self, task, steps):
        """Score the trajectory *steps* for *task* with the reward model.

        Returns ``(procedure_score, single_step_scores, response)`` where
        ``procedure_score`` is the mean of the completeness / efficiency /
        accuracy dimension scores (0 for any dimension that could not be
        parsed), ``single_step_scores`` is the list of per-turn scores found
        in ``<turnN>X分</turnN>`` tags, and ``response`` is the raw
        reward-model text ("" on API failure).
        """
        prompt = Template(steps_rm_prompt_template).render(task=task, steps=steps)
        response = self._call_reward_model(prompt, "steps")
        completeness_score = efficiency_score = accuracy_score = 0
        single_step_scores = []
        if response:
            match = self._STEPS_SCORE_RE.match(response)
            if match:
                completeness_score = int(match.group(1))
                efficiency_score = int(match.group(2))
                accuracy_score = int(match.group(3))
            # Per-turn scores are parsed independently of the overall match,
            # so a response with only turn tags still yields step scores.
            single_step_scores = [int(s) for s in self._TURN_SCORE_RE.findall(response)]
        scores = [completeness_score, efficiency_score, accuracy_score]
        procedure_score = sum(scores) / len(scores)
        return procedure_score, single_step_scores, response

    def memory_reward(self, task, memory):
        """Score the agent *memory* for *task* with the reward model.

        Returns ``(memory_score, response)`` where ``memory_score`` is the
        mean of the completeness / relevance / clearness / orientation /
        consistency dimension scores (0 for any dimension that could not be
        parsed) and ``response`` is the raw reward-model text ("" on API
        failure).
        """
        prompt = Template(memory_rm_prompt_template).render(task=task, memory=memory)
        response = self._call_reward_model(prompt, "memory")
        completeness_score = relevance_score = clearness_score = 0
        orientation_score = consistency_score = 0
        if response:
            match = self._MEMORY_SCORE_RE.match(response)
            if match:
                completeness_score = int(match.group(1))
                relevance_score = int(match.group(2))
                clearness_score = int(match.group(3))
                orientation_score = int(match.group(4))
                consistency_score = int(match.group(5))
        scores = [
            completeness_score,
            relevance_score,
            clearness_score,
            orientation_score,
            consistency_score,
        ]
        memory_score = sum(scores) / len(scores)
        return memory_score, response
