# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/hendrycks_math/utils.py
import re
import numpy as np

def math_compute_score(solution_str, ground_truth) -> float:
    """Return 1.0 if the last \\boxed{...} answer in *solution_str* matches
    *ground_truth* under LaTeX normalization, else 0.0."""
    score = 0.0
    try:
        boxed = last_boxed_only_string(solution_str)
        if boxed is not None:
            extracted = remove_boxed(boxed)
            if is_equiv(extracted, ground_truth):
                score = 1.0
    except Exception as err:
        # Extraction/normalization failures count as an incorrect answer.
        print(err)
    return score


# string normalization from https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/hendrycks_math.py
def is_equiv(str1, str2, verbose=False):
    """Compare two answer strings after LaTeX normalization.

    Both None counts as equivalent (with a warning); one-sided None does not.
    Falls back to raw string equality if normalization raises.
    """
    if str1 is None:
        if str2 is None:
            print("WARNING: Both None")
            return True
        return False
    if str2 is None:
        return False

    try:
        norm1 = strip_string(str1)
        norm2 = strip_string(str2)
        if verbose:
            print(norm1, norm2)
        return norm1 == norm2
    except Exception:
        # Normalization blew up — compare the raw strings instead.
        return str1 == str2


def remove_boxed(s):
    """Strip the surrounding \\boxed wrapper from *s*.

    Handles both the space form ("\\boxed 5" -> "5") and the brace form
    ("\\boxed{5}" -> "5"). Asserts that *s* actually starts with the wrapper.
    """
    space_form = "\\boxed "
    if space_form in s:
        assert s[: len(space_form)] == space_form
        return s[len(space_form) :]

    brace_form = "\\boxed{"
    assert s[: len(brace_form)] == brace_form
    assert s[-1] == "}"
    # Drop the leading "\boxed{" and the trailing "}".
    return s[len(brace_form) : -1]


def last_boxed_only_string(string):
    """Return the last "\\boxed{...}" (or "\\fbox{...}") substring, or None.

    The space-delimited form "\\boxed ..." is cut at the next '$' instead of
    brace matching.
    """
    start = string.rfind("\\boxed")
    # Space-delimited form: take everything after the last "\boxed " up to '$'.
    if "\\boxed " in string:
        return "\\boxed " + string.split("\\boxed ")[-1].split("$")[0]
    if start < 0:
        start = string.rfind("\\fbox")
        if start < 0:
            return None

    # Scan forward, tracking brace depth, to find the matching close brace.
    depth = 0
    end = None
    for pos in range(start, len(string)):
        ch = string[pos]
        if ch == "{":
            depth += 1
        elif ch == "}":
            depth -= 1
            if depth == 0:
                end = pos
                break

    return None if end is None else string[start : end + 1]


def fix_fracs(string):
    """Brace bare \\frac arguments: \\frac12 -> \\frac{1}{2}, \\frac1{72} -> \\frac{1}{72}.

    Returns the input unchanged when a fragment is too short to rewrite.
    """
    fragments = string.split("\\frac")
    rebuilt = fragments[0]
    for fragment in fragments[1:]:
        rebuilt += "\\frac"
        if fragment[0] == "{":
            # Numerator already braced — keep the fragment verbatim.
            rebuilt += fragment
            continue
        if len(fragment) < 2:
            # Not enough characters for numerator + denominator; bail out.
            return string
        numerator = fragment[0]
        denominator = fragment[1]
        remainder = fragment[2:]
        if denominator == "{":
            # Denominator already braced, e.g. \frac1{72}.
            rebuilt += "{" + numerator + "}" + denominator + remainder
        else:
            # Neither braced, e.g. \frac12.
            rebuilt += "{" + numerator + "}{" + denominator + "}" + remainder
    return rebuilt


def fix_a_slash_b(string):
    """Rewrite a bare integer fraction "a/b" as "\\frac{a}{b}".

    Anything that is not exactly two integer halves round-tripping through
    int() is returned unchanged (e.g. "a/b", "01/2", "1/2/3").
    """
    halves = string.split("/")
    if len(halves) != 2:
        return string
    numerator_str, denominator_str = halves
    try:
        numerator = int(numerator_str)
        denominator = int(denominator_str)
        # Reject inputs like "01/2" whose canonical form differs.
        assert string == "{}/{}".format(numerator, denominator)
        return "\\frac{" + str(numerator) + "}{" + str(denominator) + "}"
    except Exception:
        return string


def remove_right_units(string):
    """Drop a trailing "\\text{ ...}" unit annotation, keeping what precedes it.

    "\\text{ " only ever occurs (at least in the val set) when describing
    units, so at most one occurrence is expected.
    """
    if "\\text{ " not in string:
        return string
    pieces = string.split("\\text{ ")
    assert len(pieces) == 2
    return pieces[0]


def fix_sqrt(string):
    """Brace bare \\sqrt arguments: \\sqrt3 -> \\sqrt{3}; braced forms pass through."""
    if "\\sqrt" not in string:
        return string
    pieces = string.split("\\sqrt")
    out = [pieces[0]]
    for piece in pieces[1:]:
        if piece[0] == "{":
            # Argument already braced — keep as-is.
            out.append("\\sqrt" + piece)
        else:
            # Wrap the single character following \sqrt in braces.
            out.append("\\sqrt{" + piece[0] + "}" + piece[1:])
    return "".join(out)


def strip_string(string):
    """Normalize an answer string before equality comparison.

    Two regimes:

    * Multiple-choice answers — if the input reduces to pure letters after
      stripping brackets/commas/spaces, it is canonicalized to sorted
      upper-case letters, so "(b, a)" and "AB" compare equal.
    * Anything else is treated as a math expression and run through the
      LaTeX normalization pipeline adapted from the EleutherAI
      hendrycks_math harness.
    """
    # Step 1: build a throwaway copy with common separators removed, used
    # only to decide whether this looks like a multiple-choice answer.
    temp_str = string.strip()
    # Strip ASCII and fullwidth brackets, commas, and spaces.
    for char in ["(", ")", "（", "）", ",", "，", " "]:
        temp_str = temp_str.replace(char, "")

    # Step 2: purely alphabetic leftovers are assumed to be a choice answer.
    if temp_str and temp_str.isalpha():
        # Upper-case and sort so multi-select order doesn't matter (BA -> AB).
        temp_str = temp_str.upper()
        return "".join(sorted(temp_str))

    # Step 3: math expression — all operations below run on the *unmodified*
    # original `string`, not the separator-stripped copy.

    # linebreaks
    string = string.replace("\n", "")

    # remove inverse spaces
    string = string.replace("\\!", "")

    # replace \\ with \
    string = string.replace("\\\\", "\\")

    # replace tfrac and dfrac with frac
    string = string.replace("tfrac", "frac")
    string = string.replace("dfrac", "frac")

    # remove \left and \right
    string = string.replace("\\left", "")
    string = string.replace("\\right", "")

    # Remove circ (degrees)
    string = string.replace("^{\\circ}", "")
    string = string.replace("^\\circ", "")

    # remove dollar signs
    string = string.replace("\\$", "")

    # remove units (on the right)
    string = remove_right_units(string)

    # remove percentage
    # NOTE: the original also did string.replace("\%", ""); "\%" is an
    # invalid escape (SyntaxWarning on Python 3.12+) that evaluates to the
    # same two characters as "\\%", so that second replace was a no-op
    # and has been removed.
    string = string.replace("\\%", "")

    # " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string
    string = string.replace(" .", " 0.")
    string = string.replace("{.", "{0.")
    # if empty, return empty string
    if len(string) == 0:
        return string
    if string[0] == ".":
        string = "0" + string

    # to consider: get rid of e.g. "k = " or "q = " at beginning
    if len(string.split("=")) == 2 and len(string.split("=")[0]) <= 2:
        string = string.split("=")[1]

    # fix sqrt3 --> sqrt{3}
    string = fix_sqrt(string)

    # remove spaces within mathematical expressions
    string = string.replace(" ", "")

    # \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1).
    string = fix_fracs(string)

    # manually change 0.5 --> \frac{1}{2}
    if string == "0.5":
        string = "\\frac{1}{2}"

    # NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y
    string = fix_a_slash_b(string)

    return string




def compute_score(data_source: str, solution_str: str, ground_truth: str, extra_info=None, history_dict=None) -> dict:
    """Compute reward score for model solutions with robust handling of various formats.

    The primary reward is accuracy (weight 1.0): whether the last
    \\boxed{...} answer in ``solution_str`` matches ``ground_truth``. The
    result dict also carries per-tool success/failure statistics parsed from
    <tool_call>/<tool_response> tags in the rollout.

    Args:
        data_source: Name of the dataset this sample came from (echoed back).
        solution_str: Full model rollout text, including tool transcripts.
        ground_truth: Reference answer string.
        extra_info: Optional dict updated in place with summary metrics
            (backward compatibility).
        history_dict: Unused; retained for interface compatibility.

    Returns:
        dict whose 'score' entry is the reward for GRPO training, plus
        accuracy flags, tool-usage counts, and truncated debug previews.
    """
    # Accuracy reward extracted from the final boxed answer.
    acc_reward = math_compute_score(solution_str, ground_truth)

    # Any tool interaction at all? Look for tool_call/tool_response tags.
    has_tool_usage = bool(
        re.search(r"<tool_call>.*?</tool_call>", solution_str, re.DOTALL)
        or re.search(r"<tool_response>.*?</tool_response>", solution_str, re.DOTALL)
    )
    # BUGFIX: the original pattern used "tool:*?" — ':*?' means zero or more
    # literal colons, so it only matched when "</tool_response>" immediately
    # followed the colon(s). ":.*?" correctly spans the error text up to the
    # closing tag.
    uncounted_failures = re.findall(
        r"<tool_response>.*?Error when executing tool:.*?</tool_response>",
        solution_str,
        re.DOTALL,
    )
    # Count tool calls and responses.
    num_of_tool_calls = len(re.findall(r"<tool_call>.*?</tool_call>", solution_str, re.DOTALL))
    num_of_tool_responses = len(re.findall(r"<tool_response>.*?</tool_response>", solution_str, re.DOTALL))

    # Success messages emitted by each tool on a successful invocation.
    successful_patterns = {
        'general': r"Successfully use the tool!",
        'zoom': r"Successfully use the tool! Zoomed in on the image to the region",
        'depth': r"Successfully use the tool! Generated the depth map",
        'sam': r"Successfully use the tool! Segmented the image",
        "llmdet": r"Successfully use the tool! Detected objects",
        "edge": r"Successfully use the tool! Detected edges",
        "ocr": r"Successfully use the tool! Extracted text and layout information from the image:",
    }
    success_counts = {
        tool_type: len(re.findall(
            rf"<tool_response>.*?{pattern}.*?</tool_response>",
            solution_str,
            re.DOTALL,
        ))
        for tool_type, pattern in successful_patterns.items()
    }

    # Known error messages emitted by each tool on failure.
    failure_patterns = {
        'depth': [
            r"Error during depth estimation:",
            r"Error: Depth estimation API returned no results",
        ],
        'sam': [
            r"Error during image segmentation:",
            r"Error: Segmentation API returned no results"
        ],
        'zoom': [
            r"Error during image crop:",
            r"Error: Bbox",
            r"Error: bbox_2d must be a list of 4 numbers",
            r"Error: Cropping operation failed",
            r"Error: The aspect ratio of the original image is too extreme"
        ],
        "llmdet": [
            r"Error during object detection:",
            r"Error: LLMDet API returned no results.",
            r"Error: 'text' parameter must be a non-empty list of strings."
        ],
        "edge": [
            r"Error: Edge detection operation failed.",
            r"Error during edge detection:"
        ],
        "ocr": [
            r"Error: OCR API returned no results.",
            r"Error during OCR processing:"
        ],
    }
    failure_counts = {
        tool_type: sum(
            len(re.findall(
                rf"<tool_response>.*?{pattern}.*?</tool_response>",
                solution_str,
                re.DOTALL,
            ))
            for pattern in patterns
        )
        for tool_type, patterns in failure_patterns.items()
    }
    # Total invocations per tool. Keyed by failure_patterns (as originally),
    # so 'general' — which has no failure patterns — is not totalled.
    total_counts = {
        tool_type: success_counts.get(tool_type, 0) + failure_counts.get(tool_type, 0)
        for tool_type in failure_counts
    }

    # A rollout only counts as valid tool interaction with >=1 call AND response.
    has_valid_tool_interaction = num_of_tool_calls >= 1 and num_of_tool_responses >= 1
    success = {
        key: count >= 1 and has_valid_tool_interaction
        for key, count in success_counts.items()
    }

    final_score = acc_reward

    # Count how many times each image_id was referenced in tool responses.
    image_ids_dict = {}  # {image_id: occurrence count}
    for img_id in re.findall(r"with image_id:\s*(\d+)", solution_str):
        image_ids_dict[img_id] = image_ids_dict.get(img_id, 0) + 1

    # Prepare result dictionary with score and additional metrics.
    result = {
        'score': final_score,  # Primary reward score for GRPO training

        # Classification metrics for validation
        'accuracy': 1.0 if acc_reward > 0.5 else 0.0,
        'is_correct': 1.0 if acc_reward > 0.5 else 0.0,

        # Component rewards for analysis
        'accuracy_reward': acc_reward,
        'successful_usage': success,
        "successful_usage_times": success_counts,
        "uncounted_failures": len(uncounted_failures),
        "failed_usage_times": failure_counts,
        "total_usage_times": total_counts,
        # Quality indicators
        'tool_used': 1.0 if has_tool_usage else 0.0,
        # Debug information (truncated)
        'solution_preview': solution_str[:200],
        'data_source': data_source,
        "image_id_usage_counts": image_ids_dict,
    }

    # Store additional information in extra_info if provided (backward compatibility).
    if extra_info is not None:
        extra_info.update({
            'acc': 1.0 if acc_reward > 0.5 else 0.0,
            'tool_used': 1.0 if has_tool_usage else 0.0,
            # 'general' is always a key of successful_patterns, so this is safe.
            "successful_tool_usage": 1.0 if success_counts["general"] else 0.0,
            'raw_solution': solution_str[:200],
        })

    return result


if __name__ == "__main__":
    # Smoke test: score two canned rollouts, first single-threaded, then with
    # a 2-worker thread pool, and compare wall-clock times.
    import time
    from concurrent.futures import ThreadPoolExecutor, as_completed
    
    # Define test cases
    test_cases = [
        {
            "name": "Test Case 1: With depth tool",
            "predict_str": "<think>\nI need to determine which object, the black color leather armchair or the gray color modern side table, is closer to the abstract painting in a black frame.\n</think>\n<tool_call>\n{\"name\": \"estimate_depth\", \"arguments\": {\"image_id\": 0}}\n</tool_call>\n<think>\nThe tool `estimate_depth` can help in understanding the 3D depth of objects in the image. Although it's not pixel perfect, it will give a rough idea of the distances between objects.\n</think>user\n<tool_response>\nSuccessfully use the tool! Generated the depth map.with image_id: 0\n</tool_response>\nassistant\n<think>\nBased on the depth map, I can estimate that the gray color modern side table (C) is closer to the abstract painting in a black frame than the black color leather armchair (B).\n</think>\n<thinking>\nThe gray color modern side table appears closer to the abstract painting in the depth map, indicating it's likely closer in the real world as well.\n</thinking>\n<answer>\nThe gray color modern side table is closer to the abstract painting in a black frame.\n\\boxed{B}\n</answer>",
            "ground_truth": "B",
            "extra_info": {
                "answer": "B",
                "id": 0,
                "image": "/cpfs/user/honglingyi/DATA/LLM/Vstar/gqa/images/713270.jpg",
                "question": "Find the area of the figure. Round to the nearest tenth if necessary.",
            }
        },
        {
            "name": "Test Case 2: Multiple failed tool calls",
            "predict_str": "<think>\nThe image shows a room with a desk, chair, and a window on the right side. There is also a dark, rectangle-shaped object on the wall, which appears to be a TV screen, to the left of the window. The TV screen is labeled as 'A' in the image. The window is labeled as 'B'. The dark rectangle object to the left of the window is indeed on the wall, not the window itself.\n\nThus, the answer is False, but the question asks for the letter, and the letter 'A' corresponds to the TV screen. The correct interpretation based on the question's phrasing and the image is that the rectangle on the wall is indeed to the left of the window.\n\n</think>\n<tool_call>\n{\"name\": \"answer\", \"arguments\": {\"letter\": \"A\"}}\n</tool_call>\n\\boxed{A}user\n<tool_response>\nError executing tool: Error when executing tool: Tool 'answer' not found.\n</tool_response>\nassistant\n<think>\nThe image and the question provided suggest there is a dark rectangle object on the wall to the left of the window. The window is labeled as 'B' and the rectangle on the wall, which is likely the TV screen, is labeled as 'A'. The question asks if the rectangle (dark shape on the wall) is to the left of the window.\n\nFrom the visual, it is clear that the TV screen 'A' is to the left of the window 'B'. The answer to the question is 'yes'.\n\n</think>\n<tool_call>\n{\"name\": \"answer\", \"arguments\": {\"text\": \"yes\"}}\n</tool_call>\n\\boxed{B}user\n<tool_response>\nError executing tool: Error when executing tool: Tool 'answer' not found.\n</tool_response>\nassistant\n<think>\nThe image shows a room with a dark rectangle object labeled 'A' which appears to be a TV screen. The window is labeled 'B' and is clearly to the right of the TV screen. 
Therefore, the rectangle (dark shape on the wall) is indeed to the left of the window.\n\nSo, the final answer is 'yes', and the corresponding letter is 'B'.\n\n</think>\n<tool_call>\n{\"name\": \"answer\", \"arguments\": {\"text\": \"yes\"}}\n</tool_call>\n\\boxed{B}user\n<tool_response>\nError executing tool: Error when executing tool: Tool 'answer' not found.\n</tool_response>\nassistant\n<think>\nGiven the analysis of the image, the dark rectangle shaped object labeled 'A' on the wall is to the left of the window labeled 'B'. Therefore, the answer to the question \"Is dark rectangle window to the left of Window?\" is 'yes'.\n\n</think>\n<tool_call>\n{\"name\": \"answer\", \"arguments\": {\"text\": \"yes\"}}\n</tool_call>\n\\boxed{B}user\n<tool_response>\nError executing tool: Error when executing tool: Tool 'answer' not found.\n</tool_response>\nassistant\n<think>\nThe analysis of the image shows clearly that the dark rectangle labeled 'A' is to the left of the window labeled 'B'. Therefore, the answer to the question \"Is dark rectangle window to the left of Window?\" is 'yes'.\n\n</think>\n<tool_call>\n{\"name\": \"answer\", \"arguments\": {\"text\": \"yes\"}}\n</tool_call>\n\\boxed{B}",
            "ground_truth": "(B)",
            "extra_info": {
                "answer": "(B)",
                "id": 0,
                "image": "/cpfs/user/honglingyi/DATA/LLM/Vstar/gqa/images/713270.jpg",
                "question": "Is the color of the sock red?\n",
            }
        }
    ]
    
    def run_test_case(test_case):
        """Run a single test case and return results"""
        # NOTE(review): prints may interleave when called from the thread pool.
        print(f"\n=== {test_case['name']} ===")
        time_start = time.time()
        score = compute_score(
            "common_reasoning", 
            test_case["predict_str"], 
            test_case["ground_truth"], 
            test_case["extra_info"]
        )
        time_end = time.time()
        elapsed = time_end - time_start
        
        result = {
            "name": test_case["name"],
            "score": score,
            "time": elapsed
        }
        print(f"Score: {score}")
        print(f"Time: {elapsed:.4f}s")
        return result
    
    # Single-threaded execution
    print("=" * 60)
    print("SINGLE-THREADED EXECUTION")
    print("=" * 60)
    single_thread_start = time.time()
    single_results = []
    for test_case in test_cases:
        single_results.append(run_test_case(test_case))
    single_thread_time = time.time() - single_thread_start
    
    # Multi-threaded execution
    print("\n" + "=" * 60)
    print("MULTI-THREADED EXECUTION (2 threads)")
    print("=" * 60)
    multi_thread_start = time.time()
    multi_results = []
    
    with ThreadPoolExecutor(max_workers=2) as executor:
        futures = {executor.submit(run_test_case, test_case): test_case for test_case in test_cases}
        for future in as_completed(futures):
            try:
                result = future.result()
                multi_results.append(result)
            except Exception as e:
                print(f"Test case failed with exception: {e}")
    
    multi_thread_time = time.time() - multi_thread_start
    
    # Summary
    print("\n" + "=" * 60)
    print("SUMMARY")
    print("=" * 60)
    print(f"Single-threaded total time: {single_thread_time:.4f}s")
    print(f"Multi-threaded total time: {multi_thread_time:.4f}s")
    # NOTE(review): divides by multi_thread_time — a near-zero elapsed time
    # would make the speedup figure meaningless (ZeroDivisionError is unlikely
    # but possible on very coarse clocks).
    print(f"Speedup: {single_thread_time / multi_thread_time:.2f}x")