import json
import re
from typing import Dict, Optional, Tuple

from verl.utils.reward_score.prime_code.testing_util import *

def extract_code_snippet(solution_str: str):
    """Pull the final <answer> span and the longest ```python fence from a response.

    Args:
        solution_str: Raw response string from the language model.

    Returns:
        Tuple of (extracted_code, extracted_answer, processed_string); the first
        two are None when no <answer>...</answer> span is present, and
        extracted_code is None when no ```python fence exists.
    """
    # Normalize: drop anything before the first <think> tag, then re-prepend it.
    # When the tag is absent, the whole string is kept unchanged.
    if "<think>" in solution_str:
        body = solution_str.split("<think>", 1)[1]
    else:
        body = solution_str
    processed_str = "<think>" + body.strip()

    # Collect every <answer>...</answer> span; the model's final answer is the last one.
    answer_spans = re.findall(r'<answer>(.*?)</answer>', processed_str, re.DOTALL)
    if not answer_spans:
        return None, None, processed_str

    final_answer = answer_spans[-1].strip()

    # Among all fenced python blocks, keep the longest (first on ties).
    fences = re.findall(r'```python(.*?)```', processed_str, re.DOTALL)
    extracted_code = max(fences, key=len).strip() if fences else None

    return extracted_code, final_answer, processed_str

def validate_response_structure(processed_str: str) -> bool:
    """Check that a response follows the <think>...</think><answer>...</answer><|endoftext|> template.

    Args:
        processed_str: Processed response string from the model.

    Returns:
        True when the string is exactly a <think> block, then an <answer>
        block, then the literal <|endoftext|> token (whitespace allowed
        between sections); False otherwise.
    """
    # BUG FIX: the pipes in <|endoftext|> must be escaped — unescaped '|' is
    # regex alternation, which silently split the old pattern into three
    # unrelated alternatives and never matched the end-of-text token literally.
    pattern = r"^<think>.*?</think>\s*<answer>.*?</answer>\s*<\|endoftext\|>$"
    return bool(re.match(pattern, processed_str, re.DOTALL))

def compute_scores(
    data_source: str,
    solution_str: str,
    ground_truth: Dict[str, str],
) -> float:
    """Compute the total reward for a model response on a code task.

    The total is a format reward (+1.0 when the response follows the
    <think>/<answer>/<|endoftext|> template, -1.0 otherwise) plus an answer
    reward in [0.0, 2.0] proportional to the fraction of test cases passed.

    Args:
        data_source: Identifier of the originating dataset (not used here).
        solution_str: Raw model response string.
        ground_truth: Test-case specification with 'inputs', 'outputs' and
            optionally 'fn_name'.  NOTE(review): despite the Dict annotation,
            this value is passed to json.loads, so callers actually supply a
            JSON-encoded string — confirm and fix the annotation upstream.

    Returns:
        Total score (format reward + answer reward).
    """
    # Extract the fenced code / final answer and the normalized response text.
    return_code, final_answer, processed_str = extract_code_snippet(solution_str)

    # Format reward: +1 for a well-formed response, -1 otherwise.
    format_correct = validate_response_structure(processed_str)
    format_score = 1.0 if format_correct else -1.0

    # Answer reward: only evaluated when the format is valid and code was found.
    answer_score = 0.0
    if format_correct and return_code:
        test_cases = json.loads(ground_truth)

        # Call-based problems carry a function name; stdin/stdout problems do
        # not.  Use .get so a missing 'fn_name' key falls through to the
        # stdin/stdout branch instead of raising KeyError (which escaped the
        # try below and crashed scoring).  Only the first 10 cases are run to
        # bound evaluation time.
        fn_name = test_cases.get('fn_name')
        if fn_name:
            payload = {
                "fn_name": fn_name,
                "inputs": process_in(test_cases['inputs'], True)[:10],
                "outputs": process_ot(test_cases['outputs'], True)[:10],
            }
        else:
            payload = {
                "inputs": process_in(test_cases['inputs'], False)[:10],
                "outputs": process_ot(test_cases['outputs'], False)[:10],
            }
        sample = {"input_output": json.dumps(payload)}

        try:
            # check_correctness returns per-test results; True marks a pass.
            # Keep '== True' (not 'is True') so truthy 1-results still count,
            # matching the original lambda's semantics.
            results, _ = check_correctness(sample, return_code, 2, debug=False)
            answer_score = 2 * (sum(r == True for r in results) / len(results))
        except Exception:
            # Best-effort: any sandbox/runtime failure (or empty result list)
            # yields no answer reward rather than aborting training.
            answer_score = 0.0

    return format_score + answer_score