import re


def extract_xml_answer(text: str) -> str:
    """Extracts the answer from an XML formatted text string.

    Args:
        text: The input text containing XML formatted answers.

    Returns:
        The extracted answer as a string, stripped of any leading/trailing whitespace.
        If no tags are present, the whole input is returned stripped (split on a
        missing separator leaves the text unchanged).
    """
    # Take everything after the last opening tag, then everything before the
    # first closing tag that follows it.
    answer = text.split("<josie_answers>")[-1]
    answer = answer.split("</josie_answers>")[0]
    # BUG FIX: the original returned the bound method `answer.strip` (missing
    # call parentheses), so callers received a function object instead of the
    # stripped string — making every equality/isdigit check downstream fail.
    return answer.strip()

def accuracy_reward_func(prompts, completions, answer, **kwargs) -> list[float]:
    """Calculates reward based on the accuracy of extracted answers compared to ground truth.

    Args:
        prompts: The list of input prompts.
        completions: The model's generated responses for each prompt.
        answer: The ground truth answers to compare against.

    Returns:
        A list of reward scores, where each score is 2.0 if the extracted response matches
        the ground truth answer, and 0.0 otherwise.
    """
    responses = []
    for completion in completions:
        responses.append(completion[0]['content'])
    q = prompts[0][-1]['content']
    extracted_responses = []
    for response in responses:
        extracted_responses.append(extract_xml_answer(response))
    # Log the first sample of the batch for quick manual inspection.
    print('-'*20, f"Question:\n{q}", f"\nAnswer:\n{answer[0]}", f"\nResponse:\n{responses[0]}", f"\nExtracted:\n{extracted_responses[0]}")
    rewards = []
    for extracted, truth in zip(extracted_responses, answer):
        rewards.append(2.0 if extracted == truth else 0.0)
    return rewards

def int_reward_func(completions, **kwargs) -> list[float]:
    """Rewards numerical responses based on whether they are digits.

    Args:
        completions: The model's generated responses.

    Returns:
        A list of reward scores, where each score is 0.5 if the response is a digit,
        and 0.0 otherwise.
    """
    scores = []
    for completion in completions:
        extracted = extract_xml_answer(completion[0]['content'])
        # str.isdigit covers non-negative integers with no sign or separators.
        scores.append(0.5 if extracted.isdigit() else 0.0)
    return scores

def strict_format_reward_func(completions, **kwargs) -> list[float]:
    """Rewards completions that strictly adhere to a specific XML format.

    The required shape is an exact, newline-delimited pair of tag sections:
    ``<josie_thinks>\\n...\\n</josie_thinks>\\n<josie_answers>\\n...\\n</josie_answers>\\n``.

    Args:
        completions: The model's generated responses.

    Returns:
        A list of reward scores, where each score is 0.5 if the response matches
        the specified format, and 0.0 otherwise.
    """
    pattern = r"^<josie_thinks>\n.*?\n</josie_thinks>\n<josie_answers>\n.*?\n</josie_answers>\n$"
    responses = [completion[0]["content"] for completion in completions]
    # BUG FIX: without re.DOTALL, `.` never matches a newline, so any thinking
    # or answer section spanning more than one line could never match the
    # pattern and this reward was always 0.0 for multi-line completions.
    matches = [re.match(pattern, r, re.DOTALL) for r in responses]
    return [0.5 if match else 0.0 for match in matches]

def soft_format_reward_func(completions, **kwargs) -> list[float]:
    """Rewards completions that follow a specific XML format with some flexibility.

    Unlike the strict variant, whitespace between the two tag sections is
    flexible (``\\s*``), but the response must still start with the thinking tag.

    Args:
        completions: The model's generated responses.

    Returns:
        A list of reward scores, where each score is 0.5 if the response matches
        the specified format, and 0.0 otherwise.
    """
    pattern = r"<josie_thinks>.*?</josie_thinks>\s*<josie_answers>.*?</josie_answers>"
    responses = [completion[0]["content"] for completion in completions]
    # BUG FIX: without re.DOTALL, `.` never matches a newline, so multi-line
    # tag contents — the common case for reasoning traces — never matched.
    matches = [re.match(pattern, r, re.DOTALL) for r in responses]
    return [0.5 if match else 0.0 for match in matches]

def count_xml(text) -> float:
    """Calculates a score based on XML formatting correctness and structure.

    Args:
        text: The input XML formatted text.

    Returns:
        A score between 0.0 and 0.5, based on how well the text adheres to the XML format.
    """
    def appears_once(tag):
        # A correctly-placed tag must occur exactly once.
        return text.count(tag) == 1

    def tail_len(tag):
        # Number of characters after the last occurrence of `tag`.
        return len(text.split(tag)[-1])

    score = 0.0
    if appears_once("<josie_thinks>\n"):
        score += 0.125
    if appears_once("\n</josie_thinks>\n"):
        score += 0.125
    if appears_once("\n<josie_answers>\n"):
        score += 0.125
        # Penalize any trailing text after the closing answers tag + newline.
        score -= tail_len("\n</josie_answers>\n") * 0.001
    if appears_once("\n</josie_answers>"):
        score += 0.125
        # Same penalty, allowing one trailing character (the final newline).
        score -= (tail_len("\n</josie_answers>") - 1) * 0.001
    return score

def xmlcount_reward_func(completions, **kwargs) -> list[float]:
    """Calculates reward scores based on XML formatting correctness.

    Args:
        completions: The model's generated responses.

    Returns:
        A list of scores, where each score is calculated using the count_xml function.
    """
    # Score each completion's first message content directly.
    return [count_xml(completion[0]["content"]) for completion in completions]