import re
import time
import regex
import concurrent.futures
import multiprocessing as mp
from multiprocessing import Process, Queue

from mindspeed_llm.tasks.posttrain.verifier.math_eval_toolkit.parser import extract_answer
from mindspeed_llm.tasks.posttrain.verifier.math_eval_toolkit.grader import math_equal


def qwen_math_equal_subprocess(prediction, reference, timeout_seconds=10):
    """Run ``math_equal`` in a child process so a hung comparison cannot block us.

    Returns the boolean comparison result, or False when the child times out
    or produced nothing.
    """

    def _runner(out_q, pred, ref):
        # Ship the comparison result back to the parent through the queue.
        out_q.put(math_equal(prediction=pred, reference=ref, timeout=False))

    result_q = Queue()
    child = Process(target=_runner, args=(result_q, prediction, reference))
    child.start()
    child.join(timeout=timeout_seconds)

    if child.is_alive():
        # Deadline exceeded: kill the child and treat the pair as "not equal".
        child.terminate()
        child.join()
        return False

    try:
        return result_q.get_nowait()
    except Exception:
        return False


def extract_answer_subprocess(model_output, timeout_seconds=10):
    """Run the answer extractor in a child process with a hard time limit.

    Returns the extracted answer string, or "" on timeout / missing result.
    """

    def _runner(out_q, text):
        out_q.put(extract_answer(pred_str=text, data_name="math"))

    result_q = Queue()
    child = Process(target=_runner, args=(result_q, model_output))
    child.start()
    child.join(timeout=timeout_seconds)

    if child.is_alive():
        # Extraction exceeded the deadline: stop it and fall back to empty.
        child.terminate()
        child.join()
        return ""

    try:
        return result_q.get_nowait()
    except Exception:
        return ""


def preprocess_box_response_for_qwen_prompt_call(sequences, answers, **kwargs):
    """Score Qwen-style chat completions against reference answers.

    For each pair: drop everything before the assistant turn, truncate at the
    first stop token, extract the boxed answer, and compare to the reference.

    Scores: 1.0 correct, -0.5 wrong, -1.0 when the output has no "boxed" answer.
    ``kwargs['timeout_seconds']`` (default 2) bounds each answer extraction.
    """
    extraction_timeout = kwargs.get('timeout_seconds', 2)
    scores = []
    for sequence, answer in zip(sequences, answers):
        # Keep only the assistant's turn of the chat transcript.
        model_output = re.sub(r'^.*?<\|im_start\|>assistant', '<|im_start|>assistant', sequence, flags=re.DOTALL,
                              count=1)
        # Cut the output at each stop token that appears.
        for stop_word in ("</s>", "<|im_end|>", "<|endoftext|>"):
            if stop_word in model_output:
                model_output = model_output.split(stop_word)[0].strip()

        predicted = extract_answer_subprocess(model_output=model_output,
                                              timeout_seconds=extraction_timeout)
        score = 1.0 if qwen_math_equal_subprocess(prediction=predicted, reference=answer) else -0.5
        if "boxed" not in model_output:
            score = -1.0
        scores.append(score)

    return scores


# Fan the scoring work out across multiple processes, each handling a slice
# of the sequences.
def preprocess_box_response_for_qwen_prompt(sequences, answers):
    """Parallel wrapper around ``preprocess_box_response_for_qwen_prompt_call``.

    Splits the inputs into contiguous batches, scores each batch in its own
    process, and reassembles the scores in input order. A batch that times out
    (budget: 2 seconds per item) or fails contributes -1.0 per item.

    Fixes vs. the original:
      * each worker gets its OWN result queue — the old shared queue drained
        with ``get_nowait()`` could interleave batches out of order;
      * worker count is clamped to >= 1 (``cpu_count() == 1`` previously
        caused a ZeroDivisionError) and to the number of items;
      * empty input returns [] without spawning processes.
    """

    def worker(q, sequence_batch, answer_batch):
        # Score one batch item-by-item and report the list through the queue.
        results = []
        for seq, ans in zip(sequence_batch, answer_batch):
            result = preprocess_box_response_for_qwen_prompt_call([seq], [ans])
            results.append(result[0])
        q.put(results)

    if not sequences:
        return []

    max_num_workers = 32
    num_workers = max(1, min(mp.cpu_count() - 1, max_num_workers, len(sequences)))
    batch_size = len(sequences) // num_workers

    processes = []
    queues = []
    lengths = []
    for i in range(num_workers):
        start_index = i * batch_size
        # The last worker absorbs the remainder of the division.
        end_index = (i + 1) * batch_size if i < num_workers - 1 else len(sequences)
        lengths.append(end_index - start_index)
        q = Queue()  # dedicated queue keeps results aligned with this batch
        p = Process(target=worker,
                    args=(q, sequences[start_index:end_index], answers[start_index:end_index]))
        queues.append(q)
        processes.append(p)
        p.start()

    results = []
    for i, p in enumerate(processes):
        # Per-batch deadline: 2 seconds per item, as in the original code.
        p.join(timeout=2 * lengths[i])
        if p.is_alive():
            p.terminate()
            p.join()
            results.extend([-1.0] * lengths[i])
        else:
            try:
                results.extend(queues[i].get_nowait())
            except Exception:
                results.extend([-1.0] * lengths[i])
    return results


def orig_base_model_accuracy_reward(sequences, answers, **kwargs):
    """Binary accuracy reward: 1.0 when the extracted answer matches the
    reference, 0.0 otherwise; one score per (sequence, answer) pair."""
    scores = []
    for seq, ref in zip(sequences, answers):
        predicted = extract_answer_subprocess(model_output=seq)
        matched = qwen_math_equal_subprocess(prediction=predicted, reference=ref)
        scores.append(1.0 if matched else 0.0)
    return scores


def process_model_accuracy_reward(item):
    """Score one (sequence, answer) tuple: 1.0 on a match, 0.0 otherwise."""
    sequence, answer = item
    predicted = extract_answer_subprocess(model_output=sequence)
    if qwen_math_equal_subprocess(prediction=predicted, reference=answer):
        return 1.0
    return 0.0


def base_model_accuracy_reward(q, sequences, answers, timeout):
    """Worker-style reward: 1.0 only when the answer matches the reference AND
    the response follows the <think>/<answer> structure, else 0.0.

    Scores are both pushed onto *q* and returned.

    NOTE(review): *timeout* is accepted but never used — presumably kept for a
    uniform worker signature; confirm against the executor.
    """
    scores = []
    for sequence, answer in zip(sequences, answers):
        structured = validate_response_structure(sequence)
        predicted = extract_answer(sequence, data_name='math')
        matched = math_equal(prediction=predicted, reference=answer)
        scores.append(1.0 if matched and structured else 0.0)
    q.put(scores)
    return scores


def format_reward(sequences, **kwargs):
    """Check each completion against the ``<think>...</think><answer>...</answer>`` template.

    Args:
        sequences: List of completion strings to check.

    Returns:
        A list of floats: 1.0 for each completion matching the template,
        0.0 otherwise.

    Raises:
        ValueError: If *sequences* is not a list.
    """
    if not isinstance(sequences, list):
        raise ValueError("Input sequences must be a list.")

    template = re.compile(r"^<think>.*?</think>\s*<answer>.*?</answer>$",
                          re.DOTALL | re.MULTILINE)
    return [1.0 if template.match(text) else 0.0 for text in sequences]


def validate_response_structure(processed_str: str) -> bool:
    """Validate that a response is exactly
    ``<think>...</think><answer>... \\boxed{...} ...</answer>``.

    All of the following must hold:
      * each of ``<think>``, ``</think>``, ``<answer>``, ``</answer>`` and a
        ``\\boxed{...}`` expression occurs exactly once;
      * their first occurrences appear in that order;
      * the string starts with ``<think>`` and ends with ``</answer>``.

    Args:
        processed_str: Processed response string from the model.

    Returns:
        True when every formatting requirement is met.
    """
    validation_passed = True

    # Literal tags, each required exactly once; record first-occurrence index
    # (-1 when absent, which also fails the ordering check below).
    positions = {}
    for tag_name, tag_str in (('think_start', '<think>'),
                              ('think_end', '</think>'),
                              ('answer_start', '<answer>'),
                              ('answer_end', '</answer>')):
        positions[tag_name] = processed_str.find(tag_str)
        if processed_str.count(tag_str) != 1:
            validation_passed = False

    # \boxed{...} is matched with a regex; the original searched twice for the
    # same position — search once and reuse the match object.
    boxed_pattern = r'\\boxed\{.*?\}'
    boxed_match = re.search(boxed_pattern, processed_str)
    positions['boxed_start'] = boxed_match.start() if boxed_match else -1
    if len(re.findall(boxed_pattern, processed_str)) != 1:
        validation_passed = False

    # First occurrences must be in order and bracket the whole string.
    in_order = (positions['think_start'] <= positions['think_end'] <=
                positions['answer_start'] <= positions['boxed_start'] <=
                positions['answer_end'])
    if not (in_order and
            processed_str.startswith('<think>') and
            processed_str.endswith('</answer>')):
        validation_passed = False

    return validation_passed


def strict_format_reward(sequences, **kwargs):
    """Strict structural reward based on ``validate_response_structure``.

    Args:
        sequences: List of completion strings to check.

    Returns:
        A list of floats: 1.0 for each completion whose structure validates,
        -1.0 otherwise.
    """
    return [1.0 if validate_response_structure(text) else -1.0
            for text in sequences]


def reasoning_steps_reward(sequences, **kwargs):
    r"""Reward function that checks for clear step-by-step reasoning.

    Regex pattern:
        Step \d+: - matches "Step 1:", "Step 2:", etc.
        ^\d+\. - matches numbered lists like "1.", "2.", etc. at start of line
        \n- - matches bullet points with hyphens
        \n\* - matches bullet points with asterisks
        First,|Second,|Next,|Finally, - matches transition words

    Returns:
        One score per sequence: (number of step markers) / 3, capped at 1.0.
    """
    pattern = r"(Step \d+:|^\d+\.|\n-|\n\*|First,|Second,|Next,|Finally,)"
    # re.MULTILINE makes ^ match at every line start, as the docstring
    # promises; without it, numbered items after the first line were ignored.
    matches = [len(re.findall(pattern, content, re.MULTILINE)) for content in sequences]

    return [min(1.0, count / 3) for count in matches]


class RuleBasedRMProxy:
    """Rule-based reward scorer.

    Rejects structurally bad responses (garbled characters, heavy repetition,
    missing or excessive ``\\boxed`` answers) and otherwise grades the last
    ``\\boxed{...}`` expression against the label.
    """

    def __init__(self):
        # Patterns are compiled once per proxy instance.
        self.chinese_pattern = re.compile(r'[\u4e00-\u9fff]')
        self.english_pattern = re.compile(r'[a-zA-Z]')
        self.boxed_pattern = re.compile(
            r"\\boxed\{((?:[^{}]|\\{|\\}|(?:\{(?:[^{}]|\\{|\\}|(?:\{(?:[^{}]|\\{|\\}|(?:\{[^{}]*\}))*\}))*\}))*\})")
        self.valid_char_pattern = re.compile(
            r'[a-zA-Z0-9\s\.,!?"\'\(\)\{\}\[\]_\-+=<>/@#$%^&*\\|:;~`\u2200-\u22FF]')
        self.repeat_pattern = re.compile(r'(.{5,}?)\1{4,}')

    def check_mixed_languages(self, text):
        # Substantial amounts of BOTH Chinese and English count as "mixed".
        cjk_count = len(self.chinese_pattern.findall(text))
        latin_count = len(self.english_pattern.findall(text))
        return cjk_count >= 20 and latin_count >= 20

    def check_garbled_characters(self, text):
        # Share of characters outside the allowed alphabet; empty text passes.
        leftover = self.valid_char_pattern.sub('', text)
        if not text:
            return False
        return len(leftover) / len(text) > 0.3

    def has_repeated_patterns(self, text):
        # True when some chunk of 5+ chars repeats 5 or more times in a row.
        return self.repeat_pattern.search(text) is not None

    def correctness_score(self, label, response):
        # Grade the LAST boxed expression; a missing boxed answer scores -1.0.
        boxed = self.boxed_pattern.findall(response)
        if not boxed:
            return -1.0

        prediction = boxed[-1][:-1]  # capture group includes the closing '}'
        if math_equal(label, prediction):
            return 1.0
        return -0.5

    def split_and_score(self, response, label):
        # Cheap structural rejections first, then the expensive math check.
        if "\\boxed" not in response or response.count("\\boxed") >= 5:
            return -1.0
        if self.check_garbled_characters(response):
            return -1.0
        if self.has_repeated_patterns(response):
            return -1.0
        return self.correctness_score(label, response)

    def split_and_score_mp(self, response, label, timeout_n):
        """Run ``split_and_score`` in a child process with a hard deadline."""

        def _scorer(out_q, proxy, resp, lab):
            out_q.put(proxy.split_and_score(resp, lab))

        result_q = Queue()
        child = Process(target=_scorer, args=(result_q, self, response, label))
        child.start()
        child.join(timeout=timeout_n)

        if child.is_alive():
            print("Process timeout, terminate the process")
            child.terminate()
            child.join()
            return -1.0
        try:
            return result_q.get_nowait()
        except Exception:
            return -1.0


# Worker entry point: scores its slice of the sequences and reports the
# results back through the queue.
def func_from_jiaoda(q, sequences, answers, timeout_seconds=4):
    """Score (sequence, answer) pairs with ``RuleBasedRMProxy``.

    Each pair is graded in its own timed subprocess; the score list is both
    pushed onto *q* (for the multiprocess executor) and returned.
    """
    proxy = RuleBasedRMProxy()
    scores = [proxy.split_and_score_mp(seq, ans, timeout_seconds)
              for seq, ans in zip(sequences, answers)]
    q.put(scores)
    return scores

def multiprocee_executor(worker, sequences, answers, timeout_seconds=10, max_num_workers=32, timeout_rewward=0.0):
    """Fan *sequences*/*answers* out over worker processes; gather scores in order.

    Each process runs ``worker(q, sequence_batch, answer_batch, timeout_seconds)``
    and must put ONE list of scores (same length as its batch) onto ``q``.
    Batches that time out or fail contribute ``timeout_rewward`` per item.

    Fixes vs. the original:
      * worker count clamped to >= 1 (``cpu_count() == 1`` previously caused a
        ZeroDivisionError) and to the number of items;
      * a terminated child is now ``join()``-ed so it cannot linger as a zombie;
      * results are fetched with a short grace timeout — a child that just
        exited may still be flushing its queue, and ``get_nowait()`` could
        spuriously miss its data.

    NOTE(review): the function and parameter names carry typos
    (``multiprocee``, ``rewward``) but are kept for caller compatibility.
    """
    num_workers = max(1, min(mp.cpu_count() - 1, max_num_workers, len(sequences) or 1))
    batch_size = len(sequences) // num_workers
    processes = []
    lengths = []

    task_q_list = []
    print('batch size is:', batch_size, 'num worker is:', num_workers)
    for i in range(num_workers):
        start_index = i * batch_size
        # The last worker absorbs the remainder of the division.
        end_index = (i + 1) * batch_size if i < num_workers - 1 else len(sequences)
        lengths.append(end_index - start_index)
        sequence_batch = sequences[start_index:end_index]
        answer_batch = answers[start_index:end_index]
        q = Queue()  # dedicated queue per process keeps results batch-aligned
        p = Process(target=worker, args=(q, sequence_batch, answer_batch, timeout_seconds))
        processes.append(p)
        task_q_list.append(q)
        p.start()

    results = []
    for i, p in enumerate(processes):
        p.join(timeout=timeout_seconds)
        if p.is_alive():
            p.terminate()
            p.join()
            results.extend([timeout_rewward] * lengths[i])
            print(f'The process has timeouted, return a list of {timeout_rewward} with list size {lengths[i]} at {i}-th, total process {len(processes)}')
        else:
            try:
                results.extend(task_q_list[i].get(timeout=1))
            except Exception:
                results.extend([timeout_rewward] * lengths[i])

    return results

def _test_rule_verifier():
    """Smoke test: run every verifier on one worked parallelogram example."""
    text = ["""<think>\nFirst, we use the property of a parallelogram that opposite sides are equal in length. 
    Therefore, we have:\n\\[AB = CD \\quad \\text{and} \\quad BC = AD\\]\n\nFrom the given measurements:\n\\
    [AB = 38 \\text{ cm}\\]\n\\[BC = 3y^3 \\text{ cm}\\]\n\\[CD = 2x + 4 \\text{ cm}\\]\n\\[AD = 24 \\text{ cm}\\]\n\n
    Setting \\(AB = CD\\), we get:\n\\[38 = 2x + 4\\]\n\nSetting \\(BC = AD\\), we get:\n\\[3y^3 = 24\\]\n\nNow, 
    we solve each equation for \\(x\\) and \\(y\\).\n</think>\n<answer>\nFirst, solve for \\(x\\):\n\\[38 = 2x + 4\\]
    \n\\[38 - 4 = 2x\\]\n\\[34 = 2x\\]\n\\[x = \\frac{34}{2} = 17\\]\n\nNext, solve for \\(y\\):\n\\[3y^3 = 24\\]\n\\
    [y^3 = \\frac{24}{3} = 8\\]\n\\[y = \\sqrt[3]{8} = 2\\]\n\nNow, find the product of \\(x\\) and \\(y\\):\n\\[xy = 
    17 \\times 2 = 34\\]\n\nTherefore, the product of \\(x\\) and \\(y\\) is \\(\\boxed{34}\\).</answer>"""]

    label = ['34']
    print('reward_verifier=', preprocess_box_response_for_qwen_prompt(text, label))
    print('format_verifier=', format_reward(text))
    print('strict_format_verifier=', strict_format_reward(text))
    print('step_verifier=', reasoning_steps_reward(text))
    # Fix: func_from_jiaoda(q, sequences, answers) takes a result queue as its
    # FIRST argument; the old call passed `text` as the queue and crashed with
    # a TypeError before any scoring happened.
    result_q = Queue()
    print('func_from_jiaoda=', func_from_jiaoda(result_q, text, label))

def _test_rule_verifier_file(response_file, label_file):
    """Load responses and labels (one per line) from files and time the
    multiprocess executor over them."""
    with open(response_file, 'r') as fp:
        response = [line for line in fp]
    with open(label_file, 'r') as fp:
        labels = [line for line in fp]

    t_begin = time.time()
    print("tests start:")
    start = 0
    for i in range(1):
        rest = multiprocee_executor(func_from_jiaoda, response[start:], labels[start:])
        print('func_from_jiaoda=', len(rest))
        print(f'At {i}-th time:')
    t_end = time.time()
    print(f"total time of func from jiaoda: {(t_end - t_begin)}s")


# Run the inline smoke test when this module is executed directly.
if __name__ == "__main__":
    _test_rule_verifier()
